// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hash table of unique stack traces. Since
 * alloc and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot, so we store them contiguously,
 * one after another, in a contiguous chunk of memory.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
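
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * debugging-tool caller captures a trace with stack_trace_save(), stores it
 * once via stack_depot_save(), keeps only the 32-bit handle, and expands it
 * again with stack_depot_fetch() when a report is printed. The buffer size
 * and GFP flags below are arbitrary example values.
 *
 *	unsigned long entries[64];
 *	unsigned long *saved;
 *	unsigned int nr, saved_nr;
 *	depot_stack_handle_t handle;
 *
 *	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr, GFP_NOWAIT);
 *	...
 *	saved_nr = stack_depot_fetch(handle, &saved);
 *	if (saved_nr)
 *		stack_trace_print(saved, saved_nr, 0);
 */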

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - \
		STACK_ALLOC_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
		u32 extra : STACK_DEPOT_EXTRA_BITS;
	};
};
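
/*
 * Handle layout sketch (illustrative, assuming 4 KiB pages and the
 * STACK_DEPOT_EXTRA_BITS value from <linux/stackdepot.h>): with
 * STACK_ALLOC_ORDER = 2 and STACK_ALLOC_ALIGN = 4, the offset field has
 * 2 + 12 - 4 = 10 bits and addresses 16-byte-aligned slots inside a 16 KiB
 * slab; one valid bit guards against a zero handle; the remaining bits of
 * the 32-bit handle are split between the slab index and the caller-provided
 * extra bits.
 */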

struct stack_record {
	struct stack_record *next;	/* Link in the hash table */
	u32 hash;			/* Hash in the hash table */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries */
};

static bool __stack_depot_want_early_init __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);

unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };

	return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release() pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	stack->handle.extra = 0;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	depot_offset += required_size;

	return stack;
}

/* One hash table bucket entry per 16 kB of memory. */
#define STACK_HASH_SCALE	14
/* Limited between 4k and 1M buckets. */
#define STACK_HASH_ORDER_MIN	12
#define STACK_HASH_ORDER_MAX	20
#define STACK_HASH_SEED 0x9747b28c

static unsigned int stack_hash_order;
static unsigned int stack_hash_mask;

static bool stack_depot_disable;
static struct stack_record **stack_table;

static int __init is_stack_depot_disabled(char *str)
{
	int ret;

	ret = kstrtobool(str, &stack_depot_disable);
	if (!ret && stack_depot_disable) {
		pr_info("Stack Depot is disabled\n");
		stack_table = NULL;
	}
	return 0;
}
early_param("stack_depot_disable", is_stack_depot_disabled);
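
/*
 * Example (illustrative): stack depot can be turned off at boot time by
 * passing "stack_depot_disable=on" on the kernel command line; kstrtobool()
 * also accepts the usual "1"/"y" spellings.
 */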

void __init stack_depot_want_early_init(void)
{
	/* Too late to request early init now. */
	WARN_ON(__stack_depot_early_init_passed);

	__stack_depot_want_early_init = true;
}

int __init stack_depot_early_init(void)
{
	unsigned long entries = 0;

	/* This is supposed to be called only once, from mm_init(). */
	if (WARN_ON(__stack_depot_early_init_passed))
		return 0;

	__stack_depot_early_init_passed = true;

	if (kasan_enabled() && !stack_hash_order)
		stack_hash_order = STACK_HASH_ORDER_MAX;

	if (!__stack_depot_want_early_init || stack_depot_disable)
		return 0;

	if (stack_hash_order)
		entries = 1UL << stack_hash_order;
	stack_table = alloc_large_system_hash("stackdepot",
						sizeof(struct stack_record *),
						entries,
						STACK_HASH_SCALE,
						HASH_EARLY | HASH_ZERO,
						NULL,
						&stack_hash_mask,
						1UL << STACK_HASH_ORDER_MIN,
						1UL << STACK_HASH_ORDER_MAX);

	if (!stack_table) {
		pr_err("Stack Depot hash table allocation failed, disabling\n");
		stack_depot_disable = true;
		return -ENOMEM;
	}

	return 0;
}

int stack_depot_init(void)
{
	static DEFINE_MUTEX(stack_depot_init_mutex);
	int ret = 0;

	mutex_lock(&stack_depot_init_mutex);
	if (!stack_depot_disable && !stack_table) {
		unsigned long entries;
		int scale = STACK_HASH_SCALE;

		if (stack_hash_order) {
			entries = 1UL << stack_hash_order;
		} else {
			entries = nr_free_buffer_pages();
			entries = roundup_pow_of_two(entries);

			if (scale > PAGE_SHIFT)
				entries >>= (scale - PAGE_SHIFT);
			else
				entries <<= (PAGE_SHIFT - scale);
		}

		if (entries < 1UL << STACK_HASH_ORDER_MIN)
			entries = 1UL << STACK_HASH_ORDER_MIN;
		if (entries > 1UL << STACK_HASH_ORDER_MAX)
			entries = 1UL << STACK_HASH_ORDER_MAX;

		pr_info("Stack Depot allocating hash table of %lu entries with kvcalloc\n",
			entries);
		stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
		if (!stack_table) {
			pr_err("Stack Depot hash table allocation failed, disabling\n");
			stack_depot_disable = true;
			ret = -ENOMEM;
		}
		stack_hash_mask = entries - 1;
	}
	mutex_unlock(&stack_depot_init_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
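
/*
 * Sizing sketch (illustrative, assuming 4 KiB pages): with STACK_HASH_SCALE
 * of 14, a machine where nr_free_buffer_pages() reports roughly 8 GiB of low
 * memory (about 2^21 pages) gets 2^21 >> (14 - 12) = 2^19 buckets, i.e. one
 * bucket per 16 kB as the comment above STACK_HASH_SCALE says, clamped to
 * the [2^STACK_HASH_ORDER_MIN, 2^STACK_HASH_ORDER_MAX] range. On a 64-bit
 * kernel that is 2^19 * 8 bytes = 4 MiB of bucket pointers.
 */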

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}

/*
 * Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_snprint - print stack entries from a depot into a buffer
 *
 * @handle:	Stack depot handle which was returned from
 *		stack_depot_save().
 * @buf:	Pointer to the print buffer
 * @size:	Size of the print buffer
 * @spaces:	Number of leading spaces to print
 *
 * Return:	Number of bytes printed.
 */
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
			int spaces)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
						spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);

/**
 * stack_depot_print - print stack entries from a depot
 *
 * @stack:	Stack depot handle which was returned from
 *		stack_depot_save().
 */
void stack_depot_print(depot_stack_handle_t stack)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	if (nr_entries > 0)
		stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:	Stack depot handle which was returned from
 *		stack_depot_save().
 * @entries:	Pointer to store the entries address
 *
 * Return:	The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (!handle)
		return 0;

	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

/**
 * __stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @extra_bits:		Flags to store in unused bits of depot_stack_handle_t
 * @alloc_flags:	Allocation gfp flags
 * @can_alloc:		Allocate stack slabs (increased chance of failure if false)
 *
 * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc
 * is %true, it is allowed to replenish the stack slab pool in case no space
 * is left (allocates using GFP flags of @alloc_flags). If @can_alloc is
 * %false, it avoids any allocations and fails if no space is left to store
 * the stack trace.
 *
 * If the stack trace in @entries is from an interrupt, only the portion up to
 * interrupt entry is saved.
 *
 * Additional opaque flags can be passed in @extra_bits, stored in the unused
 * bits of the stack handle, and retrieved using stack_depot_get_extra_bits()
 * without calling stack_depot_fetch().
 *
 * Context: Any context, but setting @can_alloc to %false is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case from contexts where neither %GFP_ATOMIC nor
 *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
					unsigned int nr_entries,
					unsigned int extra_bits,
					gfp_t alloc_flags, bool can_alloc)
{
	struct stack_record *found = NULL, **bucket;
	union handle_parts retval = { .handle = 0 };
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stackdepot growth.
	 *
	 * Because use of filter_irq_stacks() is a requirement to ensure
	 * stackdepot can efficiently deduplicate interrupt stacks, always
	 * call filter_irq_stacks() here to simplify all callers' use of
	 * stackdepot.
	 */
	nr_entries = filter_irq_stacks(entries, nr_entries);

	if (unlikely(nr_entries == 0) || stack_depot_disable)
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & stack_hash_mask];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be
	 * initialized. If so, allocate the memory now - we won't be able to
	 * do that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(can_alloc && !smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);

		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval.handle = found->handle.handle;
fast_exit:
	retval.extra = extra_bits;

	return retval.handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
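
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a user
 * can stash a small tag in the otherwise unused handle bits and recover it
 * later without touching the stored trace. MY_TAG is a made-up constant that
 * must fit into STACK_DEPOT_EXTRA_BITS, and @can_alloc is false here on the
 * assumption that the caller runs in a context where alloc_pages() must be
 * avoided.
 *
 *	depot_stack_handle_t handle;
 *	unsigned int tag;
 *
 *	handle = __stack_depot_save(entries, nr_entries, MY_TAG,
 *				    GFP_NOWAIT, false);
 *	tag = stack_depot_get_extra_bits(handle);
 *	(tag now equals MY_TAG; the stored trace is unaffected.)
 */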

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Context: Contexts where allocations via alloc_pages() are allowed.
 *          See __stack_depot_save() for more details.
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);