/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
 * and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from depot, so we store them contiguously one after
 * another in a contiguous memory allocation.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 */

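/*
 * Illustrative usage sketch (not part of the original file): a debugging
 * tool can record the current call stack once and keep only the u32 handle
 * per tracked object. struct stack_trace and save_stack_trace() come from
 * <linux/stacktrace.h>; the tracked_object structure and
 * record_alloc_stack() helper below are hypothetical.
 *
 *	struct tracked_object {
 *		depot_stack_handle_t alloc_handle;
 *	};
 *
 *	static void record_alloc_stack(struct tracked_object *obj)
 *	{
 *		unsigned long entries[16];
 *		struct stack_trace trace = {
 *			.nr_entries	= 0,
 *			.max_entries	= ARRAY_SIZE(entries),
 *			.entries	= entries,
 *			.skip		= 0,
 *		};
 *
 *		save_stack_trace(&trace);
 *		obj->alloc_handle = depot_save_stack(&trace, GFP_NOWAIT);
 *	}
 */
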
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 1024
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};
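
/*
 * Worked example (assuming 4 KB pages, i.e. PAGE_SHIFT == 12, and a 32-bit
 * depot_stack_handle_t): STACK_ALLOC_OFFSET_BITS is 2 + 12 - 4 = 10, so a
 * handle packs 10 bits of 16-byte-aligned offset within a slab, one "valid"
 * bit that keeps a real handle from ever being all-zero, and the remaining
 * 21 bits of slab index (further capped at STACK_ALLOC_SLABS_CAP slabs).
 */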

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[1];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;
static int next_slab_inited;
static size_t depot_offset;
static DEFINE_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
	} else {
		stack_slabs[depot_index + 1] = *prealloc;
		/*
		 * This smp_store_release() pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in depot_save_stack().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	*prealloc = NULL;
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	int required_size = offsetof(struct stack_record, entries) +
		sizeof(unsigned long) * size;
	struct stack_record *stack;

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in depot_save_stack() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, size * sizeof(unsigned long));
	depot_offset += required_size;

	return stack;
}

#define STACK_HASH_ORDER 20
#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c

static struct stack_record *stack_table[STACK_HASH_SIZE] = {
	[0 ...	STACK_HASH_SIZE - 1] = NULL
};
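
/*
 * Note: with STACK_HASH_ORDER == 20 this table holds 2^20 bucket pointers,
 * i.e. 8 MB of static storage with 64-bit pointers (4 MB with 32-bit ones).
 */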

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
			       size * sizeof(unsigned long) / sizeof(u32),
			       STACK_HASH_SEED);
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					     unsigned long *entries, int size,
					     u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !memcmp(entries, found->entries,
			    size * sizeof(unsigned long))) {
			return found;
		}
	}
	return NULL;
}

void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
{
	union handle_parts parts = { .handle = handle };
	void *slab = stack_slabs[parts.slabindex];
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack = slab + offset;

	trace->nr_entries = trace->max_entries = stack->size;
	trace->entries = stack->entries;
	trace->skip = 0;
}

/**
 * depot_save_stack - save stack in a stack depot.
 * @trace: the stacktrace to save.
 * @alloc_flags: flags for allocating additional memory if required.
 *
 * Returns the handle of the stack struct stored in depot.
 */
depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
				    gfp_t alloc_flags)
{
	u32 hash;
	depot_stack_handle_t retval = 0;
	struct stack_record *found = NULL, **bucket;
	unsigned long flags;
	struct page *page = NULL;
	void *prealloc = NULL;

	if (unlikely(trace->nr_entries == 0))
		goto fast_exit;

	hash = hash_stack(trace->entries, trace->nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), trace->entries,
			   trace->nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab needs to be
	 * initialized. If so, allocate the memory - we won't be able to do
	 * that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(trace->entries, trace->nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
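
/*
 * Illustrative counterpart to the sketch near the top of the file (not part
 * of the original file): turning a stored handle back into a printable
 * trace. struct stack_trace and print_stack_trace() come from
 * <linux/stacktrace.h>; report_alloc_stack() is a hypothetical caller. A
 * zero handle means depot_save_stack() failed or was never called.
 *
 *	static void report_alloc_stack(depot_stack_handle_t handle)
 *	{
 *		struct stack_trace trace;
 *
 *		if (!handle)
 *			return;
 *		depot_fetch_stack(handle, &trace);
 *		print_stack_trace(&trace, 0);
 *	}
 */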