v3.5.6 (kernel/trace/trace_stack.c)
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
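/*
 * Stack tracer: hooks every traced function via ftrace and records the
 * deepest kernel stack usage seen so far.  The result is exported through
 * the debugfs files stack_max_size (deepest usage in bytes, writable to
 * reset) and stack_trace (the backtrace that produced that maximum).
 */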
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

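/*
 * stack_dump_trace[] holds the return addresses of the deepest stack seen;
 * stack_dump_index[] holds, for each entry, how many bytes of stack were
 * in use at that frame.  Unused slots stay at ULONG_MAX.
 */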
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

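/*
 * Measure how much of the current thread stack is in use.  If this beats
 * the recorded maximum, take max_stack_lock, save a fresh backtrace into
 * max_stack_trace, then walk the stack words to compute a byte depth for
 * each saved entry.
 */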
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

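	/*
	 * The thread stack is THREAD_SIZE aligned, so the low bits of the
	 * address of a local variable give its offset from the base of the
	 * stack; subtracting from THREAD_SIZE yields the bytes currently in
	 * use (the stack grows down).
	 */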
	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

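/*
 * The ftrace callback, invoked on entry to every traced kernel function.
 * The per-CPU trace_active counter keeps check_stack() from recursing if
 * anything it calls is itself traced.
 */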
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

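/*
 * debugfs "stack_max_size": reading returns the deepest stack usage seen;
 * writing (typically 0) resets it.  The writer bumps trace_active so that
 * taking max_stack_lock cannot recurse into the tracer.
 */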
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

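/*
 * seq_file iterator for the debugfs "stack_trace" file.  The position
 * (minus one for the header line) indexes stack_dump_trace[]; it is
 * stashed in m->private so t_show() can recover it.  t_start()/t_stop()
 * hold max_stack_lock with trace_active bumped, so the snapshot cannot
 * change mid-dump.
 */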
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};

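/*
 * sysctl handler for /proc/sys/kernel/stack_tracer_enabled: register or
 * unregister the ftrace callback when the value actually changes.
 */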
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

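/*
 * Handle the "stacktrace" boot parameter.  Because __setup() matches on
 * the prefix, this handler also sees "stacktrace_filter=..." with str
 * pointing at the remainder; the filter string is stashed for
 * stack_trace_init() to apply once ftrace is up.
 */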
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

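/*
 * Create the debugfs files, apply any boot-time filter, and register the
 * ftrace callback if the boot parameter enabled us.
 */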
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
v3.1 (kernel/trace/trace_stack.c)
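The same file as it appeared in v3.1. This older version registers its ftrace_ops with FTRACE_OPS_FL_GLOBAL and predates both the stack_trace_filter debugfs interface and the stacktrace_filter= boot parameter.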
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static __init int enable_stacktrace(char *str)
{
	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);