// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/security.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];

static unsigned int stack_trace_nr_entries;
static unsigned long stack_trace_max_size;
static arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;

static void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size      Location    (%d entries)\n"
		 "        -----    ----      --------\n",
		 stack_trace_nr_entries);

	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (i + 1 == stack_trace_nr_entries)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}

/*
 * The stack tracer looks for a new maximum stack usage at each call into a
 * traced function. It registers a callback with ftrace, and in that callback
 * it examines the stack size. It determines the stack size from the variable
 * passed in, which is the address of a local variable in the
 * stack_trace_call() callback function. The stack size is calculated as the
 * distance from the address of that local variable to the top of the current
 * stack. If that size is smaller than the currently saved max stack size,
 * nothing more is done.
 *
 * If the size of the stack is greater than the maximum recorded size, then
 * the following algorithm takes place.
 *
 * For architectures (like x86) that store the function's return address
 * before saving the function's local variables, the stack will look something
 * like this:
 *
 *	[ top of stack ]
 *	 0:   sys call entry frame
 *	10:   return addr to entry code
 *	11:   start of sys_foo frame
 *	20:   return addr to sys_foo
 *	21:   start of kernel_func_bar frame
 *	30:   return addr to kernel_func_bar
 *	31:   [ do trace stack here ]
 *
 * stack_trace_save() is then called, returning all the return addresses it
 * finds in the current stack, which would be (from the bottom of the stack
 * to the top):
 *
 *	return addr to kernel_func_bar
 *	return addr to sys_foo
 *	return addr to entry code
 *
 * Now, to figure out the size of each function's local variables, a search of
 * the stack is made to find these values. When a match is made, it is added
 * to the stack_dump_trace[] array. The offset into the stack is saved in the
 * stack_trace_index[] array. The above example would show:
 *
 *        stack_dump_trace[]        |   stack_trace_index[]
 *        ------------------        +   -------------------
 *  return addr to kernel_func_bar  |          30
 *  return addr to sys_foo          |          20
 *  return addr to entry            |          10
 *
 * The print_max_stack() function above uses these values to print the size
 * of each function's portion of the stack.
 *
 *	for (i = 0; i < nr_entries; i++) {
 *		size = i == nr_entries - 1 ? stack_trace_index[i] :
 *			stack_trace_index[i] - stack_trace_index[i+1]
 *		print "%d %d %d %s\n", i, stack_trace_index[i], size, stack_dump_trace[i]);
 *	}
 *
 * The above shows
 *
 *	depth   size   location
 *	-----   ----   --------
 *	  0      30      10     kernel_func_bar
 *	  1      20      10     sys_foo
 *	  2      10      10     entry code
 *
 * Now for architectures that might save the return address after the
 * function's local variables (saving the link register before calling nested
 * functions), this will cause the stack to look a little different:
 *
 *	[ top of stack ]
 *	 0:   sys call entry frame
 *	10:   start of sys_foo frame
 *	19:   return addr to entry code << lr saved before calling kernel_func_bar
 *	20:   start of kernel_func_bar frame
 *	29:   return addr to sys_foo << lr saved before calling next function
 *	30:   [ do trace stack here ]
 *
 * Although the functions returned by stack_trace_save() may be the same, the
 * placement in the stack will be different. Using the same algorithm as above
 * would yield:
 *
 *        stack_dump_trace[]        |   stack_trace_index[]
 *        ------------------        +   -------------------
 *  return addr to kernel_func_bar  |          30
 *  return addr to sys_foo          |          29
 *  return addr to entry            |          19
 *
 * Where the mapping is off by one:
 *
 *	kernel_func_bar stack frame size is 29 - 19, not 30 - 29!
 *
 * To fix this, if the architecture defines ARCH_FTRACE_SHIFT_STACK_TRACER
 * (i.e. the return address is saved after the local variables), the values
 * in stack_trace_index[] are shifted by one and the number of stack trace
 * entries is decremented by one.
 *
 *        stack_dump_trace[]        |   stack_trace_index[]
 *        ------------------        +   -------------------
 *  return addr to kernel_func_bar  |          29
 *  return addr to sys_foo          |          19
 *
 * Although the entry function is not displayed, the first function (sys_foo)
 * still accounts for the entry function's stack usage in its reported size.
 */
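/*
 * The table arithmetic described above can also be exercised outside the
 * kernel.  The following is a minimal, hypothetical userspace sketch (not
 * part of this file and never compiled here); the sample values are made up
 * to match the tables in the comment above, and it only demonstrates how
 * stack_trace_index[] and stack_dump_trace[] turn into the Depth/Size table,
 * including the one-entry shift used when the return address is saved after
 * the local variables:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		const char *dump[] = { "kernel_func_bar", "sys_foo", "entry" };
 *		unsigned int index[] = { 30, 20, 10 };
 *		unsigned int nr = 3, i;
 *
 *		// Same arithmetic as print_max_stack() and t_show()
 *		for (i = 0; i < nr; i++) {
 *			unsigned int size = (i + 1 == nr) ? index[i] :
 *					    index[i] - index[i + 1];
 *			printf("%3u) %8u   %5u   %s\n", i, index[i], size, dump[i]);
 *		}
 *
 *		// Link-register style layout: shift the offsets and drop the
 *		// last entry, as the ARCH_FTRACE_SHIFT_STACK_TRACER block
 *		// in check_stack() below does with memmove().
 *		unsigned int lr_index[] = { 30, 29, 19 };
 *		memmove(&lr_index[0], &lr_index[1], sizeof(lr_index[0]) * (nr - 1));
 *		nr--;
 *		for (i = 0; i < nr; i++)
 *			printf("%s at offset %u\n", dump[i], lr_index[i]);
 *		return 0;
 *	}
 */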
static void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

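	/*
	 * Worked example for the calculation below (illustrative only,
	 * assuming THREAD_SIZE is 16K): if the local variable whose address
	 * was passed in sits at an offset of 0x3d80 within the thread stack,
	 * then
	 *
	 *	this_size = 0x4000 - 0x3d80 = 0x280
	 *
	 * i.e. 640 bytes of stack are currently in use.
	 */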
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
						  ARRAY_SIZE(stack_dump_trace) - 1,
						  0);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_nr_entries; p++) {
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

#ifdef ARCH_FTRACE_SHIFT_STACK_TRACER
	/*
	 * Some archs will store the link register before calling
	 * nested functions. This means the saved return address
	 * comes after the local storage, and we need to shift
	 * for that.
	 */
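	/*
	 * With the link-register layout from the comment at the top of this
	 * file, the search loop above would leave x == 3 and
	 * stack_trace_index[] == { 30, 29, 19 }; the memmove() below turns
	 * that into { 29, 19 } with x == 2, so kernel_func_bar's frame size
	 * comes out as 29 - 19, matching the tables in that comment.
	 */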
	if (x > 1) {
		memmove(&stack_trace_index[0], &stack_trace_index[1],
			sizeof(stack_trace_index[0]) * (x - 1));
		x--;
	}
#endif

	stack_trace_nr_entries = x;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

286static void
287stack_trace_call(unsigned long ip, unsigned long parent_ip,
288 struct ftrace_ops *op, struct pt_regs *pt_regs)
289{
290 unsigned long stack;
291
292 preempt_disable_notrace();
293
294 /* no atomic needed, we only modify this variable by this cpu */
295 __this_cpu_inc(disable_stack_tracer);
296 if (__this_cpu_read(disable_stack_tracer) != 1)
297 goto out;
298
299 /* If rcu is not watching, then save stack trace can fail */
300 if (!rcu_is_watching())
301 goto out;
302
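	/*
	 * The ip reported by ftrace is the address of the mcount/fentry
	 * call site in the traced function, while the entries saved by
	 * stack_trace_save() hold the corresponding return address.  Adjust
	 * ip by MCOUNT_INSN_SIZE so check_stack() can match it against the
	 * saved trace and skip the stack tracer's own overhead.
	 */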
	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

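/*
 * tracefs interface for the recorded maximum.  Typical use from user space
 * (an illustrative session; the value shown is made up and the path assumes
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *	# cat /sys/kernel/tracing/stack_max_size
 *	4048
 *	# echo 0 > /sys/kernel/tracing/stack_max_size
 *
 * Writing a value (typically 0) resets the recorded maximum so that a new
 * watermark can be captured.
 */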
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we would cause a circular lock, so we also need to increase
	 * the percpu disable_stack_tracer here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
	.llseek = default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= stack_trace_nr_entries)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "# Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

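/*
 * A populated "stack_trace" file is rendered by t_show() below and looks
 * roughly like this (the entries, offsets and sizes are illustrative only):
 *
 *        Depth    Size      Location    (3 entries)
 *        -----    ----      --------
 *  0)      520     160   kernel_func_bar+0x42/0x90
 *  1)      360     152   sys_foo+0x23/0x80
 *  2)      208     208   entry_SYSCALL_64_after_hwframe+0x44/0xa9
 */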
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size      Location"
			   "    (%d entries)\n"
			   "        -----    ----      --------\n",
			   stack_trace_nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_nr_entries)
		return 0;

	if (i + 1 == stack_trace_nr_entries)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	/* Checks for tracefs lockdown */
	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int was_enabled;
	int ret;

	mutex_lock(&stack_sysctl_mutex);
	was_enabled = !!stack_tracer_enabled;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
		goto out;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);
 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

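/*
 * Boot-time handling: "stacktrace" on the kernel command line enables the
 * tracer at boot, and "stacktrace_filter=<function-list>" additionally
 * restricts the callback to the listed functions, e.g. (function names
 * purely illustrative):
 *
 *	stacktrace_filter=ksys_read,ksys_write
 */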
static __init int enable_stacktrace(char *str)
{
	int len;

	if ((len = str_has_prefix(str, "_filter=")))
		strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			  &stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			  NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0644, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);