v3.15
  1/*
  2 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  3 *
  4 */
  5#include <linux/stacktrace.h>
  6#include <linux/kallsyms.h>
  7#include <linux/seq_file.h>
  8#include <linux/spinlock.h>
  9#include <linux/uaccess.h>
 10#include <linux/debugfs.h>
 11#include <linux/ftrace.h>
 12#include <linux/module.h>
 13#include <linux/sysctl.h>
 14#include <linux/init.h>
 15#include <linux/fs.h>
 16#include <linux/magic.h>
 17
 18#include <asm/setup.h>
 19
 20#include "trace.h"
 21
 22#define STACK_TRACE_ENTRIES 500
 23
 24#ifdef CC_USING_FENTRY
 25# define fentry		1
 26#else
 27# define fentry		0
 28#endif
 29
 30static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
 31	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
 32static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
 33
 34/*
 35 * Reserve one entry for the passed in ip. This will allow
 36 * us to remove most or all of the stack size overhead
 37 * added by the stack tracer itself.
 38 */
 39static struct stack_trace max_stack_trace = {
 40	.max_entries		= STACK_TRACE_ENTRIES - 1,
 41	.entries		= &stack_dump_trace[1],
 42};
 43
 44static unsigned long max_stack_size;
 45static arch_spinlock_t max_stack_lock =
 46	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 47
 48static DEFINE_PER_CPU(int, trace_active);
 49static DEFINE_MUTEX(stack_sysctl_mutex);
 50
 51int stack_tracer_enabled;
 52static int last_stack_tracer_enabled;
 53
 54static inline void
 55check_stack(unsigned long ip, unsigned long *stack)
 56{
 57	unsigned long this_size, flags;
 58	unsigned long *p, *top, *start;
 59	static int tracer_frame;
 60	int frame_size = ACCESS_ONCE(tracer_frame);
 61	int i;
 62
 63	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
 64	this_size = THREAD_SIZE - this_size;
 65	/* Remove the frame of the tracer */
 66	this_size -= frame_size;
 67
 68	if (this_size <= max_stack_size)
 69		return;
 70
 71	/* we do not handle interrupt stacks yet */
 72	if (!object_is_on_stack(stack))
 73		return;
 74
 75	local_irq_save(flags);
 76	arch_spin_lock(&max_stack_lock);
 77
 78	/* In case another CPU set the tracer_frame on us */
 79	if (unlikely(!frame_size))
 80		this_size -= tracer_frame;
 81
 82	/* a race could have already updated it */
 83	if (this_size <= max_stack_size)
 84		goto out;
 85
 86	max_stack_size = this_size;
 87
 88	max_stack_trace.nr_entries	= 0;
 89	max_stack_trace.skip		= 3;
 90
 91	save_stack_trace(&max_stack_trace);
 92
 93	/*
 94	 * Add the passed in ip from the function tracer.
 95	 * Searching for this on the stack will skip over
 96	 * most of the overhead from the stack tracer itself.
 97	 */
 98	stack_dump_trace[0] = ip;
 99	max_stack_trace.nr_entries++;
100
101	/*
102	 * Now find where in the stack these are.
103	 */
104	i = 0;
105	start = stack;
106	top = (unsigned long *)
107		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
108
109	/*
110	 * Loop through all the entries. One of the entries may
111	 * for some reason be missed on the stack, so we may
112	 * have to account for them. If they are all there, this
113	 * loop will only happen once. This code only takes place
114	 * on a new max, so it is far from a fast path.
115	 */
116	while (i < max_stack_trace.nr_entries) {
117		int found = 0;
118
119		stack_dump_index[i] = this_size;
120		p = start;
121
122		for (; p < top && i < max_stack_trace.nr_entries; p++) {
123			if (*p == stack_dump_trace[i]) {
124				this_size = stack_dump_index[i++] =
125					(top - p) * sizeof(unsigned long);
126				found = 1;
127				/* Start the search from here */
128				start = p + 1;
129				/*
130				 * We do not want to show the overhead
131				 * of the stack tracer stack in the
132				 * max stack. If we haven't figured
133				 * out what that is, then figure it out
134				 * now.
135				 */
136				if (unlikely(!tracer_frame) && i == 1) {
137					tracer_frame = (p - stack) *
138						sizeof(unsigned long);
139					max_stack_size -= tracer_frame;
140				}
141			}
142		}
143
144		if (!found)
145			i++;
146	}
147
148	BUG_ON(current != &init_task &&
149		*(end_of_stack(current)) != STACK_END_MAGIC);
150 out:
151	arch_spin_unlock(&max_stack_lock);
152	local_irq_restore(flags);
153}
154
155static void
156stack_trace_call(unsigned long ip, unsigned long parent_ip,
157		 struct ftrace_ops *op, struct pt_regs *pt_regs)
158{
159	unsigned long stack;
160	int cpu;
161
162	preempt_disable_notrace();
163
164	cpu = raw_smp_processor_id();
165	/* no atomic needed, we only modify this variable by this cpu */
166	if (per_cpu(trace_active, cpu)++ != 0)
167		goto out;
168
169	/*
170	 * When fentry is used, the traced function does not get
171	 * its stack frame set up, and we lose the parent.
172	 * The ip is pretty useless because the function tracer
173	 * was called before that function set up its stack frame.
174	 * In this case, we use the parent ip.
175	 *
176	 * By adding the return address of either the parent ip
177	 * or the current ip we can disregard most of the stack usage
178	 * caused by the stack tracer itself.
179	 *
180	 * The function tracer always reports the address of where the
181	 * mcount call was, but the stack will hold the return address.
182	 */
183	if (fentry)
184		ip = parent_ip;
185	else
186		ip += MCOUNT_INSN_SIZE;
187
188	check_stack(ip, &stack);
189
190 out:
191	per_cpu(trace_active, cpu)--;
192	/* prevent recursion in schedule */
193	preempt_enable_notrace();
194}
195
196static struct ftrace_ops trace_ops __read_mostly =
197{
198	.func = stack_trace_call,
199	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
200};
201
202static ssize_t
203stack_max_size_read(struct file *filp, char __user *ubuf,
204		    size_t count, loff_t *ppos)
205{
206	unsigned long *ptr = filp->private_data;
207	char buf[64];
208	int r;
209
210	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
211	if (r > sizeof(buf))
212		r = sizeof(buf);
213	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
214}
215
216static ssize_t
217stack_max_size_write(struct file *filp, const char __user *ubuf,
218		     size_t count, loff_t *ppos)
219{
220	long *ptr = filp->private_data;
221	unsigned long val, flags;
222	int ret;
223	int cpu;
224
225	ret = kstrtoul_from_user(ubuf, count, 10, &val);
226	if (ret)
227		return ret;
228
229	local_irq_save(flags);
230
231	/*
232	 * In case we trace inside arch_spin_lock() or after (NMI),
233	 * we will cause circular lock, so we also need to increase
234	 * the percpu trace_active here.
235	 */
236	cpu = smp_processor_id();
237	per_cpu(trace_active, cpu)++;
238
239	arch_spin_lock(&max_stack_lock);
240	*ptr = val;
241	arch_spin_unlock(&max_stack_lock);
242
243	per_cpu(trace_active, cpu)--;
244	local_irq_restore(flags);
245
246	return count;
247}
248
249static const struct file_operations stack_max_size_fops = {
250	.open		= tracing_open_generic,
251	.read		= stack_max_size_read,
252	.write		= stack_max_size_write,
253	.llseek		= default_llseek,
254};
255
256static void *
257__next(struct seq_file *m, loff_t *pos)
258{
259	long n = *pos - 1;
260
261	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
262		return NULL;
263
264	m->private = (void *)n;
265	return &m->private;
266}
267
268static void *
269t_next(struct seq_file *m, void *v, loff_t *pos)
270{
271	(*pos)++;
272	return __next(m, pos);
273}
274
275static void *t_start(struct seq_file *m, loff_t *pos)
276{
277	int cpu;
278
279	local_irq_disable();
280
281	cpu = smp_processor_id();
282	per_cpu(trace_active, cpu)++;
283
284	arch_spin_lock(&max_stack_lock);
285
286	if (*pos == 0)
287		return SEQ_START_TOKEN;
288
289	return __next(m, pos);
290}
291
292static void t_stop(struct seq_file *m, void *p)
293{
294	int cpu;
295
296	arch_spin_unlock(&max_stack_lock);
297
298	cpu = smp_processor_id();
299	per_cpu(trace_active, cpu)--;
300
301	local_irq_enable();
302}
303
304static int trace_lookup_stack(struct seq_file *m, long i)
305{
306	unsigned long addr = stack_dump_trace[i];
307
308	return seq_printf(m, "%pS\n", (void *)addr);
309}
310
311static void print_disabled(struct seq_file *m)
312{
313	seq_puts(m, "#\n"
314		 "#  Stack tracer disabled\n"
315		 "#\n"
316		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
317		 "# kernel command line\n"
318		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
319		 "#\n");
320}
321
322static int t_show(struct seq_file *m, void *v)
323{
324	long i;
325	int size;
326
327	if (v == SEQ_START_TOKEN) {
328		seq_printf(m, "        Depth    Size   Location"
329			   "    (%d entries)\n"
330			   "        -----    ----   --------\n",
331			   max_stack_trace.nr_entries - 1);
332
333		if (!stack_tracer_enabled && !max_stack_size)
334			print_disabled(m);
335
336		return 0;
337	}
338
339	i = *(long *)v;
340
341	if (i >= max_stack_trace.nr_entries ||
342	    stack_dump_trace[i] == ULONG_MAX)
343		return 0;
344
345	if (i+1 == max_stack_trace.nr_entries ||
346	    stack_dump_trace[i+1] == ULONG_MAX)
347		size = stack_dump_index[i];
348	else
349		size = stack_dump_index[i] - stack_dump_index[i+1];
350
351	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);
352
353	trace_lookup_stack(m, i);
354
355	return 0;
356}
357
358static const struct seq_operations stack_trace_seq_ops = {
359	.start		= t_start,
360	.next		= t_next,
361	.stop		= t_stop,
362	.show		= t_show,
363};
364
365static int stack_trace_open(struct inode *inode, struct file *file)
366{
367	return seq_open(file, &stack_trace_seq_ops);
368}
369
370static const struct file_operations stack_trace_fops = {
371	.open		= stack_trace_open,
372	.read		= seq_read,
373	.llseek		= seq_lseek,
374	.release	= seq_release,
375};
376
377static int
378stack_trace_filter_open(struct inode *inode, struct file *file)
379{
380	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
381				 inode, file);
382}
383
384static const struct file_operations stack_trace_filter_fops = {
385	.open = stack_trace_filter_open,
386	.read = seq_read,
387	.write = ftrace_filter_write,
388	.llseek = tracing_lseek,
389	.release = ftrace_regex_release,
390};
391
392int
393stack_trace_sysctl(struct ctl_table *table, int write,
394		   void __user *buffer, size_t *lenp,
395		   loff_t *ppos)
396{
397	int ret;
398
399	mutex_lock(&stack_sysctl_mutex);
400
401	ret = proc_dointvec(table, write, buffer, lenp, ppos);
402
403	if (ret || !write ||
404	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
405		goto out;
406
407	last_stack_tracer_enabled = !!stack_tracer_enabled;
408
409	if (stack_tracer_enabled)
410		register_ftrace_function(&trace_ops);
411	else
412		unregister_ftrace_function(&trace_ops);
413
414 out:
415	mutex_unlock(&stack_sysctl_mutex);
416	return ret;
417}
418
419static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;
420
421static __init int enable_stacktrace(char *str)
422{
423	if (strncmp(str, "_filter=", 8) == 0)
424		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);
425
426	stack_tracer_enabled = 1;
427	last_stack_tracer_enabled = 1;
428	return 1;
429}
430__setup("stacktrace", enable_stacktrace);
431
432static __init int stack_trace_init(void)
433{
434	struct dentry *d_tracer;
435
436	d_tracer = tracing_init_dentry();
437	if (!d_tracer)
438		return 0;
439
440	trace_create_file("stack_max_size", 0644, d_tracer,
441			&max_stack_size, &stack_max_size_fops);
442
443	trace_create_file("stack_trace", 0444, d_tracer,
444			NULL, &stack_trace_fops);
445
446	trace_create_file("stack_trace_filter", 0444, d_tracer,
447			NULL, &stack_trace_filter_fops);
448
449	if (stack_trace_filter_buf[0])
450		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);
451
452	if (stack_tracer_enabled)
453		register_ftrace_function(&trace_ops);
454
455	return 0;
456}
457
458device_initcall(stack_trace_init);
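The debugfs files and the sysctl set up by stack_trace_init() and stack_trace_sysctl() above are the tracer's user-space interface. A minimal user-space sketch of driving them, assuming debugfs is mounted at /sys/kernel/debug and the kernel was built with CONFIG_STACK_TRACER (both are assumptions, not shown in this file):

#include <stdio.h>
#include <stdlib.h>

/* Assumed paths: the sysctl knob and the tracing debugfs directory. */
#define ENABLE_KNOB	"/proc/sys/kernel/stack_tracer_enabled"
#define STACK_TRACE	"/sys/kernel/debug/tracing/stack_trace"

int main(void)
{
	FILE *f;
	char line[256];

	/* Enable the stack tracer (same effect as the sysctl handled above). */
	f = fopen(ENABLE_KNOB, "w");
	if (!f) {
		perror(ENABLE_KNOB);
		return EXIT_FAILURE;
	}
	fputs("1\n", f);
	fclose(f);

	/* Dump the deepest stack seen so far, as formatted by t_show(). */
	f = fopen(STACK_TRACE, "r");
	if (!f) {
		perror(STACK_TRACE);
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	return EXIT_SUCCESS;
}

The same knob can also be set at boot via the "stacktrace" command line parameter handled by enable_stacktrace() above. The older v3.5.6 listing of the same file follows for comparison.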
v3.5.6
  1/*
  2 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  3 *
  4 */
  5#include <linux/stacktrace.h>
  6#include <linux/kallsyms.h>
  7#include <linux/seq_file.h>
  8#include <linux/spinlock.h>
  9#include <linux/uaccess.h>
 10#include <linux/debugfs.h>
 11#include <linux/ftrace.h>
 12#include <linux/module.h>
 13#include <linux/sysctl.h>
 14#include <linux/init.h>
 15#include <linux/fs.h>
 16
 17#include <asm/setup.h>
 18
 19#include "trace.h"
 20
 21#define STACK_TRACE_ENTRIES 500
 22
 23static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
 24	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
 25static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
 26
 27static struct stack_trace max_stack_trace = {
 28	.max_entries		= STACK_TRACE_ENTRIES,
 29	.entries		= stack_dump_trace,
 30};
 31
 32static unsigned long max_stack_size;
 33static arch_spinlock_t max_stack_lock =
 34	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 35
 36static int stack_trace_disabled __read_mostly;
 37static DEFINE_PER_CPU(int, trace_active);
 38static DEFINE_MUTEX(stack_sysctl_mutex);
 39
 40int stack_tracer_enabled;
 41static int last_stack_tracer_enabled;
 42
 43static inline void check_stack(void)
 44{
 45	unsigned long this_size, flags;
 46	unsigned long *p, *top, *start;
 47	int i;
 48
 49	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
 50	this_size = THREAD_SIZE - this_size;
 51
 52	if (this_size <= max_stack_size)
 53		return;
 54
 55	/* we do not handle interrupt stacks yet */
 56	if (!object_is_on_stack(&this_size))
 57		return;
 58
 59	local_irq_save(flags);
 60	arch_spin_lock(&max_stack_lock);
 61
 62	/* a race could have already updated it */
 63	if (this_size <= max_stack_size)
 64		goto out;
 65
 66	max_stack_size = this_size;
 67
 68	max_stack_trace.nr_entries	= 0;
 69	max_stack_trace.skip		= 3;
 70
 71	save_stack_trace(&max_stack_trace);
 72
 73	/*
 74	 * Now find where in the stack these are.
 75	 */
 76	i = 0;
 77	start = &this_size;
 78	top = (unsigned long *)
 79		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
 80
 81	/*
 82	 * Loop through all the entries. One of the entries may
 83	 * for some reason be missed on the stack, so we may
 84	 * have to account for them. If they are all there, this
 85	 * loop will only happen once. This code only takes place
 86	 * on a new max, so it is far from a fast path.
 87	 */
 88	while (i < max_stack_trace.nr_entries) {
 89		int found = 0;
 90
 91		stack_dump_index[i] = this_size;
 92		p = start;
 93
 94		for (; p < top && i < max_stack_trace.nr_entries; p++) {
 95			if (*p == stack_dump_trace[i]) {
 96				this_size = stack_dump_index[i++] =
 97					(top - p) * sizeof(unsigned long);
 98				found = 1;
 99				/* Start the search from here */
100				start = p + 1;
101			}
102		}
103
104		if (!found)
105			i++;
106	}
107
108 out:
109	arch_spin_unlock(&max_stack_lock);
110	local_irq_restore(flags);
111}
112
113static void
114stack_trace_call(unsigned long ip, unsigned long parent_ip)
115{
116	int cpu;
117
118	if (unlikely(!ftrace_enabled || stack_trace_disabled))
119		return;
120
121	preempt_disable_notrace();
122
123	cpu = raw_smp_processor_id();
124	/* no atomic needed, we only modify this variable by this cpu */
125	if (per_cpu(trace_active, cpu)++ != 0)
126		goto out;
127
128	check_stack();
129
130 out:
131	per_cpu(trace_active, cpu)--;
132	/* prevent recursion in schedule */
133	preempt_enable_notrace();
134}
135
136static struct ftrace_ops trace_ops __read_mostly =
137{
138	.func = stack_trace_call,
139};
140
141static ssize_t
142stack_max_size_read(struct file *filp, char __user *ubuf,
143		    size_t count, loff_t *ppos)
144{
145	unsigned long *ptr = filp->private_data;
146	char buf[64];
147	int r;
148
149	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
150	if (r > sizeof(buf))
151		r = sizeof(buf);
152	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
153}
154
155static ssize_t
156stack_max_size_write(struct file *filp, const char __user *ubuf,
157		     size_t count, loff_t *ppos)
158{
159	long *ptr = filp->private_data;
160	unsigned long val, flags;
161	int ret;
162	int cpu;
163
164	ret = kstrtoul_from_user(ubuf, count, 10, &val);
165	if (ret)
166		return ret;
167
168	local_irq_save(flags);
169
170	/*
171	 * In case we trace inside arch_spin_lock() or after (NMI),
172	 * we will cause circular lock, so we also need to increase
173	 * the percpu trace_active here.
174	 */
175	cpu = smp_processor_id();
176	per_cpu(trace_active, cpu)++;
177
178	arch_spin_lock(&max_stack_lock);
179	*ptr = val;
180	arch_spin_unlock(&max_stack_lock);
181
182	per_cpu(trace_active, cpu)--;
183	local_irq_restore(flags);
184
185	return count;
186}
187
188static const struct file_operations stack_max_size_fops = {
189	.open		= tracing_open_generic,
190	.read		= stack_max_size_read,
191	.write		= stack_max_size_write,
192	.llseek		= default_llseek,
193};
194
195static void *
196__next(struct seq_file *m, loff_t *pos)
197{
198	long n = *pos - 1;
199
200	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
201		return NULL;
202
203	m->private = (void *)n;
204	return &m->private;
205}
206
207static void *
208t_next(struct seq_file *m, void *v, loff_t *pos)
209{
210	(*pos)++;
211	return __next(m, pos);
212}
213
214static void *t_start(struct seq_file *m, loff_t *pos)
215{
216	int cpu;
217
218	local_irq_disable();
219
220	cpu = smp_processor_id();
221	per_cpu(trace_active, cpu)++;
222
223	arch_spin_lock(&max_stack_lock);
224
225	if (*pos == 0)
226		return SEQ_START_TOKEN;
227
228	return __next(m, pos);
229}
230
231static void t_stop(struct seq_file *m, void *p)
232{
233	int cpu;
234
235	arch_spin_unlock(&max_stack_lock);
236
237	cpu = smp_processor_id();
238	per_cpu(trace_active, cpu)--;
239
240	local_irq_enable();
241}
242
243static int trace_lookup_stack(struct seq_file *m, long i)
244{
245	unsigned long addr = stack_dump_trace[i];
246
247	return seq_printf(m, "%pS\n", (void *)addr);
248}
249
250static void print_disabled(struct seq_file *m)
251{
252	seq_puts(m, "#\n"
253		 "#  Stack tracer disabled\n"
254		 "#\n"
255		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
256		 "# kernel command line\n"
257		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
258		 "#\n");
259}
260
261static int t_show(struct seq_file *m, void *v)
262{
263	long i;
264	int size;
265
266	if (v == SEQ_START_TOKEN) {
267		seq_printf(m, "        Depth    Size   Location"
268			   "    (%d entries)\n"
269			   "        -----    ----   --------\n",
270			   max_stack_trace.nr_entries - 1);
271
272		if (!stack_tracer_enabled && !max_stack_size)
273			print_disabled(m);
274
275		return 0;
276	}
277
278	i = *(long *)v;
279
280	if (i >= max_stack_trace.nr_entries ||
281	    stack_dump_trace[i] == ULONG_MAX)
282		return 0;
283
284	if (i+1 == max_stack_trace.nr_entries ||
285	    stack_dump_trace[i+1] == ULONG_MAX)
286		size = stack_dump_index[i];
287	else
288		size = stack_dump_index[i] - stack_dump_index[i+1];
289
290	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);
291
292	trace_lookup_stack(m, i);
293
294	return 0;
295}
296
297static const struct seq_operations stack_trace_seq_ops = {
298	.start		= t_start,
299	.next		= t_next,
300	.stop		= t_stop,
301	.show		= t_show,
302};
303
304static int stack_trace_open(struct inode *inode, struct file *file)
305{
306	return seq_open(file, &stack_trace_seq_ops);
307}
308
309static const struct file_operations stack_trace_fops = {
310	.open		= stack_trace_open,
311	.read		= seq_read,
312	.llseek		= seq_lseek,
313	.release	= seq_release,
314};
315
316static int
317stack_trace_filter_open(struct inode *inode, struct file *file)
318{
319	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
320				 inode, file);
321}
322
323static const struct file_operations stack_trace_filter_fops = {
324	.open = stack_trace_filter_open,
325	.read = seq_read,
326	.write = ftrace_filter_write,
327	.llseek = ftrace_regex_lseek,
328	.release = ftrace_regex_release,
329};
330
331int
332stack_trace_sysctl(struct ctl_table *table, int write,
333		   void __user *buffer, size_t *lenp,
334		   loff_t *ppos)
335{
336	int ret;
337
338	mutex_lock(&stack_sysctl_mutex);
339
340	ret = proc_dointvec(table, write, buffer, lenp, ppos);
341
342	if (ret || !write ||
343	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
344		goto out;
345
346	last_stack_tracer_enabled = !!stack_tracer_enabled;
347
348	if (stack_tracer_enabled)
349		register_ftrace_function(&trace_ops);
350	else
351		unregister_ftrace_function(&trace_ops);
352
353 out:
354	mutex_unlock(&stack_sysctl_mutex);
355	return ret;
356}
357
358static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;
359
360static __init int enable_stacktrace(char *str)
361{
362	if (strncmp(str, "_filter=", 8) == 0)
363		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);
364
365	stack_tracer_enabled = 1;
366	last_stack_tracer_enabled = 1;
367	return 1;
368}
369__setup("stacktrace", enable_stacktrace);
370
371static __init int stack_trace_init(void)
372{
373	struct dentry *d_tracer;
374
375	d_tracer = tracing_init_dentry();
376
377	trace_create_file("stack_max_size", 0644, d_tracer,
378			&max_stack_size, &stack_max_size_fops);
379
380	trace_create_file("stack_trace", 0444, d_tracer,
381			NULL, &stack_trace_fops);
382
383	trace_create_file("stack_trace_filter", 0444, d_tracer,
384			NULL, &stack_trace_filter_fops);
385
386	if (stack_trace_filter_buf[0])
387		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);
388
389	if (stack_tracer_enabled)
390		register_ftrace_function(&trace_ops);
391
392	return 0;
393}
394
395device_initcall(stack_trace_init);
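In both versions, check_stack() infers the current stack usage from a single on-stack address: kernel stacks are THREAD_SIZE-aligned, so masking the address with THREAD_SIZE - 1 gives the offset into the stack area, and THREAD_SIZE minus that offset is the number of bytes in use (the stack grows down from the top of the area). A stand-alone sketch of the same arithmetic, using an assumed 8 KiB THREAD_SIZE and a made-up address purely for illustration:

#include <stdio.h>

/* Assumption for illustration only; THREAD_SIZE is arch-dependent in the kernel. */
#define THREAD_SIZE	8192UL

/* Mirrors: this_size = THREAD_SIZE - ((unsigned long)stack & (THREAD_SIZE - 1)); */
static unsigned long stack_in_use(unsigned long sp)
{
	return THREAD_SIZE - (sp & (THREAD_SIZE - 1));
}

int main(void)
{
	unsigned long sp = 0xffff880012345e50UL;	/* hypothetical address of a local */

	printf("offset into stack area: %lu\n", sp & (THREAD_SIZE - 1));	/* 7760 */
	printf("stack in use:           %lu bytes\n", stack_in_use(sp));	/* 432 */
	return 0;
}

check_stack() then records, for each entry returned by save_stack_trace(), how many bytes of stack were still in use at that frame, and t_show() prints the per-entry Size column as the difference between consecutive recorded depths.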