v4.6
  1/*
  2 * ring buffer based function tracer
  3 *
  4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  6 *
  7 * Based on code from the latency_tracer, that is:
  8 *
  9 *  Copyright (C) 2004-2006 Ingo Molnar
 10 *  Copyright (C) 2004 Nadia Yvette Chambers
 11 */
 12#include <linux/ring_buffer.h>
 13#include <linux/debugfs.h>
 14#include <linux/uaccess.h>
 15#include <linux/ftrace.h>
 16#include <linux/slab.h>
 17#include <linux/fs.h>
 18
 19#include "trace.h"
 20
 21static void tracing_start_function_trace(struct trace_array *tr);
 22static void tracing_stop_function_trace(struct trace_array *tr);
 23static void
 24function_trace_call(unsigned long ip, unsigned long parent_ip,
 25		    struct ftrace_ops *op, struct pt_regs *pt_regs);
 26static void
 27function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 28			  struct ftrace_ops *op, struct pt_regs *pt_regs);
 29static struct tracer_flags func_flags;
 30
 31/* Our option */
 32enum {
 33	TRACE_FUNC_OPT_STACK	= 0x1,
 34};
 35
 36static int allocate_ftrace_ops(struct trace_array *tr)
 37{
 38	struct ftrace_ops *ops;
 39
 40	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 41	if (!ops)
 42		return -ENOMEM;
 43
  44	/* Currently only the non-stack version is supported */
 45	ops->func = function_trace_call;
 46	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
 47
 48	tr->ops = ops;
 49	ops->private = tr;
 50	return 0;
 51}
 52
 53
 54int ftrace_create_function_files(struct trace_array *tr,
 55				 struct dentry *parent)
 56{
 57	int ret;
 58
 59	/*
 60	 * The top level array uses the "global_ops", and the files are
 61	 * created on boot up.
 62	 */
 63	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 64		return 0;
 65
 66	ret = allocate_ftrace_ops(tr);
 67	if (ret)
 68		return ret;
 69
 70	ftrace_create_filter_files(tr->ops, parent);
 71
 72	return 0;
 73}
 74
 75void ftrace_destroy_function_files(struct trace_array *tr)
 76{
 77	ftrace_destroy_filter_files(tr->ops);
 78	kfree(tr->ops);
 79	tr->ops = NULL;
 80}
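
These two helpers give each tracing instance its own private ftrace_ops and filter files. As a rough user-space sketch (the instance name "demo" and the tracefs mount point are assumptions, not taken from this file), creating an instance directory is what ultimately drives allocate_ftrace_ops() above:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* Creating an instance directory allocates a new trace_array,
	 * which gets its own private ftrace_ops via the code above. */
	if (mkdir("/sys/kernel/tracing/instances/demo", 0755) != 0) {
		perror("mkdir instance");
		return 1;
	}
	return 0;
}
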
 81
 82static int function_trace_init(struct trace_array *tr)
 83{
 84	ftrace_func_t func;
 85
 86	/*
 87	 * Instance trace_arrays get their ops allocated
  88	 * at instance creation, unless that allocation
  89	 * failed.
 90	 */
 91	if (!tr->ops)
 92		return -ENOMEM;
 93
 94	/* Currently only the global instance can do stack tracing */
 95	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
 96	    func_flags.val & TRACE_FUNC_OPT_STACK)
 97		func = function_stack_trace_call;
 98	else
 99		func = function_trace_call;
100
101	ftrace_init_array_ops(tr, func);
102
103	tr->trace_buffer.cpu = get_cpu();
104	put_cpu();
105
106	tracing_start_cmdline_record();
107	tracing_start_function_trace(tr);
108	return 0;
109}
110
111static void function_trace_reset(struct trace_array *tr)
112{
113	tracing_stop_function_trace(tr);
114	tracing_stop_cmdline_record();
115	ftrace_reset_array_ops(tr);
116}
117
118static void function_trace_start(struct trace_array *tr)
119{
120	tracing_reset_online_cpus(&tr->trace_buffer);
121}
122
123static void
124function_trace_call(unsigned long ip, unsigned long parent_ip,
125		    struct ftrace_ops *op, struct pt_regs *pt_regs)
126{
127	struct trace_array *tr = op->private;
128	struct trace_array_cpu *data;
129	unsigned long flags;
130	int bit;
131	int cpu;
132	int pc;
133
134	if (unlikely(!tr->function_enabled))
135		return;
136
137	pc = preempt_count();
138	preempt_disable_notrace();
139
140	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
141	if (bit < 0)
142		goto out;
143
144	cpu = smp_processor_id();
145	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
146	if (!atomic_read(&data->disabled)) {
147		local_save_flags(flags);
148		trace_function(tr, ip, parent_ip, flags, pc);
149	}
150	trace_clear_recursion(bit);
151
152 out:
153	preempt_enable_notrace();
154}
155
156static void
157function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
158			  struct ftrace_ops *op, struct pt_regs *pt_regs)
159{
160	struct trace_array *tr = op->private;
161	struct trace_array_cpu *data;
162	unsigned long flags;
163	long disabled;
164	int cpu;
165	int pc;
166
167	if (unlikely(!tr->function_enabled))
168		return;
169
170	/*
171	 * Need to use raw, since this must be called before the
172	 * recursive protection is performed.
173	 */
174	local_irq_save(flags);
175	cpu = raw_smp_processor_id();
176	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
177	disabled = atomic_inc_return(&data->disabled);
178
179	if (likely(disabled == 1)) {
180		pc = preempt_count();
181		trace_function(tr, ip, parent_ip, flags, pc);
182		/*
183		 * skip over 5 funcs:
184		 *    __ftrace_trace_stack,
185		 *    __trace_stack,
186		 *    function_stack_trace_call
187		 *    ftrace_list_func
188		 *    ftrace_call
189		 */
190		__trace_stack(tr, flags, 5, pc);
191	}
192
193	atomic_dec(&data->disabled);
194	local_irq_restore(flags);
195}
196
197static struct tracer_opt func_opts[] = {
198#ifdef CONFIG_STACKTRACE
199	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
200#endif
201	{ } /* Always set a last empty entry */
202};
203
204static struct tracer_flags func_flags = {
205	.val = 0, /* By default: all flags disabled */
206	.opts = func_opts
207};
208
209static void tracing_start_function_trace(struct trace_array *tr)
210{
211	tr->function_enabled = 0;
212	register_ftrace_function(tr->ops);
213	tr->function_enabled = 1;
214}
215
216static void tracing_stop_function_trace(struct trace_array *tr)
217{
218	tr->function_enabled = 0;
219	unregister_ftrace_function(tr->ops);
220}
221
222static struct tracer function_trace;
223
224static int
225func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
226{
227	switch (bit) {
228	case TRACE_FUNC_OPT_STACK:
229		/* do nothing if already set */
230		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
231			break;
232
233		/* We can change this flag when not running. */
234		if (tr->current_trace != &function_trace)
235			break;
236
237		unregister_ftrace_function(tr->ops);
238
239		if (set) {
240			tr->ops->func = function_stack_trace_call;
241			register_ftrace_function(tr->ops);
242		} else {
243			tr->ops->func = function_trace_call;
244			register_ftrace_function(tr->ops);
245		}
246
247		break;
248	default:
249		return -EINVAL;
250	}
251
252	return 0;
253}
254
255static struct tracer function_trace __tracer_data =
256{
257	.name		= "function",
258	.init		= function_trace_init,
259	.reset		= function_trace_reset,
260	.start		= function_trace_start,
261	.flags		= &func_flags,
262	.set_flag	= func_set_flag,
263	.allow_instances = true,
264#ifdef CONFIG_FTRACE_SELFTEST
265	.selftest	= trace_selftest_startup_function,
266#endif
267};
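
The tracer registered above is driven from user space through tracefs. A minimal sketch, assuming the default mount at /sys/kernel/tracing (older setups expose the same files under /sys/kernel/debug/tracing) and a hypothetical write_str() helper:

#include <stdio.h>
#include <stdlib.h>

static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(1);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	/* Select the tracer defined by 'function_trace' above. */
	write_str("/sys/kernel/tracing/current_tracer", "function");
	/* Flip TRACE_FUNC_OPT_STACK, i.e. the func_stack_trace option. */
	write_str("/sys/kernel/tracing/options/func_stack_trace", "1");
	/* Start recording into the ring buffer. */
	write_str("/sys/kernel/tracing/tracing_on", "1");
	return 0;
}
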
268
269#ifdef CONFIG_DYNAMIC_FTRACE
270static void update_traceon_count(void **data, bool on)
271{
272	long *count = (long *)data;
273	long old_count = *count;
274
275	/*
276	 * Tracing gets disabled (or enabled) once per count.
277	 * This function can be called at the same time on multiple CPUs.
278	 * It is fine if both disable (or enable) tracing, as disabling
279	 * (or enabling) the second time doesn't do anything as the
280	 * state of the tracer is already disabled (or enabled).
281	 * What needs to be synchronized in this case is that the count
282	 * only gets decremented once, even if the tracer is disabled
283	 * (or enabled) twice, as the second one is really a nop.
284	 *
285	 * The memory barriers guarantee that we only decrement the
286	 * counter once. First the count is read to a local variable
287	 * and a read barrier is used to make sure that it is loaded
288	 * before checking if the tracer is in the state we want.
289	 * If the tracer is not in the state we want, then the count
290	 * is guaranteed to be the old count.
291	 *
292	 * Next the tracer is set to the state we want (disabled or enabled)
293	 * then a write memory barrier is used to make sure that
294	 * the new state is visible before changing the counter by
295	 * one minus the old counter. This guarantees that another CPU
296	 * executing this code will see the new state before seeing
297	 * the new counter value, and would not do anything if the new
298	 * counter is seen.
299	 *
300	 * Note, there is no synchronization between this and a user
301	 * setting the tracing_on file. But we currently don't care
302	 * about that.
303	 */
304	if (!old_count)
305		return;
306
307	/* Make sure we see count before checking tracing state */
308	smp_rmb();
309
310	if (on == !!tracing_is_on())
311		return;
312
313	if (on)
314		tracing_on();
315	else
316		tracing_off();
317
318	/* unlimited? */
319	if (old_count == -1)
320		return;
321
322	/* Make sure tracing state is visible before updating count */
323	smp_wmb();
324
325	*count = old_count - 1;
326}
327
328static void
329ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
330{
331	update_traceon_count(data, 1);
332}
333
334static void
335ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
336{
337	update_traceon_count(data, 0);
338}
339
340static void
341ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
342{
343	if (tracing_is_on())
344		return;
345
346	tracing_on();
347}
348
349static void
350ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
351{
352	if (!tracing_is_on())
353		return;
354
355	tracing_off();
356}
357
358/*
359 * Skip 4:
360 *   ftrace_stacktrace()
361 *   function_trace_probe_call()
362 *   ftrace_ops_list_func()
363 *   ftrace_call()
364 */
365#define STACK_SKIP 4
366
367static void
368ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
369{
370	trace_dump_stack(STACK_SKIP);
371}
372
373static void
374ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
375{
376	long *count = (long *)data;
377	long old_count;
378	long new_count;
379
380	/*
381	 * Stack traces should only execute the number of times the
382	 * user specified in the counter.
383	 */
384	do {
385
386		if (!tracing_is_on())
387			return;
388
389		old_count = *count;
390
391		if (!old_count)
392			return;
393
394		/* unlimited? */
395		if (old_count == -1) {
396			trace_dump_stack(STACK_SKIP);
397			return;
398		}
399
400		new_count = old_count - 1;
401		new_count = cmpxchg(count, old_count, new_count);
402		if (new_count == old_count)
403			trace_dump_stack(STACK_SKIP);
404
405	} while (new_count != old_count);
406}
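
The cmpxchg() loop above consumes one unit of the user-supplied budget atomically, so concurrent callers on different CPUs cannot emit more stack dumps than requested (-1 meaning unlimited). A stand-alone user-space sketch of the same pattern, using GCC/Clang atomic builtins instead of the kernel's cmpxchg() (names and values are illustrative only):

#include <stdio.h>

static long budget = 3;	/* -1 would mean "unlimited", 0 "exhausted" */

/* Return 1 if the caller may fire, consuming one unit of the budget. */
static int consume_one(long *count)
{
	long old, new;

	do {
		old = *count;
		if (!old)
			return 0;	/* nothing left */
		if (old == -1)
			return 1;	/* unlimited: never decrement */
		new = old - 1;
		/* Retry if another thread changed *count meanwhile. */
	} while (!__sync_bool_compare_and_swap(count, old, new));

	return 1;
}

int main(void)
{
	while (consume_one(&budget))
		printf("event fired, %ld left\n", budget);
	return 0;
}
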
407
408static int update_count(void **data)
409{
410	unsigned long *count = (long *)data;
411
412	if (!*count)
413		return 0;
414
415	if (*count != -1)
416		(*count)--;
417
418	return 1;
419}
420
421static void
422ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
423{
424	if (update_count(data))
425		ftrace_dump(DUMP_ALL);
426}
427
428/* Only dump the current CPU buffer. */
429static void
430ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
431{
432	if (update_count(data))
433		ftrace_dump(DUMP_ORIG);
434}
435
436static int
437ftrace_probe_print(const char *name, struct seq_file *m,
438		   unsigned long ip, void *data)
439{
440	long count = (long)data;
441
442	seq_printf(m, "%ps:%s", (void *)ip, name);
443
444	if (count == -1)
445		seq_puts(m, ":unlimited\n");
446	else
447		seq_printf(m, ":count=%ld\n", count);
448
449	return 0;
450}
451
452static int
453ftrace_traceon_print(struct seq_file *m, unsigned long ip,
454			 struct ftrace_probe_ops *ops, void *data)
455{
456	return ftrace_probe_print("traceon", m, ip, data);
457}
458
459static int
460ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
461			 struct ftrace_probe_ops *ops, void *data)
462{
463	return ftrace_probe_print("traceoff", m, ip, data);
464}
465
466static int
467ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
468			struct ftrace_probe_ops *ops, void *data)
469{
470	return ftrace_probe_print("stacktrace", m, ip, data);
471}
472
473static int
474ftrace_dump_print(struct seq_file *m, unsigned long ip,
475			struct ftrace_probe_ops *ops, void *data)
476{
477	return ftrace_probe_print("dump", m, ip, data);
478}
479
480static int
481ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
482			struct ftrace_probe_ops *ops, void *data)
483{
484	return ftrace_probe_print("cpudump", m, ip, data);
485}
486
487static struct ftrace_probe_ops traceon_count_probe_ops = {
488	.func			= ftrace_traceon_count,
489	.print			= ftrace_traceon_print,
490};
491
492static struct ftrace_probe_ops traceoff_count_probe_ops = {
493	.func			= ftrace_traceoff_count,
494	.print			= ftrace_traceoff_print,
495};
496
497static struct ftrace_probe_ops stacktrace_count_probe_ops = {
498	.func			= ftrace_stacktrace_count,
499	.print			= ftrace_stacktrace_print,
500};
501
502static struct ftrace_probe_ops dump_probe_ops = {
503	.func			= ftrace_dump_probe,
504	.print			= ftrace_dump_print,
505};
506
507static struct ftrace_probe_ops cpudump_probe_ops = {
508	.func			= ftrace_cpudump_probe,
509	.print			= ftrace_cpudump_print,
510};
511
512static struct ftrace_probe_ops traceon_probe_ops = {
513	.func			= ftrace_traceon,
514	.print			= ftrace_traceon_print,
515};
516
517static struct ftrace_probe_ops traceoff_probe_ops = {
518	.func			= ftrace_traceoff,
519	.print			= ftrace_traceoff_print,
520};
521
522static struct ftrace_probe_ops stacktrace_probe_ops = {
523	.func			= ftrace_stacktrace,
524	.print			= ftrace_stacktrace_print,
525};
526
527static int
528ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
529			    struct ftrace_hash *hash, char *glob,
530			    char *cmd, char *param, int enable)
531{
532	void *count = (void *)-1;
533	char *number;
534	int ret;
535
536	/* hash funcs only work with set_ftrace_filter */
537	if (!enable)
538		return -EINVAL;
539
540	if (glob[0] == '!') {
541		unregister_ftrace_function_probe_func(glob+1, ops);
542		return 0;
543	}
544
545	if (!param)
546		goto out_reg;
547
548	number = strsep(&param, ":");
549
550	if (!strlen(number))
551		goto out_reg;
552
553	/*
554	 * We use the callback data field (which is a pointer)
555	 * as our counter.
556	 */
557	ret = kstrtoul(number, 0, (unsigned long *)&count);
558	if (ret)
559		return ret;
560
561 out_reg:
562	ret = register_ftrace_function_probe(glob, ops, count);
563
564	return ret < 0 ? ret : 0;
565}
566
567static int
568ftrace_trace_onoff_callback(struct ftrace_hash *hash,
569			    char *glob, char *cmd, char *param, int enable)
570{
571	struct ftrace_probe_ops *ops;
572
573	/* we register both traceon and traceoff to this callback */
574	if (strcmp(cmd, "traceon") == 0)
575		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
576	else
577		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
578
579	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
580					   param, enable);
581}
582
583static int
584ftrace_stacktrace_callback(struct ftrace_hash *hash,
585			   char *glob, char *cmd, char *param, int enable)
586{
587	struct ftrace_probe_ops *ops;
588
589	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
590
591	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
592					   param, enable);
593}
594
595static int
596ftrace_dump_callback(struct ftrace_hash *hash,
597			   char *glob, char *cmd, char *param, int enable)
598{
599	struct ftrace_probe_ops *ops;
600
601	ops = &dump_probe_ops;
602
603	/* Only dump once. */
604	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
605					   "1", enable);
606}
607
608static int
609ftrace_cpudump_callback(struct ftrace_hash *hash,
610			   char *glob, char *cmd, char *param, int enable)
611{
612	struct ftrace_probe_ops *ops;
613
614	ops = &cpudump_probe_ops;
615
616	/* Only dump once. */
617	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
618					   "1", enable);
619}
620
621static struct ftrace_func_command ftrace_traceon_cmd = {
622	.name			= "traceon",
623	.func			= ftrace_trace_onoff_callback,
624};
625
626static struct ftrace_func_command ftrace_traceoff_cmd = {
627	.name			= "traceoff",
628	.func			= ftrace_trace_onoff_callback,
629};
630
631static struct ftrace_func_command ftrace_stacktrace_cmd = {
632	.name			= "stacktrace",
633	.func			= ftrace_stacktrace_callback,
634};
635
636static struct ftrace_func_command ftrace_dump_cmd = {
637	.name			= "dump",
638	.func			= ftrace_dump_callback,
639};
640
641static struct ftrace_func_command ftrace_cpudump_cmd = {
642	.name			= "cpudump",
643	.func			= ftrace_cpudump_callback,
644};
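
Once init_func_cmd_traceon() below has registered these commands, they are armed by writing "function:command[:count]" into set_ftrace_filter, which is exactly the string that ftrace_trace_probe_callback() above splits with strsep() and kstrtoul(). A rough sketch (the tracefs path and the example target function are assumptions):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/set_ftrace_filter", "w");

	if (!f) {
		perror("set_ftrace_filter");
		return 1;
	}
	/* Turn tracing off the first five times schedule() is hit. */
	fputs("schedule:traceoff:5\n", f);
	fclose(f);
	return 0;
}
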
645
646static int __init init_func_cmd_traceon(void)
647{
648	int ret;
649
650	ret = register_ftrace_command(&ftrace_traceoff_cmd);
651	if (ret)
652		return ret;
653
654	ret = register_ftrace_command(&ftrace_traceon_cmd);
655	if (ret)
656		goto out_free_traceoff;
657
658	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
659	if (ret)
660		goto out_free_traceon;
661
662	ret = register_ftrace_command(&ftrace_dump_cmd);
663	if (ret)
664		goto out_free_stacktrace;
665
666	ret = register_ftrace_command(&ftrace_cpudump_cmd);
667	if (ret)
668		goto out_free_dump;
669
670	return 0;
671
672 out_free_dump:
673	unregister_ftrace_command(&ftrace_dump_cmd);
674 out_free_stacktrace:
675	unregister_ftrace_command(&ftrace_stacktrace_cmd);
676 out_free_traceon:
677	unregister_ftrace_command(&ftrace_traceon_cmd);
678 out_free_traceoff:
679	unregister_ftrace_command(&ftrace_traceoff_cmd);
680
681	return ret;
682}
683#else
684static inline int init_func_cmd_traceon(void)
685{
686	return 0;
687}
688#endif /* CONFIG_DYNAMIC_FTRACE */
689
690static __init int init_function_trace(void)
691{
692	init_func_cmd_traceon();
693	return register_tracer(&function_trace);
694}
695core_initcall(init_function_trace);
 
v3.5.6
  1/*
  2 * ring buffer based function tracer
  3 *
  4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  6 *
  7 * Based on code from the latency_tracer, that is:
  8 *
  9 *  Copyright (C) 2004-2006 Ingo Molnar
 10 *  Copyright (C) 2004 William Lee Irwin III
 11 */
 12#include <linux/ring_buffer.h>
 13#include <linux/debugfs.h>
 14#include <linux/uaccess.h>
 15#include <linux/ftrace.h>
 16#include <linux/fs.h>
 17
 18#include "trace.h"
 19
 20/* function tracing enabled */
 21static int			ftrace_function_enabled;
 22
 23static struct trace_array	*func_trace;
 24
 25static void tracing_start_function_trace(void);
 26static void tracing_stop_function_trace(void);
 27
 28static int function_trace_init(struct trace_array *tr)
 29{
 30	func_trace = tr;
 31	tr->cpu = get_cpu();
 32	put_cpu();
 33
 34	tracing_start_cmdline_record();
 35	tracing_start_function_trace();
 36	return 0;
 37}
 38
 39static void function_trace_reset(struct trace_array *tr)
 40{
 41	tracing_stop_function_trace();
 42	tracing_stop_cmdline_record();
 43}
 44
 45static void function_trace_start(struct trace_array *tr)
 46{
 47	tracing_reset_online_cpus(tr);
 48}
 49
 50static void
 51function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 52{
 53	struct trace_array *tr = func_trace;
 54	struct trace_array_cpu *data;
 55	unsigned long flags;
 56	long disabled;
 57	int cpu;
 58	int pc;
 59
 60	if (unlikely(!ftrace_function_enabled))
 61		return;
 62
 63	pc = preempt_count();
 64	preempt_disable_notrace();
 65	local_save_flags(flags);
 66	cpu = raw_smp_processor_id();
 67	data = tr->data[cpu];
 68	disabled = atomic_inc_return(&data->disabled);
 69
 70	if (likely(disabled == 1))
 71		trace_function(tr, ip, parent_ip, flags, pc);
 72
 73	atomic_dec(&data->disabled);
 74	preempt_enable_notrace();
 75}
 76
 77static void
 78function_trace_call(unsigned long ip, unsigned long parent_ip)
 79{
 80	struct trace_array *tr = func_trace;
 81	struct trace_array_cpu *data;
 82	unsigned long flags;
 83	long disabled;
 84	int cpu;
 85	int pc;
 86
 87	if (unlikely(!ftrace_function_enabled))
 88		return;
 89
 90	/*
 91	 * Need to use raw, since this must be called before the
 92	 * recursive protection is performed.
 93	 */
 94	local_irq_save(flags);
 95	cpu = raw_smp_processor_id();
 96	data = tr->data[cpu];
 97	disabled = atomic_inc_return(&data->disabled);
 98
 99	if (likely(disabled == 1)) {
100		pc = preempt_count();
101		trace_function(tr, ip, parent_ip, flags, pc);
102	}
103
104	atomic_dec(&data->disabled);
105	local_irq_restore(flags);
106}
107
108static void
109function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
110{
111	struct trace_array *tr = func_trace;
112	struct trace_array_cpu *data;
113	unsigned long flags;
114	long disabled;
115	int cpu;
116	int pc;
117
118	if (unlikely(!ftrace_function_enabled))
119		return;
120
121	/*
122	 * Need to use raw, since this must be called before the
123	 * recursive protection is performed.
124	 */
125	local_irq_save(flags);
126	cpu = raw_smp_processor_id();
127	data = tr->data[cpu];
128	disabled = atomic_inc_return(&data->disabled);
129
130	if (likely(disabled == 1)) {
131		pc = preempt_count();
132		trace_function(tr, ip, parent_ip, flags, pc);
133		/*
134		 * skip over 5 funcs:
135		 *    __ftrace_trace_stack,
136		 *    __trace_stack,
137		 *    function_stack_trace_call
138		 *    ftrace_list_func
139		 *    ftrace_call
140		 */
141		__trace_stack(tr, flags, 5, pc);
142	}
143
144	atomic_dec(&data->disabled);
145	local_irq_restore(flags);
146}
147
148
149static struct ftrace_ops trace_ops __read_mostly =
150{
151	.func = function_trace_call,
152	.flags = FTRACE_OPS_FL_GLOBAL,
153};
154
155static struct ftrace_ops trace_stack_ops __read_mostly =
156{
157	.func = function_stack_trace_call,
158	.flags = FTRACE_OPS_FL_GLOBAL,
159};
160
161/* Our two options */
162enum {
163	TRACE_FUNC_OPT_STACK = 0x1,
164};
165
166static struct tracer_opt func_opts[] = {
167#ifdef CONFIG_STACKTRACE
168	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
169#endif
170	{ } /* Always set a last empty entry */
171};
172
173static struct tracer_flags func_flags = {
174	.val = 0, /* By default: all flags disabled */
175	.opts = func_opts
176};
177
178static void tracing_start_function_trace(void)
179{
180	ftrace_function_enabled = 0;
181
182	if (trace_flags & TRACE_ITER_PREEMPTONLY)
183		trace_ops.func = function_trace_call_preempt_only;
184	else
185		trace_ops.func = function_trace_call;
186
187	if (func_flags.val & TRACE_FUNC_OPT_STACK)
188		register_ftrace_function(&trace_stack_ops);
189	else
190		register_ftrace_function(&trace_ops);
191
192	ftrace_function_enabled = 1;
193}
194
195static void tracing_stop_function_trace(void)
196{
197	ftrace_function_enabled = 0;
198
199	if (func_flags.val & TRACE_FUNC_OPT_STACK)
200		unregister_ftrace_function(&trace_stack_ops);
201	else
202		unregister_ftrace_function(&trace_ops);
203}
204
205static int func_set_flag(u32 old_flags, u32 bit, int set)
206{
207	if (bit == TRACE_FUNC_OPT_STACK) {
208		/* do nothing if already set */
209		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
210			return 0;
211
212		if (set) {
213			unregister_ftrace_function(&trace_ops);
214			register_ftrace_function(&trace_stack_ops);
215		} else {
216			unregister_ftrace_function(&trace_stack_ops);
217			register_ftrace_function(&trace_ops);
218		}
219
220		return 0;
221	}
222
223	return -EINVAL;
224}
225
226static struct tracer function_trace __read_mostly =
227{
228	.name		= "function",
229	.init		= function_trace_init,
230	.reset		= function_trace_reset,
231	.start		= function_trace_start,
232	.wait_pipe	= poll_wait_pipe,
233	.flags		= &func_flags,
234	.set_flag	= func_set_flag,
235#ifdef CONFIG_FTRACE_SELFTEST
236	.selftest	= trace_selftest_startup_function,
237#endif
238};
239
240#ifdef CONFIG_DYNAMIC_FTRACE
241static void
242ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
243{
244	long *count = (long *)data;
245
246	if (tracing_is_on())
247		return;
248
249	if (!*count)
250		return;
251
252	if (*count != -1)
253		(*count)--;
254
255	tracing_on();
256}
257
258static void
259ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
260{
261	long *count = (long *)data;
262
263	if (!tracing_is_on())
264		return;
265
266	if (!*count)
267		return;
268
269	if (*count != -1)
270		(*count)--;
271
272	tracing_off();
273}
274
275static int
276ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
277			 struct ftrace_probe_ops *ops, void *data);
278
279static struct ftrace_probe_ops traceon_probe_ops = {
280	.func			= ftrace_traceon,
281	.print			= ftrace_trace_onoff_print,
282};
283
284static struct ftrace_probe_ops traceoff_probe_ops = {
285	.func			= ftrace_traceoff,
286	.print			= ftrace_trace_onoff_print,
287};
288
289static int
290ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
291			 struct ftrace_probe_ops *ops, void *data)
292{
293	long count = (long)data;
294
295	seq_printf(m, "%ps:", (void *)ip);
296
297	if (ops == &traceon_probe_ops)
298		seq_printf(m, "traceon");
299	else
300		seq_printf(m, "traceoff");
301
302	if (count == -1)
303		seq_printf(m, ":unlimited\n");
304	else
305		seq_printf(m, ":count=%ld\n", count);
306
307	return 0;
308}
309
310static int
311ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
312{
313	struct ftrace_probe_ops *ops;
314
315	/* we register both traceon and traceoff to this callback */
316	if (strcmp(cmd, "traceon") == 0)
317		ops = &traceon_probe_ops;
318	else
319		ops = &traceoff_probe_ops;
320
321	unregister_ftrace_function_probe_func(glob, ops);
322
323	return 0;
324}
325
326static int
327ftrace_trace_onoff_callback(struct ftrace_hash *hash,
328			    char *glob, char *cmd, char *param, int enable)
329{
330	struct ftrace_probe_ops *ops;
331	void *count = (void *)-1;
332	char *number;
333	int ret;
334
335	/* hash funcs only work with set_ftrace_filter */
336	if (!enable)
337		return -EINVAL;
338
339	if (glob[0] == '!')
340		return ftrace_trace_onoff_unreg(glob+1, cmd, param);
341
342	/* we register both traceon and traceoff to this callback */
343	if (strcmp(cmd, "traceon") == 0)
344		ops = &traceon_probe_ops;
345	else
346		ops = &traceoff_probe_ops;
347
348	if (!param)
349		goto out_reg;
350
351	number = strsep(&param, ":");
352
353	if (!strlen(number))
354		goto out_reg;
355
356	/*
357	 * We use the callback data field (which is a pointer)
358	 * as our counter.
359	 */
360	ret = strict_strtoul(number, 0, (unsigned long *)&count);
361	if (ret)
362		return ret;
363
364 out_reg:
365	ret = register_ftrace_function_probe(glob, ops, count);
366
367	return ret < 0 ? ret : 0;
368}
369
370static struct ftrace_func_command ftrace_traceon_cmd = {
371	.name			= "traceon",
372	.func			= ftrace_trace_onoff_callback,
373};
374
375static struct ftrace_func_command ftrace_traceoff_cmd = {
376	.name			= "traceoff",
377	.func			= ftrace_trace_onoff_callback,
378};
379
380static int __init init_func_cmd_traceon(void)
381{
382	int ret;
383
384	ret = register_ftrace_command(&ftrace_traceoff_cmd);
385	if (ret)
386		return ret;
387
388	ret = register_ftrace_command(&ftrace_traceon_cmd);
389	if (ret)
390		unregister_ftrace_command(&ftrace_traceoff_cmd);
391	return ret;
392}
393#else
394static inline int init_func_cmd_traceon(void)
395{
396	return 0;
397}
398#endif /* CONFIG_DYNAMIC_FTRACE */
399
400static __init int init_function_trace(void)
401{
402	init_func_cmd_traceon();
403	return register_tracer(&function_trace);
404}
405device_initcall(init_function_trace);
406