v4.6: kernel/trace/trace_functions.c
 
  1/*
  2 * ring buffer based function tracer
  3 *
  4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  6 *
  7 * Based on code from the latency_tracer, that is:
  8 *
  9 *  Copyright (C) 2004-2006 Ingo Molnar
 10 *  Copyright (C) 2004 Nadia Yvette Chambers
 11 */
 12#include <linux/ring_buffer.h>
 13#include <linux/debugfs.h>
 14#include <linux/uaccess.h>
 15#include <linux/ftrace.h>
 16#include <linux/slab.h>
 17#include <linux/fs.h>
 18
 19#include "trace.h"
 20
 21static void tracing_start_function_trace(struct trace_array *tr);
 22static void tracing_stop_function_trace(struct trace_array *tr);
 23static void
 24function_trace_call(unsigned long ip, unsigned long parent_ip,
 25		    struct ftrace_ops *op, struct pt_regs *pt_regs);
 26static void
 27function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 28			  struct ftrace_ops *op, struct pt_regs *pt_regs);
 29static struct tracer_flags func_flags;
 30
 31/* Our option */
 32enum {
 33	TRACE_FUNC_OPT_STACK	= 0x1,
 34};
 35
 36static int allocate_ftrace_ops(struct trace_array *tr)
 37{
 38	struct ftrace_ops *ops;
 39
 40	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 41	if (!ops)
 42		return -ENOMEM;
 43
 44	/* Currently only the non stack version is supported */
 45	ops->func = function_trace_call;
 46	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
 47
 48	tr->ops = ops;
 49	ops->private = tr;
 50	return 0;
 51}
 52
 53
 54int ftrace_create_function_files(struct trace_array *tr,
 55				 struct dentry *parent)
 56{
 57	int ret;
 58
 59	/*
 60	 * The top level array uses the "global_ops", and the files are
 61	 * created on boot up.
 62	 */
 63	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 64		return 0;
 65
 66	ret = allocate_ftrace_ops(tr);
 67	if (ret)
 68		return ret;
 69
 70	ftrace_create_filter_files(tr->ops, parent);
 71
 72	return 0;
 73}
 74
 75void ftrace_destroy_function_files(struct trace_array *tr)
 76{
 77	ftrace_destroy_filter_files(tr->ops);
 78	kfree(tr->ops);
 79	tr->ops = NULL;
 80}
 81
 82static int function_trace_init(struct trace_array *tr)
 83{
 84	ftrace_func_t func;
 85
 86	/*
 87	 * Instance trace_arrays get their ops allocated
 88	 * at instance creation. Unless it failed
 89	 * the allocation.
 90	 */
 91	if (!tr->ops)
 92		return -ENOMEM;
 93
 94	/* Currently only the global instance can do stack tracing */
 95	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
 96	    func_flags.val & TRACE_FUNC_OPT_STACK)
 97		func = function_stack_trace_call;
 98	else
 99		func = function_trace_call;
100
101	ftrace_init_array_ops(tr, func);
102
103	tr->trace_buffer.cpu = get_cpu();
104	put_cpu();
105
106	tracing_start_cmdline_record();
107	tracing_start_function_trace(tr);
108	return 0;
109}
110
111static void function_trace_reset(struct trace_array *tr)
112{
113	tracing_stop_function_trace(tr);
114	tracing_stop_cmdline_record();
115	ftrace_reset_array_ops(tr);
116}
117
118static void function_trace_start(struct trace_array *tr)
119{
120	tracing_reset_online_cpus(&tr->trace_buffer);
121}
122
123static void
124function_trace_call(unsigned long ip, unsigned long parent_ip,
125		    struct ftrace_ops *op, struct pt_regs *pt_regs)
126{
127	struct trace_array *tr = op->private;
128	struct trace_array_cpu *data;
129	unsigned long flags;
130	int bit;
131	int cpu;
132	int pc;
133
134	if (unlikely(!tr->function_enabled))
135		return;
136
137	pc = preempt_count();
138	preempt_disable_notrace();
139
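	/*
	 * Per-context recursion guard: trace_test_and_set_recursion() claims a
	 * bit for the current context (normal, softirq, irq or NMI) and returns
	 * its index, or a negative value if function tracing is already active
	 * in this context, in which case the event is dropped rather than
	 * recursing into the tracer.
	 */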
140	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
141	if (bit < 0)
142		goto out;
143
144	cpu = smp_processor_id();
145	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
146	if (!atomic_read(&data->disabled)) {
147		local_save_flags(flags);
148		trace_function(tr, ip, parent_ip, flags, pc);
149	}
150	trace_clear_recursion(bit);
151
152 out:
153	preempt_enable_notrace();
154}
155
156static void
157function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
158			  struct ftrace_ops *op, struct pt_regs *pt_regs)
159{
160	struct trace_array *tr = op->private;
161	struct trace_array_cpu *data;
162	unsigned long flags;
163	long disabled;
164	int cpu;
165	int pc;
166
167	if (unlikely(!tr->function_enabled))
168		return;
169
170	/*
171	 * Need to use raw, since this must be called before the
172	 * recursive protection is performed.
173	 */
174	local_irq_save(flags);
175	cpu = raw_smp_processor_id();
176	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
177	disabled = atomic_inc_return(&data->disabled);
178
179	if (likely(disabled == 1)) {
180		pc = preempt_count();
181		trace_function(tr, ip, parent_ip, flags, pc);
182		/*
183		 * skip over 5 funcs:
184		 *    __ftrace_trace_stack,
185		 *    __trace_stack,
186		 *    function_stack_trace_call
187		 *    ftrace_list_func
188		 *    ftrace_call
189		 */
190		__trace_stack(tr, flags, 5, pc);
191	}
192
193	atomic_dec(&data->disabled);
194	local_irq_restore(flags);
195}
196
197static struct tracer_opt func_opts[] = {
198#ifdef CONFIG_STACKTRACE
199	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
200#endif
201	{ } /* Always set a last empty entry */
202};
203
204static struct tracer_flags func_flags = {
205	.val = 0, /* By default: all flags disabled */
206	.opts = func_opts
207};
208
209static void tracing_start_function_trace(struct trace_array *tr)
210{
211	tr->function_enabled = 0;
212	register_ftrace_function(tr->ops);
213	tr->function_enabled = 1;
214}
215
216static void tracing_stop_function_trace(struct trace_array *tr)
217{
218	tr->function_enabled = 0;
219	unregister_ftrace_function(tr->ops);
220}
221
222static struct tracer function_trace;
223
224static int
225func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
226{
227	switch (bit) {
228	case TRACE_FUNC_OPT_STACK:
229		/* do nothing if already set */
230		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
231			break;
232
233		/* We can change this flag when not running. */
234		if (tr->current_trace != &function_trace)
235			break;
236
237		unregister_ftrace_function(tr->ops);
238
239		if (set) {
240			tr->ops->func = function_stack_trace_call;
241			register_ftrace_function(tr->ops);
242		} else {
243			tr->ops->func = function_trace_call;
244			register_ftrace_function(tr->ops);
245		}
246
247		break;
248	default:
249		return -EINVAL;
250	}
251
252	return 0;
253}
254
255static struct tracer function_trace __tracer_data =
256{
257	.name		= "function",
258	.init		= function_trace_init,
259	.reset		= function_trace_reset,
260	.start		= function_trace_start,
261	.flags		= &func_flags,
262	.set_flag	= func_set_flag,
263	.allow_instances = true,
264#ifdef CONFIG_FTRACE_SELFTEST
265	.selftest	= trace_selftest_startup_function,
266#endif
267};
268
269#ifdef CONFIG_DYNAMIC_FTRACE
270static void update_traceon_count(void **data, bool on)
271{
272	long *count = (long *)data;
273	long old_count = *count;
274
275	/*
276	 * Tracing gets disabled (or enabled) once per count.
277	 * This function can be called at the same time on multiple CPUs.
278	 * It is fine if both disable (or enable) tracing, as disabling
279	 * (or enabling) the second time doesn't do anything as the
280	 * state of the tracer is already disabled (or enabled).
281	 * What needs to be synchronized in this case is that the count
282	 * only gets decremented once, even if the tracer is disabled
283	 * (or enabled) twice, as the second one is really a nop.
284	 *
285	 * The memory barriers guarantee that we only decrement the
286	 * counter once. First the count is read to a local variable
287	 * and a read barrier is used to make sure that it is loaded
288	 * before checking if the tracer is in the state we want.
289	 * If the tracer is not in the state we want, then the count
290	 * is guaranteed to be the old count.
291	 *
292	 * Next the tracer is set to the state we want (disabled or enabled)
293	 * then a write memory barrier is used to make sure that
294	 * the new state is visible before changing the counter by
295	 * one minus the old counter. This guarantees that another CPU
296	 * executing this code will see the new state before seeing
297	 * the new counter value, and would not do anything if the new
298	 * counter is seen.
299	 *
300	 * Note, there is no synchronization between this and a user
301	 * setting the tracing_on file. But we currently don't care
302	 * about that.
303	 */
304	if (!old_count)
305		return;
306
307	/* Make sure we see count before checking tracing state */
308	smp_rmb();
309
310	if (on == !!tracing_is_on())
311		return;
312
313	if (on)
314		tracing_on();
315	else
316		tracing_off();
317
318	/* unlimited? */
319	if (old_count == -1)
320		return;
321
322	/* Make sure tracing state is visible before updating count */
323	smp_wmb();
324
325	*count = old_count - 1;
326}
327
328static void
329ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
330{
331	update_traceon_count(data, 1);
332}
333
334static void
335ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
336{
337	update_traceon_count(data, 0);
338}
339
340static void
341ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
342{
343	if (tracing_is_on())
344		return;
345
346	tracing_on();
347}
348
349static void
350ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
351{
352	if (!tracing_is_on())
353		return;
354
355	tracing_off();
356}
357
358/*
359 * Skip 4:
360 *   ftrace_stacktrace()
361 *   function_trace_probe_call()
362 *   ftrace_ops_list_func()
363 *   ftrace_call()
364 */
365#define STACK_SKIP 4
366
367static void
368ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
369{
370	trace_dump_stack(STACK_SKIP);
371}
372
373static void
374ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
375{
376	long *count = (long *)data;
377	long old_count;
378	long new_count;
379
380	/*
381	 * Stack traces should only execute the number of times the
382	 * user specified in the counter.
383	 */
384	do {
385
386		if (!tracing_is_on())
387			return;
388
389		old_count = *count;
390
391		if (!old_count)
392			return;
393
394		/* unlimited? */
395		if (old_count == -1) {
396			trace_dump_stack(STACK_SKIP);
397			return;
398		}
399
400		new_count = old_count - 1;
401		new_count = cmpxchg(count, old_count, new_count);
402		if (new_count == old_count)
403			trace_dump_stack(STACK_SKIP);
404
405	} while (new_count != old_count);
406}
407
408static int update_count(void **data)
409{
410	unsigned long *count = (long *)data;
411
412	if (!*count)
413		return 0;
414
415	if (*count != -1)
416		(*count)--;
417
418	return 1;
419}
420
421static void
422ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
423{
424	if (update_count(data))
425		ftrace_dump(DUMP_ALL);
426}
427
428/* Only dump the current CPU buffer. */
429static void
430ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
431{
432	if (update_count(data))
433		ftrace_dump(DUMP_ORIG);
434}
435
436static int
437ftrace_probe_print(const char *name, struct seq_file *m,
438		   unsigned long ip, void *data)
439{
440	long count = (long)data;
441
442	seq_printf(m, "%ps:%s", (void *)ip, name);
443
444	if (count == -1)
445		seq_puts(m, ":unlimited\n");
446	else
447		seq_printf(m, ":count=%ld\n", count);
448
449	return 0;
450}
451
452static int
453ftrace_traceon_print(struct seq_file *m, unsigned long ip,
454			 struct ftrace_probe_ops *ops, void *data)
455{
456	return ftrace_probe_print("traceon", m, ip, data);
457}
458
459static int
460ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
461			 struct ftrace_probe_ops *ops, void *data)
462{
463	return ftrace_probe_print("traceoff", m, ip, data);
464}
465
466static int
467ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
468			struct ftrace_probe_ops *ops, void *data)
469{
470	return ftrace_probe_print("stacktrace", m, ip, data);
471}
472
473static int
474ftrace_dump_print(struct seq_file *m, unsigned long ip,
475			struct ftrace_probe_ops *ops, void *data)
476{
477	return ftrace_probe_print("dump", m, ip, data);
478}
479
480static int
481ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
482			struct ftrace_probe_ops *ops, void *data)
483{
484	return ftrace_probe_print("cpudump", m, ip, data);
485}
486
487static struct ftrace_probe_ops traceon_count_probe_ops = {
488	.func			= ftrace_traceon_count,
489	.print			= ftrace_traceon_print,
490};
491
492static struct ftrace_probe_ops traceoff_count_probe_ops = {
493	.func			= ftrace_traceoff_count,
494	.print			= ftrace_traceoff_print,
495};
496
497static struct ftrace_probe_ops stacktrace_count_probe_ops = {
498	.func			= ftrace_stacktrace_count,
499	.print			= ftrace_stacktrace_print,
500};
501
502static struct ftrace_probe_ops dump_probe_ops = {
503	.func			= ftrace_dump_probe,
504	.print			= ftrace_dump_print,
505};
506
507static struct ftrace_probe_ops cpudump_probe_ops = {
508	.func			= ftrace_cpudump_probe,
509	.print			= ftrace_cpudump_print,
510};
511
512static struct ftrace_probe_ops traceon_probe_ops = {
513	.func			= ftrace_traceon,
514	.print			= ftrace_traceon_print,
515};
516
517static struct ftrace_probe_ops traceoff_probe_ops = {
518	.func			= ftrace_traceoff,
519	.print			= ftrace_traceoff_print,
520};
521
522static struct ftrace_probe_ops stacktrace_probe_ops = {
523	.func			= ftrace_stacktrace,
524	.print			= ftrace_stacktrace_print,
525};
526
527static int
528ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
529			    struct ftrace_hash *hash, char *glob,
530			    char *cmd, char *param, int enable)
531{
532	void *count = (void *)-1;
533	char *number;
534	int ret;
535
536	/* hash funcs only work with set_ftrace_filter */
537	if (!enable)
538		return -EINVAL;
539
540	if (glob[0] == '!') {
541		unregister_ftrace_function_probe_func(glob+1, ops);
542		return 0;
543	}
544
545	if (!param)
546		goto out_reg;
547
548	number = strsep(&param, ":");
549
550	if (!strlen(number))
551		goto out_reg;
552
553	/*
554	 * We use the callback data field (which is a pointer)
555	 * as our counter.
556	 */
557	ret = kstrtoul(number, 0, (unsigned long *)&count);
558	if (ret)
559		return ret;
560
561 out_reg:
562	ret = register_ftrace_function_probe(glob, ops, count);
563
564	return ret < 0 ? ret : 0;
565}
566
567static int
568ftrace_trace_onoff_callback(struct ftrace_hash *hash,
569			    char *glob, char *cmd, char *param, int enable)
570{
571	struct ftrace_probe_ops *ops;
572
573	/* we register both traceon and traceoff to this callback */
574	if (strcmp(cmd, "traceon") == 0)
575		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
576	else
577		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
578
579	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
580					   param, enable);
581}
582
583static int
584ftrace_stacktrace_callback(struct ftrace_hash *hash,
585			   char *glob, char *cmd, char *param, int enable)
586{
587	struct ftrace_probe_ops *ops;
588
589	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
590
591	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
592					   param, enable);
593}
594
595static int
596ftrace_dump_callback(struct ftrace_hash *hash,
597			   char *glob, char *cmd, char *param, int enable)
598{
599	struct ftrace_probe_ops *ops;
600
601	ops = &dump_probe_ops;
602
603	/* Only dump once. */
604	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
605					   "1", enable);
606}
607
608static int
609ftrace_cpudump_callback(struct ftrace_hash *hash,
610			   char *glob, char *cmd, char *param, int enable)
611{
612	struct ftrace_probe_ops *ops;
613
614	ops = &cpudump_probe_ops;
615
616	/* Only dump once. */
617	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
618					   "1", enable);
619}
620
621static struct ftrace_func_command ftrace_traceon_cmd = {
622	.name			= "traceon",
623	.func			= ftrace_trace_onoff_callback,
624};
625
626static struct ftrace_func_command ftrace_traceoff_cmd = {
627	.name			= "traceoff",
628	.func			= ftrace_trace_onoff_callback,
629};
630
631static struct ftrace_func_command ftrace_stacktrace_cmd = {
632	.name			= "stacktrace",
633	.func			= ftrace_stacktrace_callback,
634};
635
636static struct ftrace_func_command ftrace_dump_cmd = {
637	.name			= "dump",
638	.func			= ftrace_dump_callback,
639};
640
641static struct ftrace_func_command ftrace_cpudump_cmd = {
642	.name			= "cpudump",
643	.func			= ftrace_cpudump_callback,
644};
645
646static int __init init_func_cmd_traceon(void)
647{
648	int ret;
649
650	ret = register_ftrace_command(&ftrace_traceoff_cmd);
651	if (ret)
652		return ret;
653
654	ret = register_ftrace_command(&ftrace_traceon_cmd);
655	if (ret)
656		goto out_free_traceoff;
657
658	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
659	if (ret)
660		goto out_free_traceon;
661
662	ret = register_ftrace_command(&ftrace_dump_cmd);
663	if (ret)
664		goto out_free_stacktrace;
665
666	ret = register_ftrace_command(&ftrace_cpudump_cmd);
667	if (ret)
668		goto out_free_dump;
669
670	return 0;
671
672 out_free_dump:
673	unregister_ftrace_command(&ftrace_dump_cmd);
674 out_free_stacktrace:
675	unregister_ftrace_command(&ftrace_stacktrace_cmd);
676 out_free_traceon:
677	unregister_ftrace_command(&ftrace_traceon_cmd);
678 out_free_traceoff:
679	unregister_ftrace_command(&ftrace_traceoff_cmd);
680
681	return ret;
682}
683#else
684static inline int init_func_cmd_traceon(void)
685{
686	return 0;
687}
688#endif /* CONFIG_DYNAMIC_FTRACE */
689
690static __init int init_function_trace(void)
691{
692	init_func_cmd_traceon();
693	return register_tracer(&function_trace);
694}
695core_initcall(init_function_trace);
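The traceon, traceoff, stacktrace, dump and cpudump commands registered above are driven from user space through tracefs. The sketch below is illustrative only: it assumes tracefs is mounted at /sys/kernel/tracing (on a v4.6 era kernel the same files live under /sys/kernel/debug/tracing), that schedule() is traceable on the running kernel, and the helper write_str() is made up for the example rather than taken from any kernel API.

#include <stdio.h>

/* Write one string to a tracefs control file. */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (fputs(val, f) == EOF) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(void)
{
	/* Selecting "function" runs function_trace_init() above. */
	write_str("/sys/kernel/tracing/current_tracer", "function");
	/*
	 * "<func>:traceoff:<count>" is parsed by ftrace_trace_onoff_callback():
	 * tracing is switched off the first time schedule() is hit.
	 */
	write_str("/sys/kernel/tracing/set_ftrace_filter", "schedule:traceoff:1");
	return 0;
}

Writing "!schedule:traceoff" to the same file takes the glob[0] == '!' branch in ftrace_trace_probe_callback() and removes the probe again.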
v5.14.15: kernel/trace/trace_functions.c
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * ring buffer based function tracer
  4 *
  5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  7 *
  8 * Based on code from the latency_tracer, that is:
  9 *
 10 *  Copyright (C) 2004-2006 Ingo Molnar
 11 *  Copyright (C) 2004 Nadia Yvette Chambers
 12 */
 13#include <linux/ring_buffer.h>
 14#include <linux/debugfs.h>
 15#include <linux/uaccess.h>
 16#include <linux/ftrace.h>
 17#include <linux/slab.h>
 18#include <linux/fs.h>
 19
 20#include "trace.h"
 21
 22static void tracing_start_function_trace(struct trace_array *tr);
 23static void tracing_stop_function_trace(struct trace_array *tr);
 24static void
 25function_trace_call(unsigned long ip, unsigned long parent_ip,
 26		    struct ftrace_ops *op, struct ftrace_regs *fregs);
 27static void
 28function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 29			  struct ftrace_ops *op, struct ftrace_regs *fregs);
 30static void
 31function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 32			       struct ftrace_ops *op, struct ftrace_regs *fregs);
 33static void
 34function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 35				     struct ftrace_ops *op,
 36				     struct ftrace_regs *fregs);
 37static struct tracer_flags func_flags;
 38
 39/* Our option */
 40enum {
 41
 42	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
 43	TRACE_FUNC_OPT_STACK		= 0x1,
 44	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,
 45
 46	/* Update this to next highest bit. */
 47	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
 48};
 49
 50#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
 51
 52int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 53{
 54	struct ftrace_ops *ops;
 55
 56	/* The top level array uses the "global_ops" */
 57	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 58		return 0;
 59
 60	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 61	if (!ops)
 62		return -ENOMEM;
 63
 64	/* Currently only the non stack version is supported */
 65	ops->func = function_trace_call;
 66	ops->flags = FTRACE_OPS_FL_PID;
 67
 68	tr->ops = ops;
 69	ops->private = tr;
 70
 71	return 0;
 72}
 73
 74void ftrace_free_ftrace_ops(struct trace_array *tr)
 75{
 76	kfree(tr->ops);
 77	tr->ops = NULL;
 78}
 79
 80int ftrace_create_function_files(struct trace_array *tr,
 81				 struct dentry *parent)
 82{
 83	/*
 84	 * The top level array uses the "global_ops", and the files are
 85	 * created on boot up.
 86	 */
 87	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 88		return 0;
 89
 90	if (!tr->ops)
 91		return -EINVAL;
 92
 93	ftrace_create_filter_files(tr->ops, parent);
 94
 95	return 0;
 96}
 97
 98void ftrace_destroy_function_files(struct trace_array *tr)
 99{
100	ftrace_destroy_filter_files(tr->ops);
101	ftrace_free_ftrace_ops(tr);
102}
103
104static ftrace_func_t select_trace_function(u32 flags_val)
105{
106	switch (flags_val & TRACE_FUNC_OPT_MASK) {
107	case TRACE_FUNC_NO_OPTS:
108		return function_trace_call;
109	case TRACE_FUNC_OPT_STACK:
110		return function_stack_trace_call;
111	case TRACE_FUNC_OPT_NO_REPEATS:
112		return function_no_repeats_trace_call;
113	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
114		return function_stack_no_repeats_trace_call;
115	default:
116		return NULL;
117	}
118}
119
120static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
121{
122	if (!tr->last_func_repeats &&
123	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
124		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
125		if (!tr->last_func_repeats)
126			return false;
127	}
128
129	return true;
130}
131
132static int function_trace_init(struct trace_array *tr)
133{
134	ftrace_func_t func;
135	/*
136	 * Instance trace_arrays get their ops allocated
137	 * at instance creation. Unless it failed
138	 * the allocation.
139	 */
140	if (!tr->ops)
141		return -ENOMEM;
142
143	func = select_trace_function(func_flags.val);
144	if (!func)
145		return -EINVAL;
146
147	if (!handle_func_repeats(tr, func_flags.val))
148		return -ENOMEM;
149
150	ftrace_init_array_ops(tr, func);
151
152	tr->array_buffer.cpu = raw_smp_processor_id();
153
154	tracing_start_cmdline_record();
155	tracing_start_function_trace(tr);
156	return 0;
157}
158
159static void function_trace_reset(struct trace_array *tr)
160{
161	tracing_stop_function_trace(tr);
162	tracing_stop_cmdline_record();
163	ftrace_reset_array_ops(tr);
164}
165
166static void function_trace_start(struct trace_array *tr)
167{
168	tracing_reset_online_cpus(&tr->array_buffer);
169}
170
171static void
172function_trace_call(unsigned long ip, unsigned long parent_ip,
173		    struct ftrace_ops *op, struct ftrace_regs *fregs)
174{
175	struct trace_array *tr = op->private;
176	struct trace_array_cpu *data;
177	unsigned int trace_ctx;
178	int bit;
179	int cpu;
180
181	if (unlikely(!tr->function_enabled))
182		return;
183
184	bit = ftrace_test_recursion_trylock(ip, parent_ip);
185	if (bit < 0)
186		return;
187
188	trace_ctx = tracing_gen_ctx();
189	preempt_disable_notrace();
190
191	cpu = smp_processor_id();
192	data = per_cpu_ptr(tr->array_buffer.data, cpu);
193	if (!atomic_read(&data->disabled))
194		trace_function(tr, ip, parent_ip, trace_ctx);
195
196	ftrace_test_recursion_unlock(bit);
197	preempt_enable_notrace();
198}
199
200#ifdef CONFIG_UNWINDER_ORC
201/*
202 * Skip 2:
203 *
204 *   function_stack_trace_call()
205 *   ftrace_call()
206 */
207#define STACK_SKIP 2
208#else
209/*
210 * Skip 3:
211 *   __trace_stack()
212 *   function_stack_trace_call()
213 *   ftrace_call()
214 */
215#define STACK_SKIP 3
216#endif
217
218static void
219function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
220			  struct ftrace_ops *op, struct ftrace_regs *fregs)
221{
222	struct trace_array *tr = op->private;
223	struct trace_array_cpu *data;
224	unsigned long flags;
225	long disabled;
226	int cpu;
227	unsigned int trace_ctx;
228
229	if (unlikely(!tr->function_enabled))
230		return;
231
232	/*
233	 * Need to use raw, since this must be called before the
234	 * recursive protection is performed.
235	 */
236	local_irq_save(flags);
237	cpu = raw_smp_processor_id();
238	data = per_cpu_ptr(tr->array_buffer.data, cpu);
239	disabled = atomic_inc_return(&data->disabled);
240
241	if (likely(disabled == 1)) {
242		trace_ctx = tracing_gen_ctx_flags(flags);
243		trace_function(tr, ip, parent_ip, trace_ctx);
244		__trace_stack(tr, trace_ctx, STACK_SKIP);
245	}
246
247	atomic_dec(&data->disabled);
248	local_irq_restore(flags);
249}
250
251static inline bool is_repeat_check(struct trace_array *tr,
252				   struct trace_func_repeats *last_info,
253				   unsigned long ip, unsigned long parent_ip)
254{
255	if (last_info->ip == ip &&
256	    last_info->parent_ip == parent_ip &&
257	    last_info->count < U16_MAX) {
258		last_info->ts_last_call =
259			ring_buffer_time_stamp(tr->array_buffer.buffer);
260		last_info->count++;
261		return true;
262	}
263
264	return false;
265}
266
267static inline void process_repeats(struct trace_array *tr,
268				   unsigned long ip, unsigned long parent_ip,
269				   struct trace_func_repeats *last_info,
270				   unsigned int trace_ctx)
271{
272	if (last_info->count) {
273		trace_last_func_repeats(tr, last_info, trace_ctx);
274		last_info->count = 0;
275	}
276
277	last_info->ip = ip;
278	last_info->parent_ip = parent_ip;
279}
280
281static void
282function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
283			       struct ftrace_ops *op,
284			       struct ftrace_regs *fregs)
285{
286	struct trace_func_repeats *last_info;
287	struct trace_array *tr = op->private;
288	struct trace_array_cpu *data;
289	unsigned int trace_ctx;
290	unsigned long flags;
291	int bit;
292	int cpu;
293
294	if (unlikely(!tr->function_enabled))
295		return;
296
297	bit = ftrace_test_recursion_trylock(ip, parent_ip);
298	if (bit < 0)
299		return;
300
301	preempt_disable_notrace();
302
303	cpu = smp_processor_id();
304	data = per_cpu_ptr(tr->array_buffer.data, cpu);
305	if (atomic_read(&data->disabled))
306		goto out;
307
308	/*
309	 * An interrupt may happen at any place here. But as far as I can see,
310	 * the only damage that this can cause is to mess up the repetition
311	 * counter without valuable data being lost.
312	 * TODO: think about a solution that is better than just hoping to be
313	 * lucky.
314	 */
315	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
316	if (is_repeat_check(tr, last_info, ip, parent_ip))
317		goto out;
318
319	local_save_flags(flags);
320	trace_ctx = tracing_gen_ctx_flags(flags);
321	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
322
323	trace_function(tr, ip, parent_ip, trace_ctx);
324
325out:
326	ftrace_test_recursion_unlock(bit);
327	preempt_enable_notrace();
328}
329
330static void
331function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
332				     struct ftrace_ops *op,
333				     struct ftrace_regs *fregs)
334{
335	struct trace_func_repeats *last_info;
336	struct trace_array *tr = op->private;
337	struct trace_array_cpu *data;
338	unsigned long flags;
339	long disabled;
340	int cpu;
341	unsigned int trace_ctx;
342
343	if (unlikely(!tr->function_enabled))
344		return;
345
346	/*
347	 * Need to use raw, since this must be called before the
348	 * recursive protection is performed.
349	 */
350	local_irq_save(flags);
351	cpu = raw_smp_processor_id();
352	data = per_cpu_ptr(tr->array_buffer.data, cpu);
353	disabled = atomic_inc_return(&data->disabled);
354
355	if (likely(disabled == 1)) {
356		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
357		if (is_repeat_check(tr, last_info, ip, parent_ip))
358			goto out;
359
360		trace_ctx = tracing_gen_ctx_flags(flags);
361		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
362
363		trace_function(tr, ip, parent_ip, trace_ctx);
364		__trace_stack(tr, trace_ctx, STACK_SKIP);
365	}
366
367 out:
368	atomic_dec(&data->disabled);
369	local_irq_restore(flags);
370}
371
372static struct tracer_opt func_opts[] = {
373#ifdef CONFIG_STACKTRACE
374	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
375#endif
376	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
377	{ } /* Always set a last empty entry */
378};
379
380static struct tracer_flags func_flags = {
381	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
382	.opts = func_opts
383};
384
385static void tracing_start_function_trace(struct trace_array *tr)
386{
387	tr->function_enabled = 0;
388	register_ftrace_function(tr->ops);
389	tr->function_enabled = 1;
390}
391
392static void tracing_stop_function_trace(struct trace_array *tr)
393{
394	tr->function_enabled = 0;
395	unregister_ftrace_function(tr->ops);
396}
397
398static struct tracer function_trace;
399
400static int
401func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
402{
403	ftrace_func_t func;
404	u32 new_flags;
405
406	/* Do nothing if already set. */
407	if (!!set == !!(func_flags.val & bit))
408		return 0;
409
410	/* We can change this flag only when not running. */
411	if (tr->current_trace != &function_trace)
412		return 0;
413
414	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
415	func = select_trace_function(new_flags);
416	if (!func)
417		return -EINVAL;
418
419	/* Check if there's anything to change. */
420	if (tr->ops->func == func)
421		return 0;
422
423	if (!handle_func_repeats(tr, new_flags))
424		return -ENOMEM;
425
426	unregister_ftrace_function(tr->ops);
427	tr->ops->func = func;
428	register_ftrace_function(tr->ops);
429
430	return 0;
431}
432
433static struct tracer function_trace __tracer_data =
434{
435	.name		= "function",
436	.init		= function_trace_init,
437	.reset		= function_trace_reset,
438	.start		= function_trace_start,
439	.flags		= &func_flags,
440	.set_flag	= func_set_flag,
441	.allow_instances = true,
442#ifdef CONFIG_FTRACE_SELFTEST
443	.selftest	= trace_selftest_startup_function,
444#endif
445};
446
447#ifdef CONFIG_DYNAMIC_FTRACE
448static void update_traceon_count(struct ftrace_probe_ops *ops,
449				 unsigned long ip,
450				 struct trace_array *tr, bool on,
451				 void *data)
452{
453	struct ftrace_func_mapper *mapper = data;
454	long *count;
455	long old_count;
456
457	/*
458	 * Tracing gets disabled (or enabled) once per count.
459	 * This function can be called at the same time on multiple CPUs.
460	 * It is fine if both disable (or enable) tracing, as disabling
461	 * (or enabling) the second time doesn't do anything as the
462	 * state of the tracer is already disabled (or enabled).
463	 * What needs to be synchronized in this case is that the count
464	 * only gets decremented once, even if the tracer is disabled
465	 * (or enabled) twice, as the second one is really a nop.
466	 *
467	 * The memory barriers guarantee that we only decrement the
468	 * counter once. First the count is read to a local variable
469	 * and a read barrier is used to make sure that it is loaded
470	 * before checking if the tracer is in the state we want.
471	 * If the tracer is not in the state we want, then the count
472	 * is guaranteed to be the old count.
473	 *
474	 * Next the tracer is set to the state we want (disabled or enabled)
475	 * then a write memory barrier is used to make sure that
476	 * the new state is visible before changing the counter by
477	 * one minus the old counter. This guarantees that another CPU
478	 * executing this code will see the new state before seeing
479	 * the new counter value, and would not do anything if the new
480	 * counter is seen.
481	 *
482	 * Note, there is no synchronization between this and a user
483	 * setting the tracing_on file. But we currently don't care
484	 * about that.
485	 */
486	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
487	old_count = *count;
488
489	if (old_count <= 0)
490		return;
491
492	/* Make sure we see count before checking tracing state */
493	smp_rmb();
494
495	if (on == !!tracer_tracing_is_on(tr))
496		return;
497
498	if (on)
499		tracer_tracing_on(tr);
500	else
501		tracer_tracing_off(tr);
502
503	/* Make sure tracing state is visible before updating count */
504	smp_wmb();
505
506	*count = old_count - 1;
507}
508
509static void
510ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
511		     struct trace_array *tr, struct ftrace_probe_ops *ops,
512		     void *data)
513{
514	update_traceon_count(ops, ip, tr, 1, data);
515}
516
517static void
518ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
519		      struct trace_array *tr, struct ftrace_probe_ops *ops,
520		      void *data)
521{
522	update_traceon_count(ops, ip, tr, 0, data);
523}
524
525static void
526ftrace_traceon(unsigned long ip, unsigned long parent_ip,
527	       struct trace_array *tr, struct ftrace_probe_ops *ops,
528	       void *data)
529{
530	if (tracer_tracing_is_on(tr))
531		return;
532
533	tracer_tracing_on(tr);
534}
535
536static void
537ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
538		struct trace_array *tr, struct ftrace_probe_ops *ops,
539		void *data)
540{
541	if (!tracer_tracing_is_on(tr))
542		return;
543
544	tracer_tracing_off(tr);
545}
546
547#ifdef CONFIG_UNWINDER_ORC
548/*
549 * Skip 3:
550 *
551 *   function_trace_probe_call()
552 *   ftrace_ops_assist_func()
553 *   ftrace_call()
554 */
555#define FTRACE_STACK_SKIP 3
556#else
557/*
558 * Skip 5:
559 *
560 *   __trace_stack()
561 *   ftrace_stacktrace()
562 *   function_trace_probe_call()
563 *   ftrace_ops_assist_func()
564 *   ftrace_call()
565 */
566#define FTRACE_STACK_SKIP 5
567#endif
568
569static __always_inline void trace_stack(struct trace_array *tr)
570{
571	unsigned int trace_ctx;
572
573	trace_ctx = tracing_gen_ctx();
574
575	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
576}
577
578static void
579ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
580		  struct trace_array *tr, struct ftrace_probe_ops *ops,
581		  void *data)
582{
583	trace_stack(tr);
584}
585
586static void
587ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
588			struct trace_array *tr, struct ftrace_probe_ops *ops,
589			void *data)
590{
591	struct ftrace_func_mapper *mapper = data;
592	long *count;
593	long old_count;
594	long new_count;
595
596	if (!tracing_is_on())
597		return;
598
599	/* unlimited? */
600	if (!mapper) {
601		trace_stack(tr);
602		return;
603	}
604
605	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
606
607	/*
608	 * Stack traces should only execute the number of times the
609	 * user specified in the counter.
610	 */
611	do {
612		old_count = *count;
613
614		if (!old_count)
615			return;
616
617		new_count = old_count - 1;
618		new_count = cmpxchg(count, old_count, new_count);
619		if (new_count == old_count)
620			trace_stack(tr);
621
622		if (!tracing_is_on())
623			return;
624
625	} while (new_count != old_count);
626}
627
628static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
629			void *data)
630{
631	struct ftrace_func_mapper *mapper = data;
632	long *count = NULL;
633
634	if (mapper)
635		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
636
637	if (count) {
638		if (*count <= 0)
639			return 0;
640		(*count)--;
641	}
642
643	return 1;
644}
645
646static void
647ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
648		  struct trace_array *tr, struct ftrace_probe_ops *ops,
649		  void *data)
650{
651	if (update_count(ops, ip, data))
652		ftrace_dump(DUMP_ALL);
653}
654
655/* Only dump the current CPU buffer. */
656static void
657ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
658		     struct trace_array *tr, struct ftrace_probe_ops *ops,
659		     void *data)
660{
661	if (update_count(ops, ip, data))
662		ftrace_dump(DUMP_ORIG);
663}
664
665static int
666ftrace_probe_print(const char *name, struct seq_file *m,
667		   unsigned long ip, struct ftrace_probe_ops *ops,
668		   void *data)
669{
670	struct ftrace_func_mapper *mapper = data;
671	long *count = NULL;
672
673	seq_printf(m, "%ps:%s", (void *)ip, name);
674
675	if (mapper)
676		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
677
678	if (count)
679		seq_printf(m, ":count=%ld\n", *count);
680	else
681		seq_puts(m, ":unlimited\n");
682
683	return 0;
684}
685
686static int
687ftrace_traceon_print(struct seq_file *m, unsigned long ip,
688		     struct ftrace_probe_ops *ops,
689		     void *data)
690{
691	return ftrace_probe_print("traceon", m, ip, ops, data);
692}
693
694static int
695ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
696			 struct ftrace_probe_ops *ops, void *data)
697{
698	return ftrace_probe_print("traceoff", m, ip, ops, data);
699}
700
701static int
702ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
703			struct ftrace_probe_ops *ops, void *data)
704{
705	return ftrace_probe_print("stacktrace", m, ip, ops, data);
706}
707
708static int
709ftrace_dump_print(struct seq_file *m, unsigned long ip,
710			struct ftrace_probe_ops *ops, void *data)
711{
712	return ftrace_probe_print("dump", m, ip, ops, data);
713}
714
715static int
716ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
717			struct ftrace_probe_ops *ops, void *data)
718{
719	return ftrace_probe_print("cpudump", m, ip, ops, data);
720}
721
722
723static int
724ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
725		  unsigned long ip, void *init_data, void **data)
726{
727	struct ftrace_func_mapper *mapper = *data;
728
729	if (!mapper) {
730		mapper = allocate_ftrace_func_mapper();
731		if (!mapper)
732			return -ENOMEM;
733		*data = mapper;
734	}
735
736	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
737}
738
739static void
740ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
741		  unsigned long ip, void *data)
742{
743	struct ftrace_func_mapper *mapper = data;
744
745	if (!ip) {
746		free_ftrace_func_mapper(mapper, NULL);
747		return;
748	}
749
750	ftrace_func_mapper_remove_ip(mapper, ip);
751}
752
753static struct ftrace_probe_ops traceon_count_probe_ops = {
754	.func			= ftrace_traceon_count,
755	.print			= ftrace_traceon_print,
756	.init			= ftrace_count_init,
757	.free			= ftrace_count_free,
758};
759
760static struct ftrace_probe_ops traceoff_count_probe_ops = {
761	.func			= ftrace_traceoff_count,
762	.print			= ftrace_traceoff_print,
763	.init			= ftrace_count_init,
764	.free			= ftrace_count_free,
765};
766
767static struct ftrace_probe_ops stacktrace_count_probe_ops = {
768	.func			= ftrace_stacktrace_count,
769	.print			= ftrace_stacktrace_print,
770	.init			= ftrace_count_init,
771	.free			= ftrace_count_free,
772};
773
774static struct ftrace_probe_ops dump_probe_ops = {
775	.func			= ftrace_dump_probe,
776	.print			= ftrace_dump_print,
777	.init			= ftrace_count_init,
778	.free			= ftrace_count_free,
779};
780
781static struct ftrace_probe_ops cpudump_probe_ops = {
782	.func			= ftrace_cpudump_probe,
783	.print			= ftrace_cpudump_print,
784};
785
786static struct ftrace_probe_ops traceon_probe_ops = {
787	.func			= ftrace_traceon,
788	.print			= ftrace_traceon_print,
789};
790
791static struct ftrace_probe_ops traceoff_probe_ops = {
792	.func			= ftrace_traceoff,
793	.print			= ftrace_traceoff_print,
794};
795
796static struct ftrace_probe_ops stacktrace_probe_ops = {
797	.func			= ftrace_stacktrace,
798	.print			= ftrace_stacktrace_print,
799};
800
801static int
802ftrace_trace_probe_callback(struct trace_array *tr,
803			    struct ftrace_probe_ops *ops,
804			    struct ftrace_hash *hash, char *glob,
805			    char *cmd, char *param, int enable)
806{
807	void *count = (void *)-1;
808	char *number;
809	int ret;
810
811	/* hash funcs only work with set_ftrace_filter */
812	if (!enable)
813		return -EINVAL;
814
815	if (glob[0] == '!')
816		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
817
818	if (!param)
819		goto out_reg;
820
821	number = strsep(&param, ":");
822
823	if (!strlen(number))
824		goto out_reg;
825
826	/*
827	 * We use the callback data field (which is a pointer)
828	 * as our counter.
829	 */
830	ret = kstrtoul(number, 0, (unsigned long *)&count);
831	if (ret)
832		return ret;
833
834 out_reg:
835	ret = register_ftrace_function_probe(glob, tr, ops, count);
836
837	return ret < 0 ? ret : 0;
838}
839
840static int
841ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
842			    char *glob, char *cmd, char *param, int enable)
843{
844	struct ftrace_probe_ops *ops;
845
846	if (!tr)
847		return -ENODEV;
848
849	/* we register both traceon and traceoff to this callback */
850	if (strcmp(cmd, "traceon") == 0)
851		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
852	else
853		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
854
855	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
856					   param, enable);
857}
858
859static int
860ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
861			   char *glob, char *cmd, char *param, int enable)
862{
863	struct ftrace_probe_ops *ops;
864
865	if (!tr)
866		return -ENODEV;
867
868	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
869
870	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
871					   param, enable);
872}
873
874static int
875ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
876			   char *glob, char *cmd, char *param, int enable)
877{
878	struct ftrace_probe_ops *ops;
879
880	if (!tr)
881		return -ENODEV;
882
883	ops = &dump_probe_ops;
884
885	/* Only dump once. */
886	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
887					   "1", enable);
888}
889
890static int
891ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
892			   char *glob, char *cmd, char *param, int enable)
893{
894	struct ftrace_probe_ops *ops;
895
896	if (!tr)
897		return -ENODEV;
898
899	ops = &cpudump_probe_ops;
900
901	/* Only dump once. */
902	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
903					   "1", enable);
904}
905
906static struct ftrace_func_command ftrace_traceon_cmd = {
907	.name			= "traceon",
908	.func			= ftrace_trace_onoff_callback,
909};
910
911static struct ftrace_func_command ftrace_traceoff_cmd = {
912	.name			= "traceoff",
913	.func			= ftrace_trace_onoff_callback,
914};
915
916static struct ftrace_func_command ftrace_stacktrace_cmd = {
917	.name			= "stacktrace",
918	.func			= ftrace_stacktrace_callback,
919};
920
921static struct ftrace_func_command ftrace_dump_cmd = {
922	.name			= "dump",
923	.func			= ftrace_dump_callback,
924};
925
926static struct ftrace_func_command ftrace_cpudump_cmd = {
927	.name			= "cpudump",
928	.func			= ftrace_cpudump_callback,
929};
930
931static int __init init_func_cmd_traceon(void)
932{
933	int ret;
934
935	ret = register_ftrace_command(&ftrace_traceoff_cmd);
936	if (ret)
937		return ret;
938
939	ret = register_ftrace_command(&ftrace_traceon_cmd);
940	if (ret)
941		goto out_free_traceoff;
942
943	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
944	if (ret)
945		goto out_free_traceon;
946
947	ret = register_ftrace_command(&ftrace_dump_cmd);
948	if (ret)
949		goto out_free_stacktrace;
950
951	ret = register_ftrace_command(&ftrace_cpudump_cmd);
952	if (ret)
953		goto out_free_dump;
954
955	return 0;
956
957 out_free_dump:
958	unregister_ftrace_command(&ftrace_dump_cmd);
959 out_free_stacktrace:
960	unregister_ftrace_command(&ftrace_stacktrace_cmd);
961 out_free_traceon:
962	unregister_ftrace_command(&ftrace_traceon_cmd);
963 out_free_traceoff:
964	unregister_ftrace_command(&ftrace_traceoff_cmd);
965
966	return ret;
967}
968#else
969static inline int init_func_cmd_traceon(void)
970{
971	return 0;
972}
973#endif /* CONFIG_DYNAMIC_FTRACE */
974
975__init int init_function_trace(void)
976{
977	init_func_cmd_traceon();
978	return register_tracer(&function_trace);
979}
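The :count variants of the probes consume their budget with a lock-free cmpxchg() loop (see ftrace_stacktrace_count() in either version above): each caller reads the counter, and only the one whose compare-and-swap succeeds decrements it and fires the action, so a shared budget is never over-consumed when several CPUs hit the probe at once. Below is a stand-alone user-space sketch of that pattern, with GCC/Clang __atomic builtins standing in for the kernel's cmpxchg(); every name in it is illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Shared budget: -1 means unlimited, 0 means exhausted, like the probe counters. */
static long budget = 3;

/* Try to consume one unit; only the winner of the compare-and-swap decrements. */
static bool try_consume(long *counter)
{
	long old, new;

	do {
		old = __atomic_load_n(counter, __ATOMIC_RELAXED);
		if (old == 0)
			return false;	/* budget used up */
		if (old == -1)
			return true;	/* unlimited, never decremented */
		new = old - 1;
		/* On failure the loop retries with a freshly loaded value. */
	} while (!__atomic_compare_exchange_n(counter, &old, new, false,
					      __ATOMIC_RELAXED, __ATOMIC_RELAXED));
	return true;
}

int main(void)
{
	while (try_consume(&budget))
		printf("probe action fires\n");
	return 0;
}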