v4.6 (kernel/trace/trace_functions.c)
 
  1/*
  2 * ring buffer based function tracer
  3 *
  4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  6 *
  7 * Based on code from the latency_tracer, that is:
  8 *
  9 *  Copyright (C) 2004-2006 Ingo Molnar
 10 *  Copyright (C) 2004 Nadia Yvette Chambers
 11 */
 12#include <linux/ring_buffer.h>
 13#include <linux/debugfs.h>
 14#include <linux/uaccess.h>
 15#include <linux/ftrace.h>
 16#include <linux/slab.h>
 17#include <linux/fs.h>
 18
 19#include "trace.h"
 20
 21static void tracing_start_function_trace(struct trace_array *tr);
 22static void tracing_stop_function_trace(struct trace_array *tr);
 23static void
 24function_trace_call(unsigned long ip, unsigned long parent_ip,
 25		    struct ftrace_ops *op, struct pt_regs *pt_regs);
 26static void
 27function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 28			  struct ftrace_ops *op, struct pt_regs *pt_regs);
 29static struct tracer_flags func_flags;
 30
 31/* Our option */
 32enum {
 33	TRACE_FUNC_OPT_STACK	= 0x1,
 34};
 35
 36static int allocate_ftrace_ops(struct trace_array *tr)
 37{
 38	struct ftrace_ops *ops;
 39
 40	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 41	if (!ops)
 42		return -ENOMEM;
 43
  44	/* Currently only the non stack version is supported */
 45	ops->func = function_trace_call;
 46	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
 47
 48	tr->ops = ops;
 49	ops->private = tr;
 50	return 0;
 51}
 52
 53
 54int ftrace_create_function_files(struct trace_array *tr,
 55				 struct dentry *parent)
 56{
 57	int ret;
 58
 59	/*
 60	 * The top level array uses the "global_ops", and the files are
 61	 * created on boot up.
 62	 */
 63	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 64		return 0;
 65
 66	ret = allocate_ftrace_ops(tr);
 67	if (ret)
 68		return ret;
 69
 70	ftrace_create_filter_files(tr->ops, parent);
 71
 72	return 0;
 73}
 74
 75void ftrace_destroy_function_files(struct trace_array *tr)
 76{
 77	ftrace_destroy_filter_files(tr->ops);
 78	kfree(tr->ops);
 79	tr->ops = NULL;
 80}
 81
 82static int function_trace_init(struct trace_array *tr)
 83{
 84	ftrace_func_t func;
 85
 86	/*
 87	 * Instance trace_arrays get their ops allocated
 88	 * at instance creation. Unless it failed
 89	 * the allocation.
 90	 */
 91	if (!tr->ops)
 92		return -ENOMEM;
 93
 94	/* Currently only the global instance can do stack tracing */
 95	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
 96	    func_flags.val & TRACE_FUNC_OPT_STACK)
 97		func = function_stack_trace_call;
 98	else
 99		func = function_trace_call;
100
101	ftrace_init_array_ops(tr, func);
102
103	tr->trace_buffer.cpu = get_cpu();
104	put_cpu();
105
106	tracing_start_cmdline_record();
107	tracing_start_function_trace(tr);
108	return 0;
109}
110
111static void function_trace_reset(struct trace_array *tr)
112{
113	tracing_stop_function_trace(tr);
114	tracing_stop_cmdline_record();
115	ftrace_reset_array_ops(tr);
116}
117
118static void function_trace_start(struct trace_array *tr)
119{
120	tracing_reset_online_cpus(&tr->trace_buffer);
121}
122
123static void
124function_trace_call(unsigned long ip, unsigned long parent_ip,
125		    struct ftrace_ops *op, struct pt_regs *pt_regs)
126{
127	struct trace_array *tr = op->private;
128	struct trace_array_cpu *data;
129	unsigned long flags;
130	int bit;
131	int cpu;
132	int pc;
133
134	if (unlikely(!tr->function_enabled))
135		return;
136
137	pc = preempt_count();
138	preempt_disable_notrace();
139
140	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
141	if (bit < 0)
142		goto out;
143
144	cpu = smp_processor_id();
145	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
146	if (!atomic_read(&data->disabled)) {
147		local_save_flags(flags);
148		trace_function(tr, ip, parent_ip, flags, pc);
149	}
150	trace_clear_recursion(bit);
151
152 out:
153	preempt_enable_notrace();
154}
155
156static void
157function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
158			  struct ftrace_ops *op, struct pt_regs *pt_regs)
159{
160	struct trace_array *tr = op->private;
161	struct trace_array_cpu *data;
162	unsigned long flags;
163	long disabled;
164	int cpu;
165	int pc;
166
167	if (unlikely(!tr->function_enabled))
168		return;
169
170	/*
171	 * Need to use raw, since this must be called before the
172	 * recursive protection is performed.
173	 */
174	local_irq_save(flags);
175	cpu = raw_smp_processor_id();
176	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
177	disabled = atomic_inc_return(&data->disabled);
178
179	if (likely(disabled == 1)) {
180		pc = preempt_count();
181		trace_function(tr, ip, parent_ip, flags, pc);
182		/*
183		 * skip over 5 funcs:
184		 *    __ftrace_trace_stack,
185		 *    __trace_stack,
186		 *    function_stack_trace_call
187		 *    ftrace_list_func
188		 *    ftrace_call
189		 */
190		__trace_stack(tr, flags, 5, pc);
191	}
192
193	atomic_dec(&data->disabled);
194	local_irq_restore(flags);
195}
196
197static struct tracer_opt func_opts[] = {
198#ifdef CONFIG_STACKTRACE
199	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
200#endif
201	{ } /* Always set a last empty entry */
202};
203
204static struct tracer_flags func_flags = {
205	.val = 0, /* By default: all flags disabled */
206	.opts = func_opts
207};
208
209static void tracing_start_function_trace(struct trace_array *tr)
210{
211	tr->function_enabled = 0;
212	register_ftrace_function(tr->ops);
213	tr->function_enabled = 1;
214}
215
216static void tracing_stop_function_trace(struct trace_array *tr)
217{
218	tr->function_enabled = 0;
219	unregister_ftrace_function(tr->ops);
220}
221
222static struct tracer function_trace;
223
224static int
225func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
226{
227	switch (bit) {
228	case TRACE_FUNC_OPT_STACK:
229		/* do nothing if already set */
230		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
231			break;
232
233		/* We can change this flag when not running. */
234		if (tr->current_trace != &function_trace)
235			break;
236
237		unregister_ftrace_function(tr->ops);
238
239		if (set) {
240			tr->ops->func = function_stack_trace_call;
241			register_ftrace_function(tr->ops);
242		} else {
243			tr->ops->func = function_trace_call;
244			register_ftrace_function(tr->ops);
245		}
246
247		break;
248	default:
249		return -EINVAL;
250	}
251
252	return 0;
253}
254
255static struct tracer function_trace __tracer_data =
256{
257	.name		= "function",
258	.init		= function_trace_init,
259	.reset		= function_trace_reset,
260	.start		= function_trace_start,
261	.flags		= &func_flags,
262	.set_flag	= func_set_flag,
263	.allow_instances = true,
264#ifdef CONFIG_FTRACE_SELFTEST
265	.selftest	= trace_selftest_startup_function,
266#endif
267};
268
269#ifdef CONFIG_DYNAMIC_FTRACE
270static void update_traceon_count(void **data, bool on)
271{
272	long *count = (long *)data;
273	long old_count = *count;
274
275	/*
276	 * Tracing gets disabled (or enabled) once per count.
277	 * This function can be called at the same time on multiple CPUs.
278	 * It is fine if both disable (or enable) tracing, as disabling
279	 * (or enabling) the second time doesn't do anything as the
280	 * state of the tracer is already disabled (or enabled).
281	 * What needs to be synchronized in this case is that the count
282	 * only gets decremented once, even if the tracer is disabled
283	 * (or enabled) twice, as the second one is really a nop.
284	 *
285	 * The memory barriers guarantee that we only decrement the
286	 * counter once. First the count is read to a local variable
287	 * and a read barrier is used to make sure that it is loaded
288	 * before checking if the tracer is in the state we want.
289	 * If the tracer is not in the state we want, then the count
290	 * is guaranteed to be the old count.
291	 *
292	 * Next the tracer is set to the state we want (disabled or enabled)
293	 * then a write memory barrier is used to make sure that
294	 * the new state is visible before changing the counter by
295	 * one minus the old counter. This guarantees that another CPU
296	 * executing this code will see the new state before seeing
297	 * the new counter value, and would not do anything if the new
298	 * counter is seen.
299	 *
300	 * Note, there is no synchronization between this and a user
301	 * setting the tracing_on file. But we currently don't care
302	 * about that.
303	 */
304	if (!old_count)
305		return;
306
307	/* Make sure we see count before checking tracing state */
308	smp_rmb();
309
310	if (on == !!tracing_is_on())
311		return;
312
313	if (on)
314		tracing_on();
315	else
316		tracing_off();
317
318	/* unlimited? */
319	if (old_count == -1)
320		return;
321
322	/* Make sure tracing state is visible before updating count */
323	smp_wmb();
324
325	*count = old_count - 1;
326}
327
328static void
329ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
330{
331	update_traceon_count(data, 1);
332}
333
334static void
335ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
336{
337	update_traceon_count(data, 0);
338}
339
340static void
341ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
342{
343	if (tracing_is_on())
344		return;
345
346	tracing_on();
347}
348
349static void
350ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
351{
352	if (!tracing_is_on())
353		return;
354
355	tracing_off();
356}
357
358/*
359 * Skip 4:
360 *   ftrace_stacktrace()
361 *   function_trace_probe_call()
362 *   ftrace_ops_list_func()
363 *   ftrace_call()
364 */
365#define STACK_SKIP 4
366
367static void
368ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
369{
370	trace_dump_stack(STACK_SKIP);
371}
372
373static void
374ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
375{
376	long *count = (long *)data;
377	long old_count;
378	long new_count;
379
380	/*
381	 * Stack traces should only execute the number of times the
382	 * user specified in the counter.
383	 */
384	do {
385
386		if (!tracing_is_on())
387			return;
388
389		old_count = *count;
390
391		if (!old_count)
392			return;
393
394		/* unlimited? */
395		if (old_count == -1) {
396			trace_dump_stack(STACK_SKIP);
397			return;
398		}
399
400		new_count = old_count - 1;
401		new_count = cmpxchg(count, old_count, new_count);
402		if (new_count == old_count)
403			trace_dump_stack(STACK_SKIP);
404
405	} while (new_count != old_count);
406}
407
408static int update_count(void **data)
409{
410	unsigned long *count = (long *)data;
411
412	if (!*count)
413		return 0;
414
415	if (*count != -1)
416		(*count)--;
417
418	return 1;
419}
420
421static void
422ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
423{
424	if (update_count(data))
425		ftrace_dump(DUMP_ALL);
426}
427
428/* Only dump the current CPU buffer. */
429static void
430ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
431{
432	if (update_count(data))
433		ftrace_dump(DUMP_ORIG);
434}
435
436static int
437ftrace_probe_print(const char *name, struct seq_file *m,
438		   unsigned long ip, void *data)
439{
440	long count = (long)data;
441
442	seq_printf(m, "%ps:%s", (void *)ip, name);
443
444	if (count == -1)
445		seq_puts(m, ":unlimited\n");
446	else
447		seq_printf(m, ":count=%ld\n", count);
448
449	return 0;
450}
451
452static int
453ftrace_traceon_print(struct seq_file *m, unsigned long ip,
454			 struct ftrace_probe_ops *ops, void *data)
455{
456	return ftrace_probe_print("traceon", m, ip, data);
457}
458
459static int
460ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
461			 struct ftrace_probe_ops *ops, void *data)
462{
463	return ftrace_probe_print("traceoff", m, ip, data);
464}
465
466static int
467ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
468			struct ftrace_probe_ops *ops, void *data)
469{
470	return ftrace_probe_print("stacktrace", m, ip, data);
471}
472
473static int
474ftrace_dump_print(struct seq_file *m, unsigned long ip,
475			struct ftrace_probe_ops *ops, void *data)
476{
477	return ftrace_probe_print("dump", m, ip, data);
478}
479
480static int
481ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
482			struct ftrace_probe_ops *ops, void *data)
483{
484	return ftrace_probe_print("cpudump", m, ip, data);
485}
486
487static struct ftrace_probe_ops traceon_count_probe_ops = {
488	.func			= ftrace_traceon_count,
489	.print			= ftrace_traceon_print,
490};
491
492static struct ftrace_probe_ops traceoff_count_probe_ops = {
493	.func			= ftrace_traceoff_count,
494	.print			= ftrace_traceoff_print,
495};
496
497static struct ftrace_probe_ops stacktrace_count_probe_ops = {
498	.func			= ftrace_stacktrace_count,
499	.print			= ftrace_stacktrace_print,
500};
501
502static struct ftrace_probe_ops dump_probe_ops = {
503	.func			= ftrace_dump_probe,
504	.print			= ftrace_dump_print,
505};
506
507static struct ftrace_probe_ops cpudump_probe_ops = {
508	.func			= ftrace_cpudump_probe,
509	.print			= ftrace_cpudump_print,
510};
511
512static struct ftrace_probe_ops traceon_probe_ops = {
513	.func			= ftrace_traceon,
514	.print			= ftrace_traceon_print,
515};
516
517static struct ftrace_probe_ops traceoff_probe_ops = {
518	.func			= ftrace_traceoff,
519	.print			= ftrace_traceoff_print,
520};
521
522static struct ftrace_probe_ops stacktrace_probe_ops = {
523	.func			= ftrace_stacktrace,
524	.print			= ftrace_stacktrace_print,
525};
526
527static int
528ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
529			    struct ftrace_hash *hash, char *glob,
530			    char *cmd, char *param, int enable)
531{
532	void *count = (void *)-1;
533	char *number;
534	int ret;
535
536	/* hash funcs only work with set_ftrace_filter */
537	if (!enable)
538		return -EINVAL;
539
540	if (glob[0] == '!') {
541		unregister_ftrace_function_probe_func(glob+1, ops);
542		return 0;
543	}
544
545	if (!param)
546		goto out_reg;
547
548	number = strsep(&param, ":");
549
550	if (!strlen(number))
551		goto out_reg;
552
553	/*
554	 * We use the callback data field (which is a pointer)
555	 * as our counter.
556	 */
557	ret = kstrtoul(number, 0, (unsigned long *)&count);
558	if (ret)
559		return ret;
560
561 out_reg:
562	ret = register_ftrace_function_probe(glob, ops, count);
563
564	return ret < 0 ? ret : 0;
565}
566
567static int
568ftrace_trace_onoff_callback(struct ftrace_hash *hash,
569			    char *glob, char *cmd, char *param, int enable)
570{
571	struct ftrace_probe_ops *ops;
572
573	/* we register both traceon and traceoff to this callback */
574	if (strcmp(cmd, "traceon") == 0)
575		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
576	else
577		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
578
579	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
580					   param, enable);
581}
582
583static int
584ftrace_stacktrace_callback(struct ftrace_hash *hash,
585			   char *glob, char *cmd, char *param, int enable)
586{
587	struct ftrace_probe_ops *ops;
588
589	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
590
591	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
592					   param, enable);
593}
594
595static int
596ftrace_dump_callback(struct ftrace_hash *hash,
597			   char *glob, char *cmd, char *param, int enable)
598{
599	struct ftrace_probe_ops *ops;
600
601	ops = &dump_probe_ops;
602
603	/* Only dump once. */
604	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
605					   "1", enable);
606}
607
608static int
609ftrace_cpudump_callback(struct ftrace_hash *hash,
610			   char *glob, char *cmd, char *param, int enable)
611{
612	struct ftrace_probe_ops *ops;
613
614	ops = &cpudump_probe_ops;
615
616	/* Only dump once. */
617	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
618					   "1", enable);
619}
620
621static struct ftrace_func_command ftrace_traceon_cmd = {
622	.name			= "traceon",
623	.func			= ftrace_trace_onoff_callback,
624};
625
626static struct ftrace_func_command ftrace_traceoff_cmd = {
627	.name			= "traceoff",
628	.func			= ftrace_trace_onoff_callback,
629};
630
631static struct ftrace_func_command ftrace_stacktrace_cmd = {
632	.name			= "stacktrace",
633	.func			= ftrace_stacktrace_callback,
634};
635
636static struct ftrace_func_command ftrace_dump_cmd = {
637	.name			= "dump",
638	.func			= ftrace_dump_callback,
639};
640
641static struct ftrace_func_command ftrace_cpudump_cmd = {
642	.name			= "cpudump",
643	.func			= ftrace_cpudump_callback,
644};
645
646static int __init init_func_cmd_traceon(void)
647{
648	int ret;
649
650	ret = register_ftrace_command(&ftrace_traceoff_cmd);
651	if (ret)
652		return ret;
653
654	ret = register_ftrace_command(&ftrace_traceon_cmd);
655	if (ret)
656		goto out_free_traceoff;
657
658	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
659	if (ret)
660		goto out_free_traceon;
661
662	ret = register_ftrace_command(&ftrace_dump_cmd);
663	if (ret)
664		goto out_free_stacktrace;
665
666	ret = register_ftrace_command(&ftrace_cpudump_cmd);
667	if (ret)
668		goto out_free_dump;
669
670	return 0;
671
672 out_free_dump:
673	unregister_ftrace_command(&ftrace_dump_cmd);
674 out_free_stacktrace:
675	unregister_ftrace_command(&ftrace_stacktrace_cmd);
676 out_free_traceon:
677	unregister_ftrace_command(&ftrace_traceon_cmd);
678 out_free_traceoff:
679	unregister_ftrace_command(&ftrace_traceoff_cmd);
680
681	return ret;
682}
683#else
684static inline int init_func_cmd_traceon(void)
685{
686	return 0;
687}
688#endif /* CONFIG_DYNAMIC_FTRACE */
689
690static __init int init_function_trace(void)
691{
692	init_func_cmd_traceon();
693	return register_tracer(&function_trace);
694}
695core_initcall(init_function_trace);
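
The v4.6 listing above drives everything through one ftrace_ops whose callback uses the pt_regs-based signature and is attached with register_ftrace_function(). For orientation, here is a minimal, hedged sketch (not part of trace_functions.c) of a loadable module hooking that same v4.6-era interface; the my_* names, the "schedule" filter pattern and the ratelimited printk are assumptions made only for this example.

/*
 * Illustrative sketch only (not from trace_functions.c): a module that
 * registers an ftrace_ops callback with the v4.6-era pt_regs signature
 * shown above.  All my_* names are invented for the example.
 */
#include <linux/ftrace.h>
#include <linux/module.h>

static void my_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	/* Runs on every hit of the filtered functions; must not sleep. */
	pr_info_ratelimited("hit %ps <- %ps\n", (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops my_ops = {
	.func = my_trace_call,
	/*
	 * Unlike function_trace_call() above, this callback does not handle
	 * recursion itself, so FTRACE_OPS_FL_RECURSION_SAFE is left clear
	 * and ftrace adds its own recursion protection around the call.
	 */
};

static int __init my_probe_init(void)
{
	int ret;

	/* Restrict the callback to a single function, e.g. schedule(). */
	ret = ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}

static void __exit my_probe_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");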
v6.13.7 (kernel/trace/trace_functions.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * ring buffer based function tracer
   4 *
   5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
   7 *
   8 * Based on code from the latency_tracer, that is:
   9 *
  10 *  Copyright (C) 2004-2006 Ingo Molnar
  11 *  Copyright (C) 2004 Nadia Yvette Chambers
  12 */
  13#include <linux/ring_buffer.h>
  14#include <linux/debugfs.h>
  15#include <linux/uaccess.h>
  16#include <linux/ftrace.h>
  17#include <linux/slab.h>
  18#include <linux/fs.h>
  19
  20#include "trace.h"
  21
  22static void tracing_start_function_trace(struct trace_array *tr);
  23static void tracing_stop_function_trace(struct trace_array *tr);
  24static void
  25function_trace_call(unsigned long ip, unsigned long parent_ip,
  26		    struct ftrace_ops *op, struct ftrace_regs *fregs);
  27static void
  28function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
  29			  struct ftrace_ops *op, struct ftrace_regs *fregs);
  30static void
  31function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
  32			       struct ftrace_ops *op, struct ftrace_regs *fregs);
  33static void
  34function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
  35				     struct ftrace_ops *op,
  36				     struct ftrace_regs *fregs);
  37static struct tracer_flags func_flags;
  38
  39/* Our option */
  40enum {
  41
  42	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
  43	TRACE_FUNC_OPT_STACK		= 0x1,
  44	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,
  45
  46	/* Update this to next highest bit. */
  47	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
  48};
  49
  50#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
  51
  52int ftrace_allocate_ftrace_ops(struct trace_array *tr)
  53{
  54	struct ftrace_ops *ops;
  55
  56	/* The top level array uses the "global_ops" */
  57	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
  58		return 0;
  59
  60	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
  61	if (!ops)
  62		return -ENOMEM;
  63
  64	/* Currently only the non stack version is supported */
  65	ops->func = function_trace_call;
  66	ops->flags = FTRACE_OPS_FL_PID;
  67
  68	tr->ops = ops;
  69	ops->private = tr;
  70
  71	return 0;
  72}
  73
  74void ftrace_free_ftrace_ops(struct trace_array *tr)
  75{
  76	kfree(tr->ops);
  77	tr->ops = NULL;
  78}
  79
  80int ftrace_create_function_files(struct trace_array *tr,
  81				 struct dentry *parent)
  82{
  83	int ret;
  84	/*
  85	 * The top level array uses the "global_ops", and the files are
  86	 * created on boot up.
  87	 */
  88	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
  89		return 0;
  90
  91	if (!tr->ops)
  92		return -EINVAL;
  93
  94	ret = allocate_fgraph_ops(tr, tr->ops);
  95	if (ret) {
  96		kfree(tr->ops);
  97		return ret;
  98	}
  99
 100	ftrace_create_filter_files(tr->ops, parent);
 101
 102	return 0;
 103}
 104
 105void ftrace_destroy_function_files(struct trace_array *tr)
 106{
 107	ftrace_destroy_filter_files(tr->ops);
 108	ftrace_free_ftrace_ops(tr);
 109	free_fgraph_ops(tr);
 110}
 111
 112static ftrace_func_t select_trace_function(u32 flags_val)
 113{
 114	switch (flags_val & TRACE_FUNC_OPT_MASK) {
 115	case TRACE_FUNC_NO_OPTS:
 116		return function_trace_call;
 117	case TRACE_FUNC_OPT_STACK:
 118		return function_stack_trace_call;
 119	case TRACE_FUNC_OPT_NO_REPEATS:
 120		return function_no_repeats_trace_call;
 121	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
 122		return function_stack_no_repeats_trace_call;
 123	default:
 124		return NULL;
 125	}
 126}
 127
 128static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
 129{
 130	if (!tr->last_func_repeats &&
 131	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
 132		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
 133		if (!tr->last_func_repeats)
 134			return false;
 135	}
 136
 137	return true;
 138}
 139
 140static int function_trace_init(struct trace_array *tr)
 141{
 142	ftrace_func_t func;
 143	/*
 144	 * Instance trace_arrays get their ops allocated
 145	 * at instance creation. Unless it failed
 146	 * the allocation.
 147	 */
 148	if (!tr->ops)
 149		return -ENOMEM;
 150
 151	func = select_trace_function(func_flags.val);
 152	if (!func)
 153		return -EINVAL;
 154
 155	if (!handle_func_repeats(tr, func_flags.val))
 156		return -ENOMEM;
 157
 158	ftrace_init_array_ops(tr, func);
 159
 160	tr->array_buffer.cpu = raw_smp_processor_id();
 161
 162	tracing_start_cmdline_record();
 163	tracing_start_function_trace(tr);
 164	return 0;
 165}
 166
 167static void function_trace_reset(struct trace_array *tr)
 168{
 169	tracing_stop_function_trace(tr);
 170	tracing_stop_cmdline_record();
 171	ftrace_reset_array_ops(tr);
 172}
 173
 174static void function_trace_start(struct trace_array *tr)
 175{
 176	tracing_reset_online_cpus(&tr->array_buffer);
 177}
 178
 179/* fregs are guaranteed not to be NULL if HAVE_DYNAMIC_FTRACE_WITH_ARGS is set */
 180#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
 181static __always_inline unsigned long
 182function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
 183{
 184	unsigned long true_parent_ip;
 185	int idx = 0;
 186
 187	true_parent_ip = parent_ip;
 188	if (unlikely(parent_ip == (unsigned long)&return_to_handler) && fregs)
 189		true_parent_ip = ftrace_graph_ret_addr(current, &idx, parent_ip,
 190				(unsigned long *)ftrace_regs_get_stack_pointer(fregs));
 191	return true_parent_ip;
 192}
 193#else
 194static __always_inline unsigned long
 195function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
 196{
 197	return parent_ip;
 198}
 199#endif
 200
 201static void
 202function_trace_call(unsigned long ip, unsigned long parent_ip,
 203		    struct ftrace_ops *op, struct ftrace_regs *fregs)
 204{
 205	struct trace_array *tr = op->private;
 206	struct trace_array_cpu *data;
 207	unsigned int trace_ctx;
 208	int bit;
 209
 210	if (unlikely(!tr->function_enabled))
 211		return;
 212
 213	bit = ftrace_test_recursion_trylock(ip, parent_ip);
 214	if (bit < 0)
 215		return;
 216
 217	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
 218
 219	trace_ctx = tracing_gen_ctx_dec();
 220
 221	data = this_cpu_ptr(tr->array_buffer.data);
 222	if (!atomic_read(&data->disabled))
 223		trace_function(tr, ip, parent_ip, trace_ctx);
 224
 225	ftrace_test_recursion_unlock(bit);
 226}
 227
 228#ifdef CONFIG_UNWINDER_ORC
 229/*
 230 * Skip 2:
 231 *
 232 *   function_stack_trace_call()
 233 *   ftrace_call()
 234 */
 235#define STACK_SKIP 2
 236#else
 237/*
 238 * Skip 3:
 239 *   __trace_stack()
 240 *   function_stack_trace_call()
 241 *   ftrace_call()
 242 */
 243#define STACK_SKIP 3
 244#endif
 245
 246static void
 247function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 248			  struct ftrace_ops *op, struct ftrace_regs *fregs)
 249{
 250	struct trace_array *tr = op->private;
 251	struct trace_array_cpu *data;
 252	unsigned long flags;
 253	long disabled;
 254	int cpu;
 255	unsigned int trace_ctx;
 256	int skip = STACK_SKIP;
 257
 258	if (unlikely(!tr->function_enabled))
 259		return;
 260
 261	/*
 262	 * Need to use raw, since this must be called before the
 263	 * recursive protection is performed.
 264	 */
 265	local_irq_save(flags);
 266	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
 267	cpu = raw_smp_processor_id();
 268	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 269	disabled = atomic_inc_return(&data->disabled);
 270
 271	if (likely(disabled == 1)) {
 272		trace_ctx = tracing_gen_ctx_flags(flags);
 273		trace_function(tr, ip, parent_ip, trace_ctx);
 274#ifdef CONFIG_UNWINDER_FRAME_POINTER
 275		if (ftrace_pids_enabled(op))
 276			skip++;
 277#endif
 278		__trace_stack(tr, trace_ctx, skip);
 279	}
 280
 281	atomic_dec(&data->disabled);
 282	local_irq_restore(flags);
 283}
 284
 285static inline bool is_repeat_check(struct trace_array *tr,
 286				   struct trace_func_repeats *last_info,
 287				   unsigned long ip, unsigned long parent_ip)
 288{
 289	if (last_info->ip == ip &&
 290	    last_info->parent_ip == parent_ip &&
 291	    last_info->count < U16_MAX) {
 292		last_info->ts_last_call =
 293			ring_buffer_time_stamp(tr->array_buffer.buffer);
 294		last_info->count++;
 295		return true;
 296	}
 297
 298	return false;
 299}
 300
 301static inline void process_repeats(struct trace_array *tr,
 302				   unsigned long ip, unsigned long parent_ip,
 303				   struct trace_func_repeats *last_info,
 304				   unsigned int trace_ctx)
 305{
 306	if (last_info->count) {
 307		trace_last_func_repeats(tr, last_info, trace_ctx);
 308		last_info->count = 0;
 309	}
 310
 311	last_info->ip = ip;
 312	last_info->parent_ip = parent_ip;
 313}
 314
 315static void
 316function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 317			       struct ftrace_ops *op,
 318			       struct ftrace_regs *fregs)
 319{
 320	struct trace_func_repeats *last_info;
 321	struct trace_array *tr = op->private;
 322	struct trace_array_cpu *data;
 323	unsigned int trace_ctx;
 324	int bit;
 325
 326	if (unlikely(!tr->function_enabled))
 327		return;
 328
 329	bit = ftrace_test_recursion_trylock(ip, parent_ip);
 330	if (bit < 0)
 331		return;
 332
 333	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
 334	data = this_cpu_ptr(tr->array_buffer.data);
 335	if (atomic_read(&data->disabled))
 336		goto out;
 337
 338	/*
 339	 * An interrupt may happen at any place here. But as far as I can see,
 340	 * the only damage that this can cause is to mess up the repetition
 341	 * counter without valuable data being lost.
 342	 * TODO: think about a solution that is better than just hoping to be
 343	 * lucky.
 344	 */
 345	last_info = this_cpu_ptr(tr->last_func_repeats);
 346	if (is_repeat_check(tr, last_info, ip, parent_ip))
 347		goto out;
 348
 349	trace_ctx = tracing_gen_ctx_dec();
 350	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
 351
 352	trace_function(tr, ip, parent_ip, trace_ctx);
 353
 354out:
 355	ftrace_test_recursion_unlock(bit);
 356}
 357
 358static void
 359function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 360				     struct ftrace_ops *op,
 361				     struct ftrace_regs *fregs)
 362{
 363	struct trace_func_repeats *last_info;
 364	struct trace_array *tr = op->private;
 365	struct trace_array_cpu *data;
 366	unsigned long flags;
 367	long disabled;
 368	int cpu;
 369	unsigned int trace_ctx;
 370
 371	if (unlikely(!tr->function_enabled))
 372		return;
 373
 374	/*
 375	 * Need to use raw, since this must be called before the
 376	 * recursive protection is performed.
 377	 */
 378	local_irq_save(flags);
 379	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
 380	cpu = raw_smp_processor_id();
 381	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 382	disabled = atomic_inc_return(&data->disabled);
 383
 384	if (likely(disabled == 1)) {
 385		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
 386		if (is_repeat_check(tr, last_info, ip, parent_ip))
 387			goto out;
 388
 389		trace_ctx = tracing_gen_ctx_flags(flags);
 390		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
 391
 392		trace_function(tr, ip, parent_ip, trace_ctx);
 393		__trace_stack(tr, trace_ctx, STACK_SKIP);
 394	}
 395
 396 out:
 397	atomic_dec(&data->disabled);
 398	local_irq_restore(flags);
 399}
 400
 401static struct tracer_opt func_opts[] = {
 402#ifdef CONFIG_STACKTRACE
 403	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 404#endif
 405	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
 406	{ } /* Always set a last empty entry */
 407};
 408
 409static struct tracer_flags func_flags = {
 410	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
 411	.opts = func_opts
 412};
 413
 414static void tracing_start_function_trace(struct trace_array *tr)
 415{
 416	tr->function_enabled = 0;
 417	register_ftrace_function(tr->ops);
 418	tr->function_enabled = 1;
 419}
 420
 421static void tracing_stop_function_trace(struct trace_array *tr)
 422{
 423	tr->function_enabled = 0;
 424	unregister_ftrace_function(tr->ops);
 425}
 426
 427static struct tracer function_trace;
 428
 429static int
 430func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 431{
 432	ftrace_func_t func;
 433	u32 new_flags;
 434
 435	/* Do nothing if already set. */
 436	if (!!set == !!(func_flags.val & bit))
 437		return 0;
 438
 439	/* We can change this flag only when not running. */
 440	if (tr->current_trace != &function_trace)
 441		return 0;
 442
 443	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
 444	func = select_trace_function(new_flags);
 445	if (!func)
 446		return -EINVAL;
 447
 448	/* Check if there's anything to change. */
 449	if (tr->ops->func == func)
 450		return 0;
 451
 452	if (!handle_func_repeats(tr, new_flags))
 453		return -ENOMEM;
 454
 455	unregister_ftrace_function(tr->ops);
 456	tr->ops->func = func;
 457	register_ftrace_function(tr->ops);
 458
 459	return 0;
 460}
 461
 462static struct tracer function_trace __tracer_data =
 463{
 464	.name		= "function",
 465	.init		= function_trace_init,
 466	.reset		= function_trace_reset,
 467	.start		= function_trace_start,
 468	.flags		= &func_flags,
 469	.set_flag	= func_set_flag,
 470	.allow_instances = true,
 471#ifdef CONFIG_FTRACE_SELFTEST
 472	.selftest	= trace_selftest_startup_function,
 473#endif
 474};
 475
 476#ifdef CONFIG_DYNAMIC_FTRACE
 477static void update_traceon_count(struct ftrace_probe_ops *ops,
 478				 unsigned long ip,
 479				 struct trace_array *tr, bool on,
 480				 void *data)
 481{
 482	struct ftrace_func_mapper *mapper = data;
 483	long *count;
 484	long old_count;
 485
 486	/*
 487	 * Tracing gets disabled (or enabled) once per count.
 488	 * This function can be called at the same time on multiple CPUs.
 489	 * It is fine if both disable (or enable) tracing, as disabling
 490	 * (or enabling) the second time doesn't do anything as the
 491	 * state of the tracer is already disabled (or enabled).
 492	 * What needs to be synchronized in this case is that the count
 493	 * only gets decremented once, even if the tracer is disabled
 494	 * (or enabled) twice, as the second one is really a nop.
 495	 *
 496	 * The memory barriers guarantee that we only decrement the
 497	 * counter once. First the count is read to a local variable
 498	 * and a read barrier is used to make sure that it is loaded
 499	 * before checking if the tracer is in the state we want.
 500	 * If the tracer is not in the state we want, then the count
 501	 * is guaranteed to be the old count.
 502	 *
 503	 * Next the tracer is set to the state we want (disabled or enabled)
 504	 * then a write memory barrier is used to make sure that
 505	 * the new state is visible before changing the counter by
 506	 * one minus the old counter. This guarantees that another CPU
 507	 * executing this code will see the new state before seeing
 508	 * the new counter value, and would not do anything if the new
 509	 * counter is seen.
 510	 *
 511	 * Note, there is no synchronization between this and a user
 512	 * setting the tracing_on file. But we currently don't care
 513	 * about that.
 514	 */
 515	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 516	old_count = *count;
 517
 518	if (old_count <= 0)
 519		return;
 520
 521	/* Make sure we see count before checking tracing state */
 522	smp_rmb();
 523
 524	if (on == !!tracer_tracing_is_on(tr))
 525		return;
 526
 527	if (on)
 528		tracer_tracing_on(tr);
 529	else
 530		tracer_tracing_off(tr);
 531
 532	/* Make sure tracing state is visible before updating count */
 533	smp_wmb();
 534
 535	*count = old_count - 1;
 536}
 537
 538static void
 539ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
 540		     struct trace_array *tr, struct ftrace_probe_ops *ops,
 541		     void *data)
 542{
 543	update_traceon_count(ops, ip, tr, 1, data);
 544}
 545
 546static void
 547ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
 548		      struct trace_array *tr, struct ftrace_probe_ops *ops,
 549		      void *data)
 550{
 551	update_traceon_count(ops, ip, tr, 0, data);
 552}
 553
 554static void
 555ftrace_traceon(unsigned long ip, unsigned long parent_ip,
 556	       struct trace_array *tr, struct ftrace_probe_ops *ops,
 557	       void *data)
 558{
 559	if (tracer_tracing_is_on(tr))
 560		return;
 561
 562	tracer_tracing_on(tr);
 563}
 564
 565static void
 566ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
 567		struct trace_array *tr, struct ftrace_probe_ops *ops,
 568		void *data)
 569{
 570	if (!tracer_tracing_is_on(tr))
 571		return;
 572
 573	tracer_tracing_off(tr);
 574}
 575
 576#ifdef CONFIG_UNWINDER_ORC
 577/*
 578 * Skip 3:
 579 *
 580 *   function_trace_probe_call()
 581 *   ftrace_ops_assist_func()
 582 *   ftrace_call()
 583 */
 584#define FTRACE_STACK_SKIP 3
 585#else
 586/*
 587 * Skip 5:
 588 *
 589 *   __trace_stack()
 590 *   ftrace_stacktrace()
 591 *   function_trace_probe_call()
 592 *   ftrace_ops_assist_func()
 593 *   ftrace_call()
 594 */
 595#define FTRACE_STACK_SKIP 5
 596#endif
 597
 598static __always_inline void trace_stack(struct trace_array *tr)
 599{
 600	unsigned int trace_ctx;
 601
 602	trace_ctx = tracing_gen_ctx();
 603
 604	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
 605}
 606
 607static void
 608ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
 609		  struct trace_array *tr, struct ftrace_probe_ops *ops,
 610		  void *data)
 611{
 612	trace_stack(tr);
 613}
 614
 615static void
 616ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
 617			struct trace_array *tr, struct ftrace_probe_ops *ops,
 618			void *data)
 619{
 620	struct ftrace_func_mapper *mapper = data;
 621	long *count;
 622	long old_count;
 623	long new_count;
 624
 625	if (!tracing_is_on())
 626		return;
 627
 628	/* unlimited? */
 629	if (!mapper) {
 630		trace_stack(tr);
 631		return;
 632	}
 633
 634	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 635
 636	/*
 637	 * Stack traces should only execute the number of times the
 638	 * user specified in the counter.
 639	 */
 640	do {
 641		old_count = *count;
 642
 643		if (!old_count)
 644			return;
 645
 646		new_count = old_count - 1;
 647		new_count = cmpxchg(count, old_count, new_count);
 648		if (new_count == old_count)
 649			trace_stack(tr);
 650
 651		if (!tracing_is_on())
 652			return;
 653
 654	} while (new_count != old_count);
 655}
 656
 657static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
 658			void *data)
 659{
 660	struct ftrace_func_mapper *mapper = data;
 661	long *count = NULL;
 662
 663	if (mapper)
 664		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 665
 666	if (count) {
 667		if (*count <= 0)
 668			return 0;
 669		(*count)--;
 670	}
 671
 672	return 1;
 673}
 674
 675static void
 676ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
 677		  struct trace_array *tr, struct ftrace_probe_ops *ops,
 678		  void *data)
 679{
 680	if (update_count(ops, ip, data))
 681		ftrace_dump(DUMP_ALL);
 682}
 683
 684/* Only dump the current CPU buffer. */
 685static void
 686ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
 687		     struct trace_array *tr, struct ftrace_probe_ops *ops,
 688		     void *data)
 689{
 690	if (update_count(ops, ip, data))
 691		ftrace_dump(DUMP_ORIG);
 692}
 693
 694static int
 695ftrace_probe_print(const char *name, struct seq_file *m,
 696		   unsigned long ip, struct ftrace_probe_ops *ops,
 697		   void *data)
 698{
 699	struct ftrace_func_mapper *mapper = data;
 700	long *count = NULL;
 701
 702	seq_printf(m, "%ps:%s", (void *)ip, name);
 703
 704	if (mapper)
 705		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 706
 707	if (count)
 708		seq_printf(m, ":count=%ld\n", *count);
 709	else
 710		seq_puts(m, ":unlimited\n");
 711
 712	return 0;
 713}
 714
 715static int
 716ftrace_traceon_print(struct seq_file *m, unsigned long ip,
 717		     struct ftrace_probe_ops *ops,
 718		     void *data)
 719{
 720	return ftrace_probe_print("traceon", m, ip, ops, data);
 721}
 722
 723static int
 724ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
 725			 struct ftrace_probe_ops *ops, void *data)
 726{
 727	return ftrace_probe_print("traceoff", m, ip, ops, data);
 728}
 729
 730static int
 731ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
 732			struct ftrace_probe_ops *ops, void *data)
 733{
 734	return ftrace_probe_print("stacktrace", m, ip, ops, data);
 735}
 736
 737static int
 738ftrace_dump_print(struct seq_file *m, unsigned long ip,
 739			struct ftrace_probe_ops *ops, void *data)
 740{
 741	return ftrace_probe_print("dump", m, ip, ops, data);
 742}
 743
 744static int
 745ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
 746			struct ftrace_probe_ops *ops, void *data)
 747{
 748	return ftrace_probe_print("cpudump", m, ip, ops, data);
 749}
 750
 751
 752static int
 753ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
 754		  unsigned long ip, void *init_data, void **data)
 755{
 756	struct ftrace_func_mapper *mapper = *data;
 757
 758	if (!mapper) {
 759		mapper = allocate_ftrace_func_mapper();
 760		if (!mapper)
 761			return -ENOMEM;
 762		*data = mapper;
 763	}
 764
 765	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
 766}
 767
 768static void
 769ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
 770		  unsigned long ip, void *data)
 771{
 772	struct ftrace_func_mapper *mapper = data;
 773
 774	if (!ip) {
 775		free_ftrace_func_mapper(mapper, NULL);
 776		return;
 777	}
 778
 779	ftrace_func_mapper_remove_ip(mapper, ip);
 780}
 781
 782static struct ftrace_probe_ops traceon_count_probe_ops = {
 783	.func			= ftrace_traceon_count,
 784	.print			= ftrace_traceon_print,
 785	.init			= ftrace_count_init,
 786	.free			= ftrace_count_free,
 787};
 788
 789static struct ftrace_probe_ops traceoff_count_probe_ops = {
 790	.func			= ftrace_traceoff_count,
 791	.print			= ftrace_traceoff_print,
 792	.init			= ftrace_count_init,
 793	.free			= ftrace_count_free,
 794};
 795
 796static struct ftrace_probe_ops stacktrace_count_probe_ops = {
 797	.func			= ftrace_stacktrace_count,
 798	.print			= ftrace_stacktrace_print,
 799	.init			= ftrace_count_init,
 800	.free			= ftrace_count_free,
 801};
 802
 803static struct ftrace_probe_ops dump_probe_ops = {
 804	.func			= ftrace_dump_probe,
 805	.print			= ftrace_dump_print,
 806	.init			= ftrace_count_init,
 807	.free			= ftrace_count_free,
 808};
 809
 810static struct ftrace_probe_ops cpudump_probe_ops = {
 811	.func			= ftrace_cpudump_probe,
 812	.print			= ftrace_cpudump_print,
 813};
 814
 815static struct ftrace_probe_ops traceon_probe_ops = {
 816	.func			= ftrace_traceon,
 817	.print			= ftrace_traceon_print,
 818};
 819
 820static struct ftrace_probe_ops traceoff_probe_ops = {
 821	.func			= ftrace_traceoff,
 822	.print			= ftrace_traceoff_print,
 823};
 824
 825static struct ftrace_probe_ops stacktrace_probe_ops = {
 826	.func			= ftrace_stacktrace,
 827	.print			= ftrace_stacktrace_print,
 828};
 829
 830static int
 831ftrace_trace_probe_callback(struct trace_array *tr,
 832			    struct ftrace_probe_ops *ops,
 833			    struct ftrace_hash *hash, char *glob,
 834			    char *cmd, char *param, int enable)
 835{
 836	void *count = (void *)-1;
 837	char *number;
 838	int ret;
 839
 840	/* hash funcs only work with set_ftrace_filter */
 841	if (!enable)
 842		return -EINVAL;
 843
 844	if (glob[0] == '!')
 845		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
 846
 847	if (!param)
 848		goto out_reg;
 849
 850	number = strsep(&param, ":");
 851
 852	if (!strlen(number))
 853		goto out_reg;
 854
 855	/*
 856	 * We use the callback data field (which is a pointer)
 857	 * as our counter.
 858	 */
 859	ret = kstrtoul(number, 0, (unsigned long *)&count);
 860	if (ret)
 861		return ret;
 862
 863 out_reg:
 864	ret = register_ftrace_function_probe(glob, tr, ops, count);
 865
 866	return ret < 0 ? ret : 0;
 867}
 868
 869static int
 870ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
 871			    char *glob, char *cmd, char *param, int enable)
 872{
 873	struct ftrace_probe_ops *ops;
 874
 875	if (!tr)
 876		return -ENODEV;
 877
 878	/* we register both traceon and traceoff to this callback */
 879	if (strcmp(cmd, "traceon") == 0)
 880		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
 881	else
 882		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
 883
 884	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
 885					   param, enable);
 886}
 887
 888static int
 889ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
 890			   char *glob, char *cmd, char *param, int enable)
 891{
 892	struct ftrace_probe_ops *ops;
 893
 894	if (!tr)
 895		return -ENODEV;
 896
 897	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
 898
 899	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
 900					   param, enable);
 901}
 902
 903static int
 904ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
 905			   char *glob, char *cmd, char *param, int enable)
 906{
 907	struct ftrace_probe_ops *ops;
 908
 909	if (!tr)
 910		return -ENODEV;
 911
 912	ops = &dump_probe_ops;
 913
 914	/* Only dump once. */
 915	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
 916					   "1", enable);
 917}
 918
 919static int
 920ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
 921			   char *glob, char *cmd, char *param, int enable)
 922{
 923	struct ftrace_probe_ops *ops;
 924
 925	if (!tr)
 926		return -ENODEV;
 927
 928	ops = &cpudump_probe_ops;
 929
 930	/* Only dump once. */
 931	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
 932					   "1", enable);
 933}
 934
 935static struct ftrace_func_command ftrace_traceon_cmd = {
 936	.name			= "traceon",
 937	.func			= ftrace_trace_onoff_callback,
 938};
 939
 940static struct ftrace_func_command ftrace_traceoff_cmd = {
 941	.name			= "traceoff",
 942	.func			= ftrace_trace_onoff_callback,
 943};
 944
 945static struct ftrace_func_command ftrace_stacktrace_cmd = {
 946	.name			= "stacktrace",
 947	.func			= ftrace_stacktrace_callback,
 948};
 949
 950static struct ftrace_func_command ftrace_dump_cmd = {
 951	.name			= "dump",
 952	.func			= ftrace_dump_callback,
 953};
 954
 955static struct ftrace_func_command ftrace_cpudump_cmd = {
 956	.name			= "cpudump",
 957	.func			= ftrace_cpudump_callback,
 958};
 959
 960static int __init init_func_cmd_traceon(void)
 961{
 962	int ret;
 963
 964	ret = register_ftrace_command(&ftrace_traceoff_cmd);
 965	if (ret)
 966		return ret;
 967
 968	ret = register_ftrace_command(&ftrace_traceon_cmd);
 969	if (ret)
 970		goto out_free_traceoff;
 971
 972	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
 973	if (ret)
 974		goto out_free_traceon;
 975
 976	ret = register_ftrace_command(&ftrace_dump_cmd);
 977	if (ret)
 978		goto out_free_stacktrace;
 979
 980	ret = register_ftrace_command(&ftrace_cpudump_cmd);
 981	if (ret)
 982		goto out_free_dump;
 983
 984	return 0;
 985
 986 out_free_dump:
 987	unregister_ftrace_command(&ftrace_dump_cmd);
 988 out_free_stacktrace:
 989	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 990 out_free_traceon:
 991	unregister_ftrace_command(&ftrace_traceon_cmd);
 992 out_free_traceoff:
 993	unregister_ftrace_command(&ftrace_traceoff_cmd);
 994
 995	return ret;
 996}
 997#else
 998static inline int init_func_cmd_traceon(void)
 999{
1000	return 0;
1001}
1002#endif /* CONFIG_DYNAMIC_FTRACE */
1003
1004__init int init_function_trace(void)
1005{
1006	init_func_cmd_traceon();
1007	return register_tracer(&function_trace);
1008}
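
Relative to v4.6, the v6.13.7 listing above changes the callback signature from struct pt_regs to struct ftrace_regs and replaces the open-coded preempt/recursion handling with ftrace_test_recursion_trylock()/ftrace_test_recursion_unlock(). A hedged sketch of the same kind of out-of-tree callback adapted to the newer shape (again, the my_* names and the "schedule" filter are invented for the example; this is not part of trace_functions.c):

/*
 * Illustrative sketch only (not from trace_functions.c): a module callback
 * using the modern ftrace_regs signature and the same recursion guard as
 * function_trace_call() above.  All my_* names are invented.
 */
#include <linux/ftrace.h>
#include <linux/module.h>

static void my_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	/* Per-context recursion guard, as used by function_trace_call(). */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	pr_info_ratelimited("hit %ps <- %ps\n", (void *)ip, (void *)parent_ip);

	ftrace_test_recursion_unlock(bit);
}

static struct ftrace_ops my_ops = {
	.func = my_trace_call,
};

static int __init my_probe_init(void)
{
	int ret;

	/* Narrow the hook to one function, then register, as in the v4.6 sketch. */
	ret = ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}

static void __exit my_probe_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");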