Linux v3.15: kernel/trace/trace_functions.c
 
  1/*
  2 * ring buffer based function tracer
  3 *
  4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  6 *
  7 * Based on code from the latency_tracer, that is:
  8 *
  9 *  Copyright (C) 2004-2006 Ingo Molnar
 10 *  Copyright (C) 2004 Nadia Yvette Chambers
 11 */
 12#include <linux/ring_buffer.h>
 13#include <linux/debugfs.h>
 14#include <linux/uaccess.h>
 15#include <linux/ftrace.h>
 16#include <linux/slab.h>
 17#include <linux/fs.h>
 18
 19#include "trace.h"
 20
 21static void tracing_start_function_trace(struct trace_array *tr);
 22static void tracing_stop_function_trace(struct trace_array *tr);
 23static void
 24function_trace_call(unsigned long ip, unsigned long parent_ip,
 25		    struct ftrace_ops *op, struct pt_regs *pt_regs);
 26static void
 27function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 28			  struct ftrace_ops *op, struct pt_regs *pt_regs);
 29static struct ftrace_ops trace_ops;
 30static struct ftrace_ops trace_stack_ops;
 31static struct tracer_flags func_flags;
 32
 33/* Our option */
 34enum {
 35	TRACE_FUNC_OPT_STACK	= 0x1,
 36};
 37
 38static int allocate_ftrace_ops(struct trace_array *tr)
 39{
 40	struct ftrace_ops *ops;
 41
 42	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 43	if (!ops)
 44		return -ENOMEM;
 45
  46	/* Currently only the non stack version is supported */
 47	ops->func = function_trace_call;
 48	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
 49
 50	tr->ops = ops;
 51	ops->private = tr;
 52	return 0;
 53}
 54
 55
 56int ftrace_create_function_files(struct trace_array *tr,
 57				 struct dentry *parent)
 58{
 59	int ret;
 60
 61	/*
 62	 * The top level array uses the "global_ops", and the files are
 63	 * created on boot up.
 64	 */
 65	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 66		return 0;
 67
 68	ret = allocate_ftrace_ops(tr);
 69	if (ret)
 70		return ret;
 71
 72	ftrace_create_filter_files(tr->ops, parent);
 73
 74	return 0;
 75}
 76
 77void ftrace_destroy_function_files(struct trace_array *tr)
 78{
 79	ftrace_destroy_filter_files(tr->ops);
 80	kfree(tr->ops);
 81	tr->ops = NULL;
 82}
 83
 84static int function_trace_init(struct trace_array *tr)
 85{
 86	struct ftrace_ops *ops;
 87
 88	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
 89		/* There's only one global tr */
 90		if (!trace_ops.private) {
 91			trace_ops.private = tr;
 92			trace_stack_ops.private = tr;
 93		}
 94
 95		if (func_flags.val & TRACE_FUNC_OPT_STACK)
 96			ops = &trace_stack_ops;
 97		else
 98			ops = &trace_ops;
 99		tr->ops = ops;
100	} else if (!tr->ops) {
101		/*
102		 * Instance trace_arrays get their ops allocated
103		 * at instance creation. Unless it failed
104		 * the allocation.
105		 */
106		return -ENOMEM;
107	}
108
109	tr->trace_buffer.cpu = get_cpu();
110	put_cpu();
111
112	tracing_start_cmdline_record();
113	tracing_start_function_trace(tr);
114	return 0;
115}
116
117static void function_trace_reset(struct trace_array *tr)
118{
119	tracing_stop_function_trace(tr);
120	tracing_stop_cmdline_record();
121}
122
123static void function_trace_start(struct trace_array *tr)
124{
125	tracing_reset_online_cpus(&tr->trace_buffer);
126}
127
128static void
129function_trace_call(unsigned long ip, unsigned long parent_ip,
130		    struct ftrace_ops *op, struct pt_regs *pt_regs)
131{
132	struct trace_array *tr = op->private;
133	struct trace_array_cpu *data;
134	unsigned long flags;
135	int bit;
136	int cpu;
137	int pc;
138
139	if (unlikely(!tr->function_enabled))
140		return;
141
142	pc = preempt_count();
143	preempt_disable_notrace();
144
145	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
146	if (bit < 0)
147		goto out;
148
149	cpu = smp_processor_id();
150	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
151	if (!atomic_read(&data->disabled)) {
152		local_save_flags(flags);
153		trace_function(tr, ip, parent_ip, flags, pc);
154	}
155	trace_clear_recursion(bit);
156
157 out:
158	preempt_enable_notrace();
159}
160
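The guard in function_trace_call() is the interesting part: after preempt_disable_notrace(), trace_test_and_set_recursion() claims a bit in current->trace_recursion and returns a negative value if this context is already inside the callback, which is what stops a function traced *by* the tracer from recursing forever (the kernel keeps separate bits for normal, softirq, hardirq and NMI context). A minimal single-context userspace sketch of the same pattern; every name in it is illustrative, not a kernel API:

#include <stdio.h>

/* Stand-in for the per-task trace_recursion bitmask; one flag per thread. */
static __thread int in_tracer;

static int recursion_trylock(void)
{
	if (in_tracer)
		return -1;		/* callback already active here: bail out */
	in_tracer = 1;
	return 0;
}

static void recursion_unlock(void)
{
	in_tracer = 0;
}

static void traced_helper(void);

/* Plays the role of function_trace_call(): everything it calls is traced too. */
static void trace_callback(const char *func)
{
	if (recursion_trylock() < 0)
		return;
	printf("traced: %s\n", func);
	traced_helper();		/* would recurse forever without the guard */
	recursion_unlock();
}

static void traced_helper(void)
{
	trace_callback("traced_helper");
}

int main(void)
{
	trace_callback("main");		/* prints exactly one line */
	return 0;
}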
161static void
162function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
163			  struct ftrace_ops *op, struct pt_regs *pt_regs)
164{
165	struct trace_array *tr = op->private;
166	struct trace_array_cpu *data;
167	unsigned long flags;
168	long disabled;
169	int cpu;
170	int pc;
171
172	if (unlikely(!tr->function_enabled))
173		return;
174
175	/*
176	 * Need to use raw, since this must be called before the
177	 * recursive protection is performed.
178	 */
179	local_irq_save(flags);
180	cpu = raw_smp_processor_id();
181	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
182	disabled = atomic_inc_return(&data->disabled);
183
184	if (likely(disabled == 1)) {
185		pc = preempt_count();
186		trace_function(tr, ip, parent_ip, flags, pc);
187		/*
188		 * skip over 5 funcs:
189		 *    __ftrace_trace_stack,
190		 *    __trace_stack,
191		 *    function_stack_trace_call
192		 *    ftrace_list_func
193		 *    ftrace_call
194		 */
195		__trace_stack(tr, flags, 5, pc);
196	}
197
198	atomic_dec(&data->disabled);
199	local_irq_restore(flags);
200}
201
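function_stack_trace_call() uses a heavier guard than the recursion bit: with interrupts off it bumps the per-CPU data->disabled counter and only records the event when the increment took the counter from 0 to 1, so a nested hit on the same CPU skips the buffer instead of corrupting it. The shape of that gate, sketched with C11 atomics (one "CPU", illustrative names):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for one CPU's data->disabled counter. */
static atomic_long disabled;

static void stack_trace_event(const char *func)
{
	/* atomic_inc_return(): only the 0 -> 1 transition may trace. */
	long d = atomic_fetch_add(&disabled, 1) + 1;

	if (d == 1)
		printf("function + stack entry for %s\n", func);
	/* a nested entry sees d > 1 and is silently dropped */

	atomic_fetch_sub(&disabled, 1);
}

int main(void)
{
	stack_trace_event("schedule");
	return 0;
}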
202static struct ftrace_ops trace_ops __read_mostly =
203{
204	.func = function_trace_call,
205	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
206};
207
208static struct ftrace_ops trace_stack_ops __read_mostly =
209{
210	.func = function_stack_trace_call,
211	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
212};
213
214static struct tracer_opt func_opts[] = {
215#ifdef CONFIG_STACKTRACE
216	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
217#endif
218	{ } /* Always set a last empty entry */
219};
220
221static struct tracer_flags func_flags = {
222	.val = 0, /* By default: all flags disabled */
223	.opts = func_opts
224};
225
226static void tracing_start_function_trace(struct trace_array *tr)
227{
228	tr->function_enabled = 0;
229	register_ftrace_function(tr->ops);
230	tr->function_enabled = 1;
231}
232
233static void tracing_stop_function_trace(struct trace_array *tr)
234{
235	tr->function_enabled = 0;
236	unregister_ftrace_function(tr->ops);
237}
238
239static int
240func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
241{
242	switch (bit) {
243	case TRACE_FUNC_OPT_STACK:
244		/* do nothing if already set */
245		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
246			break;
247
248		unregister_ftrace_function(tr->ops);
249
250		if (set) {
251			tr->ops = &trace_stack_ops;
252			register_ftrace_function(tr->ops);
253		} else {
254			tr->ops = &trace_ops;
255			register_ftrace_function(tr->ops);
256		}
257
258		break;
259	default:
260		return -EINVAL;
261	}
262
263	return 0;
264}
265
266static struct tracer function_trace __tracer_data =
267{
268	.name		= "function",
269	.init		= function_trace_init,
270	.reset		= function_trace_reset,
271	.start		= function_trace_start,
272	.wait_pipe	= poll_wait_pipe,
273	.flags		= &func_flags,
274	.set_flag	= func_set_flag,
275	.allow_instances = true,
276#ifdef CONFIG_FTRACE_SELFTEST
277	.selftest	= trace_selftest_startup_function,
278#endif
279};
280
281#ifdef CONFIG_DYNAMIC_FTRACE
282static int update_count(void **data)
283{
284	unsigned long *count = (long *)data;
285
286	if (!*count)
287		return 0;
288
289	if (*count != -1)
290		(*count)--;
291
292	return 1;
293}
294
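update_count() is shared by the counted probes below, and the encoding is easy to miss: the probe's data slot is not a pointer to anything, the pointer value itself is the remaining budget, and -1 (the default when no ":count" was given) means unlimited. The same encoding, runnable in userspace:

#include <stdio.h>

/* Decrement the count stashed in the data pointer; -1 means unlimited. */
static int update_count(void **data)
{
	long *count = (long *)data;

	if (!*count)
		return 0;	/* budget exhausted: the probe does nothing */
	if (*count != -1)
		(*count)--;
	return 1;
}

int main(void)
{
	void *data = (void *)2L;	/* a "traceon:2" style budget */
	int i;

	for (i = 0; i < 4; i++)
		printf("call %d -> fire=%d remaining=%ld\n",
		       i, update_count(&data), (long)data);
	return 0;
}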
295static void
296ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
297{
298	if (tracing_is_on())
299		return;
300
301	if (update_count(data))
302		tracing_on();
303}
304
305static void
306ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
307{
308	if (!tracing_is_on())
309		return;
310
311	if (update_count(data))
312		tracing_off();
313}
314
315static void
316ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
317{
318	if (tracing_is_on())
319		return;
320
321	tracing_on();
322}
323
324static void
325ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
326{
327	if (!tracing_is_on())
328		return;
329
330	tracing_off();
331}
332
333/*
334 * Skip 4:
335 *   ftrace_stacktrace()
336 *   function_trace_probe_call()
337 *   ftrace_ops_list_func()
338 *   ftrace_call()
339 */
340#define STACK_SKIP 4
341
342static void
343ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
344{
345	trace_dump_stack(STACK_SKIP);
346}
347
348static void
349ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
350{
351	if (!tracing_is_on())
352		return;
353
354	if (update_count(data))
355		trace_dump_stack(STACK_SKIP);
356}
357
358static void
359ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
360{
361	if (update_count(data))
362		ftrace_dump(DUMP_ALL);
363}
364
365/* Only dump the current CPU buffer. */
366static void
367ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
368{
369	if (update_count(data))
370		ftrace_dump(DUMP_ORIG);
371}
372
373static int
374ftrace_probe_print(const char *name, struct seq_file *m,
375		   unsigned long ip, void *data)
376{
377	long count = (long)data;
378
379	seq_printf(m, "%ps:%s", (void *)ip, name);
380
381	if (count == -1)
382		seq_printf(m, ":unlimited\n");
383	else
384		seq_printf(m, ":count=%ld\n", count);
385
386	return 0;
387}
388
389static int
390ftrace_traceon_print(struct seq_file *m, unsigned long ip,
391			 struct ftrace_probe_ops *ops, void *data)
392{
393	return ftrace_probe_print("traceon", m, ip, data);
394}
395
396static int
397ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
398			 struct ftrace_probe_ops *ops, void *data)
399{
400	return ftrace_probe_print("traceoff", m, ip, data);
401}
402
403static int
404ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
405			struct ftrace_probe_ops *ops, void *data)
406{
407	return ftrace_probe_print("stacktrace", m, ip, data);
408}
409
410static int
411ftrace_dump_print(struct seq_file *m, unsigned long ip,
412			struct ftrace_probe_ops *ops, void *data)
413{
414	return ftrace_probe_print("dump", m, ip, data);
415}
416
417static int
418ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
419			struct ftrace_probe_ops *ops, void *data)
420{
421	return ftrace_probe_print("cpudump", m, ip, data);
422}
423
424static struct ftrace_probe_ops traceon_count_probe_ops = {
425	.func			= ftrace_traceon_count,
426	.print			= ftrace_traceon_print,
427};
428
429static struct ftrace_probe_ops traceoff_count_probe_ops = {
430	.func			= ftrace_traceoff_count,
431	.print			= ftrace_traceoff_print,
432};
433
434static struct ftrace_probe_ops stacktrace_count_probe_ops = {
435	.func			= ftrace_stacktrace_count,
436	.print			= ftrace_stacktrace_print,
437};
438
439static struct ftrace_probe_ops dump_probe_ops = {
440	.func			= ftrace_dump_probe,
441	.print			= ftrace_dump_print,
442};
443
444static struct ftrace_probe_ops cpudump_probe_ops = {
445	.func			= ftrace_cpudump_probe,
446	.print			= ftrace_cpudump_print,
447};
448
449static struct ftrace_probe_ops traceon_probe_ops = {
450	.func			= ftrace_traceon,
451	.print			= ftrace_traceon_print,
452};
453
454static struct ftrace_probe_ops traceoff_probe_ops = {
455	.func			= ftrace_traceoff,
456	.print			= ftrace_traceoff_print,
457};
458
459static struct ftrace_probe_ops stacktrace_probe_ops = {
460	.func			= ftrace_stacktrace,
461	.print			= ftrace_stacktrace_print,
462};
463
464static int
465ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
466			    struct ftrace_hash *hash, char *glob,
467			    char *cmd, char *param, int enable)
468{
469	void *count = (void *)-1;
470	char *number;
471	int ret;
472
473	/* hash funcs only work with set_ftrace_filter */
474	if (!enable)
475		return -EINVAL;
476
477	if (glob[0] == '!') {
478		unregister_ftrace_function_probe_func(glob+1, ops);
479		return 0;
480	}
481
482	if (!param)
483		goto out_reg;
484
485	number = strsep(&param, ":");
486
487	if (!strlen(number))
488		goto out_reg;
489
490	/*
491	 * We use the callback data field (which is a pointer)
492	 * as our counter.
493	 */
494	ret = kstrtoul(number, 0, (unsigned long *)&count);
495	if (ret)
496		return ret;
497
498 out_reg:
499	ret = register_ftrace_function_probe(glob, ops, count);
500
501	return ret < 0 ? ret : 0;
502}
503
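ftrace_trace_probe_callback() is reached by writing to set_ftrace_filter: a line such as "schedule:traceon:5" arrives split into glob="schedule", cmd="traceon" and param="5", and strsep() plus kstrtoul() turn the optional trailing count into the probe budget described above. The parsing step in isolation, with strtoul standing in for the kernel's kstrtoul():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char buf[] = "5";	/* what 'param' looks like for "...:traceon:5" */
	char *param = buf;
	char *number = strsep(&param, ":");	/* peel the count off */
	unsigned long count = strtoul(number, NULL, 0);

	printf("count=%lu\n", count);		/* prints count=5 */
	return 0;
}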
504static int
505ftrace_trace_onoff_callback(struct ftrace_hash *hash,
506			    char *glob, char *cmd, char *param, int enable)
507{
508	struct ftrace_probe_ops *ops;
509
510	/* we register both traceon and traceoff to this callback */
511	if (strcmp(cmd, "traceon") == 0)
512		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
513	else
514		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
515
516	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
517					   param, enable);
518}
519
520static int
521ftrace_stacktrace_callback(struct ftrace_hash *hash,
522			   char *glob, char *cmd, char *param, int enable)
523{
524	struct ftrace_probe_ops *ops;
525
526	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
527
528	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
529					   param, enable);
530}
531
532static int
533ftrace_dump_callback(struct ftrace_hash *hash,
534			   char *glob, char *cmd, char *param, int enable)
535{
536	struct ftrace_probe_ops *ops;
537
538	ops = &dump_probe_ops;
539
540	/* Only dump once. */
541	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
542					   "1", enable);
543}
544
545static int
546ftrace_cpudump_callback(struct ftrace_hash *hash,
547			   char *glob, char *cmd, char *param, int enable)
548{
549	struct ftrace_probe_ops *ops;
550
551	ops = &cpudump_probe_ops;
552
553	/* Only dump once. */
554	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
555					   "1", enable);
556}
557
558static struct ftrace_func_command ftrace_traceon_cmd = {
559	.name			= "traceon",
560	.func			= ftrace_trace_onoff_callback,
561};
562
563static struct ftrace_func_command ftrace_traceoff_cmd = {
564	.name			= "traceoff",
565	.func			= ftrace_trace_onoff_callback,
566};
567
568static struct ftrace_func_command ftrace_stacktrace_cmd = {
569	.name			= "stacktrace",
570	.func			= ftrace_stacktrace_callback,
571};
572
573static struct ftrace_func_command ftrace_dump_cmd = {
574	.name			= "dump",
575	.func			= ftrace_dump_callback,
576};
577
578static struct ftrace_func_command ftrace_cpudump_cmd = {
579	.name			= "cpudump",
580	.func			= ftrace_cpudump_callback,
581};
582
583static int __init init_func_cmd_traceon(void)
584{
585	int ret;
586
587	ret = register_ftrace_command(&ftrace_traceoff_cmd);
588	if (ret)
589		return ret;
590
591	ret = register_ftrace_command(&ftrace_traceon_cmd);
592	if (ret)
593		goto out_free_traceoff;
594
595	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
596	if (ret)
597		goto out_free_traceon;
598
599	ret = register_ftrace_command(&ftrace_dump_cmd);
600	if (ret)
601		goto out_free_stacktrace;
602
603	ret = register_ftrace_command(&ftrace_cpudump_cmd);
604	if (ret)
605		goto out_free_dump;
606
607	return 0;
608
609 out_free_dump:
610	unregister_ftrace_command(&ftrace_dump_cmd);
611 out_free_stacktrace:
612	unregister_ftrace_command(&ftrace_stacktrace_cmd);
613 out_free_traceon:
614	unregister_ftrace_command(&ftrace_traceon_cmd);
615 out_free_traceoff:
616	unregister_ftrace_command(&ftrace_traceoff_cmd);
617
618	return ret;
619}
620#else
621static inline int init_func_cmd_traceon(void)
622{
623	return 0;
624}
625#endif /* CONFIG_DYNAMIC_FTRACE */
626
627static __init int init_function_trace(void)
628{
629	init_func_cmd_traceon();
630	return register_tracer(&function_trace);
631}
632core_initcall(init_function_trace);
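Everything in the listing above is driven from tracefs: writing "function" to current_tracer lands in function_trace_init() and registers tr->ops, while the five ftrace_func_command structures registered by init_func_cmd_traceon() expose "traceon", "traceoff", "stacktrace", "dump" and "cpudump" as probe commands on set_ftrace_filter, each taking an optional hit count. The same file follows as of v6.13.7.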
Linux v6.13.7: kernel/trace/trace_functions.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * ring buffer based function tracer
   4 *
   5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
   7 *
   8 * Based on code from the latency_tracer, that is:
   9 *
  10 *  Copyright (C) 2004-2006 Ingo Molnar
  11 *  Copyright (C) 2004 Nadia Yvette Chambers
  12 */
  13#include <linux/ring_buffer.h>
  14#include <linux/debugfs.h>
  15#include <linux/uaccess.h>
  16#include <linux/ftrace.h>
  17#include <linux/slab.h>
  18#include <linux/fs.h>
  19
  20#include "trace.h"
  21
  22static void tracing_start_function_trace(struct trace_array *tr);
  23static void tracing_stop_function_trace(struct trace_array *tr);
  24static void
  25function_trace_call(unsigned long ip, unsigned long parent_ip,
  26		    struct ftrace_ops *op, struct ftrace_regs *fregs);
  27static void
  28function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
  29			  struct ftrace_ops *op, struct ftrace_regs *fregs);
  30static void
  31function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
  32			       struct ftrace_ops *op, struct ftrace_regs *fregs);
  33static void
  34function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
  35				     struct ftrace_ops *op,
  36				     struct ftrace_regs *fregs);
  37static struct tracer_flags func_flags;
  38
  39/* Our option */
  40enum {
  41
  42	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
  43	TRACE_FUNC_OPT_STACK		= 0x1,
  44	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,
  45
  46	/* Update this to next highest bit. */
  47	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
  48};
  49
  50#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
  51
  52int ftrace_allocate_ftrace_ops(struct trace_array *tr)
  53{
  54	struct ftrace_ops *ops;
  55
  56	/* The top level array uses the "global_ops" */
  57	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
  58		return 0;
  59
  60	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
  61	if (!ops)
  62		return -ENOMEM;
  63
  64	/* Currently only the non stack version is supported */
  65	ops->func = function_trace_call;
  66	ops->flags = FTRACE_OPS_FL_PID;
  67
  68	tr->ops = ops;
  69	ops->private = tr;
  70
  71	return 0;
  72}
  73
  74void ftrace_free_ftrace_ops(struct trace_array *tr)
  75{
  76	kfree(tr->ops);
  77	tr->ops = NULL;
  78}
  79
  80int ftrace_create_function_files(struct trace_array *tr,
  81				 struct dentry *parent)
  82{
  83	int ret;
  84	/*
  85	 * The top level array uses the "global_ops", and the files are
  86	 * created on boot up.
  87	 */
  88	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
  89		return 0;
  90
  91	if (!tr->ops)
  92		return -EINVAL;
  93
  94	ret = allocate_fgraph_ops(tr, tr->ops);
  95	if (ret) {
  96		kfree(tr->ops);
  97		return ret;
  98	}
  99
 100	ftrace_create_filter_files(tr->ops, parent);
 101
 102	return 0;
 103}
 104
 105void ftrace_destroy_function_files(struct trace_array *tr)
 106{
 107	ftrace_destroy_filter_files(tr->ops);
 108	ftrace_free_ftrace_ops(tr);
 109	free_fgraph_ops(tr);
 110}
 111
 112static ftrace_func_t select_trace_function(u32 flags_val)
 113{
 114	switch (flags_val & TRACE_FUNC_OPT_MASK) {
 115	case TRACE_FUNC_NO_OPTS:
 116		return function_trace_call;
 117	case TRACE_FUNC_OPT_STACK:
 118		return function_stack_trace_call;
 119	case TRACE_FUNC_OPT_NO_REPEATS:
 120		return function_no_repeats_trace_call;
 121	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
 122		return function_stack_no_repeats_trace_call;
 123	default:
 124		return NULL;
 125	}
 126}
 127
 128static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
 129{
 130	if (!tr->last_func_repeats &&
 131	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
 132		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
 133		if (!tr->last_func_repeats)
 134			return false;
 135	}
 136
 137	return true;
 138}
 139
 140static int function_trace_init(struct trace_array *tr)
 141{
 142	ftrace_func_t func;
 143	/*
 144	 * Instance trace_arrays get their ops allocated
 145	 * at instance creation. Unless it failed
 146	 * the allocation.
 147	 */
 148	if (!tr->ops)
 149		return -ENOMEM;
 150
 151	func = select_trace_function(func_flags.val);
 152	if (!func)
 153		return -EINVAL;
 154
 155	if (!handle_func_repeats(tr, func_flags.val))
 156		return -ENOMEM;
 157
 158	ftrace_init_array_ops(tr, func);
 159
 160	tr->array_buffer.cpu = raw_smp_processor_id();
 161
 162	tracing_start_cmdline_record();
 163	tracing_start_function_trace(tr);
 164	return 0;
 165}
 166
 167static void function_trace_reset(struct trace_array *tr)
 168{
 169	tracing_stop_function_trace(tr);
 170	tracing_stop_cmdline_record();
 171	ftrace_reset_array_ops(tr);
 172}
 173
 174static void function_trace_start(struct trace_array *tr)
 175{
 176	tracing_reset_online_cpus(&tr->array_buffer);
 177}
 178
 179/* fregs are guaranteed not to be NULL if HAVE_DYNAMIC_FTRACE_WITH_ARGS is set */
 180#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
 181static __always_inline unsigned long
 182function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
 183{
 184	unsigned long true_parent_ip;
 185	int idx = 0;
 186
 187	true_parent_ip = parent_ip;
 188	if (unlikely(parent_ip == (unsigned long)&return_to_handler) && fregs)
 189		true_parent_ip = ftrace_graph_ret_addr(current, &idx, parent_ip,
 190				(unsigned long *)ftrace_regs_get_stack_pointer(fregs));
 191	return true_parent_ip;
 192}
 193#else
 194static __always_inline unsigned long
 195function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
 196{
 197	return parent_ip;
 198}
 199#endif
 200
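function_get_true_parent_ip() deals with an interaction with the function graph tracer: while the graph tracer is active it replaces return addresses with its return_to_handler trampoline, so the parent_ip handed to these callbacks can point at the trampoline instead of the real caller. The helper detects that case and asks ftrace_graph_ret_addr() to recover the original return address from the saved return stack.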
 201static void
 202function_trace_call(unsigned long ip, unsigned long parent_ip,
 203		    struct ftrace_ops *op, struct ftrace_regs *fregs)
 204{
 205	struct trace_array *tr = op->private;
 206	struct trace_array_cpu *data;
 207	unsigned int trace_ctx;
 208	int bit;
 209
 210	if (unlikely(!tr->function_enabled))
 211		return;
 212
 213	bit = ftrace_test_recursion_trylock(ip, parent_ip);
 214	if (bit < 0)
 215		return;
 216
 217	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
 218
 219	trace_ctx = tracing_gen_ctx_dec();
 220
 221	data = this_cpu_ptr(tr->array_buffer.data);
 222	if (!atomic_read(&data->disabled))
 223		trace_function(tr, ip, parent_ip, trace_ctx);
 224
 225	ftrace_test_recursion_unlock(bit);
 226}
 227
 228#ifdef CONFIG_UNWINDER_ORC
 229/*
 230 * Skip 2:
 231 *
 232 *   function_stack_trace_call()
 233 *   ftrace_call()
 234 */
 235#define STACK_SKIP 2
 236#else
 237/*
 238 * Skip 3:
 239 *   __trace_stack()
 240 *   function_stack_trace_call()
 241 *   ftrace_call()
 242 */
 243#define STACK_SKIP 3
 244#endif
 245
 246static void
 247function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 248			  struct ftrace_ops *op, struct ftrace_regs *fregs)
 249{
 250	struct trace_array *tr = op->private;
 251	struct trace_array_cpu *data;
 252	unsigned long flags;
 253	long disabled;
 254	int cpu;
 255	unsigned int trace_ctx;
 256	int skip = STACK_SKIP;
 257
 258	if (unlikely(!tr->function_enabled))
 259		return;
 260
 261	/*
 262	 * Need to use raw, since this must be called before the
 263	 * recursive protection is performed.
 264	 */
 265	local_irq_save(flags);
 266	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
 267	cpu = raw_smp_processor_id();
 268	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 269	disabled = atomic_inc_return(&data->disabled);
 270
 271	if (likely(disabled == 1)) {
 272		trace_ctx = tracing_gen_ctx_flags(flags);
 273		trace_function(tr, ip, parent_ip, trace_ctx);
 274#ifdef CONFIG_UNWINDER_FRAME_POINTER
 275		if (ftrace_pids_enabled(op))
 276			skip++;
 277#endif
 278		__trace_stack(tr, trace_ctx, skip);
 279	}
 280
 281	atomic_dec(&data->disabled);
 282	local_irq_restore(flags);
 283}
 284
 285static inline bool is_repeat_check(struct trace_array *tr,
 286				   struct trace_func_repeats *last_info,
 287				   unsigned long ip, unsigned long parent_ip)
 288{
 289	if (last_info->ip == ip &&
 290	    last_info->parent_ip == parent_ip &&
 291	    last_info->count < U16_MAX) {
 292		last_info->ts_last_call =
 293			ring_buffer_time_stamp(tr->array_buffer.buffer);
 294		last_info->count++;
 295		return true;
 296	}
 297
 298	return false;
 299}
 300
 301static inline void process_repeats(struct trace_array *tr,
 302				   unsigned long ip, unsigned long parent_ip,
 303				   struct trace_func_repeats *last_info,
 304				   unsigned int trace_ctx)
 305{
 306	if (last_info->count) {
 307		trace_last_func_repeats(tr, last_info, trace_ctx);
 308		last_info->count = 0;
 309	}
 310
 311	last_info->ip = ip;
 312	last_info->parent_ip = parent_ip;
 313}
 314
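is_repeat_check() and process_repeats() implement the func-no-repeats option: while consecutive events carry the same ip/parent_ip pair, only a per-CPU 16-bit counter (plus a timestamp) is bumped, and the accumulated count is flushed as a single "repeats" entry once a different function shows up. A compact userspace model keyed on ip alone (the kernel also compares parent_ip):

#include <stdio.h>

struct repeats { unsigned long ip; unsigned int count; };

static void emit(struct repeats *last, unsigned long ip)
{
	if (last->ip == ip && last->count < 0xffff) {	/* U16_MAX cap */
		last->count++;			/* suppress the duplicate */
		return;
	}
	if (last->count) {			/* flush pending repeats */
		printf("  (previous entry repeated %u times)\n", last->count);
		last->count = 0;
	}
	last->ip = ip;
	printf("event at %#lx\n", ip);
}

int main(void)
{
	struct repeats last = { 0, 0 };
	unsigned long trace[] = { 0x10, 0x20, 0x20, 0x20, 0x30 };

	for (unsigned int i = 0; i < sizeof(trace) / sizeof(trace[0]); i++)
		emit(&last, trace[i]);
	return 0;
}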
 315static void
 316function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 317			       struct ftrace_ops *op,
 318			       struct ftrace_regs *fregs)
 319{
 320	struct trace_func_repeats *last_info;
 321	struct trace_array *tr = op->private;
 322	struct trace_array_cpu *data;
 323	unsigned int trace_ctx;
 324	int bit;
 325
 326	if (unlikely(!tr->function_enabled))
 327		return;
 328
 329	bit = ftrace_test_recursion_trylock(ip, parent_ip);
 330	if (bit < 0)
 331		return;
 332
 333	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
 334	data = this_cpu_ptr(tr->array_buffer.data);
 335	if (atomic_read(&data->disabled))
 336		goto out;
 337
 338	/*
 339	 * An interrupt may happen at any place here. But as far as I can see,
 340	 * the only damage that this can cause is to mess up the repetition
 341	 * counter without valuable data being lost.
 342	 * TODO: think about a solution that is better than just hoping to be
 343	 * lucky.
 344	 */
 345	last_info = this_cpu_ptr(tr->last_func_repeats);
 346	if (is_repeat_check(tr, last_info, ip, parent_ip))
 347		goto out;
 348
 349	trace_ctx = tracing_gen_ctx_dec();
 350	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
 351
 352	trace_function(tr, ip, parent_ip, trace_ctx);
 353
 354out:
 355	ftrace_test_recursion_unlock(bit);
 356}
 357
 358static void
 359function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 360				     struct ftrace_ops *op,
 361				     struct ftrace_regs *fregs)
 362{
 363	struct trace_func_repeats *last_info;
 364	struct trace_array *tr = op->private;
 365	struct trace_array_cpu *data;
 366	unsigned long flags;
 367	long disabled;
 368	int cpu;
 369	unsigned int trace_ctx;
 370
 371	if (unlikely(!tr->function_enabled))
 372		return;
 373
 374	/*
 375	 * Need to use raw, since this must be called before the
 376	 * recursive protection is performed.
 377	 */
 378	local_irq_save(flags);
 379	parent_ip = function_get_true_parent_ip(parent_ip, fregs);
 380	cpu = raw_smp_processor_id();
 381	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 382	disabled = atomic_inc_return(&data->disabled);
 383
 384	if (likely(disabled == 1)) {
 385		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
 386		if (is_repeat_check(tr, last_info, ip, parent_ip))
 387			goto out;
 388
 389		trace_ctx = tracing_gen_ctx_flags(flags);
 390		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
 391
 392		trace_function(tr, ip, parent_ip, trace_ctx);
 393		__trace_stack(tr, trace_ctx, STACK_SKIP);
 394	}
 395
 396 out:
 397	atomic_dec(&data->disabled);
 398	local_irq_restore(flags);
 399}
 400
 401static struct tracer_opt func_opts[] = {
 402#ifdef CONFIG_STACKTRACE
 403	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 404#endif
 405	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
 406	{ } /* Always set a last empty entry */
 407};
 408
 409static struct tracer_flags func_flags = {
 410	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
 411	.opts = func_opts
 412};
 413
 414static void tracing_start_function_trace(struct trace_array *tr)
 415{
 416	tr->function_enabled = 0;
 417	register_ftrace_function(tr->ops);
 418	tr->function_enabled = 1;
 419}
 420
 421static void tracing_stop_function_trace(struct trace_array *tr)
 422{
 423	tr->function_enabled = 0;
 424	unregister_ftrace_function(tr->ops);
 425}
 426
 427static struct tracer function_trace;
 428
 429static int
 430func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 431{
 432	ftrace_func_t func;
 433	u32 new_flags;
 434
 435	/* Do nothing if already set. */
 436	if (!!set == !!(func_flags.val & bit))
 437		return 0;
 438
 439	/* We can change this flag only when not running. */
 440	if (tr->current_trace != &function_trace)
 441		return 0;
 442
 443	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
 444	func = select_trace_function(new_flags);
 445	if (!func)
 446		return -EINVAL;
 447
 448	/* Check if there's anything to change. */
 449	if (tr->ops->func == func)
 450		return 0;
 451
 452	if (!handle_func_repeats(tr, new_flags))
 453		return -ENOMEM;
 454
 455	unregister_ftrace_function(tr->ops);
 456	tr->ops->func = func;
 457	register_ftrace_function(tr->ops);
 458
 459	return 0;
 460}
 461
 462static struct tracer function_trace __tracer_data =
 463{
 464	.name		= "function",
 465	.init		= function_trace_init,
 466	.reset		= function_trace_reset,
 467	.start		= function_trace_start,
 468	.flags		= &func_flags,
 469	.set_flag	= func_set_flag,
 470	.allow_instances = true,
 471#ifdef CONFIG_FTRACE_SELFTEST
 472	.selftest	= trace_selftest_startup_function,
 473#endif
 474};
 475
 476#ifdef CONFIG_DYNAMIC_FTRACE
 477static void update_traceon_count(struct ftrace_probe_ops *ops,
 478				 unsigned long ip,
 479				 struct trace_array *tr, bool on,
 480				 void *data)
 481{
 482	struct ftrace_func_mapper *mapper = data;
 483	long *count;
 484	long old_count;
 485
 486	/*
 487	 * Tracing gets disabled (or enabled) once per count.
 488	 * This function can be called at the same time on multiple CPUs.
 489	 * It is fine if both disable (or enable) tracing, as disabling
 490	 * (or enabling) the second time doesn't do anything as the
 491	 * state of the tracer is already disabled (or enabled).
 492	 * What needs to be synchronized in this case is that the count
 493	 * only gets decremented once, even if the tracer is disabled
 494	 * (or enabled) twice, as the second one is really a nop.
 495	 *
 496	 * The memory barriers guarantee that we only decrement the
 497	 * counter once. First the count is read to a local variable
 498	 * and a read barrier is used to make sure that it is loaded
 499	 * before checking if the tracer is in the state we want.
 500	 * If the tracer is not in the state we want, then the count
 501	 * is guaranteed to be the old count.
 502	 *
 503	 * Next the tracer is set to the state we want (disabled or enabled)
 504	 * then a write memory barrier is used to make sure that
 505	 * the new state is visible before changing the counter by
 506	 * one minus the old counter. This guarantees that another CPU
 507	 * executing this code will see the new state before seeing
 508	 * the new counter value, and would not do anything if the new
 509	 * counter is seen.
 510	 *
 511	 * Note, there is no synchronization between this and a user
 512	 * setting the tracing_on file. But we currently don't care
 513	 * about that.
 514	 */
 515	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 516	old_count = *count;
 517
 518	if (old_count <= 0)
 519		return;
 520
 521	/* Make sure we see count before checking tracing state */
 522	smp_rmb();
 523
 524	if (on == !!tracer_tracing_is_on(tr))
 525		return;
 526
 527	if (on)
 528		tracer_tracing_on(tr);
 529	else
 530		tracer_tracing_off(tr);
 531
 532	/* Make sure tracing state is visible before updating count */
 533	smp_wmb();
 534
 535	*count = old_count - 1;
 536}
 537
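The long comment in update_traceon_count() is worth reading twice: the smp_rmb()/smp_wmb() pairing lets several CPUs race through the function while the count still drops exactly once per real on/off transition, because a CPU that observes the new tracing state is guaranteed to also observe (and refuse to redo) the decrement. A rough single-threaded skeleton of that protocol, with C11 fences standing in for the kernel barriers and all names illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static long count = 3;
static atomic_bool tracing_on = true;

static void toggle(bool on)
{
	long old_count = count;			/* read the count first */

	if (old_count <= 0)
		return;
	atomic_thread_fence(memory_order_acquire);	/* ~smp_rmb() */
	if (on == atomic_load(&tracing_on))
		return;				/* already in the desired state */
	atomic_store(&tracing_on, on);		/* flip the state... */
	atomic_thread_fence(memory_order_release);	/* ~smp_wmb() */
	count = old_count - 1;			/* ...then publish the new count */
}

int main(void)
{
	toggle(false);
	toggle(false);	/* nop: already off, count untouched */
	toggle(true);
	printf("remaining toggles: %ld\n", count);	/* prints 1 */
	return 0;
}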
 538static void
 539ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
 540		     struct trace_array *tr, struct ftrace_probe_ops *ops,
 541		     void *data)
 542{
 543	update_traceon_count(ops, ip, tr, 1, data);
 544}
 545
 546static void
 547ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
 548		      struct trace_array *tr, struct ftrace_probe_ops *ops,
 549		      void *data)
 550{
 551	update_traceon_count(ops, ip, tr, 0, data);
 552}
 553
 554static void
 555ftrace_traceon(unsigned long ip, unsigned long parent_ip,
 556	       struct trace_array *tr, struct ftrace_probe_ops *ops,
 557	       void *data)
 558{
 559	if (tracer_tracing_is_on(tr))
 560		return;
 561
 562	tracer_tracing_on(tr);
 563}
 564
 565static void
 566ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
 567		struct trace_array *tr, struct ftrace_probe_ops *ops,
 568		void *data)
 569{
 570	if (!tracer_tracing_is_on(tr))
 571		return;
 572
 573	tracer_tracing_off(tr);
 574}
 575
 576#ifdef CONFIG_UNWINDER_ORC
 577/*
 578 * Skip 3:
 579 *
 580 *   function_trace_probe_call()
 581 *   ftrace_ops_assist_func()
 582 *   ftrace_call()
 583 */
 584#define FTRACE_STACK_SKIP 3
 585#else
 586/*
 587 * Skip 5:
 588 *
 589 *   __trace_stack()
 590 *   ftrace_stacktrace()
 591 *   function_trace_probe_call()
 592 *   ftrace_ops_assist_func()
 593 *   ftrace_call()
 594 */
 595#define FTRACE_STACK_SKIP 5
 596#endif
 597
 598static __always_inline void trace_stack(struct trace_array *tr)
 599{
 600	unsigned int trace_ctx;
 601
 602	trace_ctx = tracing_gen_ctx();
 603
 604	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
 605}
 606
 607static void
 608ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
 609		  struct trace_array *tr, struct ftrace_probe_ops *ops,
 610		  void *data)
 611{
 612	trace_stack(tr);
 613}
 614
 615static void
 616ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
 617			struct trace_array *tr, struct ftrace_probe_ops *ops,
 618			void *data)
 619{
 620	struct ftrace_func_mapper *mapper = data;
 621	long *count;
 622	long old_count;
 623	long new_count;
 624
 625	if (!tracing_is_on())
 626		return;
 627
 628	/* unlimited? */
 629	if (!mapper) {
 630		trace_stack(tr);
 631		return;
 632	}
 633
 634	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 635
 636	/*
 637	 * Stack traces should only execute the number of times the
 638	 * user specified in the counter.
 639	 */
 640	do {
 641		old_count = *count;
 642
 643		if (!old_count)
 644			return;
 645
 646		new_count = old_count - 1;
 647		new_count = cmpxchg(count, old_count, new_count);
 648		if (new_count == old_count)
 649			trace_stack(tr);
 650
 651		if (!tracing_is_on())
 652			return;
 653
 654	} while (new_count != old_count);
 655}
 656
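ftrace_stacktrace_count() spends its budget with a cmpxchg() loop rather than a plain decrement, so concurrent hits on several CPUs each consume exactly one unit and exactly one stack trace is emitted per unit. The same decrement-once loop in C11 form (illustrative names):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long budget = 2;	/* stand-in for the per-ip count */

static void maybe_dump_stack(void)
{
	long old_count, new_count;

	do {
		old_count = atomic_load(&budget);
		if (!old_count)
			return;			/* budget exhausted */
		new_count = old_count - 1;
		/* only the thread whose exchange succeeds gets to dump */
	} while (!atomic_compare_exchange_strong(&budget, &old_count, new_count));

	printf("stack trace (%ld left)\n", new_count);
}

int main(void)
{
	maybe_dump_stack();
	maybe_dump_stack();
	maybe_dump_stack();	/* silent: only two dumps were allowed */
	return 0;
}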
 657static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
 658			void *data)
 659{
 660	struct ftrace_func_mapper *mapper = data;
 661	long *count = NULL;
 662
 663	if (mapper)
 664		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 665
 666	if (count) {
 667		if (*count <= 0)
 668			return 0;
 669		(*count)--;
 670	}
 671
 672	return 1;
 673}
 674
 675static void
 676ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
 677		  struct trace_array *tr, struct ftrace_probe_ops *ops,
 678		  void *data)
 679{
 680	if (update_count(ops, ip, data))
 681		ftrace_dump(DUMP_ALL);
 682}
 683
 684/* Only dump the current CPU buffer. */
 685static void
 686ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
 687		     struct trace_array *tr, struct ftrace_probe_ops *ops,
 688		     void *data)
 689{
 690	if (update_count(ops, ip, data))
 691		ftrace_dump(DUMP_ORIG);
 692}
 693
 694static int
 695ftrace_probe_print(const char *name, struct seq_file *m,
 696		   unsigned long ip, struct ftrace_probe_ops *ops,
 697		   void *data)
 698{
 699	struct ftrace_func_mapper *mapper = data;
 700	long *count = NULL;
 701
 702	seq_printf(m, "%ps:%s", (void *)ip, name);
 703
 704	if (mapper)
 705		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 706
 707	if (count)
 708		seq_printf(m, ":count=%ld\n", *count);
 709	else
 710		seq_puts(m, ":unlimited\n");
 711
 712	return 0;
 713}
 714
 715static int
 716ftrace_traceon_print(struct seq_file *m, unsigned long ip,
 717		     struct ftrace_probe_ops *ops,
 718		     void *data)
 719{
 720	return ftrace_probe_print("traceon", m, ip, ops, data);
 721}
 722
 723static int
 724ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
 725			 struct ftrace_probe_ops *ops, void *data)
 726{
 727	return ftrace_probe_print("traceoff", m, ip, ops, data);
 728}
 729
 730static int
 731ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
 732			struct ftrace_probe_ops *ops, void *data)
 733{
 734	return ftrace_probe_print("stacktrace", m, ip, ops, data);
 735}
 736
 737static int
 738ftrace_dump_print(struct seq_file *m, unsigned long ip,
 739			struct ftrace_probe_ops *ops, void *data)
 740{
 741	return ftrace_probe_print("dump", m, ip, ops, data);
 742}
 743
 744static int
 745ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
 746			struct ftrace_probe_ops *ops, void *data)
 747{
 748	return ftrace_probe_print("cpudump", m, ip, ops, data);
 749}
 750
 751
 752static int
 753ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
 754		  unsigned long ip, void *init_data, void **data)
 755{
 756	struct ftrace_func_mapper *mapper = *data;
 757
 758	if (!mapper) {
 759		mapper = allocate_ftrace_func_mapper();
 760		if (!mapper)
 761			return -ENOMEM;
 762		*data = mapper;
 763	}
 764
 765	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
 766}
 767
 768static void
 769ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
 770		  unsigned long ip, void *data)
 771{
 772	struct ftrace_func_mapper *mapper = data;
 773
 774	if (!ip) {
 775		free_ftrace_func_mapper(mapper, NULL);
 776		return;
 777	}
 778
 779	ftrace_func_mapper_remove_ip(mapper, ip);
 780}
 781
 782static struct ftrace_probe_ops traceon_count_probe_ops = {
 783	.func			= ftrace_traceon_count,
 784	.print			= ftrace_traceon_print,
 785	.init			= ftrace_count_init,
 786	.free			= ftrace_count_free,
 787};
 788
 789static struct ftrace_probe_ops traceoff_count_probe_ops = {
 790	.func			= ftrace_traceoff_count,
 791	.print			= ftrace_traceoff_print,
 792	.init			= ftrace_count_init,
 793	.free			= ftrace_count_free,
 794};
 795
 796static struct ftrace_probe_ops stacktrace_count_probe_ops = {
 797	.func			= ftrace_stacktrace_count,
 798	.print			= ftrace_stacktrace_print,
 799	.init			= ftrace_count_init,
 800	.free			= ftrace_count_free,
 801};
 802
 803static struct ftrace_probe_ops dump_probe_ops = {
 804	.func			= ftrace_dump_probe,
 805	.print			= ftrace_dump_print,
 806	.init			= ftrace_count_init,
 807	.free			= ftrace_count_free,
 808};
 809
 810static struct ftrace_probe_ops cpudump_probe_ops = {
 811	.func			= ftrace_cpudump_probe,
 812	.print			= ftrace_cpudump_print,
 813};
 814
 815static struct ftrace_probe_ops traceon_probe_ops = {
 816	.func			= ftrace_traceon,
 817	.print			= ftrace_traceon_print,
 818};
 819
 820static struct ftrace_probe_ops traceoff_probe_ops = {
 821	.func			= ftrace_traceoff,
 822	.print			= ftrace_traceoff_print,
 823};
 824
 825static struct ftrace_probe_ops stacktrace_probe_ops = {
 826	.func			= ftrace_stacktrace,
 827	.print			= ftrace_stacktrace_print,
 828};
 829
 830static int
 831ftrace_trace_probe_callback(struct trace_array *tr,
 832			    struct ftrace_probe_ops *ops,
 833			    struct ftrace_hash *hash, char *glob,
 834			    char *cmd, char *param, int enable)
 835{
 836	void *count = (void *)-1;
 837	char *number;
 838	int ret;
 839
 840	/* hash funcs only work with set_ftrace_filter */
 841	if (!enable)
 842		return -EINVAL;
 843
 844	if (glob[0] == '!')
 845		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
 846
 847	if (!param)
 848		goto out_reg;
 849
 850	number = strsep(&param, ":");
 851
 852	if (!strlen(number))
 853		goto out_reg;
 854
 855	/*
 856	 * We use the callback data field (which is a pointer)
 857	 * as our counter.
 858	 */
 859	ret = kstrtoul(number, 0, (unsigned long *)&count);
 860	if (ret)
 861		return ret;
 862
 863 out_reg:
 864	ret = register_ftrace_function_probe(glob, tr, ops, count);
 865
 866	return ret < 0 ? ret : 0;
 867}
 868
 869static int
 870ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
 871			    char *glob, char *cmd, char *param, int enable)
 872{
 873	struct ftrace_probe_ops *ops;
 874
 875	if (!tr)
 876		return -ENODEV;
 877
 878	/* we register both traceon and traceoff to this callback */
 879	if (strcmp(cmd, "traceon") == 0)
 880		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
 881	else
 882		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
 883
 884	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
 885					   param, enable);
 886}
 887
 888static int
 889ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
 890			   char *glob, char *cmd, char *param, int enable)
 891{
 892	struct ftrace_probe_ops *ops;
 893
 894	if (!tr)
 895		return -ENODEV;
 896
 897	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
 898
 899	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
 900					   param, enable);
 901}
 902
 903static int
 904ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
 905			   char *glob, char *cmd, char *param, int enable)
 906{
 907	struct ftrace_probe_ops *ops;
 908
 909	if (!tr)
 910		return -ENODEV;
 911
 912	ops = &dump_probe_ops;
 913
 914	/* Only dump once. */
 915	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
 916					   "1", enable);
 917}
 918
 919static int
 920ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
 921			   char *glob, char *cmd, char *param, int enable)
 922{
 923	struct ftrace_probe_ops *ops;
 924
 925	if (!tr)
 926		return -ENODEV;
 927
 928	ops = &cpudump_probe_ops;
 929
 930	/* Only dump once. */
 931	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
 932					   "1", enable);
 933}
 934
 935static struct ftrace_func_command ftrace_traceon_cmd = {
 936	.name			= "traceon",
 937	.func			= ftrace_trace_onoff_callback,
 938};
 939
 940static struct ftrace_func_command ftrace_traceoff_cmd = {
 941	.name			= "traceoff",
 942	.func			= ftrace_trace_onoff_callback,
 943};
 944
 945static struct ftrace_func_command ftrace_stacktrace_cmd = {
 946	.name			= "stacktrace",
 947	.func			= ftrace_stacktrace_callback,
 948};
 949
 950static struct ftrace_func_command ftrace_dump_cmd = {
 951	.name			= "dump",
 952	.func			= ftrace_dump_callback,
 953};
 954
 955static struct ftrace_func_command ftrace_cpudump_cmd = {
 956	.name			= "cpudump",
 957	.func			= ftrace_cpudump_callback,
 958};
 959
 960static int __init init_func_cmd_traceon(void)
 961{
 962	int ret;
 963
 964	ret = register_ftrace_command(&ftrace_traceoff_cmd);
 965	if (ret)
 966		return ret;
 967
 968	ret = register_ftrace_command(&ftrace_traceon_cmd);
 969	if (ret)
 970		goto out_free_traceoff;
 971
 972	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
 973	if (ret)
 974		goto out_free_traceon;
 975
 976	ret = register_ftrace_command(&ftrace_dump_cmd);
 977	if (ret)
 978		goto out_free_stacktrace;
 979
 980	ret = register_ftrace_command(&ftrace_cpudump_cmd);
 981	if (ret)
 982		goto out_free_dump;
 983
 984	return 0;
 985
 986 out_free_dump:
 987	unregister_ftrace_command(&ftrace_dump_cmd);
 988 out_free_stacktrace:
 989	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 990 out_free_traceon:
 991	unregister_ftrace_command(&ftrace_traceon_cmd);
 992 out_free_traceoff:
 993	unregister_ftrace_command(&ftrace_traceoff_cmd);
 994
 995	return ret;
 996}
 997#else
 998static inline int init_func_cmd_traceon(void)
 999{
1000	return 0;
1001}
1002#endif /* CONFIG_DYNAMIC_FTRACE */
1003
1004__init int init_function_trace(void)
1005{
1006	init_func_cmd_traceon();
1007	return register_tracer(&function_trace);
1008}