v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * ring buffer based function tracer
  4 *
  5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  7 *
  8 * Based on code from the latency_tracer, that is:
  9 *
 10 *  Copyright (C) 2004-2006 Ingo Molnar
 11 *  Copyright (C) 2004 Nadia Yvette Chambers
 12 */
 13#include <linux/ring_buffer.h>
 14#include <linux/debugfs.h>
 15#include <linux/uaccess.h>
 16#include <linux/ftrace.h>
 17#include <linux/slab.h>
 18#include <linux/fs.h>
 19
 20#include "trace.h"
 21
 22static void tracing_start_function_trace(struct trace_array *tr);
 23static void tracing_stop_function_trace(struct trace_array *tr);
 24static void
 25function_trace_call(unsigned long ip, unsigned long parent_ip,
 26		    struct ftrace_ops *op, struct pt_regs *pt_regs);
 27static void
 28function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 29			  struct ftrace_ops *op, struct pt_regs *pt_regs);
 30static struct tracer_flags func_flags;
 31
 32/* Our option */
 33enum {
 34	TRACE_FUNC_OPT_STACK	= 0x1,
 35};
 36
 37static int allocate_ftrace_ops(struct trace_array *tr)
 38{
 39	struct ftrace_ops *ops;
 40
 41	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 42	if (!ops)
 43		return -ENOMEM;
 44
  45	/* Currently only the non-stack version is supported */
 46	ops->func = function_trace_call;
 47	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
 48
 49	tr->ops = ops;
 50	ops->private = tr;
 51	return 0;
 52}
 53
 54
 55int ftrace_create_function_files(struct trace_array *tr,
 56				 struct dentry *parent)
 57{
 58	int ret;
 59
 60	/*
 61	 * The top level array uses the "global_ops", and the files are
 62	 * created on boot up.
 63	 */
 64	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 65		return 0;
 66
 67	ret = allocate_ftrace_ops(tr);
 68	if (ret)
 69		return ret;
 70
 71	ftrace_create_filter_files(tr->ops, parent);
 72
 73	return 0;
 74}
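/*
 * Editor's note (hedged, not part of the original file): for a tracefs
 * instance, the filter files created above appear in that instance's
 * own directory. An illustrative shell session, assuming tracefs is
 * mounted at /sys/kernel/tracing:
 *
 *   # mkdir /sys/kernel/tracing/instances/foo
 *   # echo 'vfs_*' > /sys/kernel/tracing/instances/foo/set_ftrace_filter
 */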
 75
 76void ftrace_destroy_function_files(struct trace_array *tr)
 77{
 78	ftrace_destroy_filter_files(tr->ops);
 79	kfree(tr->ops);
 80	tr->ops = NULL;
 81}
 82
 83static int function_trace_init(struct trace_array *tr)
 84{
 85	ftrace_func_t func;
 86
 87	/*
 88	 * Instance trace_arrays get their ops allocated
 89	 * at instance creation. Unless it failed
 90	 * the allocation.
 91	 */
 92	if (!tr->ops)
 93		return -ENOMEM;
 94
 95	/* Currently only the global instance can do stack tracing */
 96	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
 97	    func_flags.val & TRACE_FUNC_OPT_STACK)
 98		func = function_stack_trace_call;
 99	else
100		func = function_trace_call;
101
102	ftrace_init_array_ops(tr, func);
103
104	tr->trace_buffer.cpu = get_cpu();
105	put_cpu();
106
107	tracing_start_cmdline_record();
108	tracing_start_function_trace(tr);
109	return 0;
110}
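/*
 * Usage sketch (editor's addition, not in the original source): this
 * init path runs when the tracer is selected through tracefs, e.g.:
 *
 *   # echo function > /sys/kernel/tracing/current_tracer
 *   # head /sys/kernel/tracing/trace
 */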
111
112static void function_trace_reset(struct trace_array *tr)
113{
114	tracing_stop_function_trace(tr);
115	tracing_stop_cmdline_record();
116	ftrace_reset_array_ops(tr);
117}
118
119static void function_trace_start(struct trace_array *tr)
120{
121	tracing_reset_online_cpus(&tr->trace_buffer);
122}
123
124static void
125function_trace_call(unsigned long ip, unsigned long parent_ip,
126		    struct ftrace_ops *op, struct pt_regs *pt_regs)
127{
128	struct trace_array *tr = op->private;
129	struct trace_array_cpu *data;
130	unsigned long flags;
131	int bit;
132	int cpu;
133	int pc;
134
135	if (unlikely(!tr->function_enabled))
136		return;
137
138	pc = preempt_count();
139	preempt_disable_notrace();
140
141	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
142	if (bit < 0)
143		goto out;
144
145	cpu = smp_processor_id();
146	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
147	if (!atomic_read(&data->disabled)) {
148		local_save_flags(flags);
149		trace_function(tr, ip, parent_ip, flags, pc);
150	}
151	trace_clear_recursion(bit);
152
153 out:
154	preempt_enable_notrace();
155}
156
157#ifdef CONFIG_UNWINDER_ORC
158/*
159 * Skip 2:
160 *
161 *   function_stack_trace_call()
162 *   ftrace_call()
163 */
164#define STACK_SKIP 2
165#else
166/*
167 * Skip 3:
168 *   __trace_stack()
169 *   function_stack_trace_call()
170 *   ftrace_call()
171 */
172#define STACK_SKIP 3
173#endif
174
175static void
176function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
177			  struct ftrace_ops *op, struct pt_regs *pt_regs)
178{
179	struct trace_array *tr = op->private;
180	struct trace_array_cpu *data;
181	unsigned long flags;
182	long disabled;
183	int cpu;
184	int pc;
185
186	if (unlikely(!tr->function_enabled))
187		return;
188
189	/*
190	 * Need to use raw, since this must be called before the
191	 * recursive protection is performed.
192	 */
193	local_irq_save(flags);
194	cpu = raw_smp_processor_id();
195	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
196	disabled = atomic_inc_return(&data->disabled);
197
198	if (likely(disabled == 1)) {
199		pc = preempt_count();
200		trace_function(tr, ip, parent_ip, flags, pc);
201		__trace_stack(tr, flags, STACK_SKIP, pc);
202	}
203
204	atomic_dec(&data->disabled);
205	local_irq_restore(flags);
206}
207
208static struct tracer_opt func_opts[] = {
209#ifdef CONFIG_STACKTRACE
210	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
211#endif
212	{ } /* Always set a last empty entry */
213};
214
215static struct tracer_flags func_flags = {
216	.val = 0, /* By default: all flags disabled */
217	.opts = func_opts
218};
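/*
 * Hedged usage example (editor's addition): the flag defined above is
 * exposed as a tracefs option and routed to func_set_flag() below,
 * e.g.:
 *
 *   # echo function > /sys/kernel/tracing/current_tracer
 *   # echo 1 > /sys/kernel/tracing/options/func_stack_trace
 */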
219
220static void tracing_start_function_trace(struct trace_array *tr)
221{
222	tr->function_enabled = 0;
223	register_ftrace_function(tr->ops);
224	tr->function_enabled = 1;
225}
226
227static void tracing_stop_function_trace(struct trace_array *tr)
228{
229	tr->function_enabled = 0;
230	unregister_ftrace_function(tr->ops);
231}
232
233static struct tracer function_trace;
234
235static int
236func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
237{
238	switch (bit) {
239	case TRACE_FUNC_OPT_STACK:
240		/* do nothing if already set */
241		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
242			break;
243
244		/* We can change this flag when not running. */
245		if (tr->current_trace != &function_trace)
246			break;
247
248		unregister_ftrace_function(tr->ops);
249
250		if (set) {
251			tr->ops->func = function_stack_trace_call;
252			register_ftrace_function(tr->ops);
253		} else {
254			tr->ops->func = function_trace_call;
255			register_ftrace_function(tr->ops);
256		}
257
258		break;
259	default:
260		return -EINVAL;
261	}
262
263	return 0;
264}
265
266static struct tracer function_trace __tracer_data =
267{
268	.name		= "function",
269	.init		= function_trace_init,
270	.reset		= function_trace_reset,
271	.start		= function_trace_start,
272	.flags		= &func_flags,
273	.set_flag	= func_set_flag,
274	.allow_instances = true,
275#ifdef CONFIG_FTRACE_SELFTEST
276	.selftest	= trace_selftest_startup_function,
277#endif
278};
279
280#ifdef CONFIG_DYNAMIC_FTRACE
281static void update_traceon_count(struct ftrace_probe_ops *ops,
282				 unsigned long ip,
283				 struct trace_array *tr, bool on,
284				 void *data)
285{
286	struct ftrace_func_mapper *mapper = data;
287	long *count;
288	long old_count;
289
290	/*
291	 * Tracing gets disabled (or enabled) once per count.
292	 * This function can be called at the same time on multiple CPUs.
293	 * It is fine if both disable (or enable) tracing, as disabling
294	 * (or enabling) the second time doesn't do anything as the
295	 * state of the tracer is already disabled (or enabled).
296	 * What needs to be synchronized in this case is that the count
297	 * only gets decremented once, even if the tracer is disabled
298	 * (or enabled) twice, as the second one is really a nop.
299	 *
300	 * The memory barriers guarantee that we only decrement the
301	 * counter once. First the count is read to a local variable
302	 * and a read barrier is used to make sure that it is loaded
303	 * before checking if the tracer is in the state we want.
304	 * If the tracer is not in the state we want, then the count
305	 * is guaranteed to be the old count.
306	 *
307	 * Next the tracer is set to the state we want (disabled or enabled)
308	 * then a write memory barrier is used to make sure that
309	 * the new state is visible before changing the counter by
310	 * one minus the old counter. This guarantees that another CPU
311	 * executing this code will see the new state before seeing
312	 * the new counter value, and would not do anything if the new
313	 * counter is seen.
314	 *
315	 * Note, there is no synchronization between this and a user
316	 * setting the tracing_on file. But we currently don't care
317	 * about that.
318	 */
319	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
320	old_count = *count;
321
322	if (old_count <= 0)
323		return;
324
325	/* Make sure we see count before checking tracing state */
326	smp_rmb();
327
328	if (on == !!tracer_tracing_is_on(tr))
329		return;
330
331	if (on)
332		tracer_tracing_on(tr);
333	else
334		tracer_tracing_off(tr);
335
336	/* Make sure tracing state is visible before updating count */
337	smp_wmb();
338
339	*count = old_count - 1;
340}
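/*
 * Illustrative interleaving (editor's sketch, not from the original
 * file) of the barrier pairing above, for two CPUs racing on a
 * traceoff probe with *count == 1 and tracing currently on:
 *
 *   CPU0                            CPU1
 *   old_count = *count   (reads 1)
 *   smp_rmb()
 *   tracing on -> turn it off
 *   smp_wmb()
 *   *count = 0
 *                                   old_count = *count
 *                                   smp_rmb()
 *
 * If CPU1 read the new count (0), it returns at the old_count <= 0
 * check. If it read the stale count (1), it either sees tracing
 * already off and returns without writing, or turns tracing off again
 * (a nop) and stores the same value 0. In every interleaving the
 * counter is effectively decremented exactly once.
 */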
341
342static void
343ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
344		     struct trace_array *tr, struct ftrace_probe_ops *ops,
345		     void *data)
346{
347	update_traceon_count(ops, ip, tr, 1, data);
348}
349
350static void
351ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
352		      struct trace_array *tr, struct ftrace_probe_ops *ops,
353		      void *data)
354{
355	update_traceon_count(ops, ip, tr, 0, data);
356}
357
358static void
359ftrace_traceon(unsigned long ip, unsigned long parent_ip,
360	       struct trace_array *tr, struct ftrace_probe_ops *ops,
361	       void *data)
362{
363	if (tracer_tracing_is_on(tr))
364		return;
365
366	tracer_tracing_on(tr);
367}
368
369static void
370ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
371		struct trace_array *tr, struct ftrace_probe_ops *ops,
372		void *data)
373{
374	if (!tracer_tracing_is_on(tr))
375		return;
376
377	tracer_tracing_off(tr);
378}
379
380#ifdef CONFIG_UNWINDER_ORC
381/*
382 * Skip 3:
383 *
384 *   function_trace_probe_call()
385 *   ftrace_ops_assist_func()
386 *   ftrace_call()
387 */
388#define FTRACE_STACK_SKIP 3
389#else
390/*
391 * Skip 5:
392 *
393 *   __trace_stack()
394 *   ftrace_stacktrace()
395 *   function_trace_probe_call()
396 *   ftrace_ops_assist_func()
397 *   ftrace_call()
398 */
399#define FTRACE_STACK_SKIP 5
400#endif
401
402static __always_inline void trace_stack(struct trace_array *tr)
403{
404	unsigned long flags;
405	int pc;
406
407	local_save_flags(flags);
408	pc = preempt_count();
409
410	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
411}
412
413static void
414ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
415		  struct trace_array *tr, struct ftrace_probe_ops *ops,
416		  void *data)
417{
418	trace_stack(tr);
419}
420
421static void
422ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
423			struct trace_array *tr, struct ftrace_probe_ops *ops,
424			void *data)
425{
426	struct ftrace_func_mapper *mapper = data;
427	long *count;
428	long old_count;
429	long new_count;
430
431	if (!tracing_is_on())
432		return;
433
434	/* unlimited? */
435	if (!mapper) {
436		trace_stack(tr);
437		return;
438	}
439
440	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
441
442	/*
443	 * Stack traces should only execute the number of times the
444	 * user specified in the counter.
445	 */
446	do {
447		old_count = *count;
448
449		if (!old_count)
450			return;
451
452		new_count = old_count - 1;
453		new_count = cmpxchg(count, old_count, new_count);
454		if (new_count == old_count)
455			trace_stack(tr);
456
457		if (!tracing_is_on())
458			return;
459
460	} while (new_count != old_count);
461}
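/*
 * Hedged example (not in the original file): this counted probe backs
 * set_ftrace_filter commands of the form <func>:stacktrace:<count>,
 * e.g.:
 *
 *   # echo 'schedule:stacktrace:5' > /sys/kernel/tracing/set_ftrace_filter
 *
 * The cmpxchg() loop above is what keeps concurrent CPUs from emitting
 * more than the requested number of stack traces.
 */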
462
463static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
464			void *data)
465{
466	struct ftrace_func_mapper *mapper = data;
467	long *count = NULL;
468
469	if (mapper)
470		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
471
472	if (count) {
473		if (*count <= 0)
474			return 0;
475		(*count)--;
476	}
477
478	return 1;
479}
480
481static void
482ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
483		  struct trace_array *tr, struct ftrace_probe_ops *ops,
484		  void *data)
485{
486	if (update_count(ops, ip, data))
487		ftrace_dump(DUMP_ALL);
488}
489
490/* Only dump the current CPU buffer. */
491static void
492ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
493		     struct trace_array *tr, struct ftrace_probe_ops *ops,
494		     void *data)
495{
496	if (update_count(ops, ip, data))
497		ftrace_dump(DUMP_ORIG);
498}
499
500static int
501ftrace_probe_print(const char *name, struct seq_file *m,
502		   unsigned long ip, struct ftrace_probe_ops *ops,
503		   void *data)
504{
505	struct ftrace_func_mapper *mapper = data;
506	long *count = NULL;
507
508	seq_printf(m, "%ps:%s", (void *)ip, name);
509
510	if (mapper)
511		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
512
513	if (count)
514		seq_printf(m, ":count=%ld\n", *count);
515	else
516		seq_puts(m, ":unlimited\n");
517
518	return 0;
519}
520
521static int
522ftrace_traceon_print(struct seq_file *m, unsigned long ip,
523		     struct ftrace_probe_ops *ops,
524		     void *data)
525{
526	return ftrace_probe_print("traceon", m, ip, ops, data);
527}
528
529static int
530ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
531			 struct ftrace_probe_ops *ops, void *data)
532{
533	return ftrace_probe_print("traceoff", m, ip, ops, data);
534}
535
536static int
537ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
538			struct ftrace_probe_ops *ops, void *data)
539{
540	return ftrace_probe_print("stacktrace", m, ip, ops, data);
541}
542
543static int
544ftrace_dump_print(struct seq_file *m, unsigned long ip,
545			struct ftrace_probe_ops *ops, void *data)
546{
547	return ftrace_probe_print("dump", m, ip, ops, data);
548}
549
550static int
551ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
552			struct ftrace_probe_ops *ops, void *data)
553{
554	return ftrace_probe_print("cpudump", m, ip, ops, data);
555}
556
557
558static int
559ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
560		  unsigned long ip, void *init_data, void **data)
561{
562	struct ftrace_func_mapper *mapper = *data;
563
564	if (!mapper) {
565		mapper = allocate_ftrace_func_mapper();
566		if (!mapper)
567			return -ENOMEM;
568		*data = mapper;
569	}
570
571	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
572}
573
574static void
575ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
576		  unsigned long ip, void *data)
577{
578	struct ftrace_func_mapper *mapper = data;
579
580	if (!ip) {
581		free_ftrace_func_mapper(mapper, NULL);
582		return;
583	}
584
585	ftrace_func_mapper_remove_ip(mapper, ip);
586}
587
588static struct ftrace_probe_ops traceon_count_probe_ops = {
589	.func			= ftrace_traceon_count,
590	.print			= ftrace_traceon_print,
591	.init			= ftrace_count_init,
592	.free			= ftrace_count_free,
593};
594
595static struct ftrace_probe_ops traceoff_count_probe_ops = {
596	.func			= ftrace_traceoff_count,
597	.print			= ftrace_traceoff_print,
598	.init			= ftrace_count_init,
599	.free			= ftrace_count_free,
600};
601
602static struct ftrace_probe_ops stacktrace_count_probe_ops = {
603	.func			= ftrace_stacktrace_count,
604	.print			= ftrace_stacktrace_print,
605	.init			= ftrace_count_init,
606	.free			= ftrace_count_free,
607};
608
609static struct ftrace_probe_ops dump_probe_ops = {
610	.func			= ftrace_dump_probe,
611	.print			= ftrace_dump_print,
612	.init			= ftrace_count_init,
613	.free			= ftrace_count_free,
614};
615
616static struct ftrace_probe_ops cpudump_probe_ops = {
617	.func			= ftrace_cpudump_probe,
618	.print			= ftrace_cpudump_print,
619};
620
621static struct ftrace_probe_ops traceon_probe_ops = {
622	.func			= ftrace_traceon,
623	.print			= ftrace_traceon_print,
624};
625
626static struct ftrace_probe_ops traceoff_probe_ops = {
627	.func			= ftrace_traceoff,
628	.print			= ftrace_traceoff_print,
629};
630
631static struct ftrace_probe_ops stacktrace_probe_ops = {
632	.func			= ftrace_stacktrace,
633	.print			= ftrace_stacktrace_print,
634};
635
636static int
637ftrace_trace_probe_callback(struct trace_array *tr,
638			    struct ftrace_probe_ops *ops,
639			    struct ftrace_hash *hash, char *glob,
640			    char *cmd, char *param, int enable)
641{
642	void *count = (void *)-1;
643	char *number;
644	int ret;
645
646	/* hash funcs only work with set_ftrace_filter */
647	if (!enable)
648		return -EINVAL;
649
650	if (glob[0] == '!')
651		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
652
653	if (!param)
654		goto out_reg;
655
656	number = strsep(&param, ":");
657
658	if (!strlen(number))
659		goto out_reg;
660
661	/*
662	 * We use the callback data field (which is a pointer)
663	 * as our counter.
664	 */
665	ret = kstrtoul(number, 0, (unsigned long *)&count);
666	if (ret)
667		return ret;
668
669 out_reg:
670	ret = register_ftrace_function_probe(glob, tr, ops, count);
671
672	return ret < 0 ? ret : 0;
673}
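/*
 * For reference (editor's addition, hedged): the glob/cmd/param
 * strings parsed here come from writes to set_ftrace_filter of the
 * form <glob>:<cmd>[:<count>]; a leading '!' removes an installed
 * probe:
 *
 *   # echo 'vfs_read:traceoff:3' > /sys/kernel/tracing/set_ftrace_filter
 *   # echo '!vfs_read:traceoff' > /sys/kernel/tracing/set_ftrace_filter
 */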
674
675static int
676ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
677			    char *glob, char *cmd, char *param, int enable)
678{
679	struct ftrace_probe_ops *ops;
680
681	if (!tr)
682		return -ENODEV;
683
684	/* we register both traceon and traceoff to this callback */
685	if (strcmp(cmd, "traceon") == 0)
686		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
687	else
688		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
689
690	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
691					   param, enable);
692}
693
694static int
695ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
696			   char *glob, char *cmd, char *param, int enable)
697{
698	struct ftrace_probe_ops *ops;
699
700	if (!tr)
701		return -ENODEV;
702
703	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
704
705	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
706					   param, enable);
707}
708
709static int
710ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
711			   char *glob, char *cmd, char *param, int enable)
712{
713	struct ftrace_probe_ops *ops;
714
715	if (!tr)
716		return -ENODEV;
717
718	ops = &dump_probe_ops;
719
720	/* Only dump once. */
721	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
722					   "1", enable);
723}
724
725static int
726ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
727			   char *glob, char *cmd, char *param, int enable)
728{
729	struct ftrace_probe_ops *ops;
730
731	if (!tr)
732		return -ENODEV;
733
734	ops = &cpudump_probe_ops;
735
736	/* Only dump once. */
737	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
738					   "1", enable);
739}
740
741static struct ftrace_func_command ftrace_traceon_cmd = {
742	.name			= "traceon",
743	.func			= ftrace_trace_onoff_callback,
744};
745
746static struct ftrace_func_command ftrace_traceoff_cmd = {
747	.name			= "traceoff",
748	.func			= ftrace_trace_onoff_callback,
749};
750
751static struct ftrace_func_command ftrace_stacktrace_cmd = {
752	.name			= "stacktrace",
753	.func			= ftrace_stacktrace_callback,
754};
755
756static struct ftrace_func_command ftrace_dump_cmd = {
757	.name			= "dump",
758	.func			= ftrace_dump_callback,
759};
760
761static struct ftrace_func_command ftrace_cpudump_cmd = {
762	.name			= "cpudump",
763	.func			= ftrace_cpudump_callback,
764};
765
766static int __init init_func_cmd_traceon(void)
767{
768	int ret;
769
770	ret = register_ftrace_command(&ftrace_traceoff_cmd);
771	if (ret)
772		return ret;
773
774	ret = register_ftrace_command(&ftrace_traceon_cmd);
775	if (ret)
776		goto out_free_traceoff;
777
778	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
779	if (ret)
780		goto out_free_traceon;
781
782	ret = register_ftrace_command(&ftrace_dump_cmd);
783	if (ret)
784		goto out_free_stacktrace;
785
786	ret = register_ftrace_command(&ftrace_cpudump_cmd);
787	if (ret)
788		goto out_free_dump;
789
790	return 0;
791
792 out_free_dump:
793	unregister_ftrace_command(&ftrace_dump_cmd);
794 out_free_stacktrace:
795	unregister_ftrace_command(&ftrace_stacktrace_cmd);
796 out_free_traceon:
797	unregister_ftrace_command(&ftrace_traceon_cmd);
798 out_free_traceoff:
799	unregister_ftrace_command(&ftrace_traceoff_cmd);
800
801	return ret;
802}
803#else
804static inline int init_func_cmd_traceon(void)
805{
806	return 0;
807}
808#endif /* CONFIG_DYNAMIC_FTRACE */
809
810__init int init_function_trace(void)
811{
812	init_func_cmd_traceon();
813	return register_tracer(&function_trace);
814}
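/*
 * Usage note (editor's addition, hedged): besides runtime selection
 * through tracefs, the tracer registered here can be chosen at boot
 * with the "ftrace=function" kernel command-line parameter, which
 * allows tracing before user space has mounted tracefs.
 */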
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * ring buffer based function tracer
  4 *
  5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  7 *
  8 * Based on code from the latency_tracer, that is:
  9 *
 10 *  Copyright (C) 2004-2006 Ingo Molnar
 11 *  Copyright (C) 2004 Nadia Yvette Chambers
 12 */
 13#include <linux/ring_buffer.h>
 14#include <linux/debugfs.h>
 15#include <linux/uaccess.h>
 16#include <linux/ftrace.h>
 17#include <linux/slab.h>
 18#include <linux/fs.h>
 19
 20#include "trace.h"
 21
 22static void tracing_start_function_trace(struct trace_array *tr);
 23static void tracing_stop_function_trace(struct trace_array *tr);
 24static void
 25function_trace_call(unsigned long ip, unsigned long parent_ip,
 26		    struct ftrace_ops *op, struct ftrace_regs *fregs);
 27static void
 28function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 29			  struct ftrace_ops *op, struct ftrace_regs *fregs);
 30static void
 31function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 32			       struct ftrace_ops *op, struct ftrace_regs *fregs);
 33static void
 34function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 35				     struct ftrace_ops *op,
 36				     struct ftrace_regs *fregs);
 37static struct tracer_flags func_flags;
 38
 39/* Our option */
 40enum {
 41
 42	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
 43	TRACE_FUNC_OPT_STACK		= 0x1,
 44	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,
 45
 46	/* Update this to next highest bit. */
 47	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
 48};
 49
 50#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
 51
 52int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 53{
 54	struct ftrace_ops *ops;
 55
 56	/* The top level array uses the "global_ops" */
 57	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 58		return 0;
 59
 60	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 61	if (!ops)
 62		return -ENOMEM;
 63
 64	/* Currently only the non-stack version is supported */
 65	ops->func = function_trace_call;
 66	ops->flags = FTRACE_OPS_FL_PID;
 67
 68	tr->ops = ops;
 69	ops->private = tr;
 70
 71	return 0;
 72}
 73
 74void ftrace_free_ftrace_ops(struct trace_array *tr)
 75{
 76	kfree(tr->ops);
 77	tr->ops = NULL;
 78}
 79
 80int ftrace_create_function_files(struct trace_array *tr,
 81				 struct dentry *parent)
 82{
 83	/*
 84	 * The top level array uses the "global_ops", and the files are
 85	 * created on boot up.
 86	 */
 87	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 88		return 0;
 89
 90	if (!tr->ops)
 91		return -EINVAL;
 92
 93	ftrace_create_filter_files(tr->ops, parent);
 94
 95	return 0;
 96}
 97
 98void ftrace_destroy_function_files(struct trace_array *tr)
 99{
100	ftrace_destroy_filter_files(tr->ops);
101	ftrace_free_ftrace_ops(tr);
102}
103
104static ftrace_func_t select_trace_function(u32 flags_val)
105{
106	switch (flags_val & TRACE_FUNC_OPT_MASK) {
107	case TRACE_FUNC_NO_OPTS:
108		return function_trace_call;
109	case TRACE_FUNC_OPT_STACK:
110		return function_stack_trace_call;
111	case TRACE_FUNC_OPT_NO_REPEATS:
112		return function_no_repeats_trace_call;
113	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
114		return function_stack_no_repeats_trace_call;
115	default:
116		return NULL;
117	}
118}
119
120static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
121{
122	if (!tr->last_func_repeats &&
123	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
124		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
125		if (!tr->last_func_repeats)
126			return false;
127	}
128
129	return true;
130}
131
132static int function_trace_init(struct trace_array *tr)
133{
134	ftrace_func_t func;
135	/*
136	 * Instance trace_arrays get their ops allocated
137	 * at instance creation. Unless it failed
138	 * the allocation.
139	 */
140	if (!tr->ops)
141		return -ENOMEM;
142
143	func = select_trace_function(func_flags.val);
144	if (!func)
145		return -EINVAL;
146
147	if (!handle_func_repeats(tr, func_flags.val))
148		return -ENOMEM;
149
150	ftrace_init_array_ops(tr, func);
151
152	tr->array_buffer.cpu = raw_smp_processor_id();
153
154	tracing_start_cmdline_record();
155	tracing_start_function_trace(tr);
156	return 0;
157}
158
159static void function_trace_reset(struct trace_array *tr)
160{
161	tracing_stop_function_trace(tr);
162	tracing_stop_cmdline_record();
163	ftrace_reset_array_ops(tr);
164}
165
166static void function_trace_start(struct trace_array *tr)
167{
168	tracing_reset_online_cpus(&tr->array_buffer);
169}
170
171static void
172function_trace_call(unsigned long ip, unsigned long parent_ip,
173		    struct ftrace_ops *op, struct ftrace_regs *fregs)
174{
175	struct trace_array *tr = op->private;
176	struct trace_array_cpu *data;
177	unsigned int trace_ctx;
178	int bit;
179	int cpu;
180
181	if (unlikely(!tr->function_enabled))
182		return;
183
184	bit = ftrace_test_recursion_trylock(ip, parent_ip);
185	if (bit < 0)
186		return;
187
188	trace_ctx = tracing_gen_ctx();
189	preempt_disable_notrace();
190
191	cpu = smp_processor_id();
192	data = per_cpu_ptr(tr->array_buffer.data, cpu);
193	if (!atomic_read(&data->disabled))
194		trace_function(tr, ip, parent_ip, trace_ctx);
195
196	ftrace_test_recursion_unlock(bit);
197	preempt_enable_notrace();
198}
199
200#ifdef CONFIG_UNWINDER_ORC
201/*
202 * Skip 2:
203 *
204 *   function_stack_trace_call()
205 *   ftrace_call()
206 */
207#define STACK_SKIP 2
208#else
209/*
210 * Skip 3:
211 *   __trace_stack()
212 *   function_stack_trace_call()
213 *   ftrace_call()
214 */
215#define STACK_SKIP 3
216#endif
217
218static void
219function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
220			  struct ftrace_ops *op, struct ftrace_regs *fregs)
221{
222	struct trace_array *tr = op->private;
223	struct trace_array_cpu *data;
224	unsigned long flags;
225	long disabled;
226	int cpu;
227	unsigned int trace_ctx;
228
229	if (unlikely(!tr->function_enabled))
230		return;
231
232	/*
233	 * Need to use raw, since this must be called before the
234	 * recursive protection is performed.
235	 */
236	local_irq_save(flags);
237	cpu = raw_smp_processor_id();
238	data = per_cpu_ptr(tr->array_buffer.data, cpu);
239	disabled = atomic_inc_return(&data->disabled);
240
241	if (likely(disabled == 1)) {
242		trace_ctx = tracing_gen_ctx_flags(flags);
243		trace_function(tr, ip, parent_ip, trace_ctx);
244		__trace_stack(tr, trace_ctx, STACK_SKIP);
245	}
246
247	atomic_dec(&data->disabled);
248	local_irq_restore(flags);
249}
250
251static inline bool is_repeat_check(struct trace_array *tr,
252				   struct trace_func_repeats *last_info,
253				   unsigned long ip, unsigned long parent_ip)
254{
255	if (last_info->ip == ip &&
256	    last_info->parent_ip == parent_ip &&
257	    last_info->count < U16_MAX) {
258		last_info->ts_last_call =
259			ring_buffer_time_stamp(tr->array_buffer.buffer);
260		last_info->count++;
261		return true;
262	}
263
264	return false;
265}
266
267static inline void process_repeats(struct trace_array *tr,
268				   unsigned long ip, unsigned long parent_ip,
269				   struct trace_func_repeats *last_info,
270				   unsigned int trace_ctx)
271{
272	if (last_info->count) {
273		trace_last_func_repeats(tr, last_info, trace_ctx);
274		last_info->count = 0;
275	}
276
277	last_info->ip = ip;
278	last_info->parent_ip = parent_ip;
279}
280
281static void
282function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
283			       struct ftrace_ops *op,
284			       struct ftrace_regs *fregs)
285{
286	struct trace_func_repeats *last_info;
287	struct trace_array *tr = op->private;
288	struct trace_array_cpu *data;
289	unsigned int trace_ctx;
290	unsigned long flags;
291	int bit;
292	int cpu;
293
294	if (unlikely(!tr->function_enabled))
295		return;
296
297	bit = ftrace_test_recursion_trylock(ip, parent_ip);
298	if (bit < 0)
299		return;
300
301	preempt_disable_notrace();
302
303	cpu = smp_processor_id();
304	data = per_cpu_ptr(tr->array_buffer.data, cpu);
305	if (atomic_read(&data->disabled))
306		goto out;
307
308	/*
309	 * An interrupt may happen at any place here. But as far as I can see,
310	 * the only damage that this can cause is to mess up the repetition
311	 * counter without valuable data being lost.
312	 * TODO: think about a solution that is better than just hoping to be
313	 * lucky.
314	 */
315	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
316	if (is_repeat_check(tr, last_info, ip, parent_ip))
317		goto out;
318
319	local_save_flags(flags);
320	trace_ctx = tracing_gen_ctx_flags(flags);
321	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
322
323	trace_function(tr, ip, parent_ip, trace_ctx);
324
325out:
326	ftrace_test_recursion_unlock(bit);
327	preempt_enable_notrace();
328}
329
330static void
331function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
332				     struct ftrace_ops *op,
333				     struct ftrace_regs *fregs)
334{
335	struct trace_func_repeats *last_info;
336	struct trace_array *tr = op->private;
337	struct trace_array_cpu *data;
338	unsigned long flags;
339	long disabled;
340	int cpu;
341	unsigned int trace_ctx;
342
343	if (unlikely(!tr->function_enabled))
344		return;
345
346	/*
347	 * Need to use raw, since this must be called before the
348	 * recursive protection is performed.
349	 */
350	local_irq_save(flags);
351	cpu = raw_smp_processor_id();
352	data = per_cpu_ptr(tr->array_buffer.data, cpu);
353	disabled = atomic_inc_return(&data->disabled);
354
355	if (likely(disabled == 1)) {
356		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
357		if (is_repeat_check(tr, last_info, ip, parent_ip))
358			goto out;
359
360		trace_ctx = tracing_gen_ctx_flags(flags);
361		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
362
363		trace_function(tr, ip, parent_ip, trace_ctx);
364		__trace_stack(tr, trace_ctx, STACK_SKIP);
365	}
366
367 out:
368	atomic_dec(&data->disabled);
369	local_irq_restore(flags);
370}
371
372static struct tracer_opt func_opts[] = {
373#ifdef CONFIG_STACKTRACE
374	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
375#endif
376	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
377	{ } /* Always set a last empty entry */
378};
379
380static struct tracer_flags func_flags = {
381	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
382	.opts = func_opts
383};
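/*
 * Hedged usage example (not in the original file): the func-no-repeats
 * option added in this version collapses consecutive identical
 * function entries and can be combined with the stack option, e.g.:
 *
 *   # echo function > /sys/kernel/tracing/current_tracer
 *   # echo 1 > /sys/kernel/tracing/options/func-no-repeats
 */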
384
385static void tracing_start_function_trace(struct trace_array *tr)
386{
387	tr->function_enabled = 0;
388	register_ftrace_function(tr->ops);
389	tr->function_enabled = 1;
390}
391
392static void tracing_stop_function_trace(struct trace_array *tr)
393{
394	tr->function_enabled = 0;
395	unregister_ftrace_function(tr->ops);
396}
397
398static struct tracer function_trace;
399
400static int
401func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
402{
403	ftrace_func_t func;
404	u32 new_flags;
405
406	/* Do nothing if already set. */
407	if (!!set == !!(func_flags.val & bit))
408		return 0;
409
410	/* We can change this flag only when not running. */
411	if (tr->current_trace != &function_trace)
412		return 0;
413
414	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
415	func = select_trace_function(new_flags);
416	if (!func)
417		return -EINVAL;
418
419	/* Check if there's anything to change. */
420	if (tr->ops->func == func)
421		return 0;
422
423	if (!handle_func_repeats(tr, new_flags))
424		return -ENOMEM;
425
426	unregister_ftrace_function(tr->ops);
427	tr->ops->func = func;
428	register_ftrace_function(tr->ops);
429
430	return 0;
431}
432
433static struct tracer function_trace __tracer_data =
434{
435	.name		= "function",
436	.init		= function_trace_init,
437	.reset		= function_trace_reset,
438	.start		= function_trace_start,
439	.flags		= &func_flags,
440	.set_flag	= func_set_flag,
441	.allow_instances = true,
442#ifdef CONFIG_FTRACE_SELFTEST
443	.selftest	= trace_selftest_startup_function,
444#endif
445};
446
447#ifdef CONFIG_DYNAMIC_FTRACE
448static void update_traceon_count(struct ftrace_probe_ops *ops,
449				 unsigned long ip,
450				 struct trace_array *tr, bool on,
451				 void *data)
452{
453	struct ftrace_func_mapper *mapper = data;
454	long *count;
455	long old_count;
456
457	/*
458	 * Tracing gets disabled (or enabled) once per count.
459	 * This function can be called at the same time on multiple CPUs.
460	 * It is fine if both disable (or enable) tracing, as disabling
461	 * (or enabling) the second time doesn't do anything as the
462	 * state of the tracer is already disabled (or enabled).
463	 * What needs to be synchronized in this case is that the count
464	 * only gets decremented once, even if the tracer is disabled
465	 * (or enabled) twice, as the second one is really a nop.
466	 *
467	 * The memory barriers guarantee that we only decrement the
468	 * counter once. First the count is read to a local variable
469	 * and a read barrier is used to make sure that it is loaded
470	 * before checking if the tracer is in the state we want.
471	 * If the tracer is not in the state we want, then the count
472	 * is guaranteed to be the old count.
473	 *
474	 * Next the tracer is set to the state we want (disabled or enabled)
475	 * then a write memory barrier is used to make sure that
476	 * the new state is visible before changing the counter by
477	 * one minus the old counter. This guarantees that another CPU
478	 * executing this code will see the new state before seeing
479	 * the new counter value, and would not do anything if the new
480	 * counter is seen.
481	 *
482	 * Note, there is no synchronization between this and a user
483	 * setting the tracing_on file. But we currently don't care
484	 * about that.
485	 */
486	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
487	old_count = *count;
488
489	if (old_count <= 0)
490		return;
491
492	/* Make sure we see count before checking tracing state */
493	smp_rmb();
494
495	if (on == !!tracer_tracing_is_on(tr))
496		return;
497
498	if (on)
499		tracer_tracing_on(tr);
500	else
501		tracer_tracing_off(tr);
502
503	/* Make sure tracing state is visible before updating count */
504	smp_wmb();
505
506	*count = old_count - 1;
507}
508
509static void
510ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
511		     struct trace_array *tr, struct ftrace_probe_ops *ops,
512		     void *data)
513{
514	update_traceon_count(ops, ip, tr, 1, data);
515}
516
517static void
518ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
519		      struct trace_array *tr, struct ftrace_probe_ops *ops,
520		      void *data)
521{
522	update_traceon_count(ops, ip, tr, 0, data);
523}
524
525static void
526ftrace_traceon(unsigned long ip, unsigned long parent_ip,
527	       struct trace_array *tr, struct ftrace_probe_ops *ops,
528	       void *data)
529{
530	if (tracer_tracing_is_on(tr))
531		return;
532
533	tracer_tracing_on(tr);
534}
535
536static void
537ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
538		struct trace_array *tr, struct ftrace_probe_ops *ops,
539		void *data)
540{
541	if (!tracer_tracing_is_on(tr))
542		return;
543
544	tracer_tracing_off(tr);
545}
546
547#ifdef CONFIG_UNWINDER_ORC
548/*
549 * Skip 3:
550 *
551 *   function_trace_probe_call()
552 *   ftrace_ops_assist_func()
553 *   ftrace_call()
554 */
555#define FTRACE_STACK_SKIP 3
556#else
557/*
558 * Skip 5:
559 *
560 *   __trace_stack()
561 *   ftrace_stacktrace()
562 *   function_trace_probe_call()
563 *   ftrace_ops_assist_func()
564 *   ftrace_call()
565 */
566#define FTRACE_STACK_SKIP 5
567#endif
568
569static __always_inline void trace_stack(struct trace_array *tr)
570{
571	unsigned int trace_ctx;
572
573	trace_ctx = tracing_gen_ctx();
574
575	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
576}
577
578static void
579ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
580		  struct trace_array *tr, struct ftrace_probe_ops *ops,
581		  void *data)
582{
583	trace_stack(tr);
584}
585
586static void
587ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
588			struct trace_array *tr, struct ftrace_probe_ops *ops,
589			void *data)
590{
591	struct ftrace_func_mapper *mapper = data;
592	long *count;
593	long old_count;
594	long new_count;
595
596	if (!tracing_is_on())
597		return;
598
599	/* unlimited? */
600	if (!mapper) {
601		trace_stack(tr);
602		return;
603	}
604
605	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
606
607	/*
608	 * Stack traces should only execute the number of times the
609	 * user specified in the counter.
610	 */
611	do {
612		old_count = *count;
613
614		if (!old_count)
615			return;
616
617		new_count = old_count - 1;
618		new_count = cmpxchg(count, old_count, new_count);
619		if (new_count == old_count)
620			trace_stack(tr);
621
622		if (!tracing_is_on())
623			return;
624
625	} while (new_count != old_count);
626}
627
628static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
629			void *data)
630{
631	struct ftrace_func_mapper *mapper = data;
632	long *count = NULL;
633
634	if (mapper)
635		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
636
637	if (count) {
638		if (*count <= 0)
639			return 0;
640		(*count)--;
641	}
642
643	return 1;
644}
645
646static void
647ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
648		  struct trace_array *tr, struct ftrace_probe_ops *ops,
649		  void *data)
650{
651	if (update_count(ops, ip, data))
652		ftrace_dump(DUMP_ALL);
653}
654
655/* Only dump the current CPU buffer. */
656static void
657ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
658		     struct trace_array *tr, struct ftrace_probe_ops *ops,
659		     void *data)
660{
661	if (update_count(ops, ip, data))
662		ftrace_dump(DUMP_ORIG);
663}
664
665static int
666ftrace_probe_print(const char *name, struct seq_file *m,
667		   unsigned long ip, struct ftrace_probe_ops *ops,
668		   void *data)
669{
670	struct ftrace_func_mapper *mapper = data;
671	long *count = NULL;
672
673	seq_printf(m, "%ps:%s", (void *)ip, name);
674
675	if (mapper)
676		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
677
678	if (count)
679		seq_printf(m, ":count=%ld\n", *count);
680	else
681		seq_puts(m, ":unlimited\n");
682
683	return 0;
684}
685
686static int
687ftrace_traceon_print(struct seq_file *m, unsigned long ip,
688		     struct ftrace_probe_ops *ops,
689		     void *data)
690{
691	return ftrace_probe_print("traceon", m, ip, ops, data);
692}
693
694static int
695ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
696			 struct ftrace_probe_ops *ops, void *data)
697{
698	return ftrace_probe_print("traceoff", m, ip, ops, data);
699}
700
701static int
702ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
703			struct ftrace_probe_ops *ops, void *data)
704{
705	return ftrace_probe_print("stacktrace", m, ip, ops, data);
706}
707
708static int
709ftrace_dump_print(struct seq_file *m, unsigned long ip,
710			struct ftrace_probe_ops *ops, void *data)
711{
712	return ftrace_probe_print("dump", m, ip, ops, data);
713}
714
715static int
716ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
717			struct ftrace_probe_ops *ops, void *data)
718{
719	return ftrace_probe_print("cpudump", m, ip, ops, data);
720}
721
722
723static int
724ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
725		  unsigned long ip, void *init_data, void **data)
726{
727	struct ftrace_func_mapper *mapper = *data;
728
729	if (!mapper) {
730		mapper = allocate_ftrace_func_mapper();
731		if (!mapper)
732			return -ENOMEM;
733		*data = mapper;
734	}
735
736	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
737}
738
739static void
740ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
741		  unsigned long ip, void *data)
742{
743	struct ftrace_func_mapper *mapper = data;
744
745	if (!ip) {
746		free_ftrace_func_mapper(mapper, NULL);
747		return;
748	}
749
750	ftrace_func_mapper_remove_ip(mapper, ip);
751}
752
753static struct ftrace_probe_ops traceon_count_probe_ops = {
754	.func			= ftrace_traceon_count,
755	.print			= ftrace_traceon_print,
756	.init			= ftrace_count_init,
757	.free			= ftrace_count_free,
758};
759
760static struct ftrace_probe_ops traceoff_count_probe_ops = {
761	.func			= ftrace_traceoff_count,
762	.print			= ftrace_traceoff_print,
763	.init			= ftrace_count_init,
764	.free			= ftrace_count_free,
765};
766
767static struct ftrace_probe_ops stacktrace_count_probe_ops = {
768	.func			= ftrace_stacktrace_count,
769	.print			= ftrace_stacktrace_print,
770	.init			= ftrace_count_init,
771	.free			= ftrace_count_free,
772};
773
774static struct ftrace_probe_ops dump_probe_ops = {
775	.func			= ftrace_dump_probe,
776	.print			= ftrace_dump_print,
777	.init			= ftrace_count_init,
778	.free			= ftrace_count_free,
779};
780
781static struct ftrace_probe_ops cpudump_probe_ops = {
782	.func			= ftrace_cpudump_probe,
783	.print			= ftrace_cpudump_print,
784};
785
786static struct ftrace_probe_ops traceon_probe_ops = {
787	.func			= ftrace_traceon,
788	.print			= ftrace_traceon_print,
789};
790
791static struct ftrace_probe_ops traceoff_probe_ops = {
792	.func			= ftrace_traceoff,
793	.print			= ftrace_traceoff_print,
794};
795
796static struct ftrace_probe_ops stacktrace_probe_ops = {
797	.func			= ftrace_stacktrace,
798	.print			= ftrace_stacktrace_print,
799};
800
801static int
802ftrace_trace_probe_callback(struct trace_array *tr,
803			    struct ftrace_probe_ops *ops,
804			    struct ftrace_hash *hash, char *glob,
805			    char *cmd, char *param, int enable)
806{
807	void *count = (void *)-1;
808	char *number;
809	int ret;
810
811	/* hash funcs only work with set_ftrace_filter */
812	if (!enable)
813		return -EINVAL;
814
815	if (glob[0] == '!')
816		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
817
818	if (!param)
819		goto out_reg;
820
821	number = strsep(&param, ":");
822
823	if (!strlen(number))
824		goto out_reg;
825
826	/*
827	 * We use the callback data field (which is a pointer)
828	 * as our counter.
829	 */
830	ret = kstrtoul(number, 0, (unsigned long *)&count);
831	if (ret)
832		return ret;
833
834 out_reg:
835	ret = register_ftrace_function_probe(glob, tr, ops, count);
836
837	return ret < 0 ? ret : 0;
838}
839
840static int
841ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
842			    char *glob, char *cmd, char *param, int enable)
843{
844	struct ftrace_probe_ops *ops;
845
846	if (!tr)
847		return -ENODEV;
848
849	/* we register both traceon and traceoff to this callback */
850	if (strcmp(cmd, "traceon") == 0)
851		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
852	else
853		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
854
855	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
856					   param, enable);
857}
858
859static int
860ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
861			   char *glob, char *cmd, char *param, int enable)
862{
863	struct ftrace_probe_ops *ops;
864
865	if (!tr)
866		return -ENODEV;
867
868	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
869
870	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
871					   param, enable);
872}
873
874static int
875ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
876			   char *glob, char *cmd, char *param, int enable)
877{
878	struct ftrace_probe_ops *ops;
879
880	if (!tr)
881		return -ENODEV;
882
883	ops = &dump_probe_ops;
884
885	/* Only dump once. */
886	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
887					   "1", enable);
888}
889
890static int
891ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
892			   char *glob, char *cmd, char *param, int enable)
893{
894	struct ftrace_probe_ops *ops;
895
896	if (!tr)
897		return -ENODEV;
898
899	ops = &cpudump_probe_ops;
900
901	/* Only dump once. */
902	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
903					   "1", enable);
904}
905
906static struct ftrace_func_command ftrace_traceon_cmd = {
907	.name			= "traceon",
908	.func			= ftrace_trace_onoff_callback,
909};
910
911static struct ftrace_func_command ftrace_traceoff_cmd = {
912	.name			= "traceoff",
913	.func			= ftrace_trace_onoff_callback,
914};
915
916static struct ftrace_func_command ftrace_stacktrace_cmd = {
917	.name			= "stacktrace",
918	.func			= ftrace_stacktrace_callback,
919};
920
921static struct ftrace_func_command ftrace_dump_cmd = {
922	.name			= "dump",
923	.func			= ftrace_dump_callback,
924};
925
926static struct ftrace_func_command ftrace_cpudump_cmd = {
927	.name			= "cpudump",
928	.func			= ftrace_cpudump_callback,
929};
930
931static int __init init_func_cmd_traceon(void)
932{
933	int ret;
934
935	ret = register_ftrace_command(&ftrace_traceoff_cmd);
936	if (ret)
937		return ret;
938
939	ret = register_ftrace_command(&ftrace_traceon_cmd);
940	if (ret)
941		goto out_free_traceoff;
942
943	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
944	if (ret)
945		goto out_free_traceon;
946
947	ret = register_ftrace_command(&ftrace_dump_cmd);
948	if (ret)
949		goto out_free_stacktrace;
950
951	ret = register_ftrace_command(&ftrace_cpudump_cmd);
952	if (ret)
953		goto out_free_dump;
954
955	return 0;
956
957 out_free_dump:
958	unregister_ftrace_command(&ftrace_dump_cmd);
959 out_free_stacktrace:
960	unregister_ftrace_command(&ftrace_stacktrace_cmd);
961 out_free_traceon:
962	unregister_ftrace_command(&ftrace_traceon_cmd);
963 out_free_traceoff:
964	unregister_ftrace_command(&ftrace_traceoff_cmd);
965
966	return ret;
967}
968#else
969static inline int init_func_cmd_traceon(void)
970{
971	return 0;
972}
973#endif /* CONFIG_DYNAMIC_FTRACE */
974
975__init int init_function_trace(void)
976{
977	init_func_cmd_traceon();
978	return register_tracer(&function_trace);
979}