v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}


int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation, unless the allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

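/*
 * Editorial note on the callback below: preemption is disabled so the CPU
 * cannot change between smp_processor_id() and the per-CPU buffer access,
 * and a per-context recursion bit keeps the callback from tracing itself.
 */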
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

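/*
 * Editorial note: function_enabled gates the callbacks above; it is kept
 * clear while the ftrace_ops is being registered or unregistered, so no
 * events are recorded during setup or teardown.
 */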
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before updating the counter to the
	 * old count minus one. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

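/*
 * Editorial note: counted variant. The user-supplied count must be
 * consumed exactly once per hit even when several CPUs fire the probe
 * concurrently, so the decrement is done with cmpxchg() and only the
 * winning CPU dumps the stack.
 */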
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}


static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
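
The probe commands registered above (traceon, traceoff, stacktrace, dump, cpudump) are driven from user space by writing "function:command[:count]" strings into the set_ftrace_filter file in tracefs. As a minimal sketch of that interface (assuming tracefs is mounted at /sys/kernel/tracing and using schedule purely as an example function; this program is illustrative and not part of the kernel source):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Arm a one-shot probe: the first call to schedule() turns tracing
	 * off, which is handled by ftrace_traceoff_count() above. */
	static const char cmd[] = "schedule:traceoff:1";
	int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);

	if (fd < 0) {
		perror("set_ftrace_filter");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
		perror("write");
	close(fd);
	return 0;
}

The equivalent shell one-liner is echo 'schedule:traceoff:1' > /sys/kernel/tracing/set_ftrace_filter; prefixing the pattern with '!' removes the probe again, which corresponds to the glob[0] == '!' branch in ftrace_trace_probe_callback().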
v4.6
 
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}


int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated
	 * at instance creation, unless the allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
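/*
 * Editorial note: in this version the probe's data pointer is used
 * directly as the counter (-1 meaning unlimited) and tracing_on()/
 * tracing_off() toggle the global trace buffer; the v5.4 code above keeps
 * counts in a per-ip ftrace_func_mapper and uses the per-instance
 * tracer_tracing_on()/tracer_tracing_off().
 */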
static void update_traceon_count(void **data, bool on)
{
	long *count = (long *)data;
	long old_count = *count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before updating the counter to the
	 * old count minus one. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	if (!old_count)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracing_is_on())
		return;

	if (on)
		tracing_on();
	else
		tracing_off();

	/* unlimited? */
	if (old_count == -1)
		return;

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	update_traceon_count(data, 1);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	update_traceon_count(data, 0);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;
	long old_count;
	long new_count;

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {

		if (!tracing_is_on())
			return;

		old_count = *count;

		if (!old_count)
			return;

		/* unlimited? */
		if (old_count == -1) {
			trace_dump_stack(STACK_SKIP);
			return;
		}

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_dump_stack(STACK_SKIP);

	} while (new_count != old_count);
}

static int update_count(void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return 0;

	if (*count != -1)
		(*count)--;

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);
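
Both versions use the same decrement-once idiom in ftrace_stacktrace_count(): the kernel's cmpxchg() returns the value previously stored, so the test new_count == old_count is true only on the CPU whose compare-and-swap actually won, and only that CPU emits the stack trace. A rough userspace analogue of the pattern, using C11 atomics (an illustration of the idiom, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Consume one "ticket" from a shared counter. Returns true only for the
 * caller whose compare-and-swap succeeded, mirroring the cmpxchg() loop
 * in ftrace_stacktrace_count(). */
static bool consume_ticket(_Atomic long *count)
{
	long old = atomic_load(count);

	while (old > 0) {
		/* On success we swapped old -> old - 1 and own this ticket;
		 * on failure 'old' is reloaded with the current value and
		 * we retry. */
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return true;
	}
	return false;	/* counter exhausted */
}

int main(void)
{
	_Atomic long tickets = 3;

	while (consume_ticket(&tickets))
		printf("would take a stack trace here\n");
	return 0;
}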