v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * ring buffer based function tracer
  4 *
  5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  7 *
  8 * Based on code from the latency_tracer, that is:
  9 *
 10 *  Copyright (C) 2004-2006 Ingo Molnar
 11 *  Copyright (C) 2004 Nadia Yvette Chambers
 12 */
 13#include <linux/ring_buffer.h>
 14#include <linux/debugfs.h>
 15#include <linux/uaccess.h>
 16#include <linux/ftrace.h>
 17#include <linux/slab.h>
 18#include <linux/fs.h>
 19
 20#include "trace.h"
 21
 22static void tracing_start_function_trace(struct trace_array *tr);
 23static void tracing_stop_function_trace(struct trace_array *tr);
 24static void
 25function_trace_call(unsigned long ip, unsigned long parent_ip,
 26		    struct ftrace_ops *op, struct pt_regs *pt_regs);
 27static void
 28function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 29			  struct ftrace_ops *op, struct pt_regs *pt_regs);
 30static struct tracer_flags func_flags;
 31
 32/* Our option */
 33enum {
 34	TRACE_FUNC_OPT_STACK	= 0x1,
 35};
 36
 37static int allocate_ftrace_ops(struct trace_array *tr)
 38{
 39	struct ftrace_ops *ops;
 40
 41	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 42	if (!ops)
 43		return -ENOMEM;
 44
 45	/* Currently only the non-stack version is supported */
 46	ops->func = function_trace_call;
 47	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
 48
 49	tr->ops = ops;
 50	ops->private = tr;
 51	return 0;
 52}
 53
 54
 55int ftrace_create_function_files(struct trace_array *tr,
 56				 struct dentry *parent)
 57{
 58	int ret;
 59
 60	/*
 61	 * The top level array uses the "global_ops", and the files are
 62	 * created on boot up.
 63	 */
 64	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 65		return 0;
 66
 67	ret = allocate_ftrace_ops(tr);
 68	if (ret)
 69		return ret;
 70
 71	ftrace_create_filter_files(tr->ops, parent);
 72
 73	return 0;
 74}
 75
 76void ftrace_destroy_function_files(struct trace_array *tr)
 77{
 78	ftrace_destroy_filter_files(tr->ops);
 79	kfree(tr->ops);
 80	tr->ops = NULL;
 81}
 82
 83static int function_trace_init(struct trace_array *tr)
 84{
 85	ftrace_func_t func;
 86
 87	/*
 88	 * Instance trace_arrays get their ops allocated
 89	 * at instance creation. Unless it failed
 90	 * the allocation.
 91	 */
 92	if (!tr->ops)
 93		return -ENOMEM;
 94
 95	/* Currently only the global instance can do stack tracing */
 96	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
 97	    func_flags.val & TRACE_FUNC_OPT_STACK)
 98		func = function_stack_trace_call;
 99	else
100		func = function_trace_call;
101
102	ftrace_init_array_ops(tr, func);
103
104	tr->trace_buffer.cpu = get_cpu();
105	put_cpu();
106
107	tracing_start_cmdline_record();
108	tracing_start_function_trace(tr);
109	return 0;
110}
111
112static void function_trace_reset(struct trace_array *tr)
113{
114	tracing_stop_function_trace(tr);
115	tracing_stop_cmdline_record();
116	ftrace_reset_array_ops(tr);
117}
118
119static void function_trace_start(struct trace_array *tr)
120{
121	tracing_reset_online_cpus(&tr->trace_buffer);
122}
123
124static void
125function_trace_call(unsigned long ip, unsigned long parent_ip,
126		    struct ftrace_ops *op, struct pt_regs *pt_regs)
127{
128	struct trace_array *tr = op->private;
129	struct trace_array_cpu *data;
130	unsigned long flags;
131	int bit;
132	int cpu;
133	int pc;
134
135	if (unlikely(!tr->function_enabled))
136		return;
137
138	pc = preempt_count();
139	preempt_disable_notrace();
140
141	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
142	if (bit < 0)
143		goto out;
144
145	cpu = smp_processor_id();
146	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
147	if (!atomic_read(&data->disabled)) {
148		local_save_flags(flags);
149		trace_function(tr, ip, parent_ip, flags, pc);
150	}
151	trace_clear_recursion(bit);
152
153 out:
154	preempt_enable_notrace();
155}
156
157#ifdef CONFIG_UNWINDER_ORC
158/*
159 * Skip 2:
160 *
161 *   function_stack_trace_call()
162 *   ftrace_call()
163 */
164#define STACK_SKIP 2
165#else
166/*
167 * Skip 3:
168 *   __trace_stack()
169 *   function_stack_trace_call()
170 *   ftrace_call()
171 */
172#define STACK_SKIP 3
173#endif
174
175static void
176function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
177			  struct ftrace_ops *op, struct pt_regs *pt_regs)
178{
179	struct trace_array *tr = op->private;
180	struct trace_array_cpu *data;
181	unsigned long flags;
182	long disabled;
183	int cpu;
184	int pc;
185
186	if (unlikely(!tr->function_enabled))
187		return;
188
189	/*
190	 * Need to use raw, since this must be called before the
191	 * recursive protection is performed.
192	 */
193	local_irq_save(flags);
194	cpu = raw_smp_processor_id();
195	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
196	disabled = atomic_inc_return(&data->disabled);
197
198	if (likely(disabled == 1)) {
199		pc = preempt_count();
200		trace_function(tr, ip, parent_ip, flags, pc);
201		__trace_stack(tr, flags, STACK_SKIP, pc);
202	}
203
204	atomic_dec(&data->disabled);
205	local_irq_restore(flags);
206}
207
208static struct tracer_opt func_opts[] = {
209#ifdef CONFIG_STACKTRACE
210	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
211#endif
212	{ } /* Always set a last empty entry */
213};
214
215static struct tracer_flags func_flags = {
216	.val = 0, /* By default: all flags disabled */
217	.opts = func_opts
218};
219
220static void tracing_start_function_trace(struct trace_array *tr)
221{
222	tr->function_enabled = 0;
223	register_ftrace_function(tr->ops);
224	tr->function_enabled = 1;
225}
226
227static void tracing_stop_function_trace(struct trace_array *tr)
228{
229	tr->function_enabled = 0;
230	unregister_ftrace_function(tr->ops);
231}
232
233static struct tracer function_trace;
234
235static int
236func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
237{
238	switch (bit) {
239	case TRACE_FUNC_OPT_STACK:
240		/* do nothing if already set */
241		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
242			break;
243
244		/* We can change this flag when not running. */
245		if (tr->current_trace != &function_trace)
246			break;
247
248		unregister_ftrace_function(tr->ops);
249
250		if (set) {
251			tr->ops->func = function_stack_trace_call;
252			register_ftrace_function(tr->ops);
253		} else {
254			tr->ops->func = function_trace_call;
255			register_ftrace_function(tr->ops);
256		}
257
258		break;
259	default:
260		return -EINVAL;
261	}
262
263	return 0;
264}
265
266static struct tracer function_trace __tracer_data =
267{
268	.name		= "function",
269	.init		= function_trace_init,
270	.reset		= function_trace_reset,
271	.start		= function_trace_start,
272	.flags		= &func_flags,
273	.set_flag	= func_set_flag,
274	.allow_instances = true,
275#ifdef CONFIG_FTRACE_SELFTEST
276	.selftest	= trace_selftest_startup_function,
277#endif
278};
279
280#ifdef CONFIG_DYNAMIC_FTRACE
281static void update_traceon_count(struct ftrace_probe_ops *ops,
282				 unsigned long ip,
283				 struct trace_array *tr, bool on,
284				 void *data)
285{
286	struct ftrace_func_mapper *mapper = data;
287	long *count;
288	long old_count;
289
290	/*
291	 * Tracing gets disabled (or enabled) once per count.
292	 * This function can be called at the same time on multiple CPUs.
293	 * It is fine if both disable (or enable) tracing, as disabling
294	 * (or enabling) the second time doesn't do anything as the
295	 * state of the tracer is already disabled (or enabled).
296	 * What needs to be synchronized in this case is that the count
297	 * only gets decremented once, even if the tracer is disabled
298	 * (or enabled) twice, as the second one is really a nop.
299	 *
300	 * The memory barriers guarantee that we only decrement the
301	 * counter once. First the count is read to a local variable
302	 * and a read barrier is used to make sure that it is loaded
303	 * before checking if the tracer is in the state we want.
304	 * If the tracer is not in the state we want, then the count
305	 * is guaranteed to be the old count.
306	 *
307	 * Next the tracer is set to the state we want (disabled or enabled)
308	 * then a write memory barrier is used to make sure that
309	 * the new state is visible before changing the counter by
310	 * one minus the old counter. This guarantees that another CPU
311	 * executing this code will see the new state before seeing
312	 * the new counter value, and would not do anything if the new
313	 * counter is seen.
314	 *
315	 * Note, there is no synchronization between this and a user
316	 * setting the tracing_on file. But we currently don't care
317	 * about that.
318	 */
319	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
320	old_count = *count;
321
322	if (old_count <= 0)
323		return;
324
325	/* Make sure we see count before checking tracing state */
326	smp_rmb();
327
328	if (on == !!tracer_tracing_is_on(tr))
329		return;
330
331	if (on)
332		tracer_tracing_on(tr);
333	else
334		tracer_tracing_off(tr);
335
336	/* Make sure tracing state is visible before updating count */
337	smp_wmb();
338
339	*count = old_count - 1;
340}
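/*
 * A concrete (hypothetical) run of the scenario described in the comment
 * above: a traceoff:1 probe, tracing currently on, hit by two CPUs at once.
 * Both read old_count = 1, both may still observe tracing as "on" after the
 * smp_rmb(), so both call tracer_tracing_off() (the second call is a no-op)
 * and both store old_count - 1 = 0.  Because each CPU writes the value it
 * computed from its own read instead of doing a blind (*count)--, the
 * counter ends at 0 rather than -1, which is the "decremented once"
 * guarantee the comment relies on.
 */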
341
342static void
343ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
344		     struct trace_array *tr, struct ftrace_probe_ops *ops,
345		     void *data)
346{
347	update_traceon_count(ops, ip, tr, 1, data);
348}
349
350static void
351ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
352		      struct trace_array *tr, struct ftrace_probe_ops *ops,
353		      void *data)
354{
355	update_traceon_count(ops, ip, tr, 0, data);
356}
357
358static void
359ftrace_traceon(unsigned long ip, unsigned long parent_ip,
360	       struct trace_array *tr, struct ftrace_probe_ops *ops,
361	       void *data)
362{
363	if (tracer_tracing_is_on(tr))
364		return;
365
366	tracer_tracing_on(tr);
367}
368
369static void
370ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
371		struct trace_array *tr, struct ftrace_probe_ops *ops,
372		void *data)
373{
374	if (!tracer_tracing_is_on(tr))
375		return;
376
377	tracer_tracing_off(tr);
378}
379
380#ifdef CONFIG_UNWINDER_ORC
381/*
382 * Skip 3:
383 *
384 *   function_trace_probe_call()
385 *   ftrace_ops_assist_func()
386 *   ftrace_call()
387 */
388#define FTRACE_STACK_SKIP 3
389#else
390/*
391 * Skip 5:
392 *
393 *   __trace_stack()
394 *   ftrace_stacktrace()
395 *   function_trace_probe_call()
396 *   ftrace_ops_assist_func()
397 *   ftrace_call()
398 */
399#define FTRACE_STACK_SKIP 5
400#endif
401
402static __always_inline void trace_stack(struct trace_array *tr)
403{
404	unsigned long flags;
405	int pc;
406
407	local_save_flags(flags);
408	pc = preempt_count();
409
410	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
411}
412
413static void
414ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
415		  struct trace_array *tr, struct ftrace_probe_ops *ops,
416		  void *data)
417{
418	trace_stack(tr);
419}
420
421static void
422ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
423			struct trace_array *tr, struct ftrace_probe_ops *ops,
424			void *data)
425{
426	struct ftrace_func_mapper *mapper = data;
427	long *count;
428	long old_count;
429	long new_count;
430
431	if (!tracing_is_on())
432		return;
433
434	/* unlimited? */
435	if (!mapper) {
436		trace_stack(tr);
437		return;
438	}
439
440	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
441
442	/*
443	 * Stack traces should only execute the number of times the
444	 * user specified in the counter.
445	 */
446	do {
447		old_count = *count;
448
449		if (!old_count)
450			return;
451
452		new_count = old_count - 1;
453		new_count = cmpxchg(count, old_count, new_count);
454		if (new_count == old_count)
455			trace_stack(tr);
456
457		if (!tracing_is_on())
458			return;
459
460	} while (new_count != old_count);
461}
462
463static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
464			void *data)
465{
466	struct ftrace_func_mapper *mapper = data;
467	long *count = NULL;
468
469	if (mapper)
470		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
471
472	if (count) {
473		if (*count <= 0)
474			return 0;
475		(*count)--;
476	}
477
478	return 1;
479}
480
481static void
482ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
483		  struct trace_array *tr, struct ftrace_probe_ops *ops,
484		  void *data)
485{
486	if (update_count(ops, ip, data))
487		ftrace_dump(DUMP_ALL);
488}
489
490/* Only dump the current CPU buffer. */
491static void
492ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
493		     struct trace_array *tr, struct ftrace_probe_ops *ops,
494		     void *data)
495{
496	if (update_count(ops, ip, data))
497		ftrace_dump(DUMP_ORIG);
498}
499
500static int
501ftrace_probe_print(const char *name, struct seq_file *m,
502		   unsigned long ip, struct ftrace_probe_ops *ops,
503		   void *data)
504{
505	struct ftrace_func_mapper *mapper = data;
506	long *count = NULL;
507
508	seq_printf(m, "%ps:%s", (void *)ip, name);
509
510	if (mapper)
511		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
512
513	if (count)
514		seq_printf(m, ":count=%ld\n", *count);
515	else
516		seq_puts(m, ":unlimited\n");
517
518	return 0;
519}
520
521static int
522ftrace_traceon_print(struct seq_file *m, unsigned long ip,
523		     struct ftrace_probe_ops *ops,
524		     void *data)
525{
526	return ftrace_probe_print("traceon", m, ip, ops, data);
527}
528
529static int
530ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
531			 struct ftrace_probe_ops *ops, void *data)
532{
533	return ftrace_probe_print("traceoff", m, ip, ops, data);
534}
535
536static int
537ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
538			struct ftrace_probe_ops *ops, void *data)
539{
540	return ftrace_probe_print("stacktrace", m, ip, ops, data);
541}
542
543static int
544ftrace_dump_print(struct seq_file *m, unsigned long ip,
545			struct ftrace_probe_ops *ops, void *data)
546{
547	return ftrace_probe_print("dump", m, ip, ops, data);
548}
549
550static int
551ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
552			struct ftrace_probe_ops *ops, void *data)
553{
554	return ftrace_probe_print("cpudump", m, ip, ops, data);
555}
556
557
558static int
559ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
560		  unsigned long ip, void *init_data, void **data)
561{
562	struct ftrace_func_mapper *mapper = *data;
563
564	if (!mapper) {
565		mapper = allocate_ftrace_func_mapper();
566		if (!mapper)
567			return -ENOMEM;
568		*data = mapper;
569	}
570
571	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
572}
573
574static void
575ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
576		  unsigned long ip, void *data)
577{
578	struct ftrace_func_mapper *mapper = data;
579
580	if (!ip) {
581		free_ftrace_func_mapper(mapper, NULL);
582		return;
583	}
584
585	ftrace_func_mapper_remove_ip(mapper, ip);
586}
587
588static struct ftrace_probe_ops traceon_count_probe_ops = {
589	.func			= ftrace_traceon_count,
590	.print			= ftrace_traceon_print,
591	.init			= ftrace_count_init,
592	.free			= ftrace_count_free,
593};
594
595static struct ftrace_probe_ops traceoff_count_probe_ops = {
596	.func			= ftrace_traceoff_count,
597	.print			= ftrace_traceoff_print,
598	.init			= ftrace_count_init,
599	.free			= ftrace_count_free,
600};
601
602static struct ftrace_probe_ops stacktrace_count_probe_ops = {
603	.func			= ftrace_stacktrace_count,
604	.print			= ftrace_stacktrace_print,
605	.init			= ftrace_count_init,
606	.free			= ftrace_count_free,
607};
608
609static struct ftrace_probe_ops dump_probe_ops = {
610	.func			= ftrace_dump_probe,
611	.print			= ftrace_dump_print,
612	.init			= ftrace_count_init,
613	.free			= ftrace_count_free,
614};
615
616static struct ftrace_probe_ops cpudump_probe_ops = {
617	.func			= ftrace_cpudump_probe,
618	.print			= ftrace_cpudump_print,
619};
620
621static struct ftrace_probe_ops traceon_probe_ops = {
622	.func			= ftrace_traceon,
623	.print			= ftrace_traceon_print,
624};
625
626static struct ftrace_probe_ops traceoff_probe_ops = {
627	.func			= ftrace_traceoff,
628	.print			= ftrace_traceoff_print,
629};
630
631static struct ftrace_probe_ops stacktrace_probe_ops = {
632	.func			= ftrace_stacktrace,
633	.print			= ftrace_stacktrace_print,
634};
635
636static int
637ftrace_trace_probe_callback(struct trace_array *tr,
638			    struct ftrace_probe_ops *ops,
639			    struct ftrace_hash *hash, char *glob,
640			    char *cmd, char *param, int enable)
641{
642	void *count = (void *)-1;
643	char *number;
644	int ret;
645
646	/* hash funcs only work with set_ftrace_filter */
647	if (!enable)
648		return -EINVAL;
649
650	if (glob[0] == '!')
651		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
652
653	if (!param)
654		goto out_reg;
655
656	number = strsep(&param, ":");
657
658	if (!strlen(number))
659		goto out_reg;
660
661	/*
662	 * We use the callback data field (which is a pointer)
663	 * as our counter.
664	 */
665	ret = kstrtoul(number, 0, (unsigned long *)&count);
666	if (ret)
667		return ret;
668
669 out_reg:
670	ret = register_ftrace_function_probe(glob, tr, ops, count);
671
672	return ret < 0 ? ret : 0;
673}
674
675static int
676ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
677			    char *glob, char *cmd, char *param, int enable)
678{
679	struct ftrace_probe_ops *ops;
680
681	if (!tr)
682		return -ENODEV;
683
684	/* we register both traceon and traceoff to this callback */
685	if (strcmp(cmd, "traceon") == 0)
686		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
687	else
688		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
689
690	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
691					   param, enable);
692}
693
694static int
695ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
696			   char *glob, char *cmd, char *param, int enable)
697{
698	struct ftrace_probe_ops *ops;
699
700	if (!tr)
701		return -ENODEV;
702
703	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
704
705	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
706					   param, enable);
707}
708
709static int
710ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
711			   char *glob, char *cmd, char *param, int enable)
712{
713	struct ftrace_probe_ops *ops;
714
715	if (!tr)
716		return -ENODEV;
717
718	ops = &dump_probe_ops;
719
720	/* Only dump once. */
721	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
722					   "1", enable);
723}
724
725static int
726ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
727			   char *glob, char *cmd, char *param, int enable)
728{
729	struct ftrace_probe_ops *ops;
730
731	if (!tr)
732		return -ENODEV;
733
734	ops = &cpudump_probe_ops;
735
736	/* Only dump once. */
737	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
738					   "1", enable);
739}
740
741static struct ftrace_func_command ftrace_traceon_cmd = {
742	.name			= "traceon",
743	.func			= ftrace_trace_onoff_callback,
744};
745
746static struct ftrace_func_command ftrace_traceoff_cmd = {
747	.name			= "traceoff",
748	.func			= ftrace_trace_onoff_callback,
749};
750
751static struct ftrace_func_command ftrace_stacktrace_cmd = {
752	.name			= "stacktrace",
753	.func			= ftrace_stacktrace_callback,
754};
755
756static struct ftrace_func_command ftrace_dump_cmd = {
757	.name			= "dump",
758	.func			= ftrace_dump_callback,
759};
760
761static struct ftrace_func_command ftrace_cpudump_cmd = {
762	.name			= "cpudump",
763	.func			= ftrace_cpudump_callback,
764};
765
766static int __init init_func_cmd_traceon(void)
767{
768	int ret;
769
770	ret = register_ftrace_command(&ftrace_traceoff_cmd);
771	if (ret)
772		return ret;
773
774	ret = register_ftrace_command(&ftrace_traceon_cmd);
775	if (ret)
776		goto out_free_traceoff;
777
778	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
779	if (ret)
780		goto out_free_traceon;
781
782	ret = register_ftrace_command(&ftrace_dump_cmd);
783	if (ret)
784		goto out_free_stacktrace;
785
786	ret = register_ftrace_command(&ftrace_cpudump_cmd);
787	if (ret)
788		goto out_free_dump;
789
790	return 0;
791
792 out_free_dump:
793	unregister_ftrace_command(&ftrace_dump_cmd);
794 out_free_stacktrace:
795	unregister_ftrace_command(&ftrace_stacktrace_cmd);
796 out_free_traceon:
797	unregister_ftrace_command(&ftrace_traceon_cmd);
798 out_free_traceoff:
799	unregister_ftrace_command(&ftrace_traceoff_cmd);
800
801	return ret;
802}
803#else
804static inline int init_func_cmd_traceon(void)
805{
806	return 0;
807}
808#endif /* CONFIG_DYNAMIC_FTRACE */
809
810__init int init_function_trace(void)
811{
812	init_func_cmd_traceon();
813	return register_tracer(&function_trace);
814}
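
The registration path above (tracing_start_function_trace() calling register_ftrace_function(tr->ops)) is the generic ftrace_ops mechanism rather than anything private to this tracer. As a rough, illustrative sketch only (my_ops, my_callback and the module boilerplate are hypothetical, not part of this file), a standalone module hooking the same v5.4 callback interface would look roughly like this:

#include <linux/ftrace.h>
#include <linux/module.h>

/* Invoked on entry to every function still enabled in the ops' filter. */
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Keep this minimal and non-recursive; it runs in hot paths. */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

The traceon/traceoff/stacktrace/dump/cpudump commands registered by init_func_cmd_traceon() are driven from the set_ftrace_filter file in tracefs; writing, for example, "schedule:traceoff:5" arms a counted traceoff probe on schedule(), with the ":5" becoming the count parsed in ftrace_trace_probe_callback().
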
v3.1
  1/*
  2 * ring buffer based function tracer
  3 *
  4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  6 *
  7 * Based on code from the latency_tracer, that is:
  8 *
  9 *  Copyright (C) 2004-2006 Ingo Molnar
 10 *  Copyright (C) 2004 William Lee Irwin III
 11 */
 12#include <linux/ring_buffer.h>
 13#include <linux/debugfs.h>
 14#include <linux/uaccess.h>
 15#include <linux/ftrace.h>
 16#include <linux/fs.h>
 17
 18#include "trace.h"
 19
 20/* function tracing enabled */
 21static int			ftrace_function_enabled;
 22
 23static struct trace_array	*func_trace;
 24
 25static void tracing_start_function_trace(void);
 26static void tracing_stop_function_trace(void);
 27
 28static int function_trace_init(struct trace_array *tr)
 29{
 30	func_trace = tr;
 31	tr->cpu = get_cpu();
 32	put_cpu();
 33
 34	tracing_start_cmdline_record();
 35	tracing_start_function_trace();
 36	return 0;
 37}
 38
 39static void function_trace_reset(struct trace_array *tr)
 40{
 41	tracing_stop_function_trace();
 42	tracing_stop_cmdline_record();
 43}
 44
 45static void function_trace_start(struct trace_array *tr)
 46{
 47	tracing_reset_online_cpus(tr);
 48}
 49
 50static void
 51function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 52{
 53	struct trace_array *tr = func_trace;
 54	struct trace_array_cpu *data;
 55	unsigned long flags;
 56	long disabled;
 57	int cpu;
 58	int pc;
 59
 60	if (unlikely(!ftrace_function_enabled))
 61		return;
 62
 63	pc = preempt_count();
 64	preempt_disable_notrace();
 65	local_save_flags(flags);
 66	cpu = raw_smp_processor_id();
 67	data = tr->data[cpu];
 68	disabled = atomic_inc_return(&data->disabled);
 69
 70	if (likely(disabled == 1))
 71		trace_function(tr, ip, parent_ip, flags, pc);
 72
 73	atomic_dec(&data->disabled);
 74	preempt_enable_notrace();
 75}
 76
 77static void
 78function_trace_call(unsigned long ip, unsigned long parent_ip)
 79{
 80	struct trace_array *tr = func_trace;
 81	struct trace_array_cpu *data;
 82	unsigned long flags;
 83	long disabled;
 84	int cpu;
 85	int pc;
 86
 87	if (unlikely(!ftrace_function_enabled))
 88		return;
 89
 90	/*
 91	 * Need to use raw, since this must be called before the
 92	 * recursive protection is performed.
 93	 */
 94	local_irq_save(flags);
 95	cpu = raw_smp_processor_id();
 96	data = tr->data[cpu];
 97	disabled = atomic_inc_return(&data->disabled);
 98
 99	if (likely(disabled == 1)) {
100		pc = preempt_count();
101		trace_function(tr, ip, parent_ip, flags, pc);
102	}
103
104	atomic_dec(&data->disabled);
105	local_irq_restore(flags);
106}
107
108static void
109function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
110{
111	struct trace_array *tr = func_trace;
112	struct trace_array_cpu *data;
113	unsigned long flags;
114	long disabled;
115	int cpu;
116	int pc;
117
118	if (unlikely(!ftrace_function_enabled))
119		return;
120
121	/*
122	 * Need to use raw, since this must be called before the
123	 * recursive protection is performed.
124	 */
125	local_irq_save(flags);
126	cpu = raw_smp_processor_id();
127	data = tr->data[cpu];
128	disabled = atomic_inc_return(&data->disabled);
129
130	if (likely(disabled == 1)) {
131		pc = preempt_count();
132		trace_function(tr, ip, parent_ip, flags, pc);
133		/*
134		 * skip over 5 funcs:
135		 *    __ftrace_trace_stack,
136		 *    __trace_stack,
137		 *    function_stack_trace_call
138		 *    ftrace_list_func
139		 *    ftrace_call
140		 */
141		__trace_stack(tr, flags, 5, pc);
142	}
143
144	atomic_dec(&data->disabled);
145	local_irq_restore(flags);
146}
147
148
149static struct ftrace_ops trace_ops __read_mostly =
150{
151	.func = function_trace_call,
152	.flags = FTRACE_OPS_FL_GLOBAL,
153};
154
155static struct ftrace_ops trace_stack_ops __read_mostly =
156{
157	.func = function_stack_trace_call,
158	.flags = FTRACE_OPS_FL_GLOBAL,
159};
160
161/* Our two options */
162enum {
163	TRACE_FUNC_OPT_STACK = 0x1,
164};
165
166static struct tracer_opt func_opts[] = {
167#ifdef CONFIG_STACKTRACE
168	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
169#endif
170	{ } /* Always set a last empty entry */
171};
172
173static struct tracer_flags func_flags = {
174	.val = 0, /* By default: all flags disabled */
175	.opts = func_opts
176};
177
178static void tracing_start_function_trace(void)
179{
180	ftrace_function_enabled = 0;
181
182	if (trace_flags & TRACE_ITER_PREEMPTONLY)
183		trace_ops.func = function_trace_call_preempt_only;
184	else
185		trace_ops.func = function_trace_call;
186
187	if (func_flags.val & TRACE_FUNC_OPT_STACK)
188		register_ftrace_function(&trace_stack_ops);
189	else
190		register_ftrace_function(&trace_ops);
191
192	ftrace_function_enabled = 1;
193}
194
195static void tracing_stop_function_trace(void)
196{
197	ftrace_function_enabled = 0;
198
199	if (func_flags.val & TRACE_FUNC_OPT_STACK)
200		unregister_ftrace_function(&trace_stack_ops);
201	else
202		unregister_ftrace_function(&trace_ops);
203}
204
205static int func_set_flag(u32 old_flags, u32 bit, int set)
206{
207	if (bit == TRACE_FUNC_OPT_STACK) {
208		/* do nothing if already set */
209		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
210			return 0;
211
212		if (set) {
213			unregister_ftrace_function(&trace_ops);
214			register_ftrace_function(&trace_stack_ops);
215		} else {
216			unregister_ftrace_function(&trace_stack_ops);
217			register_ftrace_function(&trace_ops);
218		}
219
220		return 0;
221	}
222
223	return -EINVAL;
224}
225
226static struct tracer function_trace __read_mostly =
227{
228	.name		= "function",
229	.init		= function_trace_init,
230	.reset		= function_trace_reset,
231	.start		= function_trace_start,
232	.wait_pipe	= poll_wait_pipe,
233	.flags		= &func_flags,
234	.set_flag	= func_set_flag,
235#ifdef CONFIG_FTRACE_SELFTEST
236	.selftest	= trace_selftest_startup_function,
237#endif
238};
239
240#ifdef CONFIG_DYNAMIC_FTRACE
241static void
242ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
243{
244	long *count = (long *)data;
245
246	if (tracing_is_on())
247		return;
248
249	if (!*count)
250		return;
251
252	if (*count != -1)
253		(*count)--;
254
255	tracing_on();
256}
257
258static void
259ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
260{
261	long *count = (long *)data;
262
263	if (!tracing_is_on())
264		return;
265
266	if (!*count)
267		return;
268
269	if (*count != -1)
270		(*count)--;
271
272	tracing_off();
273}
274
275static int
276ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
277			 struct ftrace_probe_ops *ops, void *data);
278
279static struct ftrace_probe_ops traceon_probe_ops = {
280	.func			= ftrace_traceon,
281	.print			= ftrace_trace_onoff_print,
282};
283
284static struct ftrace_probe_ops traceoff_probe_ops = {
285	.func			= ftrace_traceoff,
286	.print			= ftrace_trace_onoff_print,
287};
288
289static int
290ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
291			 struct ftrace_probe_ops *ops, void *data)
292{
293	long count = (long)data;
294
295	seq_printf(m, "%ps:", (void *)ip);
296
297	if (ops == &traceon_probe_ops)
298		seq_printf(m, "traceon");
299	else
300		seq_printf(m, "traceoff");
301
302	if (count == -1)
303		seq_printf(m, ":unlimited\n");
304	else
305		seq_printf(m, ":count=%ld\n", count);
306
307	return 0;
308}
309
310static int
311ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
312{
313	struct ftrace_probe_ops *ops;
314
315	/* we register both traceon and traceoff to this callback */
316	if (strcmp(cmd, "traceon") == 0)
317		ops = &traceon_probe_ops;
318	else
319		ops = &traceoff_probe_ops;
320
321	unregister_ftrace_function_probe_func(glob, ops);
322
323	return 0;
324}
325
326static int
327ftrace_trace_onoff_callback(struct ftrace_hash *hash,
328			    char *glob, char *cmd, char *param, int enable)
329{
330	struct ftrace_probe_ops *ops;
331	void *count = (void *)-1;
332	char *number;
333	int ret;
334
335	/* hash funcs only work with set_ftrace_filter */
336	if (!enable)
337		return -EINVAL;
338
339	if (glob[0] == '!')
340		return ftrace_trace_onoff_unreg(glob+1, cmd, param);
341
342	/* we register both traceon and traceoff to this callback */
343	if (strcmp(cmd, "traceon") == 0)
344		ops = &traceon_probe_ops;
345	else
346		ops = &traceoff_probe_ops;
347
348	if (!param)
349		goto out_reg;
350
351	number = strsep(&param, ":");
352
353	if (!strlen(number))
354		goto out_reg;
355
356	/*
357	 * We use the callback data field (which is a pointer)
358	 * as our counter.
359	 */
360	ret = strict_strtoul(number, 0, (unsigned long *)&count);
361	if (ret)
362		return ret;
363
364 out_reg:
365	ret = register_ftrace_function_probe(glob, ops, count);
366
367	return ret < 0 ? ret : 0;
368}
369
370static struct ftrace_func_command ftrace_traceon_cmd = {
371	.name			= "traceon",
372	.func			= ftrace_trace_onoff_callback,
373};
374
375static struct ftrace_func_command ftrace_traceoff_cmd = {
376	.name			= "traceoff",
377	.func			= ftrace_trace_onoff_callback,
378};
379
380static int __init init_func_cmd_traceon(void)
381{
382	int ret;
383
384	ret = register_ftrace_command(&ftrace_traceoff_cmd);
385	if (ret)
386		return ret;
387
388	ret = register_ftrace_command(&ftrace_traceon_cmd);
389	if (ret)
390		unregister_ftrace_command(&ftrace_traceoff_cmd);
391	return ret;
392}
393#else
394static inline int init_func_cmd_traceon(void)
395{
396	return 0;
397}
398#endif /* CONFIG_DYNAMIC_FTRACE */
399
400static __init int init_function_trace(void)
401{
402	init_func_cmd_traceon();
403	return register_tracer(&function_trace);
404}
405device_initcall(init_function_trace);
406