v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * ring buffer based function tracer
  4 *
  5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  7 *
  8 * Based on code from the latency_tracer, that is:
  9 *
 10 *  Copyright (C) 2004-2006 Ingo Molnar
 11 *  Copyright (C) 2004 Nadia Yvette Chambers
 12 */
 13#include <linux/ring_buffer.h>
 14#include <linux/debugfs.h>
 15#include <linux/uaccess.h>
 16#include <linux/ftrace.h>
 17#include <linux/slab.h>
 18#include <linux/fs.h>
 19
 20#include "trace.h"
 21
 22static void tracing_start_function_trace(struct trace_array *tr);
 23static void tracing_stop_function_trace(struct trace_array *tr);
 24static void
 25function_trace_call(unsigned long ip, unsigned long parent_ip,
 26		    struct ftrace_ops *op, struct ftrace_regs *fregs);
 27static void
 28function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
 29			  struct ftrace_ops *op, struct ftrace_regs *fregs);
 30static void
 31function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 32			       struct ftrace_ops *op, struct ftrace_regs *fregs);
 33static void
 34function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
 35				     struct ftrace_ops *op,
 36				     struct ftrace_regs *fregs);
 37static struct tracer_flags func_flags;
 38
 39/* Our option */
 40enum {
 41
 42	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
 43	TRACE_FUNC_OPT_STACK		= 0x1,
 44	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,
 45
 46	/* Update this to next highest bit. */
 47	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
 48};
 49
 50#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
 51
 52int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 53{
 54	struct ftrace_ops *ops;
 55
 56	/* The top level array uses the "global_ops" */
 57	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 58		return 0;
 59
 60	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 61	if (!ops)
 62		return -ENOMEM;
 63
 64	/* Currently only the non stack version is supported */
 65	ops->func = function_trace_call;
 66	ops->flags = FTRACE_OPS_FL_PID;
 67
 68	tr->ops = ops;
 69	ops->private = tr;
 70
 71	return 0;
 72}
 73
 74void ftrace_free_ftrace_ops(struct trace_array *tr)
 75{
 76	kfree(tr->ops);
 77	tr->ops = NULL;
 78}
 79
 80int ftrace_create_function_files(struct trace_array *tr,
 81				 struct dentry *parent)
 82{
 83	/*
 84	 * The top level array uses the "global_ops", and the files are
 85	 * created on boot up.
 86	 */
 87	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 88		return 0;
 89
 90	if (!tr->ops)
 91		return -EINVAL;
 92
 93	ftrace_create_filter_files(tr->ops, parent);
 94
 95	return 0;
 96}
 97
 98void ftrace_destroy_function_files(struct trace_array *tr)
 99{
100	ftrace_destroy_filter_files(tr->ops);
101	ftrace_free_ftrace_ops(tr);
102}
103
104static ftrace_func_t select_trace_function(u32 flags_val)
105{
106	switch (flags_val & TRACE_FUNC_OPT_MASK) {
107	case TRACE_FUNC_NO_OPTS:
108		return function_trace_call;
109	case TRACE_FUNC_OPT_STACK:
110		return function_stack_trace_call;
111	case TRACE_FUNC_OPT_NO_REPEATS:
112		return function_no_repeats_trace_call;
113	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
114		return function_stack_no_repeats_trace_call;
115	default:
116		return NULL;
117	}
118}
119
120static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
121{
122	if (!tr->last_func_repeats &&
123	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
124		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
125		if (!tr->last_func_repeats)
126			return false;
127	}
128
129	return true;
130}
131
132static int function_trace_init(struct trace_array *tr)
133{
134	ftrace_func_t func;
135	/*
136	 * Instance trace_arrays get their ops allocated
137	 * at instance creation. Unless it failed
138	 * the allocation.
139	 */
140	if (!tr->ops)
141		return -ENOMEM;
142
143	func = select_trace_function(func_flags.val);
144	if (!func)
145		return -EINVAL;
146
147	if (!handle_func_repeats(tr, func_flags.val))
148		return -ENOMEM;
149
150	ftrace_init_array_ops(tr, func);
151
152	tr->array_buffer.cpu = raw_smp_processor_id();
153
154	tracing_start_cmdline_record();
155	tracing_start_function_trace(tr);
156	return 0;
157}
158
159static void function_trace_reset(struct trace_array *tr)
160{
161	tracing_stop_function_trace(tr);
162	tracing_stop_cmdline_record();
163	ftrace_reset_array_ops(tr);
164}
165
166static void function_trace_start(struct trace_array *tr)
167{
168	tracing_reset_online_cpus(&tr->array_buffer);
169}
170
171static void
172function_trace_call(unsigned long ip, unsigned long parent_ip,
173		    struct ftrace_ops *op, struct ftrace_regs *fregs)
174{
175	struct trace_array *tr = op->private;
176	struct trace_array_cpu *data;
177	unsigned int trace_ctx;
178	int bit;
179	int cpu;
180
181	if (unlikely(!tr->function_enabled))
182		return;
183
184	bit = ftrace_test_recursion_trylock(ip, parent_ip);
185	if (bit < 0)
186		return;
187
188	trace_ctx = tracing_gen_ctx();
189
190	cpu = smp_processor_id();
191	data = per_cpu_ptr(tr->array_buffer.data, cpu);
192	if (!atomic_read(&data->disabled))
193		trace_function(tr, ip, parent_ip, trace_ctx);
194
195	ftrace_test_recursion_unlock(bit);
196}
197
198#ifdef CONFIG_UNWINDER_ORC
199/*
200 * Skip 2:
201 *
202 *   function_stack_trace_call()
203 *   ftrace_call()
204 */
205#define STACK_SKIP 2
206#else
207/*
208 * Skip 3:
209 *   __trace_stack()
210 *   function_stack_trace_call()
211 *   ftrace_call()
212 */
213#define STACK_SKIP 3
214#endif
215
216static void
217function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
218			  struct ftrace_ops *op, struct ftrace_regs *fregs)
219{
220	struct trace_array *tr = op->private;
221	struct trace_array_cpu *data;
222	unsigned long flags;
223	long disabled;
224	int cpu;
225	unsigned int trace_ctx;
226
227	if (unlikely(!tr->function_enabled))
228		return;
229
230	/*
231	 * Need to use raw, since this must be called before the
232	 * recursive protection is performed.
233	 */
234	local_irq_save(flags);
235	cpu = raw_smp_processor_id();
236	data = per_cpu_ptr(tr->array_buffer.data, cpu);
237	disabled = atomic_inc_return(&data->disabled);
238
239	if (likely(disabled == 1)) {
240		trace_ctx = tracing_gen_ctx_flags(flags);
241		trace_function(tr, ip, parent_ip, trace_ctx);
242		__trace_stack(tr, trace_ctx, STACK_SKIP);
243	}
244
245	atomic_dec(&data->disabled);
246	local_irq_restore(flags);
247}
248
249static inline bool is_repeat_check(struct trace_array *tr,
250				   struct trace_func_repeats *last_info,
251				   unsigned long ip, unsigned long parent_ip)
252{
253	if (last_info->ip == ip &&
254	    last_info->parent_ip == parent_ip &&
255	    last_info->count < U16_MAX) {
256		last_info->ts_last_call =
257			ring_buffer_time_stamp(tr->array_buffer.buffer);
258		last_info->count++;
259		return true;
260	}
261
262	return false;
263}
264
265static inline void process_repeats(struct trace_array *tr,
266				   unsigned long ip, unsigned long parent_ip,
267				   struct trace_func_repeats *last_info,
268				   unsigned int trace_ctx)
269{
270	if (last_info->count) {
271		trace_last_func_repeats(tr, last_info, trace_ctx);
272		last_info->count = 0;
273	}
274
275	last_info->ip = ip;
276	last_info->parent_ip = parent_ip;
277}
278
279static void
280function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
281			       struct ftrace_ops *op,
282			       struct ftrace_regs *fregs)
283{
284	struct trace_func_repeats *last_info;
285	struct trace_array *tr = op->private;
286	struct trace_array_cpu *data;
287	unsigned int trace_ctx;
288	unsigned long flags;
289	int bit;
290	int cpu;
291
292	if (unlikely(!tr->function_enabled))
293		return;
294
295	bit = ftrace_test_recursion_trylock(ip, parent_ip);
296	if (bit < 0)
297		return;
298
299	cpu = smp_processor_id();
300	data = per_cpu_ptr(tr->array_buffer.data, cpu);
301	if (atomic_read(&data->disabled))
302		goto out;
303
304	/*
305	 * An interrupt may happen at any place here. But as far as I can see,
306	 * the only damage that this can cause is to mess up the repetition
307	 * counter without valuable data being lost.
308	 * TODO: think about a solution that is better than just hoping to be
309	 * lucky.
310	 */
311	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
312	if (is_repeat_check(tr, last_info, ip, parent_ip))
313		goto out;
314
315	local_save_flags(flags);
316	trace_ctx = tracing_gen_ctx_flags(flags);
317	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
318
319	trace_function(tr, ip, parent_ip, trace_ctx);
320
321out:
322	ftrace_test_recursion_unlock(bit);
323}
324
325static void
326function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
327				     struct ftrace_ops *op,
328				     struct ftrace_regs *fregs)
329{
330	struct trace_func_repeats *last_info;
331	struct trace_array *tr = op->private;
332	struct trace_array_cpu *data;
333	unsigned long flags;
334	long disabled;
335	int cpu;
336	unsigned int trace_ctx;
337
338	if (unlikely(!tr->function_enabled))
339		return;
340
341	/*
342	 * Need to use raw, since this must be called before the
343	 * recursive protection is performed.
344	 */
345	local_irq_save(flags);
346	cpu = raw_smp_processor_id();
347	data = per_cpu_ptr(tr->array_buffer.data, cpu);
348	disabled = atomic_inc_return(&data->disabled);
349
350	if (likely(disabled == 1)) {
351		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
352		if (is_repeat_check(tr, last_info, ip, parent_ip))
353			goto out;
354
355		trace_ctx = tracing_gen_ctx_flags(flags);
356		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
357
358		trace_function(tr, ip, parent_ip, trace_ctx);
359		__trace_stack(tr, trace_ctx, STACK_SKIP);
360	}
361
362 out:
363	atomic_dec(&data->disabled);
364	local_irq_restore(flags);
365}
366
367static struct tracer_opt func_opts[] = {
368#ifdef CONFIG_STACKTRACE
369	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
370#endif
371	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
372	{ } /* Always set a last empty entry */
373};
374
375static struct tracer_flags func_flags = {
376	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
377	.opts = func_opts
378};
379
380static void tracing_start_function_trace(struct trace_array *tr)
381{
382	tr->function_enabled = 0;
383	register_ftrace_function(tr->ops);
384	tr->function_enabled = 1;
385}
386
387static void tracing_stop_function_trace(struct trace_array *tr)
388{
389	tr->function_enabled = 0;
390	unregister_ftrace_function(tr->ops);
391}
392
393static struct tracer function_trace;
394
395static int
396func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
397{
398	ftrace_func_t func;
399	u32 new_flags;
400
401	/* Do nothing if already set. */
402	if (!!set == !!(func_flags.val & bit))
403		return 0;
404
405	/* We can change this flag only when not running. */
406	if (tr->current_trace != &function_trace)
407		return 0;
408
409	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
410	func = select_trace_function(new_flags);
411	if (!func)
412		return -EINVAL;
413
414	/* Check if there's anything to change. */
415	if (tr->ops->func == func)
416		return 0;
417
418	if (!handle_func_repeats(tr, new_flags))
419		return -ENOMEM;
420
421	unregister_ftrace_function(tr->ops);
422	tr->ops->func = func;
423	register_ftrace_function(tr->ops);
424
425	return 0;
426}
427
428static struct tracer function_trace __tracer_data =
429{
430	.name		= "function",
431	.init		= function_trace_init,
432	.reset		= function_trace_reset,
433	.start		= function_trace_start,
434	.flags		= &func_flags,
435	.set_flag	= func_set_flag,
436	.allow_instances = true,
437#ifdef CONFIG_FTRACE_SELFTEST
438	.selftest	= trace_selftest_startup_function,
439#endif
440};
441
442#ifdef CONFIG_DYNAMIC_FTRACE
443static void update_traceon_count(struct ftrace_probe_ops *ops,
444				 unsigned long ip,
445				 struct trace_array *tr, bool on,
446				 void *data)
447{
448	struct ftrace_func_mapper *mapper = data;
449	long *count;
450	long old_count;
451
452	/*
453	 * Tracing gets disabled (or enabled) once per count.
454	 * This function can be called at the same time on multiple CPUs.
455	 * It is fine if both disable (or enable) tracing, as disabling
456	 * (or enabling) the second time doesn't do anything as the
457	 * state of the tracer is already disabled (or enabled).
458	 * What needs to be synchronized in this case is that the count
459	 * only gets decremented once, even if the tracer is disabled
460	 * (or enabled) twice, as the second one is really a nop.
461	 *
462	 * The memory barriers guarantee that we only decrement the
463	 * counter once. First the count is read to a local variable
464	 * and a read barrier is used to make sure that it is loaded
465	 * before checking if the tracer is in the state we want.
466	 * If the tracer is not in the state we want, then the count
467	 * is guaranteed to be the old count.
468	 *
469	 * Next the tracer is set to the state we want (disabled or enabled)
470	 * then a write memory barrier is used to make sure that
471	 * the new state is visible before changing the counter by
472	 * one minus the old counter. This guarantees that another CPU
473	 * executing this code will see the new state before seeing
474	 * the new counter value, and would not do anything if the new
475	 * counter is seen.
476	 *
477	 * Note, there is no synchronization between this and a user
478	 * setting the tracing_on file. But we currently don't care
479	 * about that.
480	 */
481	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
482	old_count = *count;
483
484	if (old_count <= 0)
485		return;
486
487	/* Make sure we see count before checking tracing state */
488	smp_rmb();
489
490	if (on == !!tracer_tracing_is_on(tr))
491		return;
492
493	if (on)
494		tracer_tracing_on(tr);
495	else
496		tracer_tracing_off(tr);
497
498	/* Make sure tracing state is visible before updating count */
499	smp_wmb();
500
501	*count = old_count - 1;
502}
503
504static void
505ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
506		     struct trace_array *tr, struct ftrace_probe_ops *ops,
507		     void *data)
508{
509	update_traceon_count(ops, ip, tr, 1, data);
510}
511
512static void
513ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
514		      struct trace_array *tr, struct ftrace_probe_ops *ops,
515		      void *data)
516{
517	update_traceon_count(ops, ip, tr, 0, data);
518}
519
520static void
521ftrace_traceon(unsigned long ip, unsigned long parent_ip,
522	       struct trace_array *tr, struct ftrace_probe_ops *ops,
523	       void *data)
524{
525	if (tracer_tracing_is_on(tr))
526		return;
527
528	tracer_tracing_on(tr);
529}
530
531static void
532ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
533		struct trace_array *tr, struct ftrace_probe_ops *ops,
534		void *data)
535{
536	if (!tracer_tracing_is_on(tr))
537		return;
538
539	tracer_tracing_off(tr);
540}
541
542#ifdef CONFIG_UNWINDER_ORC
543/*
544 * Skip 3:
545 *
546 *   function_trace_probe_call()
547 *   ftrace_ops_assist_func()
548 *   ftrace_call()
549 */
550#define FTRACE_STACK_SKIP 3
551#else
552/*
553 * Skip 5:
554 *
555 *   __trace_stack()
556 *   ftrace_stacktrace()
557 *   function_trace_probe_call()
558 *   ftrace_ops_assist_func()
559 *   ftrace_call()
560 */
561#define FTRACE_STACK_SKIP 5
562#endif
563
564static __always_inline void trace_stack(struct trace_array *tr)
565{
566	unsigned int trace_ctx;
567
568	trace_ctx = tracing_gen_ctx();
569
570	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
571}
572
573static void
574ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
575		  struct trace_array *tr, struct ftrace_probe_ops *ops,
576		  void *data)
577{
578	trace_stack(tr);
579}
580
581static void
582ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
583			struct trace_array *tr, struct ftrace_probe_ops *ops,
584			void *data)
585{
586	struct ftrace_func_mapper *mapper = data;
587	long *count;
588	long old_count;
589	long new_count;
590
591	if (!tracing_is_on())
592		return;
593
594	/* unlimited? */
595	if (!mapper) {
596		trace_stack(tr);
597		return;
598	}
599
600	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
601
602	/*
603	 * Stack traces should only execute the number of times the
604	 * user specified in the counter.
605	 */
606	do {
607		old_count = *count;
608
609		if (!old_count)
610			return;
611
612		new_count = old_count - 1;
613		new_count = cmpxchg(count, old_count, new_count);
614		if (new_count == old_count)
615			trace_stack(tr);
616
617		if (!tracing_is_on())
618			return;
619
620	} while (new_count != old_count);
621}
622
623static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
624			void *data)
625{
626	struct ftrace_func_mapper *mapper = data;
627	long *count = NULL;
628
629	if (mapper)
630		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
631
632	if (count) {
633		if (*count <= 0)
634			return 0;
635		(*count)--;
636	}
637
638	return 1;
639}
640
641static void
642ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
643		  struct trace_array *tr, struct ftrace_probe_ops *ops,
644		  void *data)
645{
646	if (update_count(ops, ip, data))
647		ftrace_dump(DUMP_ALL);
648}
649
650/* Only dump the current CPU buffer. */
651static void
652ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
653		     struct trace_array *tr, struct ftrace_probe_ops *ops,
654		     void *data)
655{
656	if (update_count(ops, ip, data))
657		ftrace_dump(DUMP_ORIG);
658}
659
660static int
661ftrace_probe_print(const char *name, struct seq_file *m,
662		   unsigned long ip, struct ftrace_probe_ops *ops,
663		   void *data)
664{
665	struct ftrace_func_mapper *mapper = data;
666	long *count = NULL;
667
668	seq_printf(m, "%ps:%s", (void *)ip, name);
669
670	if (mapper)
671		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
672
673	if (count)
674		seq_printf(m, ":count=%ld\n", *count);
675	else
676		seq_puts(m, ":unlimited\n");
677
678	return 0;
679}
680
681static int
682ftrace_traceon_print(struct seq_file *m, unsigned long ip,
683		     struct ftrace_probe_ops *ops,
684		     void *data)
685{
686	return ftrace_probe_print("traceon", m, ip, ops, data);
687}
688
689static int
690ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
691			 struct ftrace_probe_ops *ops, void *data)
692{
693	return ftrace_probe_print("traceoff", m, ip, ops, data);
694}
695
696static int
697ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
698			struct ftrace_probe_ops *ops, void *data)
699{
700	return ftrace_probe_print("stacktrace", m, ip, ops, data);
701}
702
703static int
704ftrace_dump_print(struct seq_file *m, unsigned long ip,
705			struct ftrace_probe_ops *ops, void *data)
706{
707	return ftrace_probe_print("dump", m, ip, ops, data);
708}
709
710static int
711ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
712			struct ftrace_probe_ops *ops, void *data)
713{
714	return ftrace_probe_print("cpudump", m, ip, ops, data);
715}
716
717
718static int
719ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
720		  unsigned long ip, void *init_data, void **data)
721{
722	struct ftrace_func_mapper *mapper = *data;
723
724	if (!mapper) {
725		mapper = allocate_ftrace_func_mapper();
726		if (!mapper)
727			return -ENOMEM;
728		*data = mapper;
729	}
730
731	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
732}
733
734static void
735ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
736		  unsigned long ip, void *data)
737{
738	struct ftrace_func_mapper *mapper = data;
739
740	if (!ip) {
741		free_ftrace_func_mapper(mapper, NULL);
742		return;
743	}
744
745	ftrace_func_mapper_remove_ip(mapper, ip);
746}
747
748static struct ftrace_probe_ops traceon_count_probe_ops = {
749	.func			= ftrace_traceon_count,
750	.print			= ftrace_traceon_print,
751	.init			= ftrace_count_init,
752	.free			= ftrace_count_free,
753};
754
755static struct ftrace_probe_ops traceoff_count_probe_ops = {
756	.func			= ftrace_traceoff_count,
757	.print			= ftrace_traceoff_print,
758	.init			= ftrace_count_init,
759	.free			= ftrace_count_free,
760};
761
762static struct ftrace_probe_ops stacktrace_count_probe_ops = {
763	.func			= ftrace_stacktrace_count,
764	.print			= ftrace_stacktrace_print,
765	.init			= ftrace_count_init,
766	.free			= ftrace_count_free,
767};
768
769static struct ftrace_probe_ops dump_probe_ops = {
770	.func			= ftrace_dump_probe,
771	.print			= ftrace_dump_print,
772	.init			= ftrace_count_init,
773	.free			= ftrace_count_free,
774};
775
776static struct ftrace_probe_ops cpudump_probe_ops = {
777	.func			= ftrace_cpudump_probe,
778	.print			= ftrace_cpudump_print,
779};
780
781static struct ftrace_probe_ops traceon_probe_ops = {
782	.func			= ftrace_traceon,
783	.print			= ftrace_traceon_print,
784};
785
786static struct ftrace_probe_ops traceoff_probe_ops = {
787	.func			= ftrace_traceoff,
788	.print			= ftrace_traceoff_print,
789};
790
791static struct ftrace_probe_ops stacktrace_probe_ops = {
792	.func			= ftrace_stacktrace,
793	.print			= ftrace_stacktrace_print,
794};
795
796static int
797ftrace_trace_probe_callback(struct trace_array *tr,
798			    struct ftrace_probe_ops *ops,
799			    struct ftrace_hash *hash, char *glob,
800			    char *cmd, char *param, int enable)
801{
802	void *count = (void *)-1;
803	char *number;
804	int ret;
805
806	/* hash funcs only work with set_ftrace_filter */
807	if (!enable)
808		return -EINVAL;
809
810	if (glob[0] == '!')
811		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
812
813	if (!param)
814		goto out_reg;
815
816	number = strsep(&param, ":");
817
818	if (!strlen(number))
819		goto out_reg;
820
821	/*
822	 * We use the callback data field (which is a pointer)
823	 * as our counter.
824	 */
825	ret = kstrtoul(number, 0, (unsigned long *)&count);
826	if (ret)
827		return ret;
828
829 out_reg:
830	ret = register_ftrace_function_probe(glob, tr, ops, count);
831
832	return ret < 0 ? ret : 0;
833}
834
835static int
836ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
837			    char *glob, char *cmd, char *param, int enable)
838{
839	struct ftrace_probe_ops *ops;
840
841	if (!tr)
842		return -ENODEV;
843
844	/* we register both traceon and traceoff to this callback */
845	if (strcmp(cmd, "traceon") == 0)
846		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
847	else
848		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
849
850	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
851					   param, enable);
852}
853
854static int
855ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
856			   char *glob, char *cmd, char *param, int enable)
857{
858	struct ftrace_probe_ops *ops;
859
860	if (!tr)
861		return -ENODEV;
862
863	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
864
865	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
866					   param, enable);
867}
868
869static int
870ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
871			   char *glob, char *cmd, char *param, int enable)
872{
873	struct ftrace_probe_ops *ops;
874
875	if (!tr)
876		return -ENODEV;
877
878	ops = &dump_probe_ops;
879
880	/* Only dump once. */
881	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
882					   "1", enable);
883}
884
885static int
886ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
887			   char *glob, char *cmd, char *param, int enable)
888{
889	struct ftrace_probe_ops *ops;
890
891	if (!tr)
892		return -ENODEV;
893
894	ops = &cpudump_probe_ops;
895
896	/* Only dump once. */
897	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
898					   "1", enable);
899}
900
901static struct ftrace_func_command ftrace_traceon_cmd = {
902	.name			= "traceon",
903	.func			= ftrace_trace_onoff_callback,
904};
905
906static struct ftrace_func_command ftrace_traceoff_cmd = {
907	.name			= "traceoff",
908	.func			= ftrace_trace_onoff_callback,
909};
910
911static struct ftrace_func_command ftrace_stacktrace_cmd = {
912	.name			= "stacktrace",
913	.func			= ftrace_stacktrace_callback,
914};
915
916static struct ftrace_func_command ftrace_dump_cmd = {
917	.name			= "dump",
918	.func			= ftrace_dump_callback,
919};
920
921static struct ftrace_func_command ftrace_cpudump_cmd = {
922	.name			= "cpudump",
923	.func			= ftrace_cpudump_callback,
924};
925
926static int __init init_func_cmd_traceon(void)
927{
928	int ret;
929
930	ret = register_ftrace_command(&ftrace_traceoff_cmd);
931	if (ret)
932		return ret;
933
934	ret = register_ftrace_command(&ftrace_traceon_cmd);
935	if (ret)
936		goto out_free_traceoff;
937
938	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
939	if (ret)
940		goto out_free_traceon;
941
942	ret = register_ftrace_command(&ftrace_dump_cmd);
943	if (ret)
944		goto out_free_stacktrace;
945
946	ret = register_ftrace_command(&ftrace_cpudump_cmd);
947	if (ret)
948		goto out_free_dump;
949
950	return 0;
951
952 out_free_dump:
953	unregister_ftrace_command(&ftrace_dump_cmd);
954 out_free_stacktrace:
955	unregister_ftrace_command(&ftrace_stacktrace_cmd);
956 out_free_traceon:
957	unregister_ftrace_command(&ftrace_traceon_cmd);
958 out_free_traceoff:
959	unregister_ftrace_command(&ftrace_traceoff_cmd);
960
961	return ret;
962}
963#else
964static inline int init_func_cmd_traceon(void)
965{
966	return 0;
967}
968#endif /* CONFIG_DYNAMIC_FTRACE */
969
970__init int init_function_trace(void)
971{
972	init_func_cmd_traceon();
973	return register_tracer(&function_trace);
974}