// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct ftrace_regs *fregs);
static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs);
static struct tracer_flags func_flags;

/* Our options */
enum {
	TRACE_FUNC_NO_OPTS		= 0x0, /* No flags set. */
	TRACE_FUNC_OPT_STACK		= 0x1,
	TRACE_FUNC_OPT_NO_REPEATS	= 0x2,

	/* Update this to next highest bit. */
	TRACE_FUNC_OPT_HIGHEST_BIT	= 0x4
};

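/*
 * Mask covering every option bit defined above: with the highest bit at
 * 0x4 this works out to 0x3, i.e.
 * TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS.
 */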
#define TRACE_FUNC_OPT_MASK	(TRACE_FUNC_OPT_HIGHEST_BIT - 1)

int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}

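/*
 * Map the currently set option bits to the matching trace callback.
 * Returns NULL for a combination outside the mask, which callers treat
 * as -EINVAL.
 */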
static ftrace_func_t select_trace_function(u32 flags_val)
{
	switch (flags_val & TRACE_FUNC_OPT_MASK) {
	case TRACE_FUNC_NO_OPTS:
		return function_trace_call;
	case TRACE_FUNC_OPT_STACK:
		return function_stack_trace_call;
	case TRACE_FUNC_OPT_NO_REPEATS:
		return function_no_repeats_trace_call;
	case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
		return function_stack_no_repeats_trace_call;
	default:
		return NULL;
	}
}

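/*
 * Lazily allocate the per-CPU state used to squash repeated entries.
 * Only needed the first time the no-repeats option is enabled for this
 * trace_array; returns false on allocation failure.
 */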
static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
{
	if (!tr->last_func_repeats &&
	    (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
		tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
		if (!tr->last_func_repeats)
			return false;
	}

	return true;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;
	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	func = select_trace_function(func_flags.val);
	if (!func)
		return -EINVAL;

	if (!handle_func_repeats(tr, func_flags.val))
		return -ENOMEM;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = raw_smp_processor_id();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

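/*
 * Plain per-instance callback: take the recursion lock so that a traced
 * function called from within tracing does not recurse, then record the
 * entry unless this CPU's buffer is disabled.
 */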
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	trace_ctx = tracing_gen_ctx();

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled))
		trace_function(tr, ip, parent_ip, trace_ctx);

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

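/*
 * Repeat tracking: when the same ip/parent_ip pair fires back to back,
 * bump a per-CPU counter (capped at U16_MAX) and remember the timestamp
 * instead of writing another event; the accumulated count is flushed as
 * a single repeats event once a different function shows up.
 */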
static inline bool is_repeat_check(struct trace_array *tr,
				   struct trace_func_repeats *last_info,
				   unsigned long ip, unsigned long parent_ip)
{
	if (last_info->ip == ip &&
	    last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer);
		last_info->count++;
		return true;
	}

	return false;
}

static inline void process_repeats(struct trace_array *tr,
				   unsigned long ip, unsigned long parent_ip,
				   struct trace_func_repeats *last_info,
				   unsigned int trace_ctx)
{
	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}

	last_info->ip = ip;
	last_info->parent_ip = parent_ip;
}

static void
function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op,
			       struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	unsigned long flags;
	int bit;
	int cpu;

	if (unlikely(!tr->function_enabled))
		return;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (atomic_read(&data->disabled))
		goto out;

	/*
	 * An interrupt may happen at any place here. But as far as I can see,
	 * the only damage that this can cause is to mess up the repetition
	 * counter without valuable data being lost.
	 * TODO: think about a solution that is better than just hoping to be
	 * lucky.
	 */
	last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
	if (is_repeat_check(tr, last_info, ip, parent_ip))
		goto out;

	local_save_flags(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);
	process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

	trace_function(tr, ip, parent_ip, trace_ctx);

out:
	ftrace_test_recursion_unlock(bit);
}

static void
function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op,
				     struct ftrace_regs *fregs)
{
	struct trace_func_repeats *last_info;
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
		if (is_repeat_check(tr, last_info, ip, parent_ip))
			goto out;

		trace_ctx = tracing_gen_ctx_flags(flags);
		process_repeats(tr, ip, parent_ip, last_info, trace_ctx);

		trace_function(tr, ip, parent_ip, trace_ctx);
		__trace_stack(tr, trace_ctx, STACK_SKIP);
	}

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
	.opts = func_opts
};

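/*
 * The options above show up as boolean files in the tracer's options
 * directory once "function" is the current tracer, e.g. (assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo function > current_tracer
 *   echo 1 > options/func-no-repeats
 */

/*
 * Keep function_enabled clear while registering: the callback can fire
 * as soon as register_ftrace_function() hooks us up, and it bails out
 * early until the flag is set.
 */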
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

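/*
 * Called when the user toggles an option bit of the function tracer.
 * The callback is not changed on a live ftrace_ops; it is swapped
 * inside an unregister/register cycle.
 */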
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	ftrace_func_t func;
	u32 new_flags;

	/* Do nothing if already set. */
	if (!!set == !!(func_flags.val & bit))
		return 0;

	/* We can change this flag only when not running. */
	if (tr->current_trace != &function_trace)
		return 0;

	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
	func = select_trace_function(new_flags);
	if (!func)
		return -EINVAL;

	/* Check if there's anything to change. */
	if (tr->ops->func == func)
		return 0;

	if (!handle_func_repeats(tr, new_flags))
		return -ENOMEM;

	unregister_ftrace_function(tr->ops);
	tr->ops->func = func;
	register_ftrace_function(tr->ops);

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();

	__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

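/*
 * Decrement the per-ip counter attached to a probe.  Returns 1 while
 * the probe should still fire (a NULL mapper means unlimited), 0 once
 * the count is exhausted.
 */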
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

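/*
 * Shared init/free hooks for the counted probes: allocate the per-ops
 * function mapper on first use and hang the initial count off the
 * traced ip; on free, drop the single ip, or the whole mapper when
 * called with ip == 0.
 */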
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func		= ftrace_traceon_count,
	.print		= ftrace_traceon_print,
	.init		= ftrace_count_init,
	.free		= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func		= ftrace_traceoff_count,
	.print		= ftrace_traceoff_print,
	.init		= ftrace_count_init,
	.free		= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func		= ftrace_stacktrace_count,
	.print		= ftrace_stacktrace_print,
	.init		= ftrace_count_init,
	.free		= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func		= ftrace_dump_probe,
	.print		= ftrace_dump_print,
	.init		= ftrace_count_init,
	.free		= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func		= ftrace_cpudump_probe,
	.print		= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func		= ftrace_traceon,
	.print		= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func		= ftrace_traceoff,
	.print		= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func		= ftrace_stacktrace,
	.print		= ftrace_stacktrace_print,
};

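/*
 * Common parsing for the function trigger commands written to
 * set_ftrace_filter.  "glob:cmd" arms the probe unconditionally,
 * "glob:cmd:count" arms it for that many hits, and a leading '!'
 * removes it, e.g. (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   echo 'schedule:traceoff:5' > set_ftrace_filter
 *   echo '!schedule:traceoff' > set_ftrace_filter
 */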
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name		= "traceon",
	.func		= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name		= "traceoff",
	.func		= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name		= "stacktrace",
	.func		= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name		= "dump",
	.func		= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name		= "cpudump",
	.func		= ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}