// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

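/*
 * Allocate a per-instance ftrace_ops for @tr and point it at the plain
 * (non-stack) function trace callback. The ops is freed again in
 * ftrace_destroy_function_files().
 */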
static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}

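/*
 * Create the per-instance function filter control files (via
 * ftrace_create_filter_files()) for a trace instance. The top level
 * (global) array is handled separately at boot, so only sub-instances
 * allocate their own ftrace_ops here.
 */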
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

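/*
 * tracer->init callback: pick the plain or stack-tracing function
 * callback, hand it to ftrace_init_array_ops(), and start recording
 * cmdlines and function entries.
 */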
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

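/*
 * Per-function callback for the plain "function" tracer: record one
 * trace_function event for the current CPU, guarded by the ftrace
 * recursion counter and with preemption disabled.
 */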
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

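/*
 * Like function_trace_call(), but also record a stack trace for every
 * function entry. Interrupts are disabled and data->disabled serves as
 * the reentrancy guard here.
 */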
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

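/*
 * Register/unregister the instance's ftrace_ops. function_enabled is
 * cleared first so the callbacks ignore any hits that come in while the
 * ops is being (un)registered.
 */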
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

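/*
 * Toggle the func_stack_trace option: swap the ops callback between the
 * plain and the stack-tracing variant, re-registering the ops when the
 * function tracer is currently active.
 */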
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.flags = &func_flags,
	.set_flag = func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

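/*
 * set_ftrace_filter probe callbacks for "<func>:traceon[:count]" and
 * "<func>:traceoff[:count]". The counted variants go through
 * update_traceon_count(); the plain ones just flip the instance's
 * tracing state.
 */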
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

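/*
 * Helper for the stacktrace probes: capture the current stack into the
 * trace buffer, skipping the ftrace plumbing frames counted above.
 */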
static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

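/*
 * Counted stacktrace probe. The per-ip count lives in the func_mapper;
 * cmpxchg() is used so that concurrent hits on multiple CPUs decrement
 * the count exactly once per emitted stack trace.
 */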
static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

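/*
 * Decrement the per-ip counter for the dump/cpudump probes. Returns 1 if
 * the probe should still fire (no counter, or counter not yet exhausted),
 * 0 once the count has run out.
 */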
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

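/*
 * Common seq_file output for the probe "print" callbacks:
 * "<func>:<cmd>:count=<n>" or "<func>:<cmd>:unlimited".
 */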
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

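/*
 * init/free callbacks shared by the counted probes: lazily allocate a
 * func_mapper to hold the per-ip count, and tear it down (or drop a
 * single ip) when the probe is removed.
 */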
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func = ftrace_traceon_count,
	.print = ftrace_traceon_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func = ftrace_traceoff_count,
	.print = ftrace_traceoff_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func = ftrace_stacktrace_count,
	.print = ftrace_stacktrace_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func = ftrace_dump_probe,
	.print = ftrace_dump_print,
	.init = ftrace_count_init,
	.free = ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func = ftrace_cpudump_probe,
	.print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func = ftrace_stacktrace,
	.print = ftrace_stacktrace_print,
};

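/*
 * Common parser for the set_ftrace_filter commands: handle the '!' prefix
 * for unregistering, parse an optional ":<count>" parameter into the probe
 * data pointer, and register the probe on the matching functions.
 */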
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name = "stacktrace",
	.func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name = "dump",
	.func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name = "cpudump",
	.func = ftrace_cpudump_callback,
};

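/*
 * Register the traceon/traceoff/stacktrace/dump/cpudump filter commands,
 * unwinding the already-registered ones if any registration fails.
 */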
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

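/*
 * Register the filter commands and the "function" tracer itself; called
 * from the tracing core during boot.
 */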
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}