/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}
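
/*
 * Illustrative usage (not part of this file): each trace instance gets
 * its own ftrace_ops via the allocation above, so function tracing in
 * one instance is independent of the top level buffer. Assuming tracefs
 * is mounted at /sys/kernel/tracing:
 *
 *	# mkdir /sys/kernel/tracing/instances/foo
 *	# echo function > /sys/kernel/tracing/instances/foo/current_tracer
 *
 * Creating the instance ends up calling ftrace_create_function_files()
 * below, which allocates the per-instance ops used by that instance alone.
 */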

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}
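
/*
 * A minimal sketch (an assumption, not a quote of kernel/trace/ftrace.c)
 * of what ftrace_init_array_ops() does with the callback chosen above:
 *
 *	tr->ops->func = func;
 *	tr->ops->private = tr;
 *
 * i.e. it binds the trace_array and its callback together, relying on
 * tr->ops already pointing at valid ops (the global ops for the top
 * level, the kzalloc'ed ops above for instances); the real helper also
 * handles pid filtering for the top level instance.
 */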

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}
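
/*
 * A rough sketch, under the assumption of the per-context recursion
 * bits declared in trace.h, of the guard used above (not the exact
 * implementation):
 *
 *	bit = trace_get_context_bit() + TRACE_FTRACE_START;
 *	if (current->trace_recursion & (1 << bit))
 *		return -1;			// already in this callback
 *	current->trace_recursion |= 1 << bit;	// claim this context
 *	return bit;
 *
 * trace_clear_recursion(bit) clears the bit on the way out, so a traced
 * function called from within trace_function() cannot recurse back into
 * this callback in the same context.
 */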

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
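
/*
 * Example (illustrative): with the function tracer active, the stack
 * trace option is toggled at runtime through tracefs:
 *
 *	# echo function > /sys/kernel/tracing/current_tracer
 *	# echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *
 * The write lands in func_set_flag() below with
 * bit == TRACE_FUNC_OPT_STACK.
 */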

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set)
			tr->ops->func = function_stack_trace_call;
		else
			tr->ops->func = function_trace_call;

		register_ftrace_function(tr->ops);

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void update_traceon_count(void **data, bool on)
{
	long *count = (long *)data;
	long old_count = *count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or
	 * enabled), then a write memory barrier is used to make sure
	 * that the new state is visible before updating the counter to
	 * one less than its old value. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	if (!old_count)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracing_is_on())
		return;

	if (on)
		tracing_on();
	else
		tracing_off();

	/* unlimited? */
	if (old_count == -1)
		return;

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}
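
/*
 * Illustrative interleaving (not from the source) that the barriers
 * above rule out, for two CPUs hitting a traceoff probe with count == 1:
 *
 *	CPU0: old_count = 1		CPU1: old_count = 1
 *	CPU0: tracing_off()
 *	CPU0: smp_wmb()
 *	CPU0: *count = 0
 *					CPU1: smp_rmb()
 *					CPU1: tracing already off -> return
 *
 * CPU1 sees the new tracing state, so it never decrements, and the
 * count goes from 1 to 0 exactly once.
 */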

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	update_traceon_count(data, 1);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	update_traceon_count(data, 0);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;
	long old_count;
	long new_count;

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		if (!tracing_is_on())
			return;

		old_count = *count;

		if (!old_count)
			return;

		/* unlimited? */
		if (old_count == -1) {
			trace_dump_stack(STACK_SKIP);
			return;
		}

		new_count = old_count - 1;
		/* cmpxchg() returns the value that was in *count */
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_dump_stack(STACK_SKIP);

	} while (new_count != old_count);
}

static int update_count(void **data)
{
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return 0;

	if (*count != -1)
		(*count)--;

	return 1;
}
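
/*
 * Unlike update_traceon_count(), update_count() above makes no attempt
 * at atomicity: the dump probes below fire rarely, and a racy extra
 * decrement costs at most one additional (already expensive) dump, so
 * the plain decrement and the -1 "unlimited" sentinel suffice here.
 */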

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (update_count(data))
		ftrace_dump(DUMP_ORIG);
}

static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceon", m, ip, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, data);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func = ftrace_traceon_count,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func = ftrace_traceoff_count,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func = ftrace_stacktrace_count,
	.print = ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func = ftrace_dump_probe,
	.print = ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func = ftrace_cpudump_probe,
	.print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func = ftrace_stacktrace,
	.print = ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}
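
/*
 * Example (illustrative): the glob/cmd/param triple parsed above comes
 * from command strings written to set_ftrace_filter, e.g.:
 *
 *	# echo 'schedule:traceoff:5' > /sys/kernel/tracing/set_ftrace_filter
 *
 * gives glob = "schedule", cmd = "traceoff", param = "5" (turn tracing
 * off the first five times schedule() is hit); a leading '!' in the
 * glob unregisters the probe again.
 */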

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   param, enable);
}
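
/*
 * Example (illustrative):
 *
 *	# echo 'kfree:stacktrace:3' > /sys/kernel/tracing/set_ftrace_filter
 *
 * records a stack trace on the first three calls to kfree(); without
 * the ":3" the count defaults to -1 and every call is traced.
 */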

static int
ftrace_dump_callback(struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}
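
/*
 * Example (illustrative): both dump commands force the count to "1"
 * above, so the ring buffer is dumped to the console only once no
 * matter how often the function runs:
 *
 *	# echo 'oops_enter:dump' > /sys/kernel/tracing/set_ftrace_filter
 */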

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name = "stacktrace",
	.func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name = "dump",
	.func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name = "cpudump",
	.func = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);