/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct ftrace_ops trace_ops;
static struct ftrace_ops trace_stack_ops;
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}

int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		/* There's only one global tr */
		if (!trace_ops.private) {
			trace_ops.private = tr;
			trace_stack_ops.private = tr;
		}

		if (func_flags.val & TRACE_FUNC_OPT_STACK)
			ops = &trace_stack_ops;
		else
			ops = &trace_ops;
		tr->ops = ops;
	} else if (!tr->ops) {
		/*
		 * Instance trace_arrays get their ops allocated at
		 * instance creation; we only get here if that
		 * allocation failed.
		 */
		return -ENOMEM;
	}

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

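/*
 * Entry callback for the plain function tracer: record one function
 * entry event, guarded against recursion.  The preempt count is
 * sampled before preemption is disabled so the event does not appear
 * to run at an elevated preempt depth, and the per-cpu ->disabled
 * counter is checked so a CPU whose buffer is being reset does not
 * log events.
 */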
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

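/*
 * Entry callback used when the func_stack_trace option is set: record
 * the function entry followed by a stack trace of the call.  Interrupts
 * are disabled and the per-cpu ->disabled counter is used instead of
 * the recursion helpers, since this runs before the recursive
 * protection is performed (see the comment in the body below).
 */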
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		unregister_ftrace_function(tr->ops);

		if (set)
			tr->ops = &trace_stack_ops;
		else
			tr->ops = &trace_ops;

		register_ftrace_function(tr->ops);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
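
/*
 * The TRACE_FUNC_OPT_STACK flag above is flipped from user space via
 * the tracer options, e.g. (path assumes debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 */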

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
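
/*
 * The tracer is selected from user space, e.g. (again assuming the
 * default debugfs mount point):
 *
 *	echo function > /sys/kernel/debug/tracing/current_tracer
 */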

#ifdef CONFIG_DYNAMIC_FTRACE
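/*
 * The probes below reuse their void *data callback argument as a
 * countdown: -1 means "fire every time", any other value is
 * decremented on each hit until it reaches zero.
 */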
static int update_count(void **data)
{
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return 0;

	if (*count != -1)
		(*count)--;

	return 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	if (update_count(data))
		tracing_on();
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		tracing_off();
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{
	if (!tracing_is_on())
		return;

	if (update_count(data))
		trace_dump_stack(STACK_SKIP);
}

357
358static void
359ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
360{
361 if (update_count(data))
362 ftrace_dump(DUMP_ALL);
363}
364
365/* Only dump the current CPU buffer. */
366static void
367ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
368{
369 if (update_count(data))
370 ftrace_dump(DUMP_ORIG);
371}
372
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
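
/*
 * ftrace_probe_print() produces one line per registered probe when
 * set_ftrace_filter is read back, e.g. (function names illustrative):
 *
 *	schedule:traceoff:unlimited
 *	do_fork:traceon:count=3
 */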
388
389static int
390ftrace_traceon_print(struct seq_file *m, unsigned long ip,
391 struct ftrace_probe_ops *ops, void *data)
392{
393 return ftrace_probe_print("traceon", m, ip, data);
394}
395
396static int
397ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
398 struct ftrace_probe_ops *ops, void *data)
399{
400 return ftrace_probe_print("traceoff", m, ip, data);
401}
402
403static int
404ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
405 struct ftrace_probe_ops *ops, void *data)
406{
407 return ftrace_probe_print("stacktrace", m, ip, data);
408}
409
410static int
411ftrace_dump_print(struct seq_file *m, unsigned long ip,
412 struct ftrace_probe_ops *ops, void *data)
413{
414 return ftrace_probe_print("dump", m, ip, data);
415}
416
417static int
418ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
419 struct ftrace_probe_ops *ops, void *data)
420{
421 return ftrace_probe_print("cpudump", m, ip, data);
422}
423
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func = ftrace_traceon_count,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func = ftrace_traceoff_count,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func = ftrace_stacktrace_count,
	.print = ftrace_stacktrace_print,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func = ftrace_dump_probe,
	.print = ftrace_dump_print,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func = ftrace_cpudump_probe,
	.print = ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func = ftrace_stacktrace,
	.print = ftrace_stacktrace_print,
};

static int
ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

503
504static int
505ftrace_trace_onoff_callback(struct ftrace_hash *hash,
506 char *glob, char *cmd, char *param, int enable)
507{
508 struct ftrace_probe_ops *ops;
509
510 /* we register both traceon and traceoff to this callback */
511 if (strcmp(cmd, "traceon") == 0)
512 ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
513 else
514 ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
515
516 return ftrace_trace_probe_callback(ops, hash, glob, cmd,
517 param, enable);
518}
519
520static int
521ftrace_stacktrace_callback(struct ftrace_hash *hash,
522 char *glob, char *cmd, char *param, int enable)
523{
524 struct ftrace_probe_ops *ops;
525
526 ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
527
528 return ftrace_trace_probe_callback(ops, hash, glob, cmd,
529 param, enable);
530}
531
static int
ftrace_dump_callback(struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
					   "1", enable);
}

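/*
 * The commands below are attached to functions through the
 * set_ftrace_filter interface with the syntax
 * <function>:<command>[:<count>], e.g. (paths assume debugfs mounted
 * at /sys/kernel/debug, function names illustrative):
 *
 *	echo 'schedule:traceoff' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo 'do_fork:traceon:3' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo '!schedule:traceoff' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * A missing count means "fire every time"; a leading '!' removes the
 * probe again (see the glob[0] == '!' check above).
 */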
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name = "stacktrace",
	.func = ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name = "dump",
	.func = ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name = "cpudump",
	.func = ftrace_cpudump_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
core_initcall(init_function_trace);