1/*
2 * ring buffer based function tracer
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Based on code from the latency_tracer, that is:
8 *
9 * Copyright (C) 2004-2006 Ingo Molnar
10 * Copyright (C) 2004 William Lee Irwin III
11 */
12#include <linux/ring_buffer.h>
13#include <linux/debugfs.h>
14#include <linux/uaccess.h>
15#include <linux/ftrace.h>
16#include <linux/fs.h>
17
18#include "trace.h"
19
20/* function tracing enabled */
21static int ftrace_function_enabled;
22
23static struct trace_array *func_trace;
24
25static void tracing_start_function_trace(void);
26static void tracing_stop_function_trace(void);
27
28static int function_trace_init(struct trace_array *tr)
29{
30 func_trace = tr;
31 tr->cpu = get_cpu();
32 put_cpu();
33
34 tracing_start_cmdline_record();
35 tracing_start_function_trace();
36 return 0;
37}
38
39static void function_trace_reset(struct trace_array *tr)
40{
41 tracing_stop_function_trace();
42 tracing_stop_cmdline_record();
43}
44
45static void function_trace_start(struct trace_array *tr)
46{
47 tracing_reset_online_cpus(tr);
48}
49
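/*
 * Callback variant that only disables preemption around recording.
 * tracing_start_function_trace() selects it when the preempt-only
 * trace option is set; the default variant below disables interrupts
 * instead.
 */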
50static void
51function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
52{
53 struct trace_array *tr = func_trace;
54 struct trace_array_cpu *data;
55 unsigned long flags;
56 long disabled;
57 int cpu;
58 int pc;
59
60 if (unlikely(!ftrace_function_enabled))
61 return;
62
63 pc = preempt_count();
64 preempt_disable_notrace();
65 local_save_flags(flags);
66 cpu = raw_smp_processor_id();
67 data = tr->data[cpu];
68 disabled = atomic_inc_return(&data->disabled);
69
70 if (likely(disabled == 1))
71 trace_function(tr, ip, parent_ip, flags, pc);
72
73 atomic_dec(&data->disabled);
74 preempt_enable_notrace();
75}
76
77static void
78function_trace_call(unsigned long ip, unsigned long parent_ip)
79{
80 struct trace_array *tr = func_trace;
81 struct trace_array_cpu *data;
82 unsigned long flags;
83 long disabled;
84 int cpu;
85 int pc;
86
87 if (unlikely(!ftrace_function_enabled))
88 return;
89
90 /*
91 * Need to use raw, since this must be called before the
92 * recursive protection is performed.
93 */
94 local_irq_save(flags);
95 cpu = raw_smp_processor_id();
96 data = tr->data[cpu];
97 disabled = atomic_inc_return(&data->disabled);
98
99 if (likely(disabled == 1)) {
100 pc = preempt_count();
101 trace_function(tr, ip, parent_ip, flags, pc);
102 }
103
104 atomic_dec(&data->disabled);
105 local_irq_restore(flags);
106}
107
108static void
109function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
110{
111 struct trace_array *tr = func_trace;
112 struct trace_array_cpu *data;
113 unsigned long flags;
114 long disabled;
115 int cpu;
116 int pc;
117
118 if (unlikely(!ftrace_function_enabled))
119 return;
120
121 /*
122 * Need to use raw, since this must be called before the
123 * recursive protection is performed.
124 */
125 local_irq_save(flags);
126 cpu = raw_smp_processor_id();
127 data = tr->data[cpu];
128 disabled = atomic_inc_return(&data->disabled);
129
130 if (likely(disabled == 1)) {
131 pc = preempt_count();
132 trace_function(tr, ip, parent_ip, flags, pc);
133 /*
134 * skip over 5 funcs:
135 * __ftrace_trace_stack,
136 * __trace_stack,
137 * function_stack_trace_call
138 * ftrace_list_func
139 * ftrace_call
140 */
141 __trace_stack(tr, flags, 5, pc);
142 }
143
144 atomic_dec(&data->disabled);
145 local_irq_restore(flags);
146}
147
148
149static struct ftrace_ops trace_ops __read_mostly =
150{
151 .func = function_trace_call,
152 .flags = FTRACE_OPS_FL_GLOBAL,
153};
154
155static struct ftrace_ops trace_stack_ops __read_mostly =
156{
157 .func = function_stack_trace_call,
158 .flags = FTRACE_OPS_FL_GLOBAL,
159};
160
161/* Our option */
162enum {
163 TRACE_FUNC_OPT_STACK = 0x1,
164};
165
166static struct tracer_opt func_opts[] = {
167#ifdef CONFIG_STACKTRACE
168 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
169#endif
170 { } /* Always set a last empty entry */
171};
172
173static struct tracer_flags func_flags = {
174 .val = 0, /* By default: all flags disabled */
175 .opts = func_opts
176};
177
178static void tracing_start_function_trace(void)
179{
180 ftrace_function_enabled = 0;
181
182 if (trace_flags & TRACE_ITER_PREEMPTONLY)
183 trace_ops.func = function_trace_call_preempt_only;
184 else
185 trace_ops.func = function_trace_call;
186
187 if (func_flags.val & TRACE_FUNC_OPT_STACK)
188 register_ftrace_function(&trace_stack_ops);
189 else
190 register_ftrace_function(&trace_ops);
191
192 ftrace_function_enabled = 1;
193}
194
195static void tracing_stop_function_trace(void)
196{
197 ftrace_function_enabled = 0;
198
199 if (func_flags.val & TRACE_FUNC_OPT_STACK)
200 unregister_ftrace_function(&trace_stack_ops);
201 else
202 unregister_ftrace_function(&trace_ops);
203}
204
205static int func_set_flag(u32 old_flags, u32 bit, int set)
206{
207 if (bit == TRACE_FUNC_OPT_STACK) {
208 /* do nothing if already set */
209 if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
210 return 0;
211
212 if (set) {
213 unregister_ftrace_function(&trace_ops);
214 register_ftrace_function(&trace_stack_ops);
215 } else {
216 unregister_ftrace_function(&trace_stack_ops);
217 register_ftrace_function(&trace_ops);
218 }
219
220 return 0;
221 }
222
223 return -EINVAL;
224}
225
226static struct tracer function_trace __read_mostly =
227{
228 .name = "function",
229 .init = function_trace_init,
230 .reset = function_trace_reset,
231 .start = function_trace_start,
232 .wait_pipe = poll_wait_pipe,
233 .flags = &func_flags,
234 .set_flag = func_set_flag,
235#ifdef CONFIG_FTRACE_SELFTEST
236 .selftest = trace_selftest_startup_function,
237#endif
238};
239
240#ifdef CONFIG_DYNAMIC_FTRACE
241static void
242ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
243{
244 long *count = (long *)data;
245
246 if (tracing_is_on())
247 return;
248
249 if (!*count)
250 return;
251
252 if (*count != -1)
253 (*count)--;
254
255 tracing_on();
256}
257
258static void
259ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
260{
261 long *count = (long *)data;
262
263 if (!tracing_is_on())
264 return;
265
266 if (!*count)
267 return;
268
269 if (*count != -1)
270 (*count)--;
271
272 tracing_off();
273}
274
275static int
276ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
277 struct ftrace_probe_ops *ops, void *data);
278
279static struct ftrace_probe_ops traceon_probe_ops = {
280 .func = ftrace_traceon,
281 .print = ftrace_trace_onoff_print,
282};
283
284static struct ftrace_probe_ops traceoff_probe_ops = {
285 .func = ftrace_traceoff,
286 .print = ftrace_trace_onoff_print,
287};
288
289static int
290ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
291 struct ftrace_probe_ops *ops, void *data)
292{
293 long count = (long)data;
294
295 seq_printf(m, "%ps:", (void *)ip);
296
297 if (ops == &traceon_probe_ops)
298 seq_printf(m, "traceon");
299 else
300 seq_printf(m, "traceoff");
301
302 if (count == -1)
303 seq_printf(m, ":unlimited\n");
304 else
305 seq_printf(m, ":count=%ld\n", count);
306
307 return 0;
308}
309
310static int
311ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
312{
313 struct ftrace_probe_ops *ops;
314
315 /* we register both traceon and traceoff to this callback */
316 if (strcmp(cmd, "traceon") == 0)
317 ops = &traceon_probe_ops;
318 else
319 ops = &traceoff_probe_ops;
320
321 unregister_ftrace_function_probe_func(glob, ops);
322
323 return 0;
324}
325
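/*
 * Handler for the traceon/traceoff commands written to set_ftrace_filter,
 * in the form <glob>:<command>[:<count>], e.g. (a typical invocation):
 *   echo 'schedule:traceoff:5' > set_ftrace_filter
 * A missing count (internally -1) means the probe never expires.
 */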
326static int
327ftrace_trace_onoff_callback(struct ftrace_hash *hash,
328 char *glob, char *cmd, char *param, int enable)
329{
330 struct ftrace_probe_ops *ops;
331 void *count = (void *)-1;
332 char *number;
333 int ret;
334
335 /* hash funcs only work with set_ftrace_filter */
336 if (!enable)
337 return -EINVAL;
338
339 if (glob[0] == '!')
340 return ftrace_trace_onoff_unreg(glob+1, cmd, param);
341
342 /* we register both traceon and traceoff to this callback */
343 if (strcmp(cmd, "traceon") == 0)
344 ops = &traceon_probe_ops;
345 else
346 ops = &traceoff_probe_ops;
347
348 if (!param)
349 goto out_reg;
350
351 number = strsep(&param, ":");
352
353 if (!strlen(number))
354 goto out_reg;
355
356 /*
357 * We use the callback data field (which is a pointer)
358 * as our counter.
359 */
360 ret = strict_strtoul(number, 0, (unsigned long *)&count);
361 if (ret)
362 return ret;
363
364 out_reg:
365 ret = register_ftrace_function_probe(glob, ops, count);
366
367 return ret < 0 ? ret : 0;
368}
369
370static struct ftrace_func_command ftrace_traceon_cmd = {
371 .name = "traceon",
372 .func = ftrace_trace_onoff_callback,
373};
374
375static struct ftrace_func_command ftrace_traceoff_cmd = {
376 .name = "traceoff",
377 .func = ftrace_trace_onoff_callback,
378};
379
380static int __init init_func_cmd_traceon(void)
381{
382 int ret;
383
384 ret = register_ftrace_command(&ftrace_traceoff_cmd);
385 if (ret)
386 return ret;
387
388 ret = register_ftrace_command(&ftrace_traceon_cmd);
389 if (ret)
390 unregister_ftrace_command(&ftrace_traceoff_cmd);
391 return ret;
392}
393#else
394static inline int init_func_cmd_traceon(void)
395{
396 return 0;
397}
398#endif /* CONFIG_DYNAMIC_FTRACE */
399
400static __init int init_function_trace(void)
401{
402 init_func_cmd_traceon();
403 return register_tracer(&function_trace);
404}
405device_initcall(init_function_trace);
406
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ring buffer based function tracer
4 *
5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Based on code from the latency_tracer, that is:
9 *
10 * Copyright (C) 2004-2006 Ingo Molnar
11 * Copyright (C) 2004 Nadia Yvette Chambers
12 */
13#include <linux/ring_buffer.h>
14#include <linux/debugfs.h>
15#include <linux/uaccess.h>
16#include <linux/ftrace.h>
17#include <linux/slab.h>
18#include <linux/fs.h>
19
20#include "trace.h"
21
22static void tracing_start_function_trace(struct trace_array *tr);
23static void tracing_stop_function_trace(struct trace_array *tr);
24static void
25function_trace_call(unsigned long ip, unsigned long parent_ip,
26 struct ftrace_ops *op, struct ftrace_regs *fregs);
27static void
28function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
29 struct ftrace_ops *op, struct ftrace_regs *fregs);
30static void
31function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
32 struct ftrace_ops *op, struct ftrace_regs *fregs);
33static void
34function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
35 struct ftrace_ops *op,
36 struct ftrace_regs *fregs);
37static struct tracer_flags func_flags;
38
39/* Our options */
40enum {
41
42 TRACE_FUNC_NO_OPTS = 0x0, /* No flags set. */
43 TRACE_FUNC_OPT_STACK = 0x1,
44 TRACE_FUNC_OPT_NO_REPEATS = 0x2,
45
46 /* Update this to next highest bit. */
47 TRACE_FUNC_OPT_HIGHEST_BIT = 0x4
48};
49
50#define TRACE_FUNC_OPT_MASK (TRACE_FUNC_OPT_HIGHEST_BIT - 1)
51
52int ftrace_allocate_ftrace_ops(struct trace_array *tr)
53{
54 struct ftrace_ops *ops;
55
56 /* The top level array uses the "global_ops" */
57 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
58 return 0;
59
60 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
61 if (!ops)
62 return -ENOMEM;
63
64 /* Currently only the non stack version is supported */
65 ops->func = function_trace_call;
66 ops->flags = FTRACE_OPS_FL_PID;
67
68 tr->ops = ops;
69 ops->private = tr;
70
71 return 0;
72}
73
74void ftrace_free_ftrace_ops(struct trace_array *tr)
75{
76 kfree(tr->ops);
77 tr->ops = NULL;
78}
79
80int ftrace_create_function_files(struct trace_array *tr,
81 struct dentry *parent)
82{
83 /*
84 * The top level array uses the "global_ops", and the files are
85 * created on boot up.
86 */
87 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
88 return 0;
89
90 if (!tr->ops)
91 return -EINVAL;
92
93 ftrace_create_filter_files(tr->ops, parent);
94
95 return 0;
96}
97
98void ftrace_destroy_function_files(struct trace_array *tr)
99{
100 ftrace_destroy_filter_files(tr->ops);
101 ftrace_free_ftrace_ops(tr);
102}
103
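/*
 * Pick the trace callback that matches the currently set option flags
 * (stack tracing and/or repeat suppression); returns NULL for an
 * unsupported combination.
 */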
104static ftrace_func_t select_trace_function(u32 flags_val)
105{
106 switch (flags_val & TRACE_FUNC_OPT_MASK) {
107 case TRACE_FUNC_NO_OPTS:
108 return function_trace_call;
109 case TRACE_FUNC_OPT_STACK:
110 return function_stack_trace_call;
111 case TRACE_FUNC_OPT_NO_REPEATS:
112 return function_no_repeats_trace_call;
113 case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
114 return function_stack_no_repeats_trace_call;
115 default:
116 return NULL;
117 }
118}
119
120static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
121{
122 if (!tr->last_func_repeats &&
123 (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
124 tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
125 if (!tr->last_func_repeats)
126 return false;
127 }
128
129 return true;
130}
131
132static int function_trace_init(struct trace_array *tr)
133{
134 ftrace_func_t func;
135 /*
136 * Instance trace_arrays get their ops allocated
137 * at instance creation. Unless it failed
138 * the allocation.
139 */
140 if (!tr->ops)
141 return -ENOMEM;
142
143 func = select_trace_function(func_flags.val);
144 if (!func)
145 return -EINVAL;
146
147 if (!handle_func_repeats(tr, func_flags.val))
148 return -ENOMEM;
149
150 ftrace_init_array_ops(tr, func);
151
152 tr->array_buffer.cpu = raw_smp_processor_id();
153
154 tracing_start_cmdline_record();
155 tracing_start_function_trace(tr);
156 return 0;
157}
158
159static void function_trace_reset(struct trace_array *tr)
160{
161 tracing_stop_function_trace(tr);
162 tracing_stop_cmdline_record();
163 ftrace_reset_array_ops(tr);
164}
165
166static void function_trace_start(struct trace_array *tr)
167{
168 tracing_reset_online_cpus(&tr->array_buffer);
169}
170
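/*
 * Main per-function callback, invoked for every traced function.
 * Recursion is fended off with ftrace_test_recursion_trylock(), and
 * events are dropped while the per-CPU "disabled" counter is non-zero.
 */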
171static void
172function_trace_call(unsigned long ip, unsigned long parent_ip,
173 struct ftrace_ops *op, struct ftrace_regs *fregs)
174{
175 struct trace_array *tr = op->private;
176 struct trace_array_cpu *data;
177 unsigned int trace_ctx;
178 int bit;
179 int cpu;
180
181 if (unlikely(!tr->function_enabled))
182 return;
183
184 bit = ftrace_test_recursion_trylock(ip, parent_ip);
185 if (bit < 0)
186 return;
187
188 trace_ctx = tracing_gen_ctx();
189 preempt_disable_notrace();
190
191 cpu = smp_processor_id();
192 data = per_cpu_ptr(tr->array_buffer.data, cpu);
193 if (!atomic_read(&data->disabled))
194 trace_function(tr, ip, parent_ip, trace_ctx);
195
196 ftrace_test_recursion_unlock(bit);
197 preempt_enable_notrace();
198}
199
200#ifdef CONFIG_UNWINDER_ORC
201/*
202 * Skip 2:
203 *
204 * function_stack_trace_call()
205 * ftrace_call()
206 */
207#define STACK_SKIP 2
208#else
209/*
210 * Skip 3:
211 * __trace_stack()
212 * function_stack_trace_call()
213 * ftrace_call()
214 */
215#define STACK_SKIP 3
216#endif
217
218static void
219function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
220 struct ftrace_ops *op, struct ftrace_regs *fregs)
221{
222 struct trace_array *tr = op->private;
223 struct trace_array_cpu *data;
224 unsigned long flags;
225 long disabled;
226 int cpu;
227 unsigned int trace_ctx;
228
229 if (unlikely(!tr->function_enabled))
230 return;
231
232 /*
233 * Need to use raw, since this must be called before the
234 * recursive protection is performed.
235 */
236 local_irq_save(flags);
237 cpu = raw_smp_processor_id();
238 data = per_cpu_ptr(tr->array_buffer.data, cpu);
239 disabled = atomic_inc_return(&data->disabled);
240
241 if (likely(disabled == 1)) {
242 trace_ctx = tracing_gen_ctx_flags(flags);
243 trace_function(tr, ip, parent_ip, trace_ctx);
244 __trace_stack(tr, trace_ctx, STACK_SKIP);
245 }
246
247 atomic_dec(&data->disabled);
248 local_irq_restore(flags);
249}
250
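/*
 * If this call has the same ip/parent_ip as the previous one on this CPU,
 * fold it into the per-CPU repeat counter (and remember its timestamp)
 * instead of emitting a new function entry.
 */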
251static inline bool is_repeat_check(struct trace_array *tr,
252 struct trace_func_repeats *last_info,
253 unsigned long ip, unsigned long parent_ip)
254{
255 if (last_info->ip == ip &&
256 last_info->parent_ip == parent_ip &&
257 last_info->count < U16_MAX) {
258 last_info->ts_last_call =
259 ring_buffer_time_stamp(tr->array_buffer.buffer);
260 last_info->count++;
261 return true;
262 }
263
264 return false;
265}
266
267static inline void process_repeats(struct trace_array *tr,
268 unsigned long ip, unsigned long parent_ip,
269 struct trace_func_repeats *last_info,
270 unsigned int trace_ctx)
271{
272 if (last_info->count) {
273 trace_last_func_repeats(tr, last_info, trace_ctx);
274 last_info->count = 0;
275 }
276
277 last_info->ip = ip;
278 last_info->parent_ip = parent_ip;
279}
280
281static void
282function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
283 struct ftrace_ops *op,
284 struct ftrace_regs *fregs)
285{
286 struct trace_func_repeats *last_info;
287 struct trace_array *tr = op->private;
288 struct trace_array_cpu *data;
289 unsigned int trace_ctx;
290 unsigned long flags;
291 int bit;
292 int cpu;
293
294 if (unlikely(!tr->function_enabled))
295 return;
296
297 bit = ftrace_test_recursion_trylock(ip, parent_ip);
298 if (bit < 0)
299 return;
300
301 preempt_disable_notrace();
302
303 cpu = smp_processor_id();
304 data = per_cpu_ptr(tr->array_buffer.data, cpu);
305 if (atomic_read(&data->disabled))
306 goto out;
307
308 /*
309 * An interrupt may happen at any place here. But as far as I can see,
310 * the only damage that this can cause is to mess up the repetition
311 * counter without valuable data being lost.
312 * TODO: think about a solution that is better than just hoping to be
313 * lucky.
314 */
315 last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
316 if (is_repeat_check(tr, last_info, ip, parent_ip))
317 goto out;
318
319 local_save_flags(flags);
320 trace_ctx = tracing_gen_ctx_flags(flags);
321 process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
322
323 trace_function(tr, ip, parent_ip, trace_ctx);
324
325out:
326 ftrace_test_recursion_unlock(bit);
327 preempt_enable_notrace();
328}
329
330static void
331function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
332 struct ftrace_ops *op,
333 struct ftrace_regs *fregs)
334{
335 struct trace_func_repeats *last_info;
336 struct trace_array *tr = op->private;
337 struct trace_array_cpu *data;
338 unsigned long flags;
339 long disabled;
340 int cpu;
341 unsigned int trace_ctx;
342
343 if (unlikely(!tr->function_enabled))
344 return;
345
346 /*
347 * Need to use raw, since this must be called before the
348 * recursive protection is performed.
349 */
350 local_irq_save(flags);
351 cpu = raw_smp_processor_id();
352 data = per_cpu_ptr(tr->array_buffer.data, cpu);
353 disabled = atomic_inc_return(&data->disabled);
354
355 if (likely(disabled == 1)) {
356 last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
357 if (is_repeat_check(tr, last_info, ip, parent_ip))
358 goto out;
359
360 trace_ctx = tracing_gen_ctx_flags(flags);
361 process_repeats(tr, ip, parent_ip, last_info, trace_ctx);
362
363 trace_function(tr, ip, parent_ip, trace_ctx);
364 __trace_stack(tr, trace_ctx, STACK_SKIP);
365 }
366
367 out:
368 atomic_dec(&data->disabled);
369 local_irq_restore(flags);
370}
371
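/*
 * Tracer options.  While the function tracer is the current tracer these
 * typically show up as boolean files under the tracefs "options"
 * directory (e.g. options/func_stack_trace), and toggling them ends up
 * in func_set_flag() below.
 */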
372static struct tracer_opt func_opts[] = {
373#ifdef CONFIG_STACKTRACE
374 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
375#endif
376 { TRACER_OPT(func-no-repeats, TRACE_FUNC_OPT_NO_REPEATS) },
377 { } /* Always set a last empty entry */
378};
379
380static struct tracer_flags func_flags = {
381 .val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
382 .opts = func_opts
383};
384
385static void tracing_start_function_trace(struct trace_array *tr)
386{
387 tr->function_enabled = 0;
388 register_ftrace_function(tr->ops);
389 tr->function_enabled = 1;
390}
391
392static void tracing_stop_function_trace(struct trace_array *tr)
393{
394 tr->function_enabled = 0;
395 unregister_ftrace_function(tr->ops);
396}
397
398static struct tracer function_trace;
399
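/*
 * Called when one of the options above is toggled at run time.  If the
 * resulting callback differs from the one in use, the ops is briefly
 * unregistered, its ->func swapped, and registered again.
 */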
400static int
401func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
402{
403 ftrace_func_t func;
404 u32 new_flags;
405
406 /* Do nothing if already set. */
407 if (!!set == !!(func_flags.val & bit))
408 return 0;
409
410 /* We can change this flag only when not running. */
411 if (tr->current_trace != &function_trace)
412 return 0;
413
414 new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
415 func = select_trace_function(new_flags);
416 if (!func)
417 return -EINVAL;
418
419 /* Check if there's anything to change. */
420 if (tr->ops->func == func)
421 return 0;
422
423 if (!handle_func_repeats(tr, new_flags))
424 return -ENOMEM;
425
426 unregister_ftrace_function(tr->ops);
427 tr->ops->func = func;
428 register_ftrace_function(tr->ops);
429
430 return 0;
431}
432
433static struct tracer function_trace __tracer_data =
434{
435 .name = "function",
436 .init = function_trace_init,
437 .reset = function_trace_reset,
438 .start = function_trace_start,
439 .flags = &func_flags,
440 .set_flag = func_set_flag,
441 .allow_instances = true,
442#ifdef CONFIG_FTRACE_SELFTEST
443 .selftest = trace_selftest_startup_function,
444#endif
445};
446
447#ifdef CONFIG_DYNAMIC_FTRACE
448static void update_traceon_count(struct ftrace_probe_ops *ops,
449 unsigned long ip,
450 struct trace_array *tr, bool on,
451 void *data)
452{
453 struct ftrace_func_mapper *mapper = data;
454 long *count;
455 long old_count;
456
457 /*
458 * Tracing gets disabled (or enabled) once per count.
459 * This function can be called at the same time on multiple CPUs.
460 * It is fine if both disable (or enable) tracing, as disabling
461 * (or enabling) the second time doesn't do anything as the
462 * state of the tracer is already disabled (or enabled).
463 * What needs to be synchronized in this case is that the count
464 * only gets decremented once, even if the tracer is disabled
465 * (or enabled) twice, as the second one is really a nop.
466 *
467 * The memory barriers guarantee that we only decrement the
468 * counter once. First the count is read to a local variable
469 * and a read barrier is used to make sure that it is loaded
470 * before checking if the tracer is in the state we want.
471 * If the tracer is not in the state we want, then the count
472 * is guaranteed to be the old count.
473 *
474 * Next the tracer is set to the state we want (disabled or enabled),
475 * then a write memory barrier is used to make sure that the new
476 * state is visible before the counter is updated to the old value
477 * minus one. This guarantees that another CPU executing this code
478 * sees the new tracer state before it sees the new counter value,
479 * so if it does see the new counter it will also see the new state
480 * and will not decrement the counter again.
481 *
482 * Note, there is no synchronization between this and a user
483 * setting the tracing_on file. But we currently don't care
484 * about that.
485 */
486 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
487 old_count = *count;
488
489 if (old_count <= 0)
490 return;
491
492 /* Make sure we see count before checking tracing state */
493 smp_rmb();
494
495 if (on == !!tracer_tracing_is_on(tr))
496 return;
497
498 if (on)
499 tracer_tracing_on(tr);
500 else
501 tracer_tracing_off(tr);
502
503 /* Make sure tracing state is visible before updating count */
504 smp_wmb();
505
506 *count = old_count - 1;
507}
508
509static void
510ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
511 struct trace_array *tr, struct ftrace_probe_ops *ops,
512 void *data)
513{
514 update_traceon_count(ops, ip, tr, 1, data);
515}
516
517static void
518ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
519 struct trace_array *tr, struct ftrace_probe_ops *ops,
520 void *data)
521{
522 update_traceon_count(ops, ip, tr, 0, data);
523}
524
525static void
526ftrace_traceon(unsigned long ip, unsigned long parent_ip,
527 struct trace_array *tr, struct ftrace_probe_ops *ops,
528 void *data)
529{
530 if (tracer_tracing_is_on(tr))
531 return;
532
533 tracer_tracing_on(tr);
534}
535
536static void
537ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
538 struct trace_array *tr, struct ftrace_probe_ops *ops,
539 void *data)
540{
541 if (!tracer_tracing_is_on(tr))
542 return;
543
544 tracer_tracing_off(tr);
545}
546
547#ifdef CONFIG_UNWINDER_ORC
548/*
549 * Skip 3:
550 *
551 * function_trace_probe_call()
552 * ftrace_ops_assist_func()
553 * ftrace_call()
554 */
555#define FTRACE_STACK_SKIP 3
556#else
557/*
558 * Skip 5:
559 *
560 * __trace_stack()
561 * ftrace_stacktrace()
562 * function_trace_probe_call()
563 * ftrace_ops_assist_func()
564 * ftrace_call()
565 */
566#define FTRACE_STACK_SKIP 5
567#endif
568
569static __always_inline void trace_stack(struct trace_array *tr)
570{
571 unsigned int trace_ctx;
572
573 trace_ctx = tracing_gen_ctx();
574
575 __trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
576}
577
578static void
579ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
580 struct trace_array *tr, struct ftrace_probe_ops *ops,
581 void *data)
582{
583 trace_stack(tr);
584}
585
586static void
587ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
588 struct trace_array *tr, struct ftrace_probe_ops *ops,
589 void *data)
590{
591 struct ftrace_func_mapper *mapper = data;
592 long *count;
593 long old_count;
594 long new_count;
595
596 if (!tracing_is_on())
597 return;
598
599 /* unlimited? */
600 if (!mapper) {
601 trace_stack(tr);
602 return;
603 }
604
605 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
606
607 /*
608 * Stack traces should only execute the number of times the
609 * user specified in the counter.
610 */
611 do {
612 old_count = *count;
613
614 if (!old_count)
615 return;
616
617 new_count = old_count - 1;
618 new_count = cmpxchg(count, old_count, new_count);
619 if (new_count == old_count)
620 trace_stack(tr);
621
622 if (!tracing_is_on())
623 return;
624
625 } while (new_count != old_count);
626}
627
628static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
629 void *data)
630{
631 struct ftrace_func_mapper *mapper = data;
632 long *count = NULL;
633
634 if (mapper)
635 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
636
637 if (count) {
638 if (*count <= 0)
639 return 0;
640 (*count)--;
641 }
642
643 return 1;
644}
645
646static void
647ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
648 struct trace_array *tr, struct ftrace_probe_ops *ops,
649 void *data)
650{
651 if (update_count(ops, ip, data))
652 ftrace_dump(DUMP_ALL);
653}
654
655/* Only dump the current CPU buffer. */
656static void
657ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
658 struct trace_array *tr, struct ftrace_probe_ops *ops,
659 void *data)
660{
661 if (update_count(ops, ip, data))
662 ftrace_dump(DUMP_ORIG);
663}
664
665static int
666ftrace_probe_print(const char *name, struct seq_file *m,
667 unsigned long ip, struct ftrace_probe_ops *ops,
668 void *data)
669{
670 struct ftrace_func_mapper *mapper = data;
671 long *count = NULL;
672
673 seq_printf(m, "%ps:%s", (void *)ip, name);
674
675 if (mapper)
676 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
677
678 if (count)
679 seq_printf(m, ":count=%ld\n", *count);
680 else
681 seq_puts(m, ":unlimited\n");
682
683 return 0;
684}
685
686static int
687ftrace_traceon_print(struct seq_file *m, unsigned long ip,
688 struct ftrace_probe_ops *ops,
689 void *data)
690{
691 return ftrace_probe_print("traceon", m, ip, ops, data);
692}
693
694static int
695ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
696 struct ftrace_probe_ops *ops, void *data)
697{
698 return ftrace_probe_print("traceoff", m, ip, ops, data);
699}
700
701static int
702ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
703 struct ftrace_probe_ops *ops, void *data)
704{
705 return ftrace_probe_print("stacktrace", m, ip, ops, data);
706}
707
708static int
709ftrace_dump_print(struct seq_file *m, unsigned long ip,
710 struct ftrace_probe_ops *ops, void *data)
711{
712 return ftrace_probe_print("dump", m, ip, ops, data);
713}
714
715static int
716ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
717 struct ftrace_probe_ops *ops, void *data)
718{
719 return ftrace_probe_print("cpudump", m, ip, ops, data);
720}
721
722
723static int
724ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
725 unsigned long ip, void *init_data, void **data)
726{
727 struct ftrace_func_mapper *mapper = *data;
728
729 if (!mapper) {
730 mapper = allocate_ftrace_func_mapper();
731 if (!mapper)
732 return -ENOMEM;
733 *data = mapper;
734 }
735
736 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
737}
738
739static void
740ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
741 unsigned long ip, void *data)
742{
743 struct ftrace_func_mapper *mapper = data;
744
745 if (!ip) {
746 free_ftrace_func_mapper(mapper, NULL);
747 return;
748 }
749
750 ftrace_func_mapper_remove_ip(mapper, ip);
751}
752
753static struct ftrace_probe_ops traceon_count_probe_ops = {
754 .func = ftrace_traceon_count,
755 .print = ftrace_traceon_print,
756 .init = ftrace_count_init,
757 .free = ftrace_count_free,
758};
759
760static struct ftrace_probe_ops traceoff_count_probe_ops = {
761 .func = ftrace_traceoff_count,
762 .print = ftrace_traceoff_print,
763 .init = ftrace_count_init,
764 .free = ftrace_count_free,
765};
766
767static struct ftrace_probe_ops stacktrace_count_probe_ops = {
768 .func = ftrace_stacktrace_count,
769 .print = ftrace_stacktrace_print,
770 .init = ftrace_count_init,
771 .free = ftrace_count_free,
772};
773
774static struct ftrace_probe_ops dump_probe_ops = {
775 .func = ftrace_dump_probe,
776 .print = ftrace_dump_print,
777 .init = ftrace_count_init,
778 .free = ftrace_count_free,
779};
780
781static struct ftrace_probe_ops cpudump_probe_ops = {
782 .func = ftrace_cpudump_probe,
783 .print = ftrace_cpudump_print,
784};
785
786static struct ftrace_probe_ops traceon_probe_ops = {
787 .func = ftrace_traceon,
788 .print = ftrace_traceon_print,
789};
790
791static struct ftrace_probe_ops traceoff_probe_ops = {
792 .func = ftrace_traceoff,
793 .print = ftrace_traceoff_print,
794};
795
796static struct ftrace_probe_ops stacktrace_probe_ops = {
797 .func = ftrace_stacktrace,
798 .print = ftrace_stacktrace_print,
799};
800
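/*
 * Common helper for the probe commands below.  The input written to
 * set_ftrace_filter has the form <glob>:<command>[:<count>]; a leading
 * '!' unregisters the probe, and a missing count (internally -1) means
 * the probe never expires.
 */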
801static int
802ftrace_trace_probe_callback(struct trace_array *tr,
803 struct ftrace_probe_ops *ops,
804 struct ftrace_hash *hash, char *glob,
805 char *cmd, char *param, int enable)
806{
807 void *count = (void *)-1;
808 char *number;
809 int ret;
810
811 /* hash funcs only work with set_ftrace_filter */
812 if (!enable)
813 return -EINVAL;
814
815 if (glob[0] == '!')
816 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
817
818 if (!param)
819 goto out_reg;
820
821 number = strsep(&param, ":");
822
823 if (!strlen(number))
824 goto out_reg;
825
826 /*
827 * We use the callback data field (which is a pointer)
828 * as our counter.
829 */
830 ret = kstrtoul(number, 0, (unsigned long *)&count);
831 if (ret)
832 return ret;
833
834 out_reg:
835 ret = register_ftrace_function_probe(glob, tr, ops, count);
836
837 return ret < 0 ? ret : 0;
838}
839
840static int
841ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
842 char *glob, char *cmd, char *param, int enable)
843{
844 struct ftrace_probe_ops *ops;
845
846 if (!tr)
847 return -ENODEV;
848
849 /* we register both traceon and traceoff to this callback */
850 if (strcmp(cmd, "traceon") == 0)
851 ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
852 else
853 ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
854
855 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
856 param, enable);
857}
858
859static int
860ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
861 char *glob, char *cmd, char *param, int enable)
862{
863 struct ftrace_probe_ops *ops;
864
865 if (!tr)
866 return -ENODEV;
867
868 ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
869
870 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
871 param, enable);
872}
873
874static int
875ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
876 char *glob, char *cmd, char *param, int enable)
877{
878 struct ftrace_probe_ops *ops;
879
880 if (!tr)
881 return -ENODEV;
882
883 ops = &dump_probe_ops;
884
885 /* Only dump once. */
886 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
887 "1", enable);
888}
889
890static int
891ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
892 char *glob, char *cmd, char *param, int enable)
893{
894 struct ftrace_probe_ops *ops;
895
896 if (!tr)
897 return -ENODEV;
898
899 ops = &cpudump_probe_ops;
900
901 /* Only dump once. */
902 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
903 "1", enable);
904}
905
906static struct ftrace_func_command ftrace_traceon_cmd = {
907 .name = "traceon",
908 .func = ftrace_trace_onoff_callback,
909};
910
911static struct ftrace_func_command ftrace_traceoff_cmd = {
912 .name = "traceoff",
913 .func = ftrace_trace_onoff_callback,
914};
915
916static struct ftrace_func_command ftrace_stacktrace_cmd = {
917 .name = "stacktrace",
918 .func = ftrace_stacktrace_callback,
919};
920
921static struct ftrace_func_command ftrace_dump_cmd = {
922 .name = "dump",
923 .func = ftrace_dump_callback,
924};
925
926static struct ftrace_func_command ftrace_cpudump_cmd = {
927 .name = "cpudump",
928 .func = ftrace_cpudump_callback,
929};
930
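/*
 * Register the traceon, traceoff, stacktrace, dump and cpudump commands.
 * Example usage (assuming tracefs is mounted at /sys/kernel/tracing):
 *   echo 'schedule:stacktrace:3' > /sys/kernel/tracing/set_ftrace_filter
 */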
931static int __init init_func_cmd_traceon(void)
932{
933 int ret;
934
935 ret = register_ftrace_command(&ftrace_traceoff_cmd);
936 if (ret)
937 return ret;
938
939 ret = register_ftrace_command(&ftrace_traceon_cmd);
940 if (ret)
941 goto out_free_traceoff;
942
943 ret = register_ftrace_command(&ftrace_stacktrace_cmd);
944 if (ret)
945 goto out_free_traceon;
946
947 ret = register_ftrace_command(&ftrace_dump_cmd);
948 if (ret)
949 goto out_free_stacktrace;
950
951 ret = register_ftrace_command(&ftrace_cpudump_cmd);
952 if (ret)
953 goto out_free_dump;
954
955 return 0;
956
957 out_free_dump:
958 unregister_ftrace_command(&ftrace_dump_cmd);
959 out_free_stacktrace:
960 unregister_ftrace_command(&ftrace_stacktrace_cmd);
961 out_free_traceon:
962 unregister_ftrace_command(&ftrace_traceon_cmd);
963 out_free_traceoff:
964 unregister_ftrace_command(&ftrace_traceoff_cmd);
965
966 return ret;
967}
968#else
969static inline int init_func_cmd_traceon(void)
970{
971 return 0;
972}
973#endif /* CONFIG_DYNAMIC_FTRACE */
974
975__init int init_function_trace(void)
976{
977 init_func_cmd_traceon();
978 return register_tracer(&function_trace);
979}