Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Kprobes-based tracing events
4 *
5 * Created by Masami Hiramatsu <mhiramat@redhat.com>
6 *
7 */
8#define pr_fmt(fmt) "trace_kprobe: " fmt
9
10#include <linux/security.h>
11#include <linux/module.h>
12#include <linux/uaccess.h>
13#include <linux/rculist.h>
14#include <linux/error-injection.h>
15
16#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
17
18#include "trace_dynevent.h"
19#include "trace_kprobe_selftest.h"
20#include "trace_probe.h"
21#include "trace_probe_tmpl.h"
22
23#define KPROBE_EVENT_SYSTEM "kprobes"
24#define KRETPROBE_MAXACTIVE_MAX 4096
25
26/* Kprobe early definition from command line */
27static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
28
29static int __init set_kprobe_boot_events(char *str)
30{
31 strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
32 disable_tracing_selftest("running kprobe events");
33
34 return 0;
35}
36__setup("kprobe_event=", set_kprobe_boot_events);
37
38static int trace_kprobe_create(const char *raw_command);
39static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
40static int trace_kprobe_release(struct dyn_event *ev);
41static bool trace_kprobe_is_busy(struct dyn_event *ev);
42static bool trace_kprobe_match(const char *system, const char *event,
43 int argc, const char **argv, struct dyn_event *ev);
44
45static struct dyn_event_operations trace_kprobe_ops = {
46 .create = trace_kprobe_create,
47 .show = trace_kprobe_show,
48 .is_busy = trace_kprobe_is_busy,
49 .free = trace_kprobe_release,
50 .match = trace_kprobe_match,
51};
52
53/*
54 * Kprobe event core functions
55 */
56struct trace_kprobe {
57 struct dyn_event devent;
58 struct kretprobe rp; /* Use rp.kp for kprobe use */
59 unsigned long __percpu *nhit;
60 const char *symbol; /* symbol name */
61 struct trace_probe tp;
62};
63
64static bool is_trace_kprobe(struct dyn_event *ev)
65{
66 return ev->ops == &trace_kprobe_ops;
67}
68
69static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
70{
71 return container_of(ev, struct trace_kprobe, devent);
72}
73
74/**
75 * for_each_trace_kprobe - iterate over the trace_kprobe list
76 * @pos: the struct trace_kprobe * for each entry
77 * @dpos: the struct dyn_event * to use as a loop cursor
78 */
79#define for_each_trace_kprobe(pos, dpos) \
80 for_each_dyn_event(dpos) \
81 if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
82
83#define SIZEOF_TRACE_KPROBE(n) \
84 (offsetof(struct trace_kprobe, tp.args) + \
85 (sizeof(struct probe_arg) * (n)))
86
87static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
88{
89 return tk->rp.handler != NULL;
90}
91
92static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
93{
94 return tk->symbol ? tk->symbol : "unknown";
95}
96
97static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
98{
99 return tk->rp.kp.offset;
100}
101
102static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
103{
104 return !!(kprobe_gone(&tk->rp.kp));
105}
106
107static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
108 struct module *mod)
109{
110 int len = strlen(module_name(mod));
111 const char *name = trace_kprobe_symbol(tk);
112
113 return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
114}
115
116static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
117{
118 char *p;
119 bool ret;
120
121 if (!tk->symbol)
122 return false;
123 p = strchr(tk->symbol, ':');
124 if (!p)
125 return true;
126 *p = '\0';
127 rcu_read_lock_sched();
128 ret = !!find_module(tk->symbol);
129 rcu_read_unlock_sched();
130 *p = ':';
131
132 return ret;
133}
134
135static bool trace_kprobe_is_busy(struct dyn_event *ev)
136{
137 struct trace_kprobe *tk = to_trace_kprobe(ev);
138
139 return trace_probe_is_enabled(&tk->tp);
140}
141
142static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
143 int argc, const char **argv)
144{
145 char buf[MAX_ARGSTR_LEN + 1];
146
147 if (!argc)
148 return true;
149
150 if (!tk->symbol)
151 snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
152 else if (tk->rp.kp.offset)
153 snprintf(buf, sizeof(buf), "%s+%u",
154 trace_kprobe_symbol(tk), tk->rp.kp.offset);
155 else
156 snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
157 if (strcmp(buf, argv[0]))
158 return false;
159 argc--; argv++;
160
161 return trace_probe_match_command_args(&tk->tp, argc, argv);
162}
163
164static bool trace_kprobe_match(const char *system, const char *event,
165 int argc, const char **argv, struct dyn_event *ev)
166{
167 struct trace_kprobe *tk = to_trace_kprobe(ev);
168
169 return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
170 (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
171 trace_kprobe_match_command_head(tk, argc, argv);
172}
173
174static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
175{
176 unsigned long nhit = 0;
177 int cpu;
178
179 for_each_possible_cpu(cpu)
180 nhit += *per_cpu_ptr(tk->nhit, cpu);
181
182 return nhit;
183}
184
185static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
186{
187 return !(list_empty(&tk->rp.kp.list) &&
188 hlist_unhashed(&tk->rp.kp.hlist));
189}
190
191/* Return 0 if it fails to find the symbol address */
192static nokprobe_inline
193unsigned long trace_kprobe_address(struct trace_kprobe *tk)
194{
195 unsigned long addr;
196
197 if (tk->symbol) {
198 addr = (unsigned long)
199 kallsyms_lookup_name(trace_kprobe_symbol(tk));
200 if (addr)
201 addr += tk->rp.kp.offset;
202 } else {
203 addr = (unsigned long)tk->rp.kp.addr;
204 }
205 return addr;
206}
207
208static nokprobe_inline struct trace_kprobe *
209trace_kprobe_primary_from_call(struct trace_event_call *call)
210{
211 struct trace_probe *tp;
212
213 tp = trace_probe_primary_from_call(call);
214 if (WARN_ON_ONCE(!tp))
215 return NULL;
216
217 return container_of(tp, struct trace_kprobe, tp);
218}
219
220bool trace_kprobe_on_func_entry(struct trace_event_call *call)
221{
222 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
223
224 return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
225 tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
226 tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
227}
228
229bool trace_kprobe_error_injectable(struct trace_event_call *call)
230{
231 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
232
233 return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
234 false;
235}
236
237static int register_kprobe_event(struct trace_kprobe *tk);
238static int unregister_kprobe_event(struct trace_kprobe *tk);
239
240static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
241static int kretprobe_dispatcher(struct kretprobe_instance *ri,
242 struct pt_regs *regs);
243
244static void free_trace_kprobe(struct trace_kprobe *tk)
245{
246 if (tk) {
247 trace_probe_cleanup(&tk->tp);
248 kfree(tk->symbol);
249 free_percpu(tk->nhit);
250 kfree(tk);
251 }
252}
253
254/*
255 * Allocate new trace_probe and initialize it (including kprobes).
256 */
257static struct trace_kprobe *alloc_trace_kprobe(const char *group,
258 const char *event,
259 void *addr,
260 const char *symbol,
261 unsigned long offs,
262 int maxactive,
263 int nargs, bool is_return)
264{
265 struct trace_kprobe *tk;
266 int ret = -ENOMEM;
267
268 tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
269 if (!tk)
270 return ERR_PTR(ret);
271
272 tk->nhit = alloc_percpu(unsigned long);
273 if (!tk->nhit)
274 goto error;
275
276 if (symbol) {
277 tk->symbol = kstrdup(symbol, GFP_KERNEL);
278 if (!tk->symbol)
279 goto error;
280 tk->rp.kp.symbol_name = tk->symbol;
281 tk->rp.kp.offset = offs;
282 } else
283 tk->rp.kp.addr = addr;
284
285 if (is_return)
286 tk->rp.handler = kretprobe_dispatcher;
287 else
288 tk->rp.kp.pre_handler = kprobe_dispatcher;
289
290 tk->rp.maxactive = maxactive;
291 INIT_HLIST_NODE(&tk->rp.kp.hlist);
292 INIT_LIST_HEAD(&tk->rp.kp.list);
293
294 ret = trace_probe_init(&tk->tp, event, group, false);
295 if (ret < 0)
296 goto error;
297
298 dyn_event_init(&tk->devent, &trace_kprobe_ops);
299 return tk;
300error:
301 free_trace_kprobe(tk);
302 return ERR_PTR(ret);
303}
304
305static struct trace_kprobe *find_trace_kprobe(const char *event,
306 const char *group)
307{
308 struct dyn_event *pos;
309 struct trace_kprobe *tk;
310
311 for_each_trace_kprobe(tk, pos)
312 if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
313 strcmp(trace_probe_group_name(&tk->tp), group) == 0)
314 return tk;
315 return NULL;
316}
317
318static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
319{
320 int ret = 0;
321
322 if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
323 if (trace_kprobe_is_return(tk))
324 ret = enable_kretprobe(&tk->rp);
325 else
326 ret = enable_kprobe(&tk->rp.kp);
327 }
328
329 return ret;
330}
331
332static void __disable_trace_kprobe(struct trace_probe *tp)
333{
334 struct trace_probe *pos;
335 struct trace_kprobe *tk;
336
337 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
338 tk = container_of(pos, struct trace_kprobe, tp);
339 if (!trace_kprobe_is_registered(tk))
340 continue;
341 if (trace_kprobe_is_return(tk))
342 disable_kretprobe(&tk->rp);
343 else
344 disable_kprobe(&tk->rp.kp);
345 }
346}
347
348/*
349 * Enable trace_probe
350 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
351 */
352static int enable_trace_kprobe(struct trace_event_call *call,
353 struct trace_event_file *file)
354{
355 struct trace_probe *pos, *tp;
356 struct trace_kprobe *tk;
357 bool enabled;
358 int ret = 0;
359
360 tp = trace_probe_primary_from_call(call);
361 if (WARN_ON_ONCE(!tp))
362 return -ENODEV;
363 enabled = trace_probe_is_enabled(tp);
364
365 /* This also changes "enabled" state */
366 if (file) {
367 ret = trace_probe_add_file(tp, file);
368 if (ret)
369 return ret;
370 } else
371 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
372
373 if (enabled)
374 return 0;
375
376 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
377 tk = container_of(pos, struct trace_kprobe, tp);
378 if (trace_kprobe_has_gone(tk))
379 continue;
380 ret = __enable_trace_kprobe(tk);
381 if (ret)
382 break;
383 enabled = true;
384 }
385
386 if (ret) {
387 /* Failed to enable one of them. Roll back all */
388 if (enabled)
389 __disable_trace_kprobe(tp);
390 if (file)
391 trace_probe_remove_file(tp, file);
392 else
393 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
394 }
395
396 return ret;
397}
398
399/*
400 * Disable trace_probe
401 * if the file is NULL, disable "perf" handler, or disable "trace" handler.
402 */
403static int disable_trace_kprobe(struct trace_event_call *call,
404 struct trace_event_file *file)
405{
406 struct trace_probe *tp;
407
408 tp = trace_probe_primary_from_call(call);
409 if (WARN_ON_ONCE(!tp))
410 return -ENODEV;
411
412 if (file) {
413 if (!trace_probe_get_file_link(tp, file))
414 return -ENOENT;
415 if (!trace_probe_has_single_file(tp))
416 goto out;
417 trace_probe_clear_flag(tp, TP_FLAG_TRACE);
418 } else
419 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
420
421 if (!trace_probe_is_enabled(tp))
422 __disable_trace_kprobe(tp);
423
424 out:
425 if (file)
426 /*
427 * Synchronization is done in below function. For perf event,
428 * file == NULL and perf_trace_event_unreg() calls
429 * tracepoint_synchronize_unregister() to ensure synchronize
430 * event. We don't need to care about it.
431 */
432 trace_probe_remove_file(tp, file);
433
434 return 0;
435}
436
437#if defined(CONFIG_DYNAMIC_FTRACE) && \
438 !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
439static bool __within_notrace_func(unsigned long addr)
440{
441 unsigned long offset, size;
442
443 if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
444 return false;
445
446 /* Get the entry address of the target function */
447 addr -= offset;
448
449 /*
450 * Since ftrace_location_range() does inclusive range check, we need
451 * to subtract 1 byte from the end address.
452 */
453 return !ftrace_location_range(addr, addr + size - 1);
454}
455
456static bool within_notrace_func(struct trace_kprobe *tk)
457{
458 unsigned long addr = trace_kprobe_address(tk);
459 char symname[KSYM_NAME_LEN], *p;
460
461 if (!__within_notrace_func(addr))
462 return false;
463
464 /* Check if the address is on a suffixed-symbol */
465 if (!lookup_symbol_name(addr, symname)) {
466 p = strchr(symname, '.');
467 if (!p)
468 return true;
469 *p = '\0';
470 addr = (unsigned long)kprobe_lookup_name(symname, 0);
471 if (addr)
472 return __within_notrace_func(addr);
473 }
474
475 return true;
476}
477#else
478#define within_notrace_func(tk) (false)
479#endif
480
481/* Internal register function - just handle k*probes and flags */
482static int __register_trace_kprobe(struct trace_kprobe *tk)
483{
484 int i, ret;
485
486 ret = security_locked_down(LOCKDOWN_KPROBES);
487 if (ret)
488 return ret;
489
490 if (trace_kprobe_is_registered(tk))
491 return -EINVAL;
492
493 if (within_notrace_func(tk)) {
494 pr_warn("Could not probe notrace function %s\n",
495 trace_kprobe_symbol(tk));
496 return -EINVAL;
497 }
498
499 for (i = 0; i < tk->tp.nr_args; i++) {
500 ret = traceprobe_update_arg(&tk->tp.args[i]);
501 if (ret)
502 return ret;
503 }
504
505 /* Set/clear disabled flag according to tp->flag */
506 if (trace_probe_is_enabled(&tk->tp))
507 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
508 else
509 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
510
511 if (trace_kprobe_is_return(tk))
512 ret = register_kretprobe(&tk->rp);
513 else
514 ret = register_kprobe(&tk->rp.kp);
515
516 return ret;
517}
518
519/* Internal unregister function - just handle k*probes and flags */
520static void __unregister_trace_kprobe(struct trace_kprobe *tk)
521{
522 if (trace_kprobe_is_registered(tk)) {
523 if (trace_kprobe_is_return(tk))
524 unregister_kretprobe(&tk->rp);
525 else
526 unregister_kprobe(&tk->rp.kp);
527 /* Cleanup kprobe for reuse and mark it unregistered */
528 INIT_HLIST_NODE(&tk->rp.kp.hlist);
529 INIT_LIST_HEAD(&tk->rp.kp.list);
530 if (tk->rp.kp.symbol_name)
531 tk->rp.kp.addr = NULL;
532 }
533}
534
535/* Unregister a trace_probe and probe_event */
536static int unregister_trace_kprobe(struct trace_kprobe *tk)
537{
538 /* If other probes are on the event, just unregister kprobe */
539 if (trace_probe_has_sibling(&tk->tp))
540 goto unreg;
541
542 /* Enabled event can not be unregistered */
543 if (trace_probe_is_enabled(&tk->tp))
544 return -EBUSY;
545
546 /* Will fail if probe is being used by ftrace or perf */
547 if (unregister_kprobe_event(tk))
548 return -EBUSY;
549
550unreg:
551 __unregister_trace_kprobe(tk);
552 dyn_event_remove(&tk->devent);
553 trace_probe_unlink(&tk->tp);
554
555 return 0;
556}
557
558static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
559 struct trace_kprobe *comp)
560{
561 struct trace_probe_event *tpe = orig->tp.event;
562 struct trace_probe *pos;
563 int i;
564
565 list_for_each_entry(pos, &tpe->probes, list) {
566 orig = container_of(pos, struct trace_kprobe, tp);
567 if (strcmp(trace_kprobe_symbol(orig),
568 trace_kprobe_symbol(comp)) ||
569 trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
570 continue;
571
572 /*
573 * trace_probe_compare_arg_type() ensured that nr_args and
574 * each argument name and type are same. Let's compare comm.
575 */
576 for (i = 0; i < orig->tp.nr_args; i++) {
577 if (strcmp(orig->tp.args[i].comm,
578 comp->tp.args[i].comm))
579 break;
580 }
581
582 if (i == orig->tp.nr_args)
583 return true;
584 }
585
586 return false;
587}
588
589static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
590{
591 int ret;
592
593 ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
594 if (ret) {
595 /* Note that argument starts index = 2 */
596 trace_probe_log_set_index(ret + 1);
597 trace_probe_log_err(0, DIFF_ARG_TYPE);
598 return -EEXIST;
599 }
600 if (trace_kprobe_has_same_kprobe(to, tk)) {
601 trace_probe_log_set_index(0);
602 trace_probe_log_err(0, SAME_PROBE);
603 return -EEXIST;
604 }
605
606 /* Append to existing event */
607 ret = trace_probe_append(&tk->tp, &to->tp);
608 if (ret)
609 return ret;
610
611 /* Register k*probe */
612 ret = __register_trace_kprobe(tk);
613 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
614 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
615 ret = 0;
616 }
617
618 if (ret)
619 trace_probe_unlink(&tk->tp);
620 else
621 dyn_event_add(&tk->devent);
622
623 return ret;
624}
625
626/* Register a trace_probe and probe_event */
627static int register_trace_kprobe(struct trace_kprobe *tk)
628{
629 struct trace_kprobe *old_tk;
630 int ret;
631
632 mutex_lock(&event_mutex);
633
634 old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
635 trace_probe_group_name(&tk->tp));
636 if (old_tk) {
637 if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
638 trace_probe_log_set_index(0);
639 trace_probe_log_err(0, DIFF_PROBE_TYPE);
640 ret = -EEXIST;
641 } else {
642 ret = append_trace_kprobe(tk, old_tk);
643 }
644 goto end;
645 }
646
647 /* Register new event */
648 ret = register_kprobe_event(tk);
649 if (ret) {
650 if (ret == -EEXIST) {
651 trace_probe_log_set_index(0);
652 trace_probe_log_err(0, EVENT_EXIST);
653 } else
654 pr_warn("Failed to register probe event(%d)\n", ret);
655 goto end;
656 }
657
658 /* Register k*probe */
659 ret = __register_trace_kprobe(tk);
660 if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
661 pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
662 ret = 0;
663 }
664
665 if (ret < 0)
666 unregister_kprobe_event(tk);
667 else
668 dyn_event_add(&tk->devent);
669
670end:
671 mutex_unlock(&event_mutex);
672 return ret;
673}
674
675/* Module notifier call back, checking event on the module */
676static int trace_kprobe_module_callback(struct notifier_block *nb,
677 unsigned long val, void *data)
678{
679 struct module *mod = data;
680 struct dyn_event *pos;
681 struct trace_kprobe *tk;
682 int ret;
683
684 if (val != MODULE_STATE_COMING)
685 return NOTIFY_DONE;
686
687 /* Update probes on coming module */
688 mutex_lock(&event_mutex);
689 for_each_trace_kprobe(tk, pos) {
690 if (trace_kprobe_within_module(tk, mod)) {
691 /* Don't need to check busy - this should have gone. */
692 __unregister_trace_kprobe(tk);
693 ret = __register_trace_kprobe(tk);
694 if (ret)
695 pr_warn("Failed to re-register probe %s on %s: %d\n",
696 trace_probe_name(&tk->tp),
697 module_name(mod), ret);
698 }
699 }
700 mutex_unlock(&event_mutex);
701
702 return NOTIFY_DONE;
703}
704
705static struct notifier_block trace_kprobe_module_nb = {
706 .notifier_call = trace_kprobe_module_callback,
707 .priority = 1 /* Invoked after kprobe module callback */
708};
709
710/* Convert certain expected symbols into '_' when generating event names */
711static inline void sanitize_event_name(char *name)
712{
713 while (*name++ != '\0')
714 if (*name == ':' || *name == '.')
715 *name = '_';
716}
717
718static int __trace_kprobe_create(int argc, const char *argv[])
719{
720 /*
721 * Argument syntax:
722 * - Add kprobe:
723 * p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
724 * - Add kretprobe:
725 * r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
726 * Or
727 * p:[GRP/]EVENT] [MOD:]KSYM[+0]%return [FETCHARGS]
728 *
729 * Fetch args:
730 * $retval : fetch return value
731 * $stack : fetch stack address
732 * $stackN : fetch Nth of stack (N:0-)
733 * $comm : fetch current task comm
734 * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
735 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
736 * %REG : fetch register REG
737 * Dereferencing memory fetch:
738 * +|-offs(ARG) : fetch memory at ARG +|- offs address.
739 * Alias name of args:
740 * NAME=FETCHARG : set NAME as alias of FETCHARG.
741 * Type of args:
742 * FETCHARG:TYPE : use TYPE instead of unsigned long.
743 */
744 struct trace_kprobe *tk = NULL;
745 int i, len, ret = 0;
746 bool is_return = false;
747 char *symbol = NULL, *tmp = NULL;
748 const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
749 int maxactive = 0;
750 long offset = 0;
751 void *addr = NULL;
752 char buf[MAX_EVENT_NAME_LEN];
753 unsigned int flags = TPARG_FL_KERNEL;
754
755 switch (argv[0][0]) {
756 case 'r':
757 is_return = true;
758 break;
759 case 'p':
760 break;
761 default:
762 return -ECANCELED;
763 }
764 if (argc < 2)
765 return -ECANCELED;
766
767 trace_probe_log_init("trace_kprobe", argc, argv);
768
769 event = strchr(&argv[0][1], ':');
770 if (event)
771 event++;
772
773 if (isdigit(argv[0][1])) {
774 if (!is_return) {
775 trace_probe_log_err(1, MAXACT_NO_KPROBE);
776 goto parse_error;
777 }
778 if (event)
779 len = event - &argv[0][1] - 1;
780 else
781 len = strlen(&argv[0][1]);
782 if (len > MAX_EVENT_NAME_LEN - 1) {
783 trace_probe_log_err(1, BAD_MAXACT);
784 goto parse_error;
785 }
786 memcpy(buf, &argv[0][1], len);
787 buf[len] = '\0';
788 ret = kstrtouint(buf, 0, &maxactive);
789 if (ret || !maxactive) {
790 trace_probe_log_err(1, BAD_MAXACT);
791 goto parse_error;
792 }
793 /* kretprobes instances are iterated over via a list. The
794 * maximum should stay reasonable.
795 */
796 if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
797 trace_probe_log_err(1, MAXACT_TOO_BIG);
798 goto parse_error;
799 }
800 }
801
802 /* try to parse an address. if that fails, try to read the
803 * input as a symbol. */
804 if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
805 trace_probe_log_set_index(1);
806 /* Check whether uprobe event specified */
807 if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
808 ret = -ECANCELED;
809 goto error;
810 }
811 /* a symbol specified */
812 symbol = kstrdup(argv[1], GFP_KERNEL);
813 if (!symbol)
814 return -ENOMEM;
815
816 tmp = strchr(symbol, '%');
817 if (tmp) {
818 if (!strcmp(tmp, "%return")) {
819 *tmp = '\0';
820 is_return = true;
821 } else {
822 trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
823 goto parse_error;
824 }
825 }
826
827 /* TODO: support .init module functions */
828 ret = traceprobe_split_symbol_offset(symbol, &offset);
829 if (ret || offset < 0 || offset > UINT_MAX) {
830 trace_probe_log_err(0, BAD_PROBE_ADDR);
831 goto parse_error;
832 }
833 if (is_return)
834 flags |= TPARG_FL_RETURN;
835 ret = kprobe_on_func_entry(NULL, symbol, offset);
836 if (ret == 0)
837 flags |= TPARG_FL_FENTRY;
838 /* Defer the ENOENT case until register kprobe */
839 if (ret == -EINVAL && is_return) {
840 trace_probe_log_err(0, BAD_RETPROBE);
841 goto parse_error;
842 }
843 }
844
845 trace_probe_log_set_index(0);
846 if (event) {
847 ret = traceprobe_parse_event_name(&event, &group, buf,
848 event - argv[0]);
849 if (ret)
850 goto parse_error;
851 } else {
852 /* Make a new event name */
853 if (symbol)
854 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
855 is_return ? 'r' : 'p', symbol, offset);
856 else
857 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
858 is_return ? 'r' : 'p', addr);
859 sanitize_event_name(buf);
860 event = buf;
861 }
862
863 /* setup a probe */
864 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
865 argc - 2, is_return);
866 if (IS_ERR(tk)) {
867 ret = PTR_ERR(tk);
868 /* This must return -ENOMEM, else there is a bug */
869 WARN_ON_ONCE(ret != -ENOMEM);
870 goto out; /* We know tk is not allocated */
871 }
872 argc -= 2; argv += 2;
873
874 /* parse arguments */
875 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
876 tmp = kstrdup(argv[i], GFP_KERNEL);
877 if (!tmp) {
878 ret = -ENOMEM;
879 goto error;
880 }
881
882 trace_probe_log_set_index(i + 2);
883 ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
884 kfree(tmp);
885 if (ret)
886 goto error; /* This can be -ENOMEM */
887 }
888
889 ret = traceprobe_set_print_fmt(&tk->tp, is_return);
890 if (ret < 0)
891 goto error;
892
893 ret = register_trace_kprobe(tk);
894 if (ret) {
895 trace_probe_log_set_index(1);
896 if (ret == -EILSEQ)
897 trace_probe_log_err(0, BAD_INSN_BNDRY);
898 else if (ret == -ENOENT)
899 trace_probe_log_err(0, BAD_PROBE_ADDR);
900 else if (ret != -ENOMEM && ret != -EEXIST)
901 trace_probe_log_err(0, FAIL_REG_PROBE);
902 goto error;
903 }
904
905out:
906 trace_probe_log_clear();
907 kfree(symbol);
908 return ret;
909
910parse_error:
911 ret = -EINVAL;
912error:
913 free_trace_kprobe(tk);
914 goto out;
915}
916
917static int trace_kprobe_create(const char *raw_command)
918{
919 return trace_probe_create(raw_command, __trace_kprobe_create);
920}
921
922static int create_or_delete_trace_kprobe(const char *raw_command)
923{
924 int ret;
925
926 if (raw_command[0] == '-')
927 return dyn_event_release(raw_command, &trace_kprobe_ops);
928
929 ret = trace_kprobe_create(raw_command);
930 return ret == -ECANCELED ? -EINVAL : ret;
931}
932
933static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
934{
935 return create_or_delete_trace_kprobe(cmd->seq.buffer);
936}
937
938/**
939 * kprobe_event_cmd_init - Initialize a kprobe event command object
940 * @cmd: A pointer to the dynevent_cmd struct representing the new event
941 * @buf: A pointer to the buffer used to build the command
942 * @maxlen: The length of the buffer passed in @buf
943 *
944 * Initialize a synthetic event command object. Use this before
945 * calling any of the other kprobe_event functions.
946 */
947void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
948{
949 dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
950 trace_kprobe_run_command);
951}
952EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
953
954/**
955 * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
956 * @cmd: A pointer to the dynevent_cmd struct representing the new event
957 * @name: The name of the kprobe event
958 * @loc: The location of the kprobe event
959 * @kretprobe: Is this a return probe?
960 * @args: Variable number of arg (pairs), one pair for each field
961 *
962 * NOTE: Users normally won't want to call this function directly, but
963 * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
964 * adds a NULL to the end of the arg list. If this function is used
965 * directly, make sure the last arg in the variable arg list is NULL.
966 *
967 * Generate a kprobe event command to be executed by
968 * kprobe_event_gen_cmd_end(). This function can be used to generate the
969 * complete command or only the first part of it; in the latter case,
970 * kprobe_event_add_fields() can be used to add more fields following this.
971 *
972 * Unlikely the synth_event_gen_cmd_start(), @loc must be specified. This
973 * returns -EINVAL if @loc == NULL.
974 *
975 * Return: 0 if successful, error otherwise.
976 */
977int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
978 const char *name, const char *loc, ...)
979{
980 char buf[MAX_EVENT_NAME_LEN];
981 struct dynevent_arg arg;
982 va_list args;
983 int ret;
984
985 if (cmd->type != DYNEVENT_TYPE_KPROBE)
986 return -EINVAL;
987
988 if (!loc)
989 return -EINVAL;
990
991 if (kretprobe)
992 snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
993 else
994 snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
995
996 ret = dynevent_str_add(cmd, buf);
997 if (ret)
998 return ret;
999
1000 dynevent_arg_init(&arg, 0);
1001 arg.str = loc;
1002 ret = dynevent_arg_add(cmd, &arg, NULL);
1003 if (ret)
1004 return ret;
1005
1006 va_start(args, loc);
1007 for (;;) {
1008 const char *field;
1009
1010 field = va_arg(args, const char *);
1011 if (!field)
1012 break;
1013
1014 if (++cmd->n_fields > MAX_TRACE_ARGS) {
1015 ret = -EINVAL;
1016 break;
1017 }
1018
1019 arg.str = field;
1020 ret = dynevent_arg_add(cmd, &arg, NULL);
1021 if (ret)
1022 break;
1023 }
1024 va_end(args);
1025
1026 return ret;
1027}
1028EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
1029
1030/**
1031 * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
1032 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1033 * @args: Variable number of arg (pairs), one pair for each field
1034 *
1035 * NOTE: Users normally won't want to call this function directly, but
1036 * rather use the kprobe_event_add_fields() wrapper, which
1037 * automatically adds a NULL to the end of the arg list. If this
1038 * function is used directly, make sure the last arg in the variable
1039 * arg list is NULL.
1040 *
1041 * Add probe fields to an existing kprobe command using a variable
1042 * list of args. Fields are added in the same order they're listed.
1043 *
1044 * Return: 0 if successful, error otherwise.
1045 */
1046int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
1047{
1048 struct dynevent_arg arg;
1049 va_list args;
1050 int ret = 0;
1051
1052 if (cmd->type != DYNEVENT_TYPE_KPROBE)
1053 return -EINVAL;
1054
1055 dynevent_arg_init(&arg, 0);
1056
1057 va_start(args, cmd);
1058 for (;;) {
1059 const char *field;
1060
1061 field = va_arg(args, const char *);
1062 if (!field)
1063 break;
1064
1065 if (++cmd->n_fields > MAX_TRACE_ARGS) {
1066 ret = -EINVAL;
1067 break;
1068 }
1069
1070 arg.str = field;
1071 ret = dynevent_arg_add(cmd, &arg, NULL);
1072 if (ret)
1073 break;
1074 }
1075 va_end(args);
1076
1077 return ret;
1078}
1079EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
1080
1081/**
1082 * kprobe_event_delete - Delete a kprobe event
1083 * @name: The name of the kprobe event to delete
1084 *
1085 * Delete a kprobe event with the give @name from kernel code rather
1086 * than directly from the command line.
1087 *
1088 * Return: 0 if successful, error otherwise.
1089 */
1090int kprobe_event_delete(const char *name)
1091{
1092 char buf[MAX_EVENT_NAME_LEN];
1093
1094 snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
1095
1096 return create_or_delete_trace_kprobe(buf);
1097}
1098EXPORT_SYMBOL_GPL(kprobe_event_delete);
1099
1100static int trace_kprobe_release(struct dyn_event *ev)
1101{
1102 struct trace_kprobe *tk = to_trace_kprobe(ev);
1103 int ret = unregister_trace_kprobe(tk);
1104
1105 if (!ret)
1106 free_trace_kprobe(tk);
1107 return ret;
1108}
1109
1110static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
1111{
1112 struct trace_kprobe *tk = to_trace_kprobe(ev);
1113 int i;
1114
1115 seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
1116 if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
1117 seq_printf(m, "%d", tk->rp.maxactive);
1118 seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
1119 trace_probe_name(&tk->tp));
1120
1121 if (!tk->symbol)
1122 seq_printf(m, " 0x%p", tk->rp.kp.addr);
1123 else if (tk->rp.kp.offset)
1124 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
1125 tk->rp.kp.offset);
1126 else
1127 seq_printf(m, " %s", trace_kprobe_symbol(tk));
1128
1129 for (i = 0; i < tk->tp.nr_args; i++)
1130 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
1131 seq_putc(m, '\n');
1132
1133 return 0;
1134}
1135
1136static int probes_seq_show(struct seq_file *m, void *v)
1137{
1138 struct dyn_event *ev = v;
1139
1140 if (!is_trace_kprobe(ev))
1141 return 0;
1142
1143 return trace_kprobe_show(m, ev);
1144}
1145
1146static const struct seq_operations probes_seq_op = {
1147 .start = dyn_event_seq_start,
1148 .next = dyn_event_seq_next,
1149 .stop = dyn_event_seq_stop,
1150 .show = probes_seq_show
1151};
1152
1153static int probes_open(struct inode *inode, struct file *file)
1154{
1155 int ret;
1156
1157 ret = security_locked_down(LOCKDOWN_TRACEFS);
1158 if (ret)
1159 return ret;
1160
1161 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1162 ret = dyn_events_release_all(&trace_kprobe_ops);
1163 if (ret < 0)
1164 return ret;
1165 }
1166
1167 return seq_open(file, &probes_seq_op);
1168}
1169
1170static ssize_t probes_write(struct file *file, const char __user *buffer,
1171 size_t count, loff_t *ppos)
1172{
1173 return trace_parse_run_command(file, buffer, count, ppos,
1174 create_or_delete_trace_kprobe);
1175}
1176
1177static const struct file_operations kprobe_events_ops = {
1178 .owner = THIS_MODULE,
1179 .open = probes_open,
1180 .read = seq_read,
1181 .llseek = seq_lseek,
1182 .release = seq_release,
1183 .write = probes_write,
1184};
1185
1186/* Probes profiling interfaces */
1187static int probes_profile_seq_show(struct seq_file *m, void *v)
1188{
1189 struct dyn_event *ev = v;
1190 struct trace_kprobe *tk;
1191
1192 if (!is_trace_kprobe(ev))
1193 return 0;
1194
1195 tk = to_trace_kprobe(ev);
1196 seq_printf(m, " %-44s %15lu %15lu\n",
1197 trace_probe_name(&tk->tp),
1198 trace_kprobe_nhit(tk),
1199 tk->rp.kp.nmissed);
1200
1201 return 0;
1202}
1203
1204static const struct seq_operations profile_seq_op = {
1205 .start = dyn_event_seq_start,
1206 .next = dyn_event_seq_next,
1207 .stop = dyn_event_seq_stop,
1208 .show = probes_profile_seq_show
1209};
1210
1211static int profile_open(struct inode *inode, struct file *file)
1212{
1213 int ret;
1214
1215 ret = security_locked_down(LOCKDOWN_TRACEFS);
1216 if (ret)
1217 return ret;
1218
1219 return seq_open(file, &profile_seq_op);
1220}
1221
1222static const struct file_operations kprobe_profile_ops = {
1223 .owner = THIS_MODULE,
1224 .open = profile_open,
1225 .read = seq_read,
1226 .llseek = seq_lseek,
1227 .release = seq_release,
1228};
1229
1230/* Kprobe specific fetch functions */
1231
1232/* Return the length of string -- including null terminal byte */
1233static nokprobe_inline int
1234fetch_store_strlen_user(unsigned long addr)
1235{
1236 const void __user *uaddr = (__force const void __user *)addr;
1237
1238 return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
1239}
1240
1241/* Return the length of string -- including null terminal byte */
1242static nokprobe_inline int
1243fetch_store_strlen(unsigned long addr)
1244{
1245 int ret, len = 0;
1246 u8 c;
1247
1248#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1249 if (addr < TASK_SIZE)
1250 return fetch_store_strlen_user(addr);
1251#endif
1252
1253 do {
1254 ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
1255 len++;
1256 } while (c && ret == 0 && len < MAX_STRING_SIZE);
1257
1258 return (ret < 0) ? ret : len;
1259}
1260
1261/*
1262 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
1263 * with max length and relative data location.
1264 */
1265static nokprobe_inline int
1266fetch_store_string_user(unsigned long addr, void *dest, void *base)
1267{
1268 const void __user *uaddr = (__force const void __user *)addr;
1269 int maxlen = get_loc_len(*(u32 *)dest);
1270 void *__dest;
1271 long ret;
1272
1273 if (unlikely(!maxlen))
1274 return -ENOMEM;
1275
1276 __dest = get_loc_data(dest, base);
1277
1278 ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
1279 if (ret >= 0)
1280 *(u32 *)dest = make_data_loc(ret, __dest - base);
1281
1282 return ret;
1283}
1284
1285/*
1286 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
1287 * length and relative data location.
1288 */
1289static nokprobe_inline int
1290fetch_store_string(unsigned long addr, void *dest, void *base)
1291{
1292 int maxlen = get_loc_len(*(u32 *)dest);
1293 void *__dest;
1294 long ret;
1295
1296#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1297 if ((unsigned long)addr < TASK_SIZE)
1298 return fetch_store_string_user(addr, dest, base);
1299#endif
1300
1301 if (unlikely(!maxlen))
1302 return -ENOMEM;
1303
1304 __dest = get_loc_data(dest, base);
1305
1306 /*
1307 * Try to get string again, since the string can be changed while
1308 * probing.
1309 */
1310 ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
1311 if (ret >= 0)
1312 *(u32 *)dest = make_data_loc(ret, __dest - base);
1313
1314 return ret;
1315}
1316
1317static nokprobe_inline int
1318probe_mem_read_user(void *dest, void *src, size_t size)
1319{
1320 const void __user *uaddr = (__force const void __user *)src;
1321
1322 return copy_from_user_nofault(dest, uaddr, size);
1323}
1324
1325static nokprobe_inline int
1326probe_mem_read(void *dest, void *src, size_t size)
1327{
1328#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1329 if ((unsigned long)src < TASK_SIZE)
1330 return probe_mem_read_user(dest, src, size);
1331#endif
1332 return copy_from_kernel_nofault(dest, src, size);
1333}
1334
1335/* Note that we don't verify it, since the code does not come from user space */
1336static int
1337process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
1338 void *base)
1339{
1340 unsigned long val;
1341
1342retry:
1343 /* 1st stage: get value from context */
1344 switch (code->op) {
1345 case FETCH_OP_REG:
1346 val = regs_get_register(regs, code->param);
1347 break;
1348 case FETCH_OP_STACK:
1349 val = regs_get_kernel_stack_nth(regs, code->param);
1350 break;
1351 case FETCH_OP_STACKP:
1352 val = kernel_stack_pointer(regs);
1353 break;
1354 case FETCH_OP_RETVAL:
1355 val = regs_return_value(regs);
1356 break;
1357 case FETCH_OP_IMM:
1358 val = code->immediate;
1359 break;
1360 case FETCH_OP_COMM:
1361 val = (unsigned long)current->comm;
1362 break;
1363 case FETCH_OP_DATA:
1364 val = (unsigned long)code->data;
1365 break;
1366#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1367 case FETCH_OP_ARG:
1368 val = regs_get_kernel_argument(regs, code->param);
1369 break;
1370#endif
1371 case FETCH_NOP_SYMBOL: /* Ignore a place holder */
1372 code++;
1373 goto retry;
1374 default:
1375 return -EILSEQ;
1376 }
1377 code++;
1378
1379 return process_fetch_insn_bottom(code, val, dest, base);
1380}
1381NOKPROBE_SYMBOL(process_fetch_insn)
1382
1383/* Kprobe handler */
1384static nokprobe_inline void
1385__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1386 struct trace_event_file *trace_file)
1387{
1388 struct kprobe_trace_entry_head *entry;
1389 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1390 struct trace_event_buffer fbuffer;
1391 int dsize;
1392
1393 WARN_ON(call != trace_file->event_call);
1394
1395 if (trace_trigger_soft_disabled(trace_file))
1396 return;
1397
1398 fbuffer.trace_ctx = tracing_gen_ctx();
1399 fbuffer.trace_file = trace_file;
1400
1401 dsize = __get_data_size(&tk->tp, regs);
1402
1403 fbuffer.event =
1404 trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
1405 call->event.type,
1406 sizeof(*entry) + tk->tp.size + dsize,
1407 fbuffer.trace_ctx);
1408 if (!fbuffer.event)
1409 return;
1410
1411 fbuffer.regs = regs;
1412 entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1413 entry->ip = (unsigned long)tk->rp.kp.addr;
1414 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1415
1416 trace_event_buffer_commit(&fbuffer);
1417}
1418
1419static void
1420kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1421{
1422 struct event_file_link *link;
1423
1424 trace_probe_for_each_link_rcu(link, &tk->tp)
1425 __kprobe_trace_func(tk, regs, link->file);
1426}
1427NOKPROBE_SYMBOL(kprobe_trace_func);
1428
1429/* Kretprobe handler */
1430static nokprobe_inline void
1431__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1432 struct pt_regs *regs,
1433 struct trace_event_file *trace_file)
1434{
1435 struct kretprobe_trace_entry_head *entry;
1436 struct trace_event_buffer fbuffer;
1437 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1438 int dsize;
1439
1440 WARN_ON(call != trace_file->event_call);
1441
1442 if (trace_trigger_soft_disabled(trace_file))
1443 return;
1444
1445 fbuffer.trace_ctx = tracing_gen_ctx();
1446 fbuffer.trace_file = trace_file;
1447
1448 dsize = __get_data_size(&tk->tp, regs);
1449 fbuffer.event =
1450 trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
1451 call->event.type,
1452 sizeof(*entry) + tk->tp.size + dsize,
1453 fbuffer.trace_ctx);
1454 if (!fbuffer.event)
1455 return;
1456
1457 fbuffer.regs = regs;
1458 entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1459 entry->func = (unsigned long)tk->rp.kp.addr;
1460 entry->ret_ip = (unsigned long)ri->ret_addr;
1461 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1462
1463 trace_event_buffer_commit(&fbuffer);
1464}
1465
1466static void
1467kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1468 struct pt_regs *regs)
1469{
1470 struct event_file_link *link;
1471
1472 trace_probe_for_each_link_rcu(link, &tk->tp)
1473 __kretprobe_trace_func(tk, ri, regs, link->file);
1474}
1475NOKPROBE_SYMBOL(kretprobe_trace_func);
1476
1477/* Event entry printers */
1478static enum print_line_t
1479print_kprobe_event(struct trace_iterator *iter, int flags,
1480 struct trace_event *event)
1481{
1482 struct kprobe_trace_entry_head *field;
1483 struct trace_seq *s = &iter->seq;
1484 struct trace_probe *tp;
1485
1486 field = (struct kprobe_trace_entry_head *)iter->ent;
1487 tp = trace_probe_primary_from_call(
1488 container_of(event, struct trace_event_call, event));
1489 if (WARN_ON_ONCE(!tp))
1490 goto out;
1491
1492 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1493
1494 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1495 goto out;
1496
1497 trace_seq_putc(s, ')');
1498
1499 if (print_probe_args(s, tp->args, tp->nr_args,
1500 (u8 *)&field[1], field) < 0)
1501 goto out;
1502
1503 trace_seq_putc(s, '\n');
1504 out:
1505 return trace_handle_return(s);
1506}
1507
1508static enum print_line_t
1509print_kretprobe_event(struct trace_iterator *iter, int flags,
1510 struct trace_event *event)
1511{
1512 struct kretprobe_trace_entry_head *field;
1513 struct trace_seq *s = &iter->seq;
1514 struct trace_probe *tp;
1515
1516 field = (struct kretprobe_trace_entry_head *)iter->ent;
1517 tp = trace_probe_primary_from_call(
1518 container_of(event, struct trace_event_call, event));
1519 if (WARN_ON_ONCE(!tp))
1520 goto out;
1521
1522 trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1523
1524 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1525 goto out;
1526
1527 trace_seq_puts(s, " <- ");
1528
1529 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1530 goto out;
1531
1532 trace_seq_putc(s, ')');
1533
1534 if (print_probe_args(s, tp->args, tp->nr_args,
1535 (u8 *)&field[1], field) < 0)
1536 goto out;
1537
1538 trace_seq_putc(s, '\n');
1539
1540 out:
1541 return trace_handle_return(s);
1542}
1543
1544
1545static int kprobe_event_define_fields(struct trace_event_call *event_call)
1546{
1547 int ret;
1548 struct kprobe_trace_entry_head field;
1549 struct trace_probe *tp;
1550
1551 tp = trace_probe_primary_from_call(event_call);
1552 if (WARN_ON_ONCE(!tp))
1553 return -ENOENT;
1554
1555 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1556
1557 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1558}
1559
1560static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1561{
1562 int ret;
1563 struct kretprobe_trace_entry_head field;
1564 struct trace_probe *tp;
1565
1566 tp = trace_probe_primary_from_call(event_call);
1567 if (WARN_ON_ONCE(!tp))
1568 return -ENOENT;
1569
1570 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1571 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1572
1573 return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1574}
1575
1576#ifdef CONFIG_PERF_EVENTS
1577
1578/* Kprobe profile handler */
1579static int
1580kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1581{
1582 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1583 struct kprobe_trace_entry_head *entry;
1584 struct hlist_head *head;
1585 int size, __size, dsize;
1586 int rctx;
1587
1588 if (bpf_prog_array_valid(call)) {
1589 unsigned long orig_ip = instruction_pointer(regs);
1590 int ret;
1591
1592 ret = trace_call_bpf(call, regs);
1593
1594 /*
1595 * We need to check and see if we modified the pc of the
1596 * pt_regs, and if so return 1 so that we don't do the
1597 * single stepping.
1598 */
1599 if (orig_ip != instruction_pointer(regs))
1600 return 1;
1601 if (!ret)
1602 return 0;
1603 }
1604
1605 head = this_cpu_ptr(call->perf_events);
1606 if (hlist_empty(head))
1607 return 0;
1608
1609 dsize = __get_data_size(&tk->tp, regs);
1610 __size = sizeof(*entry) + tk->tp.size + dsize;
1611 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1612 size -= sizeof(u32);
1613
1614 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1615 if (!entry)
1616 return 0;
1617
1618 entry->ip = (unsigned long)tk->rp.kp.addr;
1619 memset(&entry[1], 0, dsize);
1620 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1621 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1622 head, NULL);
1623 return 0;
1624}
1625NOKPROBE_SYMBOL(kprobe_perf_func);
1626
1627/* Kretprobe profile handler */
1628static void
1629kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1630 struct pt_regs *regs)
1631{
1632 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1633 struct kretprobe_trace_entry_head *entry;
1634 struct hlist_head *head;
1635 int size, __size, dsize;
1636 int rctx;
1637
1638 if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1639 return;
1640
1641 head = this_cpu_ptr(call->perf_events);
1642 if (hlist_empty(head))
1643 return;
1644
1645 dsize = __get_data_size(&tk->tp, regs);
1646 __size = sizeof(*entry) + tk->tp.size + dsize;
1647 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1648 size -= sizeof(u32);
1649
1650 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1651 if (!entry)
1652 return;
1653
1654 entry->func = (unsigned long)tk->rp.kp.addr;
1655 entry->ret_ip = (unsigned long)ri->ret_addr;
1656 store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1657 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1658 head, NULL);
1659}
1660NOKPROBE_SYMBOL(kretprobe_perf_func);
1661
1662int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1663 const char **symbol, u64 *probe_offset,
1664 u64 *probe_addr, bool perf_type_tracepoint)
1665{
1666 const char *pevent = trace_event_name(event->tp_event);
1667 const char *group = event->tp_event->class->system;
1668 struct trace_kprobe *tk;
1669
1670 if (perf_type_tracepoint)
1671 tk = find_trace_kprobe(pevent, group);
1672 else
1673 tk = trace_kprobe_primary_from_call(event->tp_event);
1674 if (!tk)
1675 return -EINVAL;
1676
1677 *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1678 : BPF_FD_TYPE_KPROBE;
1679 if (tk->symbol) {
1680 *symbol = tk->symbol;
1681 *probe_offset = tk->rp.kp.offset;
1682 *probe_addr = 0;
1683 } else {
1684 *symbol = NULL;
1685 *probe_offset = 0;
1686 *probe_addr = (unsigned long)tk->rp.kp.addr;
1687 }
1688 return 0;
1689}
1690#endif /* CONFIG_PERF_EVENTS */
1691
1692/*
1693 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1694 *
1695 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
1696 * lockless, but we can't race with this __init function.
1697 */
1698static int kprobe_register(struct trace_event_call *event,
1699 enum trace_reg type, void *data)
1700{
1701 struct trace_event_file *file = data;
1702
1703 switch (type) {
1704 case TRACE_REG_REGISTER:
1705 return enable_trace_kprobe(event, file);
1706 case TRACE_REG_UNREGISTER:
1707 return disable_trace_kprobe(event, file);
1708
1709#ifdef CONFIG_PERF_EVENTS
1710 case TRACE_REG_PERF_REGISTER:
1711 return enable_trace_kprobe(event, NULL);
1712 case TRACE_REG_PERF_UNREGISTER:
1713 return disable_trace_kprobe(event, NULL);
1714 case TRACE_REG_PERF_OPEN:
1715 case TRACE_REG_PERF_CLOSE:
1716 case TRACE_REG_PERF_ADD:
1717 case TRACE_REG_PERF_DEL:
1718 return 0;
1719#endif
1720 }
1721 return 0;
1722}
1723
1724static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1725{
1726 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1727 int ret = 0;
1728
1729 raw_cpu_inc(*tk->nhit);
1730
1731 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1732 kprobe_trace_func(tk, regs);
1733#ifdef CONFIG_PERF_EVENTS
1734 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1735 ret = kprobe_perf_func(tk, regs);
1736#endif
1737 return ret;
1738}
1739NOKPROBE_SYMBOL(kprobe_dispatcher);
1740
1741static int
1742kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1743{
1744 struct kretprobe *rp = get_kretprobe(ri);
1745 struct trace_kprobe *tk = container_of(rp, struct trace_kprobe, rp);
1746
1747 raw_cpu_inc(*tk->nhit);
1748
1749 if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1750 kretprobe_trace_func(tk, ri, regs);
1751#ifdef CONFIG_PERF_EVENTS
1752 if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1753 kretprobe_perf_func(tk, ri, regs);
1754#endif
1755 return 0; /* We don't tweak kernel, so just return 0 */
1756}
1757NOKPROBE_SYMBOL(kretprobe_dispatcher);
1758
1759static struct trace_event_functions kretprobe_funcs = {
1760 .trace = print_kretprobe_event
1761};
1762
1763static struct trace_event_functions kprobe_funcs = {
1764 .trace = print_kprobe_event
1765};
1766
1767static struct trace_event_fields kretprobe_fields_array[] = {
1768 { .type = TRACE_FUNCTION_TYPE,
1769 .define_fields = kretprobe_event_define_fields },
1770 {}
1771};
1772
1773static struct trace_event_fields kprobe_fields_array[] = {
1774 { .type = TRACE_FUNCTION_TYPE,
1775 .define_fields = kprobe_event_define_fields },
1776 {}
1777};
1778
1779static inline void init_trace_event_call(struct trace_kprobe *tk)
1780{
1781 struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1782
1783 if (trace_kprobe_is_return(tk)) {
1784 call->event.funcs = &kretprobe_funcs;
1785 call->class->fields_array = kretprobe_fields_array;
1786 } else {
1787 call->event.funcs = &kprobe_funcs;
1788 call->class->fields_array = kprobe_fields_array;
1789 }
1790
1791 call->flags = TRACE_EVENT_FL_KPROBE;
1792 call->class->reg = kprobe_register;
1793}
1794
1795static int register_kprobe_event(struct trace_kprobe *tk)
1796{
1797 init_trace_event_call(tk);
1798
1799 return trace_probe_register_event_call(&tk->tp);
1800}
1801
1802static int unregister_kprobe_event(struct trace_kprobe *tk)
1803{
1804 return trace_probe_unregister_event_call(&tk->tp);
1805}
1806
1807#ifdef CONFIG_PERF_EVENTS
1808/* create a trace_kprobe, but don't add it to global lists */
1809struct trace_event_call *
1810create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1811 bool is_return)
1812{
1813 struct trace_kprobe *tk;
1814 int ret;
1815 char *event;
1816
1817 /*
1818 * local trace_kprobes are not added to dyn_event, so they are never
1819 * searched in find_trace_kprobe(). Therefore, there is no concern of
1820 * duplicated name here.
1821 */
1822 event = func ? func : "DUMMY_EVENT";
1823
1824 tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1825 offs, 0 /* maxactive */, 0 /* nargs */,
1826 is_return);
1827
1828 if (IS_ERR(tk)) {
1829 pr_info("Failed to allocate trace_probe.(%d)\n",
1830 (int)PTR_ERR(tk));
1831 return ERR_CAST(tk);
1832 }
1833
1834 init_trace_event_call(tk);
1835
1836 if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
1837 ret = -ENOMEM;
1838 goto error;
1839 }
1840
1841 ret = __register_trace_kprobe(tk);
1842 if (ret < 0)
1843 goto error;
1844
1845 return trace_probe_event_call(&tk->tp);
1846error:
1847 free_trace_kprobe(tk);
1848 return ERR_PTR(ret);
1849}
1850
1851void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1852{
1853 struct trace_kprobe *tk;
1854
1855 tk = trace_kprobe_primary_from_call(event_call);
1856 if (unlikely(!tk))
1857 return;
1858
1859 if (trace_probe_is_enabled(&tk->tp)) {
1860 WARN_ON(1);
1861 return;
1862 }
1863
1864 __unregister_trace_kprobe(tk);
1865
1866 free_trace_kprobe(tk);
1867}
1868#endif /* CONFIG_PERF_EVENTS */
1869
1870static __init void enable_boot_kprobe_events(void)
1871{
1872 struct trace_array *tr = top_trace_array();
1873 struct trace_event_file *file;
1874 struct trace_kprobe *tk;
1875 struct dyn_event *pos;
1876
1877 mutex_lock(&event_mutex);
1878 for_each_trace_kprobe(tk, pos) {
1879 list_for_each_entry(file, &tr->events, list)
1880 if (file->event_call == trace_probe_event_call(&tk->tp))
1881 trace_event_enable_disable(file, 1, 0);
1882 }
1883 mutex_unlock(&event_mutex);
1884}
1885
1886static __init void setup_boot_kprobe_events(void)
1887{
1888 char *p, *cmd = kprobe_boot_events_buf;
1889 int ret;
1890
1891 strreplace(kprobe_boot_events_buf, ',', ' ');
1892
1893 while (cmd && *cmd != '\0') {
1894 p = strchr(cmd, ';');
1895 if (p)
1896 *p++ = '\0';
1897
1898 ret = create_or_delete_trace_kprobe(cmd);
1899 if (ret)
1900 pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1901
1902 cmd = p;
1903 }
1904
1905 enable_boot_kprobe_events();
1906}
1907
1908/*
1909 * Register dynevent at core_initcall. This allows kernel to setup kprobe
1910 * events in postcore_initcall without tracefs.
1911 */
1912static __init int init_kprobe_trace_early(void)
1913{
1914 int ret;
1915
1916 ret = dyn_event_register(&trace_kprobe_ops);
1917 if (ret)
1918 return ret;
1919
1920 if (register_module_notifier(&trace_kprobe_module_nb))
1921 return -EINVAL;
1922
1923 return 0;
1924}
1925core_initcall(init_kprobe_trace_early);
1926
1927/* Make a tracefs interface for controlling probe points */
1928static __init int init_kprobe_trace(void)
1929{
1930 int ret;
1931 struct dentry *entry;
1932
1933 ret = tracing_init_dentry();
1934 if (ret)
1935 return 0;
1936
1937 entry = tracefs_create_file("kprobe_events", 0644, NULL,
1938 NULL, &kprobe_events_ops);
1939
1940 /* Event list interface */
1941 if (!entry)
1942 pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1943
1944 /* Profile interface */
1945 entry = tracefs_create_file("kprobe_profile", 0444, NULL,
1946 NULL, &kprobe_profile_ops);
1947
1948 if (!entry)
1949 pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1950
1951 setup_boot_kprobe_events();
1952
1953 return 0;
1954}
1955fs_initcall(init_kprobe_trace);
1956
1957
1958#ifdef CONFIG_FTRACE_STARTUP_TEST
1959static __init struct trace_event_file *
1960find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1961{
1962 struct trace_event_file *file;
1963
1964 list_for_each_entry(file, &tr->events, list)
1965 if (file->event_call == trace_probe_event_call(&tk->tp))
1966 return file;
1967
1968 return NULL;
1969}
1970
1971/*
1972 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1973 * stage, we can do this lockless.
1974 */
1975static __init int kprobe_trace_self_tests_init(void)
1976{
1977 int ret, warn = 0;
1978 int (*target)(int, int, int, int, int, int);
1979 struct trace_kprobe *tk;
1980 struct trace_event_file *file;
1981
1982 if (tracing_is_disabled())
1983 return -ENODEV;
1984
1985 if (tracing_selftest_disabled)
1986 return 0;
1987
1988 target = kprobe_trace_selftest_target;
1989
1990 pr_info("Testing kprobe tracing: ");
1991
1992 ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
1993 if (WARN_ON_ONCE(ret)) {
1994 pr_warn("error on probing function entry.\n");
1995 warn++;
1996 } else {
1997 /* Enable trace point */
1998 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1999 if (WARN_ON_ONCE(tk == NULL)) {
2000 pr_warn("error on getting new probe.\n");
2001 warn++;
2002 } else {
2003 file = find_trace_probe_file(tk, top_trace_array());
2004 if (WARN_ON_ONCE(file == NULL)) {
2005 pr_warn("error on getting probe file.\n");
2006 warn++;
2007 } else
2008 enable_trace_kprobe(
2009 trace_probe_event_call(&tk->tp), file);
2010 }
2011 }
2012
2013 ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
2014 if (WARN_ON_ONCE(ret)) {
2015 pr_warn("error on probing function return.\n");
2016 warn++;
2017 } else {
2018 /* Enable trace point */
2019 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2020 if (WARN_ON_ONCE(tk == NULL)) {
2021 pr_warn("error on getting 2nd new probe.\n");
2022 warn++;
2023 } else {
2024 file = find_trace_probe_file(tk, top_trace_array());
2025 if (WARN_ON_ONCE(file == NULL)) {
2026 pr_warn("error on getting probe file.\n");
2027 warn++;
2028 } else
2029 enable_trace_kprobe(
2030 trace_probe_event_call(&tk->tp), file);
2031 }
2032 }
2033
2034 if (warn)
2035 goto end;
2036
2037 ret = target(1, 2, 3, 4, 5, 6);
2038
2039 /*
2040 * Not expecting an error here, the check is only to prevent the
2041 * optimizer from removing the call to target() as otherwise there
2042 * are no side-effects and the call is never performed.
2043 */
2044 if (ret != 21)
2045 warn++;
2046
2047 /* Disable trace points before removing it */
2048 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
2049 if (WARN_ON_ONCE(tk == NULL)) {
2050 pr_warn("error on getting test probe.\n");
2051 warn++;
2052 } else {
2053 if (trace_kprobe_nhit(tk) != 1) {
2054 pr_warn("incorrect number of testprobe hits\n");
2055 warn++;
2056 }
2057
2058 file = find_trace_probe_file(tk, top_trace_array());
2059 if (WARN_ON_ONCE(file == NULL)) {
2060 pr_warn("error on getting probe file.\n");
2061 warn++;
2062 } else
2063 disable_trace_kprobe(
2064 trace_probe_event_call(&tk->tp), file);
2065 }
2066
2067 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2068 if (WARN_ON_ONCE(tk == NULL)) {
2069 pr_warn("error on getting 2nd test probe.\n");
2070 warn++;
2071 } else {
2072 if (trace_kprobe_nhit(tk) != 1) {
2073 pr_warn("incorrect number of testprobe2 hits\n");
2074 warn++;
2075 }
2076
2077 file = find_trace_probe_file(tk, top_trace_array());
2078 if (WARN_ON_ONCE(file == NULL)) {
2079 pr_warn("error on getting probe file.\n");
2080 warn++;
2081 } else
2082 disable_trace_kprobe(
2083 trace_probe_event_call(&tk->tp), file);
2084 }
2085
2086 ret = create_or_delete_trace_kprobe("-:testprobe");
2087 if (WARN_ON_ONCE(ret)) {
2088 pr_warn("error on deleting a probe.\n");
2089 warn++;
2090 }
2091
2092 ret = create_or_delete_trace_kprobe("-:testprobe2");
2093 if (WARN_ON_ONCE(ret)) {
2094 pr_warn("error on deleting a probe.\n");
2095 warn++;
2096 }
2097
2098end:
2099 ret = dyn_events_release_all(&trace_kprobe_ops);
2100 if (WARN_ON_ONCE(ret)) {
2101 pr_warn("error on cleaning up probes.\n");
2102 warn++;
2103 }
2104 /*
2105 * Wait for the optimizer work to finish. Otherwise it might fiddle
2106 * with probes in already freed __init text.
2107 */
2108 wait_for_kprobe_optimizer();
2109 if (warn)
2110 pr_cont("NG: Some tests are failed. Please check them.\n");
2111 else
2112 pr_cont("OK\n");
2113 return 0;
2114}
2115
2116late_initcall(kprobe_trace_self_tests_init);
2117
2118#endif
1/*
2 * Kprobes-based tracing events
3 *
4 * Created by Masami Hiramatsu <mhiramat@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <linux/uaccess.h>
22
23#include "trace_probe.h"
24
25#define KPROBE_EVENT_SYSTEM "kprobes"
26
27/**
28 * Kprobe event core functions
29 */
30
31struct trace_probe {
32 struct list_head list;
33 struct kretprobe rp; /* Use rp.kp for kprobe use */
34 unsigned long nhit;
35 unsigned int flags; /* For TP_FLAG_* */
36 const char *symbol; /* symbol name */
37 struct ftrace_event_class class;
38 struct ftrace_event_call call;
39 ssize_t size; /* trace entry size */
40 unsigned int nr_args;
41 struct probe_arg args[];
42};
43
44#define SIZEOF_TRACE_PROBE(n) \
45 (offsetof(struct trace_probe, args) + \
46 (sizeof(struct probe_arg) * (n)))
47
48
49static __kprobes int trace_probe_is_return(struct trace_probe *tp)
50{
51 return tp->rp.handler != NULL;
52}
53
54static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
55{
56 return tp->symbol ? tp->symbol : "unknown";
57}
58
59static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
60{
61 return tp->rp.kp.offset;
62}
63
64static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
65{
66 return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
67}
68
69static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
70{
71 return !!(tp->flags & TP_FLAG_REGISTERED);
72}
73
74static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
75{
76 return !!(kprobe_gone(&tp->rp.kp));
77}
78
79static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
80 struct module *mod)
81{
82 int len = strlen(mod->name);
83 const char *name = trace_probe_symbol(tp);
84 return strncmp(mod->name, name, len) == 0 && name[len] == ':';
85}
86
87static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
88{
89 return !!strchr(trace_probe_symbol(tp), ':');
90}
91
92static int register_probe_event(struct trace_probe *tp);
93static void unregister_probe_event(struct trace_probe *tp);
94
95static DEFINE_MUTEX(probe_lock);
96static LIST_HEAD(probe_list);
97
98static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
99static int kretprobe_dispatcher(struct kretprobe_instance *ri,
100 struct pt_regs *regs);
101
102/*
103 * Allocate new trace_probe and initialize it (including kprobes).
104 */
105static struct trace_probe *alloc_trace_probe(const char *group,
106 const char *event,
107 void *addr,
108 const char *symbol,
109 unsigned long offs,
110 int nargs, bool is_return)
111{
112 struct trace_probe *tp;
113 int ret = -ENOMEM;
114
115 tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
116 if (!tp)
117 return ERR_PTR(ret);
118
119 if (symbol) {
120 tp->symbol = kstrdup(symbol, GFP_KERNEL);
121 if (!tp->symbol)
122 goto error;
123 tp->rp.kp.symbol_name = tp->symbol;
124 tp->rp.kp.offset = offs;
125 } else
126 tp->rp.kp.addr = addr;
127
128 if (is_return)
129 tp->rp.handler = kretprobe_dispatcher;
130 else
131 tp->rp.kp.pre_handler = kprobe_dispatcher;
132
133 if (!event || !is_good_name(event)) {
134 ret = -EINVAL;
135 goto error;
136 }
137
138 tp->call.class = &tp->class;
139 tp->call.name = kstrdup(event, GFP_KERNEL);
140 if (!tp->call.name)
141 goto error;
142
143 if (!group || !is_good_name(group)) {
144 ret = -EINVAL;
145 goto error;
146 }
147
148 tp->class.system = kstrdup(group, GFP_KERNEL);
149 if (!tp->class.system)
150 goto error;
151
152 INIT_LIST_HEAD(&tp->list);
153 return tp;
154error:
155 kfree(tp->call.name);
156 kfree(tp->symbol);
157 kfree(tp);
158 return ERR_PTR(ret);
159}
160
161static void free_trace_probe(struct trace_probe *tp)
162{
163 int i;
164
165 for (i = 0; i < tp->nr_args; i++)
166 traceprobe_free_probe_arg(&tp->args[i]);
167
168 kfree(tp->call.class->system);
169 kfree(tp->call.name);
170 kfree(tp->symbol);
171 kfree(tp);
172}
173
174static struct trace_probe *find_trace_probe(const char *event,
175 const char *group)
176{
177 struct trace_probe *tp;
178
179 list_for_each_entry(tp, &probe_list, list)
180 if (strcmp(tp->call.name, event) == 0 &&
181 strcmp(tp->call.class->system, group) == 0)
182 return tp;
183 return NULL;
184}
185
186/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
187static int enable_trace_probe(struct trace_probe *tp, int flag)
188{
189 int ret = 0;
190
191 tp->flags |= flag;
192 if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
193 !trace_probe_has_gone(tp)) {
194 if (trace_probe_is_return(tp))
195 ret = enable_kretprobe(&tp->rp);
196 else
197 ret = enable_kprobe(&tp->rp.kp);
198 }
199
200 return ret;
201}
202
203/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
204static void disable_trace_probe(struct trace_probe *tp, int flag)
205{
206 tp->flags &= ~flag;
207 if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
208 if (trace_probe_is_return(tp))
209 disable_kretprobe(&tp->rp);
210 else
211 disable_kprobe(&tp->rp.kp);
212 }
213}
214
215/* Internal register function - just handle k*probes and flags */
216static int __register_trace_probe(struct trace_probe *tp)
217{
218 int i, ret;
219
220 if (trace_probe_is_registered(tp))
221 return -EINVAL;
222
223 for (i = 0; i < tp->nr_args; i++)
224 traceprobe_update_arg(&tp->args[i]);
225
226 /* Set/clear disabled flag according to tp->flag */
227 if (trace_probe_is_enabled(tp))
228 tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
229 else
230 tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
231
232 if (trace_probe_is_return(tp))
233 ret = register_kretprobe(&tp->rp);
234 else
235 ret = register_kprobe(&tp->rp.kp);
236
237 if (ret == 0)
238 tp->flags |= TP_FLAG_REGISTERED;
239 else {
240 pr_warning("Could not insert probe at %s+%lu: %d\n",
241 trace_probe_symbol(tp), trace_probe_offset(tp), ret);
242 if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
243 pr_warning("This probe might be able to register after"
244 "target module is loaded. Continue.\n");
245 ret = 0;
246 } else if (ret == -EILSEQ) {
247 pr_warning("Probing address(0x%p) is not an "
248 "instruction boundary.\n",
249 tp->rp.kp.addr);
250 ret = -EINVAL;
251 }
252 }
253
254 return ret;
255}
256
257/* Internal unregister function - just handle k*probes and flags */
258static void __unregister_trace_probe(struct trace_probe *tp)
259{
260 if (trace_probe_is_registered(tp)) {
261 if (trace_probe_is_return(tp))
262 unregister_kretprobe(&tp->rp);
263 else
264 unregister_kprobe(&tp->rp.kp);
265 tp->flags &= ~TP_FLAG_REGISTERED;
266 /* Cleanup kprobe for reuse */
267 if (tp->rp.kp.symbol_name)
268 tp->rp.kp.addr = NULL;
269 }
270}
271
272/* Unregister a trace_probe and probe_event: call with locking probe_lock */
273static int unregister_trace_probe(struct trace_probe *tp)
274{
275 /* Enabled event can not be unregistered */
276 if (trace_probe_is_enabled(tp))
277 return -EBUSY;
278
279 __unregister_trace_probe(tp);
280 list_del(&tp->list);
281 unregister_probe_event(tp);
282
283 return 0;
284}
285
286/* Register a trace_probe and probe_event */
287static int register_trace_probe(struct trace_probe *tp)
288{
289 struct trace_probe *old_tp;
290 int ret;
291
292 mutex_lock(&probe_lock);
293
294 /* Delete old (same name) event if exist */
295 old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
296 if (old_tp) {
297 ret = unregister_trace_probe(old_tp);
298 if (ret < 0)
299 goto end;
300 free_trace_probe(old_tp);
301 }
302
303 /* Register new event */
304 ret = register_probe_event(tp);
305 if (ret) {
306 pr_warning("Failed to register probe event(%d)\n", ret);
307 goto end;
308 }
309
310 /* Register k*probe */
311 ret = __register_trace_probe(tp);
312 if (ret < 0)
313 unregister_probe_event(tp);
314 else
315 list_add_tail(&tp->list, &probe_list);
316
317end:
318 mutex_unlock(&probe_lock);
319 return ret;
320}
321
322/* Module notifier call back, checking event on the module */
323static int trace_probe_module_callback(struct notifier_block *nb,
324 unsigned long val, void *data)
325{
326 struct module *mod = data;
327 struct trace_probe *tp;
328 int ret;
329
330 if (val != MODULE_STATE_COMING)
331 return NOTIFY_DONE;
332
333 /* Update probes on coming module */
334 mutex_lock(&probe_lock);
335 list_for_each_entry(tp, &probe_list, list) {
336 if (trace_probe_within_module(tp, mod)) {
337 /* Don't need to check busy - this should have gone. */
338 __unregister_trace_probe(tp);
339 ret = __register_trace_probe(tp);
340 if (ret)
341 pr_warning("Failed to re-register probe %s on"
342 "%s: %d\n",
343 tp->call.name, mod->name, ret);
344 }
345 }
346 mutex_unlock(&probe_lock);
347
348 return NOTIFY_DONE;
349}
350
351static struct notifier_block trace_probe_module_nb = {
352 .notifier_call = trace_probe_module_callback,
353 .priority = 1 /* Invoked after kprobe module callback */
354};
355
356static int create_trace_probe(int argc, char **argv)
357{
358 /*
359 * Argument syntax:
360 * - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
361 * - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
362 * Fetch args:
363 * $retval : fetch return value
364 * $stack : fetch stack address
365 * $stackN : fetch Nth of stack (N:0-)
366 * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
367 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
368 * %REG : fetch register REG
369 * Dereferencing memory fetch:
370 * +|-offs(ARG) : fetch memory at ARG +|- offs address.
371 * Alias name of args:
372 * NAME=FETCHARG : set NAME as alias of FETCHARG.
373 * Type of args:
374 * FETCHARG:TYPE : use TYPE instead of unsigned long.
375 */
376 struct trace_probe *tp;
377 int i, ret = 0;
378 bool is_return = false, is_delete = false;
379 char *symbol = NULL, *event = NULL, *group = NULL;
380 char *arg;
381 unsigned long offset = 0;
382 void *addr = NULL;
383 char buf[MAX_EVENT_NAME_LEN];
384
385 /* argc must be >= 1 */
386 if (argv[0][0] == 'p')
387 is_return = false;
388 else if (argv[0][0] == 'r')
389 is_return = true;
390 else if (argv[0][0] == '-')
391 is_delete = true;
392 else {
393 pr_info("Probe definition must be started with 'p', 'r' or"
394 " '-'.\n");
395 return -EINVAL;
396 }
397
398 if (argv[0][1] == ':') {
399 event = &argv[0][2];
400 if (strchr(event, '/')) {
401 group = event;
402 event = strchr(group, '/') + 1;
403 event[-1] = '\0';
404 if (strlen(group) == 0) {
405 pr_info("Group name is not specified\n");
406 return -EINVAL;
407 }
408 }
409 if (strlen(event) == 0) {
410 pr_info("Event name is not specified\n");
411 return -EINVAL;
412 }
413 }
414 if (!group)
415 group = KPROBE_EVENT_SYSTEM;
416
417 if (is_delete) {
418 if (!event) {
419 pr_info("Delete command needs an event name.\n");
420 return -EINVAL;
421 }
422 mutex_lock(&probe_lock);
423 tp = find_trace_probe(event, group);
424 if (!tp) {
425 mutex_unlock(&probe_lock);
426 pr_info("Event %s/%s doesn't exist.\n", group, event);
427 return -ENOENT;
428 }
429 /* delete an event */
430 ret = unregister_trace_probe(tp);
431 if (ret == 0)
432 free_trace_probe(tp);
433 mutex_unlock(&probe_lock);
434 return ret;
435 }
436
437 if (argc < 2) {
438 pr_info("Probe point is not specified.\n");
439 return -EINVAL;
440 }
441 if (isdigit(argv[1][0])) {
442 if (is_return) {
443 pr_info("Return probe point must be a symbol.\n");
444 return -EINVAL;
445 }
446 /* an address specified */
447 ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
448 if (ret) {
449 pr_info("Failed to parse address.\n");
450 return ret;
451 }
452 } else {
453 /* a symbol specified */
454 symbol = argv[1];
455 /* TODO: support .init module functions */
456 ret = traceprobe_split_symbol_offset(symbol, &offset);
457 if (ret) {
458 pr_info("Failed to parse symbol.\n");
459 return ret;
460 }
461 if (offset && is_return) {
462 pr_info("Return probe must be used without offset.\n");
463 return -EINVAL;
464 }
465 }
466 argc -= 2; argv += 2;
467
468 /* setup a probe */
469 if (!event) {
470 /* Make a new event name */
471 if (symbol)
472 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
473 is_return ? 'r' : 'p', symbol, offset);
474 else
475 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
476 is_return ? 'r' : 'p', addr);
477 event = buf;
478 }
479 tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
480 is_return);
481 if (IS_ERR(tp)) {
482 pr_info("Failed to allocate trace_probe.(%d)\n",
483 (int)PTR_ERR(tp));
484 return PTR_ERR(tp);
485 }
486
487 /* parse arguments */
488 ret = 0;
489 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
490 /* Increment count for freeing args in error case */
491 tp->nr_args++;
492
493 /* Parse argument name */
494 arg = strchr(argv[i], '=');
495 if (arg) {
496 *arg++ = '\0';
497 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
498 } else {
499 arg = argv[i];
500 /* If argument name is omitted, set "argN" */
501 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
502 tp->args[i].name = kstrdup(buf, GFP_KERNEL);
503 }
504
505 if (!tp->args[i].name) {
506 pr_info("Failed to allocate argument[%d] name.\n", i);
507 ret = -ENOMEM;
508 goto error;
509 }
510
511 if (!is_good_name(tp->args[i].name)) {
512 pr_info("Invalid argument[%d] name: %s\n",
513 i, tp->args[i].name);
514 ret = -EINVAL;
515 goto error;
516 }
517
518 if (traceprobe_conflict_field_name(tp->args[i].name,
519 tp->args, i)) {
520 pr_info("Argument[%d] name '%s' conflicts with "
521 "another field.\n", i, argv[i]);
522 ret = -EINVAL;
523 goto error;
524 }
525
526 /* Parse fetch argument */
527 ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
528 is_return, true);
529 if (ret) {
530 pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
531 goto error;
532 }
533 }
534
535 ret = register_trace_probe(tp);
536 if (ret)
537 goto error;
538 return 0;
539
540error:
541 free_trace_probe(tp);
542 return ret;
543}
544
545static int release_all_trace_probes(void)
546{
547 struct trace_probe *tp;
548 int ret = 0;
549
550 mutex_lock(&probe_lock);
551 /* Ensure no probe is in use. */
552 list_for_each_entry(tp, &probe_list, list)
553 if (trace_probe_is_enabled(tp)) {
554 ret = -EBUSY;
555 goto end;
556 }
557 /* TODO: Use batch unregistration */
558 while (!list_empty(&probe_list)) {
559 tp = list_entry(probe_list.next, struct trace_probe, list);
560 unregister_trace_probe(tp);
561 free_trace_probe(tp);
562 }
563
564end:
565 mutex_unlock(&probe_lock);
566
567 return ret;
568}
569
570/* Probes listing interfaces */
571static void *probes_seq_start(struct seq_file *m, loff_t *pos)
572{
573 mutex_lock(&probe_lock);
574 return seq_list_start(&probe_list, *pos);
575}
576
577static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
578{
579 return seq_list_next(v, &probe_list, pos);
580}
581
582static void probes_seq_stop(struct seq_file *m, void *v)
583{
584 mutex_unlock(&probe_lock);
585}
586
587static int probes_seq_show(struct seq_file *m, void *v)
588{
589 struct trace_probe *tp = v;
590 int i;
591
592 seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
593 seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
594
595 if (!tp->symbol)
596 seq_printf(m, " 0x%p", tp->rp.kp.addr);
597 else if (tp->rp.kp.offset)
598 seq_printf(m, " %s+%u", trace_probe_symbol(tp),
599 tp->rp.kp.offset);
600 else
601 seq_printf(m, " %s", trace_probe_symbol(tp));
602
603 for (i = 0; i < tp->nr_args; i++)
604 seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
605 seq_printf(m, "\n");
606
607 return 0;
608}
609
610static const struct seq_operations probes_seq_op = {
611 .start = probes_seq_start,
612 .next = probes_seq_next,
613 .stop = probes_seq_stop,
614 .show = probes_seq_show
615};
616
617static int probes_open(struct inode *inode, struct file *file)
618{
619 int ret;
620
621 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
622 ret = release_all_trace_probes();
623 if (ret < 0)
624 return ret;
625 }
626
627 return seq_open(file, &probes_seq_op);
628}
629
630static ssize_t probes_write(struct file *file, const char __user *buffer,
631 size_t count, loff_t *ppos)
632{
633 return traceprobe_probes_write(file, buffer, count, ppos,
634 create_trace_probe);
635}
636
637static const struct file_operations kprobe_events_ops = {
638 .owner = THIS_MODULE,
639 .open = probes_open,
640 .read = seq_read,
641 .llseek = seq_lseek,
642 .release = seq_release,
643 .write = probes_write,
644};
645
646/* Probes profiling interfaces */
647static int probes_profile_seq_show(struct seq_file *m, void *v)
648{
649 struct trace_probe *tp = v;
650
651 seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
652 tp->rp.kp.nmissed);
653
654 return 0;
655}
656
657static const struct seq_operations profile_seq_op = {
658 .start = probes_seq_start,
659 .next = probes_seq_next,
660 .stop = probes_seq_stop,
661 .show = probes_profile_seq_show
662};
663
664static int profile_open(struct inode *inode, struct file *file)
665{
666 return seq_open(file, &profile_seq_op);
667}
668
669static const struct file_operations kprobe_profile_ops = {
670 .owner = THIS_MODULE,
671 .open = profile_open,
672 .read = seq_read,
673 .llseek = seq_lseek,
674 .release = seq_release,
675};
676
677/* Sum up total data length for dynamic arraies (strings) */
678static __kprobes int __get_data_size(struct trace_probe *tp,
679 struct pt_regs *regs)
680{
681 int i, ret = 0;
682 u32 len;
683
684 for (i = 0; i < tp->nr_args; i++)
685 if (unlikely(tp->args[i].fetch_size.fn)) {
686 call_fetch(&tp->args[i].fetch_size, regs, &len);
687 ret += len;
688 }
689
690 return ret;
691}
692
693/* Store the value of each argument */
694static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
695 struct pt_regs *regs,
696 u8 *data, int maxlen)
697{
698 int i;
699 u32 end = tp->size;
700 u32 *dl; /* Data (relative) location */
701
702 for (i = 0; i < tp->nr_args; i++) {
703 if (unlikely(tp->args[i].fetch_size.fn)) {
704 /*
705 * First, we set the relative location and
706 * maximum data length to *dl
707 */
708 dl = (u32 *)(data + tp->args[i].offset);
709 *dl = make_data_rloc(maxlen, end - tp->args[i].offset);
710 /* Then try to fetch string or dynamic array data */
711 call_fetch(&tp->args[i].fetch, regs, dl);
712 /* Reduce maximum length */
713 end += get_rloc_len(*dl);
714 maxlen -= get_rloc_len(*dl);
715 /* Trick here, convert data_rloc to data_loc */
716 *dl = convert_rloc_to_loc(*dl,
717 ent_size + tp->args[i].offset);
718 } else
719 /* Just fetching data normally */
720 call_fetch(&tp->args[i].fetch, regs,
721 data + tp->args[i].offset);
722 }
723}
724
725/* Kprobe handler */
726static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
727{
728 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
729 struct kprobe_trace_entry_head *entry;
730 struct ring_buffer_event *event;
731 struct ring_buffer *buffer;
732 int size, dsize, pc;
733 unsigned long irq_flags;
734 struct ftrace_event_call *call = &tp->call;
735
736 tp->nhit++;
737
738 local_save_flags(irq_flags);
739 pc = preempt_count();
740
741 dsize = __get_data_size(tp, regs);
742 size = sizeof(*entry) + tp->size + dsize;
743
744 event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
745 size, irq_flags, pc);
746 if (!event)
747 return;
748
749 entry = ring_buffer_event_data(event);
750 entry->ip = (unsigned long)kp->addr;
751 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
752
753 if (!filter_current_check_discard(buffer, call, entry, event))
754 trace_nowake_buffer_unlock_commit_regs(buffer, event,
755 irq_flags, pc, regs);
756}
757
758/* Kretprobe handler */
759static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
760 struct pt_regs *regs)
761{
762 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
763 struct kretprobe_trace_entry_head *entry;
764 struct ring_buffer_event *event;
765 struct ring_buffer *buffer;
766 int size, pc, dsize;
767 unsigned long irq_flags;
768 struct ftrace_event_call *call = &tp->call;
769
770 local_save_flags(irq_flags);
771 pc = preempt_count();
772
773 dsize = __get_data_size(tp, regs);
774 size = sizeof(*entry) + tp->size + dsize;
775
776 event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
777 size, irq_flags, pc);
778 if (!event)
779 return;
780
781 entry = ring_buffer_event_data(event);
782 entry->func = (unsigned long)tp->rp.kp.addr;
783 entry->ret_ip = (unsigned long)ri->ret_addr;
784 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
785
786 if (!filter_current_check_discard(buffer, call, entry, event))
787 trace_nowake_buffer_unlock_commit_regs(buffer, event,
788 irq_flags, pc, regs);
789}
790
791/* Event entry printers */
792enum print_line_t
793print_kprobe_event(struct trace_iterator *iter, int flags,
794 struct trace_event *event)
795{
796 struct kprobe_trace_entry_head *field;
797 struct trace_seq *s = &iter->seq;
798 struct trace_probe *tp;
799 u8 *data;
800 int i;
801
802 field = (struct kprobe_trace_entry_head *)iter->ent;
803 tp = container_of(event, struct trace_probe, call.event);
804
805 if (!trace_seq_printf(s, "%s: (", tp->call.name))
806 goto partial;
807
808 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
809 goto partial;
810
811 if (!trace_seq_puts(s, ")"))
812 goto partial;
813
814 data = (u8 *)&field[1];
815 for (i = 0; i < tp->nr_args; i++)
816 if (!tp->args[i].type->print(s, tp->args[i].name,
817 data + tp->args[i].offset, field))
818 goto partial;
819
820 if (!trace_seq_puts(s, "\n"))
821 goto partial;
822
823 return TRACE_TYPE_HANDLED;
824partial:
825 return TRACE_TYPE_PARTIAL_LINE;
826}
827
828enum print_line_t
829print_kretprobe_event(struct trace_iterator *iter, int flags,
830 struct trace_event *event)
831{
832 struct kretprobe_trace_entry_head *field;
833 struct trace_seq *s = &iter->seq;
834 struct trace_probe *tp;
835 u8 *data;
836 int i;
837
838 field = (struct kretprobe_trace_entry_head *)iter->ent;
839 tp = container_of(event, struct trace_probe, call.event);
840
841 if (!trace_seq_printf(s, "%s: (", tp->call.name))
842 goto partial;
843
844 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
845 goto partial;
846
847 if (!trace_seq_puts(s, " <- "))
848 goto partial;
849
850 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
851 goto partial;
852
853 if (!trace_seq_puts(s, ")"))
854 goto partial;
855
856 data = (u8 *)&field[1];
857 for (i = 0; i < tp->nr_args; i++)
858 if (!tp->args[i].type->print(s, tp->args[i].name,
859 data + tp->args[i].offset, field))
860 goto partial;
861
862 if (!trace_seq_puts(s, "\n"))
863 goto partial;
864
865 return TRACE_TYPE_HANDLED;
866partial:
867 return TRACE_TYPE_PARTIAL_LINE;
868}
869
870
871static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
872{
873 int ret, i;
874 struct kprobe_trace_entry_head field;
875 struct trace_probe *tp = (struct trace_probe *)event_call->data;
876
877 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
878 /* Set argument names as fields */
879 for (i = 0; i < tp->nr_args; i++) {
880 ret = trace_define_field(event_call, tp->args[i].type->fmttype,
881 tp->args[i].name,
882 sizeof(field) + tp->args[i].offset,
883 tp->args[i].type->size,
884 tp->args[i].type->is_signed,
885 FILTER_OTHER);
886 if (ret)
887 return ret;
888 }
889 return 0;
890}
891
892static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
893{
894 int ret, i;
895 struct kretprobe_trace_entry_head field;
896 struct trace_probe *tp = (struct trace_probe *)event_call->data;
897
898 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
899 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
900 /* Set argument names as fields */
901 for (i = 0; i < tp->nr_args; i++) {
902 ret = trace_define_field(event_call, tp->args[i].type->fmttype,
903 tp->args[i].name,
904 sizeof(field) + tp->args[i].offset,
905 tp->args[i].type->size,
906 tp->args[i].type->is_signed,
907 FILTER_OTHER);
908 if (ret)
909 return ret;
910 }
911 return 0;
912}
913
914static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
915{
916 int i;
917 int pos = 0;
918
919 const char *fmt, *arg;
920
921 if (!trace_probe_is_return(tp)) {
922 fmt = "(%lx)";
923 arg = "REC->" FIELD_STRING_IP;
924 } else {
925 fmt = "(%lx <- %lx)";
926 arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
927 }
928
929 /* When len=0, we just calculate the needed length */
930#define LEN_OR_ZERO (len ? len - pos : 0)
931
932 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
933
934 for (i = 0; i < tp->nr_args; i++) {
935 pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
936 tp->args[i].name, tp->args[i].type->fmt);
937 }
938
939 pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
940
941 for (i = 0; i < tp->nr_args; i++) {
942 if (strcmp(tp->args[i].type->name, "string") == 0)
943 pos += snprintf(buf + pos, LEN_OR_ZERO,
944 ", __get_str(%s)",
945 tp->args[i].name);
946 else
947 pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
948 tp->args[i].name);
949 }
950
951#undef LEN_OR_ZERO
952
953 /* return the length of print_fmt */
954 return pos;
955}
956
957static int set_print_fmt(struct trace_probe *tp)
958{
959 int len;
960 char *print_fmt;
961
962 /* First: called with 0 length to calculate the needed length */
963 len = __set_print_fmt(tp, NULL, 0);
964 print_fmt = kmalloc(len + 1, GFP_KERNEL);
965 if (!print_fmt)
966 return -ENOMEM;
967
968 /* Second: actually write the @print_fmt */
969 __set_print_fmt(tp, print_fmt, len + 1);
970 tp->call.print_fmt = print_fmt;
971
972 return 0;
973}
974
975#ifdef CONFIG_PERF_EVENTS
976
977/* Kprobe profile handler */
978static __kprobes void kprobe_perf_func(struct kprobe *kp,
979 struct pt_regs *regs)
980{
981 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
982 struct ftrace_event_call *call = &tp->call;
983 struct kprobe_trace_entry_head *entry;
984 struct hlist_head *head;
985 int size, __size, dsize;
986 int rctx;
987
988 dsize = __get_data_size(tp, regs);
989 __size = sizeof(*entry) + tp->size + dsize;
990 size = ALIGN(__size + sizeof(u32), sizeof(u64));
991 size -= sizeof(u32);
992 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
993 "profile buffer not large enough"))
994 return;
995
996 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
997 if (!entry)
998 return;
999
1000 entry->ip = (unsigned long)kp->addr;
1001 memset(&entry[1], 0, dsize);
1002 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
1003
1004 head = this_cpu_ptr(call->perf_events);
1005 perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
1006}
1007
1008/* Kretprobe profile handler */
1009static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
1010 struct pt_regs *regs)
1011{
1012 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1013 struct ftrace_event_call *call = &tp->call;
1014 struct kretprobe_trace_entry_head *entry;
1015 struct hlist_head *head;
1016 int size, __size, dsize;
1017 int rctx;
1018
1019 dsize = __get_data_size(tp, regs);
1020 __size = sizeof(*entry) + tp->size + dsize;
1021 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1022 size -= sizeof(u32);
1023 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1024 "profile buffer not large enough"))
1025 return;
1026
1027 entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1028 if (!entry)
1029 return;
1030
1031 entry->func = (unsigned long)tp->rp.kp.addr;
1032 entry->ret_ip = (unsigned long)ri->ret_addr;
1033 store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
1034
1035 head = this_cpu_ptr(call->perf_events);
1036 perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
1037}
1038#endif /* CONFIG_PERF_EVENTS */
1039
1040static __kprobes
1041int kprobe_register(struct ftrace_event_call *event,
1042 enum trace_reg type, void *data)
1043{
1044 struct trace_probe *tp = (struct trace_probe *)event->data;
1045
1046 switch (type) {
1047 case TRACE_REG_REGISTER:
1048 return enable_trace_probe(tp, TP_FLAG_TRACE);
1049 case TRACE_REG_UNREGISTER:
1050 disable_trace_probe(tp, TP_FLAG_TRACE);
1051 return 0;
1052
1053#ifdef CONFIG_PERF_EVENTS
1054 case TRACE_REG_PERF_REGISTER:
1055 return enable_trace_probe(tp, TP_FLAG_PROFILE);
1056 case TRACE_REG_PERF_UNREGISTER:
1057 disable_trace_probe(tp, TP_FLAG_PROFILE);
1058 return 0;
1059 case TRACE_REG_PERF_OPEN:
1060 case TRACE_REG_PERF_CLOSE:
1061 case TRACE_REG_PERF_ADD:
1062 case TRACE_REG_PERF_DEL:
1063 return 0;
1064#endif
1065 }
1066 return 0;
1067}
1068
1069static __kprobes
1070int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1071{
1072 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1073
1074 if (tp->flags & TP_FLAG_TRACE)
1075 kprobe_trace_func(kp, regs);
1076#ifdef CONFIG_PERF_EVENTS
1077 if (tp->flags & TP_FLAG_PROFILE)
1078 kprobe_perf_func(kp, regs);
1079#endif
1080 return 0; /* We don't tweek kernel, so just return 0 */
1081}
1082
1083static __kprobes
1084int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1085{
1086 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1087
1088 if (tp->flags & TP_FLAG_TRACE)
1089 kretprobe_trace_func(ri, regs);
1090#ifdef CONFIG_PERF_EVENTS
1091 if (tp->flags & TP_FLAG_PROFILE)
1092 kretprobe_perf_func(ri, regs);
1093#endif
1094 return 0; /* We don't tweek kernel, so just return 0 */
1095}
1096
1097static struct trace_event_functions kretprobe_funcs = {
1098 .trace = print_kretprobe_event
1099};
1100
1101static struct trace_event_functions kprobe_funcs = {
1102 .trace = print_kprobe_event
1103};
1104
1105static int register_probe_event(struct trace_probe *tp)
1106{
1107 struct ftrace_event_call *call = &tp->call;
1108 int ret;
1109
1110 /* Initialize ftrace_event_call */
1111 INIT_LIST_HEAD(&call->class->fields);
1112 if (trace_probe_is_return(tp)) {
1113 call->event.funcs = &kretprobe_funcs;
1114 call->class->define_fields = kretprobe_event_define_fields;
1115 } else {
1116 call->event.funcs = &kprobe_funcs;
1117 call->class->define_fields = kprobe_event_define_fields;
1118 }
1119 if (set_print_fmt(tp) < 0)
1120 return -ENOMEM;
1121 ret = register_ftrace_event(&call->event);
1122 if (!ret) {
1123 kfree(call->print_fmt);
1124 return -ENODEV;
1125 }
1126 call->flags = 0;
1127 call->class->reg = kprobe_register;
1128 call->data = tp;
1129 ret = trace_add_event_call(call);
1130 if (ret) {
1131 pr_info("Failed to register kprobe event: %s\n", call->name);
1132 kfree(call->print_fmt);
1133 unregister_ftrace_event(&call->event);
1134 }
1135 return ret;
1136}
1137
1138static void unregister_probe_event(struct trace_probe *tp)
1139{
1140 /* tp->event is unregistered in trace_remove_event_call() */
1141 trace_remove_event_call(&tp->call);
1142 kfree(tp->call.print_fmt);
1143}
1144
1145/* Make a debugfs interface for controlling probe points */
1146static __init int init_kprobe_trace(void)
1147{
1148 struct dentry *d_tracer;
1149 struct dentry *entry;
1150
1151 if (register_module_notifier(&trace_probe_module_nb))
1152 return -EINVAL;
1153
1154 d_tracer = tracing_init_dentry();
1155 if (!d_tracer)
1156 return 0;
1157
1158 entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
1159 NULL, &kprobe_events_ops);
1160
1161 /* Event list interface */
1162 if (!entry)
1163 pr_warning("Could not create debugfs "
1164 "'kprobe_events' entry\n");
1165
1166 /* Profile interface */
1167 entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
1168 NULL, &kprobe_profile_ops);
1169
1170 if (!entry)
1171 pr_warning("Could not create debugfs "
1172 "'kprobe_profile' entry\n");
1173 return 0;
1174}
1175fs_initcall(init_kprobe_trace);
1176
1177
1178#ifdef CONFIG_FTRACE_STARTUP_TEST
1179
1180/*
1181 * The "__used" keeps gcc from removing the function symbol
1182 * from the kallsyms table.
1183 */
1184static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
1185 int a4, int a5, int a6)
1186{
1187 return a1 + a2 + a3 + a4 + a5 + a6;
1188}
1189
1190static __init int kprobe_trace_self_tests_init(void)
1191{
1192 int ret, warn = 0;
1193 int (*target)(int, int, int, int, int, int);
1194 struct trace_probe *tp;
1195
1196 target = kprobe_trace_selftest_target;
1197
1198 pr_info("Testing kprobe tracing: ");
1199
1200 ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
1201 "$stack $stack0 +0($stack)",
1202 create_trace_probe);
1203 if (WARN_ON_ONCE(ret)) {
1204 pr_warning("error on probing function entry.\n");
1205 warn++;
1206 } else {
1207 /* Enable trace point */
1208 tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
1209 if (WARN_ON_ONCE(tp == NULL)) {
1210 pr_warning("error on getting new probe.\n");
1211 warn++;
1212 } else
1213 enable_trace_probe(tp, TP_FLAG_TRACE);
1214 }
1215
1216 ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1217 "$retval", create_trace_probe);
1218 if (WARN_ON_ONCE(ret)) {
1219 pr_warning("error on probing function return.\n");
1220 warn++;
1221 } else {
1222 /* Enable trace point */
1223 tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
1224 if (WARN_ON_ONCE(tp == NULL)) {
1225 pr_warning("error on getting new probe.\n");
1226 warn++;
1227 } else
1228 enable_trace_probe(tp, TP_FLAG_TRACE);
1229 }
1230
1231 if (warn)
1232 goto end;
1233
1234 ret = target(1, 2, 3, 4, 5, 6);
1235
1236 /* Disable trace points before removing it */
1237 tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
1238 if (WARN_ON_ONCE(tp == NULL)) {
1239 pr_warning("error on getting test probe.\n");
1240 warn++;
1241 } else
1242 disable_trace_probe(tp, TP_FLAG_TRACE);
1243
1244 tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
1245 if (WARN_ON_ONCE(tp == NULL)) {
1246 pr_warning("error on getting 2nd test probe.\n");
1247 warn++;
1248 } else
1249 disable_trace_probe(tp, TP_FLAG_TRACE);
1250
1251 ret = traceprobe_command("-:testprobe", create_trace_probe);
1252 if (WARN_ON_ONCE(ret)) {
1253 pr_warning("error on deleting a probe.\n");
1254 warn++;
1255 }
1256
1257 ret = traceprobe_command("-:testprobe2", create_trace_probe);
1258 if (WARN_ON_ONCE(ret)) {
1259 pr_warning("error on deleting a probe.\n");
1260 warn++;
1261 }
1262
1263end:
1264 release_all_trace_probes();
1265 if (warn)
1266 pr_cont("NG: Some tests are failed. Please check them.\n");
1267 else
1268 pr_cont("OK\n");
1269 return 0;
1270}
1271
1272late_initcall(kprobe_trace_self_tests_init);
1273
1274#endif