/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

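/*
 * Illustrative note: a trace_kprobe carrying its fetch args is allocated
 * as one block covering the struct plus the flexible tp.args[] tail,
 * e.g. for a probe with two arguments:
 *
 *	tk = kzalloc(SIZEOF_TRACE_KPROBE(2), GFP_KERNEL);
 *
 * i.e. offsetof(struct trace_kprobe, tp.args) plus room for two
 * struct probe_arg entries.
 */
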
static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

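/*
 * Note: tk->nhit is a per-cpu counter bumped locklessly from the probe
 * dispatchers via raw_cpu_inc(); the helper above folds the per-cpu
 * values into a single total, e.g. for the kprobe_profile interface
 * further below.
 */
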
bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;

	return kprobe_on_func_entry(tk->rp.kp.addr,
			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
			tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
}

bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
	unsigned long addr;

	if (tk->symbol) {
		addr = (unsigned long)
			kallsyms_lookup_name(trace_kprobe_symbol(tk));
		addr += tk->rp.kp.offset;
	} else {
		addr = (unsigned long)tk->rp.kp.addr;
	}
	return within_error_injection_list(addr);
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;
	long		offset;
	unsigned long	addr;
};

unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}

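/*
 * A symbol_cache backs "@SYM[+|-offs]" fetch arguments: the symbol is
 * resolved once at parse time and re-resolved via update_symbol_cache()
 * as modules come and go, so the fetch_symbol_*() helpers below can read
 * *(SYM + offs) cheaply from probe context.
 */
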
/*
 * Kprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					  void *offset, void *dest)	\
{									\
	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
				(unsigned int)((unsigned long)offset));	\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,	\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	if (probe_kernel_address(addr, retval))			\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	long ret;

	if (!maxlen)
		return;

	/*
	 * Try to get the string again, since the string can change while
	 * we are probing it.
	 */
	ret = strncpy_from_unsafe(dst, addr, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		dst[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
	}
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));

/* Return the length of the string -- including the terminating null byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)					\
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{									\
	struct symbol_cache *sc = data;					\
	if (sc->addr)							\
		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
	else								\
		*(type *)dest = 0;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8		NULL
#define fetch_file_offset_u16		NULL
#define fetch_file_offset_u32		NULL
#define fetch_file_offset_u64		NULL
#define fetch_file_offset_string	NULL
#define fetch_file_offset_string_size	NULL

/* Fetch type information table */
static const struct fetch_type kprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};

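/*
 * These entries are what a "FETCHARG:TYPE" suffix resolves against;
 * for illustration (x86 register names assumed):
 *
 *	p:myprobe do_sys_open flags=%cx:x32 mode=+4($stack):u16
 *
 * stores 'flags' as a hex u32 and 'mode' as a u16 loaded through the
 * dereferencing fetch.
 */
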
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int maxactive,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
	} else
		tk->tp.flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}
 out:
	return ret;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}

	/*
	 * If tk is not added to any list, it must be a local trace_kprobe
	 * created with perf_event_open. We don't need to wait for these
	 * trace_kprobes.
	 */
	if (list_empty(&tk->list))
		wait = 0;
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure disabled (all running handlers are finished).
		 * This is not only for kfree(), but also the caller,
		 * trace_remove_event_call() supposes it for releasing
		 * event_call related objects, which will be accessed in
		 * the kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);

	/* Set/clear disabled flag according to tp->flags */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0)
		tk->tp.flags |= TP_FLAG_REGISTERED;
	else {
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
			pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
				tk->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: call with probe_lock held */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* An enabled event cannot be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete the old event if one with the same name exists */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
				   tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier callback, checking events on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}

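/*
 * For illustration: an auto-generated name such as "p_ext4:ext4_sync_fs_0"
 * (from a module-qualified symbol) becomes "p_ext4_ext4_sync_fs_0",
 * since ':' and '.' are not valid characters in event names.
 */
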
static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm	: fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
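	/*
	 * A concrete usage sketch (x86 register names and the usual
	 * tracefs mount point assumed):
	 *
	 *  echo 'p:myprobe do_sys_open dfd=%ax filename=%dx' \
	 *		> /sys/kernel/debug/tracing/kprobe_events
	 *  echo 'r100:myretprobe do_sys_open $retval' \
	 *		>> /sys/kernel/debug/tracing/kprobe_events
	 *  echo '-:myprobe' >> /sys/kernel/debug/tracing/kprobe_events
	 */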
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	int maxactive = 0;
	char *arg;
	long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	event = strchr(&argv[0][1], ':');
	if (event) {
		event[0] = '\0';
		event++;
	}
	if (is_return && isdigit(argv[0][1])) {
		ret = kstrtouint(&argv[0][1], 0, &maxactive);
		if (ret) {
			pr_info("Failed to parse maxactive.\n");
			return ret;
		}
		/*
		 * kretprobe instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			pr_info("Maxactive is too big (%d > %d).\n",
				maxactive, KRETPROBE_MAXACTIVE_MAX);
			return -E2BIG;
		}
	}

	if (event) {
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}

	/*
	 * Try to parse an address; if that fails, try to read the
	 * input as a symbol.
	 */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret || offset < 0 || offset > UINT_MAX) {
			pr_info("Failed to parse either an address or a symbol.\n");
			return ret;
		}
		if (offset && is_return &&
		    !kprobe_on_func_entry(NULL, symbol, offset)) {
			pr_info("Given offset is not valid for return probe.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
			       argc, is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
						   tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						 is_return, true,
						 kprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}

static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
			trace_event_name(&tk->tp.call));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

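/*
 * Note that opening kprobe_events for writing with O_TRUNC (e.g. a plain
 * shell redirection "echo ... > kprobe_events") clears all existing
 * probes first via release_all_trace_kprobes(); appending with ">>"
 * only adds new ones.
 */
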
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, " %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};

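/*
 * kprobe_profile prints one line per event: name, hit count and (for
 * kretprobes) the missed count, e.g. (values illustrative):
 *
 *  myprobe                                          123               0
 */
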
/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}


static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static int
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call)) {
		unsigned long orig_ip = instruction_pointer(regs);
		int ret;

		ret = trace_call_bpf(call, regs);

		/*
		 * We need to check and see if we modified the pc of the
		 * pt_regs, and if so clear the kprobe and return 1 so that we
		 * don't do the single stepping.
		 * The ftrace kprobe handler leaves it up to us to re-enable
		 * preemption here before returning if we've modified the ip.
		 */
		if (orig_ip != instruction_pointer(regs)) {
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		}
		if (!ret)
			return 0;
	}

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return 0;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return 0;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
	return 0;
}
NOKPROBE_SYMBOL(kprobe_perf_func);

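/*
 * Note on the size math above: perf prepends a u32 size header to each
 * record, so the payload is padded such that header plus payload land on
 * a u64 boundary, e.g. __size = 34 gives ALIGN(34 + 4, 8) - 4 = 36.
 */
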
/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif	/* CONFIG_PERF_EVENTS */

/*
 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_kprobe/disable_trace_kprobe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(tk, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
	int ret = 0;

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		ret = kprobe_perf_func(tk, regs);
#endif
	return ret;
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static inline void init_trace_event_call(struct trace_kprobe *tk,
					 struct trace_event_call *call)
{
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}

	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
	call->data = tk;
}

static int register_kprobe_event(struct trace_kprobe *tk)
{
	struct trace_event_call *call = &tk->tp.call;
	int ret = 0;

	init_trace_event_call(tk, call);

	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
		return -ENOMEM;
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}
	return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	int ret;

	/* tp->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tk->tp.call);
	if (!ret)
		kfree(tk->tp.call.print_fmt);
	return ret;
}

#ifdef CONFIG_PERF_EVENTS
/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
			  bool is_return)
{
	struct trace_kprobe *tk;
	int ret;
	char *event;

	/*
	 * local trace_kprobes are not added to probe_list, so they are never
	 * searched in find_trace_kprobe(). Therefore, there is no concern of
	 * duplicated name here.
	 */
	event = func ? func : "DUMMY_EVENT";

	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
				offs, 0 /* maxactive */, 0 /* nargs */,
				is_return);

	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return ERR_CAST(tk);
	}

	init_trace_event_call(tk, &tk->tp.call);

	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		goto error;

	return &tk->tp.call;
error:
	free_trace_kprobe(tk);
	return ERR_PTR(ret);
}

void destroy_local_trace_kprobe(struct trace_event_call *event_call)
{
	struct trace_kprobe *tk;

	tk = container_of(event_call, struct trace_kprobe, tp.call);

	if (trace_probe_is_enabled(&tk->tp)) {
		WARN_ON(1);
		return;
	}

	__unregister_trace_kprobe(tk);
	free_trace_kprobe(tk);
}
#endif	/* CONFIG_PERF_EVENTS */

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);

#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table. 'noinline' makes sure that there
 * isn't an inlined version used by the test method below.
 */
static __used __init noinline int
kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tk->tp.call)
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this lockless.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
				"$stack $stack0 +0($stack)",
				create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
				"$retval", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/*
	 * Not expecting an error here, the check is only to prevent the
	 * optimizer from removing the call to target() as otherwise there
	 * are no side-effects and the call is never performed.
	 */
	if (ret != 21)
		warn++;

	/* Disable trace points before removing them */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	ret = trace_run_command("-:testprobe", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = trace_run_command("-:testprobe2", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_kprobes();
	/*
	 * Wait for the optimizer work to finish. Otherwise it might fiddle
	 * with probes in already freed __init text.
	 */
	wait_for_kprobe_optimizer();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif
1/*
2 * Kprobes-based tracing events
3 *
4 * Created by Masami Hiramatsu <mhiramat@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <linux/uaccess.h>
22
23#include "trace_probe.h"
24
25#define KPROBE_EVENT_SYSTEM "kprobes"
26
27/**
28 * Kprobe event core functions
29 */
30
31struct trace_probe {
32 struct list_head list;
33 struct kretprobe rp; /* Use rp.kp for kprobe use */
34 unsigned long nhit;
35 unsigned int flags; /* For TP_FLAG_* */
36 const char *symbol; /* symbol name */
37 struct ftrace_event_class class;
38 struct ftrace_event_call call;
39 ssize_t size; /* trace entry size */
40 unsigned int nr_args;
41 struct probe_arg args[];
42};
43
44#define SIZEOF_TRACE_PROBE(n) \
45 (offsetof(struct trace_probe, args) + \
46 (sizeof(struct probe_arg) * (n)))
47
48
49static __kprobes int trace_probe_is_return(struct trace_probe *tp)
50{
51 return tp->rp.handler != NULL;
52}
53
54static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
55{
56 return tp->symbol ? tp->symbol : "unknown";
57}
58
59static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
60{
61 return tp->rp.kp.offset;
62}
63
64static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
65{
66 return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
67}
68
69static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
70{
71 return !!(tp->flags & TP_FLAG_REGISTERED);
72}
73
74static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
75{
76 return !!(kprobe_gone(&tp->rp.kp));
77}
78
79static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
80 struct module *mod)
81{
82 int len = strlen(mod->name);
83 const char *name = trace_probe_symbol(tp);
84 return strncmp(mod->name, name, len) == 0 && name[len] == ':';
85}
86
87static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
88{
89 return !!strchr(trace_probe_symbol(tp), ':');
90}
91
92static int register_probe_event(struct trace_probe *tp);
93static void unregister_probe_event(struct trace_probe *tp);
94
95static DEFINE_MUTEX(probe_lock);
96static LIST_HEAD(probe_list);
97
98static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
99static int kretprobe_dispatcher(struct kretprobe_instance *ri,
100 struct pt_regs *regs);
101
102/*
103 * Allocate new trace_probe and initialize it (including kprobes).
104 */
105static struct trace_probe *alloc_trace_probe(const char *group,
106 const char *event,
107 void *addr,
108 const char *symbol,
109 unsigned long offs,
110 int nargs, bool is_return)
111{
112 struct trace_probe *tp;
113 int ret = -ENOMEM;
114
115 tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
116 if (!tp)
117 return ERR_PTR(ret);
118
119 if (symbol) {
120 tp->symbol = kstrdup(symbol, GFP_KERNEL);
121 if (!tp->symbol)
122 goto error;
123 tp->rp.kp.symbol_name = tp->symbol;
124 tp->rp.kp.offset = offs;
125 } else
126 tp->rp.kp.addr = addr;
127
128 if (is_return)
129 tp->rp.handler = kretprobe_dispatcher;
130 else
131 tp->rp.kp.pre_handler = kprobe_dispatcher;
132
133 if (!event || !is_good_name(event)) {
134 ret = -EINVAL;
135 goto error;
136 }
137
138 tp->call.class = &tp->class;
139 tp->call.name = kstrdup(event, GFP_KERNEL);
140 if (!tp->call.name)
141 goto error;
142
143 if (!group || !is_good_name(group)) {
144 ret = -EINVAL;
145 goto error;
146 }
147
148 tp->class.system = kstrdup(group, GFP_KERNEL);
149 if (!tp->class.system)
150 goto error;
151
152 INIT_LIST_HEAD(&tp->list);
153 return tp;
154error:
155 kfree(tp->call.name);
156 kfree(tp->symbol);
157 kfree(tp);
158 return ERR_PTR(ret);
159}
160
161static void free_trace_probe(struct trace_probe *tp)
162{
163 int i;
164
165 for (i = 0; i < tp->nr_args; i++)
166 traceprobe_free_probe_arg(&tp->args[i]);
167
168 kfree(tp->call.class->system);
169 kfree(tp->call.name);
170 kfree(tp->symbol);
171 kfree(tp);
172}
173
174static struct trace_probe *find_trace_probe(const char *event,
175 const char *group)
176{
177 struct trace_probe *tp;
178
179 list_for_each_entry(tp, &probe_list, list)
180 if (strcmp(tp->call.name, event) == 0 &&
181 strcmp(tp->call.class->system, group) == 0)
182 return tp;
183 return NULL;
184}
185
186/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
187static int enable_trace_probe(struct trace_probe *tp, int flag)
188{
189 int ret = 0;
190
191 tp->flags |= flag;
192 if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
193 !trace_probe_has_gone(tp)) {
194 if (trace_probe_is_return(tp))
195 ret = enable_kretprobe(&tp->rp);
196 else
197 ret = enable_kprobe(&tp->rp.kp);
198 }
199
200 return ret;
201}
202
203/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
204static void disable_trace_probe(struct trace_probe *tp, int flag)
205{
206 tp->flags &= ~flag;
207 if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
208 if (trace_probe_is_return(tp))
209 disable_kretprobe(&tp->rp);
210 else
211 disable_kprobe(&tp->rp.kp);
212 }
213}
214
215/* Internal register function - just handle k*probes and flags */
216static int __register_trace_probe(struct trace_probe *tp)
217{
218 int i, ret;
219
220 if (trace_probe_is_registered(tp))
221 return -EINVAL;
222
223 for (i = 0; i < tp->nr_args; i++)
224 traceprobe_update_arg(&tp->args[i]);
225
226 /* Set/clear disabled flag according to tp->flag */
227 if (trace_probe_is_enabled(tp))
228 tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
229 else
230 tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
231
232 if (trace_probe_is_return(tp))
233 ret = register_kretprobe(&tp->rp);
234 else
235 ret = register_kprobe(&tp->rp.kp);
236
237 if (ret == 0)
238 tp->flags |= TP_FLAG_REGISTERED;
239 else {
240 pr_warning("Could not insert probe at %s+%lu: %d\n",
241 trace_probe_symbol(tp), trace_probe_offset(tp), ret);
242 if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
243 pr_warning("This probe might be able to register after"
244 "target module is loaded. Continue.\n");
245 ret = 0;
246 } else if (ret == -EILSEQ) {
247 pr_warning("Probing address(0x%p) is not an "
248 "instruction boundary.\n",
249 tp->rp.kp.addr);
250 ret = -EINVAL;
251 }
252 }
253
254 return ret;
255}
256
257/* Internal unregister function - just handle k*probes and flags */
258static void __unregister_trace_probe(struct trace_probe *tp)
259{
260 if (trace_probe_is_registered(tp)) {
261 if (trace_probe_is_return(tp))
262 unregister_kretprobe(&tp->rp);
263 else
264 unregister_kprobe(&tp->rp.kp);
265 tp->flags &= ~TP_FLAG_REGISTERED;
266 /* Cleanup kprobe for reuse */
267 if (tp->rp.kp.symbol_name)
268 tp->rp.kp.addr = NULL;
269 }
270}
271
272/* Unregister a trace_probe and probe_event: call with locking probe_lock */
273static int unregister_trace_probe(struct trace_probe *tp)
274{
275 /* Enabled event can not be unregistered */
276 if (trace_probe_is_enabled(tp))
277 return -EBUSY;
278
279 __unregister_trace_probe(tp);
280 list_del(&tp->list);
281 unregister_probe_event(tp);
282
283 return 0;
284}
285
286/* Register a trace_probe and probe_event */
287static int register_trace_probe(struct trace_probe *tp)
288{
289 struct trace_probe *old_tp;
290 int ret;
291
292 mutex_lock(&probe_lock);
293
294 /* Delete old (same name) event if exist */
295 old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
296 if (old_tp) {
297 ret = unregister_trace_probe(old_tp);
298 if (ret < 0)
299 goto end;
300 free_trace_probe(old_tp);
301 }
302
303 /* Register new event */
304 ret = register_probe_event(tp);
305 if (ret) {
306 pr_warning("Failed to register probe event(%d)\n", ret);
307 goto end;
308 }
309
310 /* Register k*probe */
311 ret = __register_trace_probe(tp);
312 if (ret < 0)
313 unregister_probe_event(tp);
314 else
315 list_add_tail(&tp->list, &probe_list);
316
317end:
318 mutex_unlock(&probe_lock);
319 return ret;
320}
321
322/* Module notifier call back, checking event on the module */
323static int trace_probe_module_callback(struct notifier_block *nb,
324 unsigned long val, void *data)
325{
326 struct module *mod = data;
327 struct trace_probe *tp;
328 int ret;
329
330 if (val != MODULE_STATE_COMING)
331 return NOTIFY_DONE;
332
333 /* Update probes on coming module */
334 mutex_lock(&probe_lock);
335 list_for_each_entry(tp, &probe_list, list) {
336 if (trace_probe_within_module(tp, mod)) {
337 /* Don't need to check busy - this should have gone. */
338 __unregister_trace_probe(tp);
339 ret = __register_trace_probe(tp);
340 if (ret)
341 pr_warning("Failed to re-register probe %s on"
342 "%s: %d\n",
343 tp->call.name, mod->name, ret);
344 }
345 }
346 mutex_unlock(&probe_lock);
347
348 return NOTIFY_DONE;
349}
350
351static struct notifier_block trace_probe_module_nb = {
352 .notifier_call = trace_probe_module_callback,
353 .priority = 1 /* Invoked after kprobe module callback */
354};
355
356static int create_trace_probe(int argc, char **argv)
357{
358 /*
359 * Argument syntax:
360 * - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
361 * - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
362 * Fetch args:
363 * $retval : fetch return value
364 * $stack : fetch stack address
365 * $stackN : fetch Nth of stack (N:0-)
366 * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
367 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
368 * %REG : fetch register REG
369 * Dereferencing memory fetch:
370 * +|-offs(ARG) : fetch memory at ARG +|- offs address.
371 * Alias name of args:
372 * NAME=FETCHARG : set NAME as alias of FETCHARG.
373 * Type of args:
374 * FETCHARG:TYPE : use TYPE instead of unsigned long.
375 */
376 struct trace_probe *tp;
377 int i, ret = 0;
378 bool is_return = false, is_delete = false;
379 char *symbol = NULL, *event = NULL, *group = NULL;
380 char *arg;
381 unsigned long offset = 0;
382 void *addr = NULL;
383 char buf[MAX_EVENT_NAME_LEN];
384
385 /* argc must be >= 1 */
386 if (argv[0][0] == 'p')
387 is_return = false;
388 else if (argv[0][0] == 'r')
389 is_return = true;
390 else if (argv[0][0] == '-')
391 is_delete = true;
	else {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tp = find_trace_probe(event, group);
		if (!tp) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_probe(tp);
		if (ret == 0)
			free_trace_probe(tp);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tp));
		return PTR_ERR(tp);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Increment count for freeing args in error case */
		tp->nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			tp->args[i].name = kstrdup(buf, GFP_KERNEL);
		}

		if (!tp->args[i].name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(tp->args[i].name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, tp->args[i].name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(tp->args[i].name,
						   tp->args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tp->size, &tp->args[i],
						 is_return, true);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}

static int release_all_trace_probes(void)
{
	struct trace_probe *tp;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tp, &probe_list, list)
		if (trace_probe_is_enabled(tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i;

	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);

	if (!tp->symbol)
		seq_printf(m, " 0x%p", tp->rp.kp.addr);
	else if (tp->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
			   tp->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_probe_symbol(tp));

	for (i = 0; i < tp->nr_args; i++)
		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
	seq_printf(m, "\n");

	return 0;
}
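
/*
 * Each probe is listed back in roughly the same syntax it was created
 * with, so reading the file yields lines such as (hypothetical event):
 *
 *   p:kprobes/myprobe do_sys_open dfd=%ax mode=+4($stack)
 */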

static const struct seq_operations probes_seq_op = {
	.start = probes_seq_start,
	.next = probes_seq_next,
	.stop = probes_seq_stop,
	.show = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_probes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}
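
/*
 * Note: because probes_open() truncates via release_all_trace_probes(),
 * opening the file with O_TRUNC (e.g. "echo > kprobe_events") clears every
 * probe, and fails with -EBUSY while any probe is still enabled.
 */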

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
				       create_trace_probe);
}

static const struct file_operations kprobe_events_ops = {
	.owner = THIS_MODULE,
	.open = probes_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
	.write = probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}
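
/*
 * kprobe_profile emits one line per event: the event name (left-justified),
 * the hit count (nhit) and the number of missed executions (rp.kp.nmissed),
 * e.g. (hypothetical counts):
 *
 *   myprobe                                                  100            0
 */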

static const struct seq_operations profile_seq_op = {
	.start = probes_seq_start,
	.next = probes_seq_next,
	.stop = probes_seq_stop,
	.show = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner = THIS_MODULE,
	.open = profile_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* Sum up total data length for dynamic arrays (strings) */
static __kprobes int __get_data_size(struct trace_probe *tp,
				     struct pt_regs *regs)
{
	int i, ret = 0;
	u32 len;

	for (i = 0; i < tp->nr_args; i++)
		if (unlikely(tp->args[i].fetch_size.fn)) {
			call_fetch(&tp->args[i].fetch_size, regs, &len);
			ret += len;
		}

	return ret;
}

/* Store the value of each argument */
static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
				       struct pt_regs *regs,
				       u8 *data, int maxlen)
{
	int i;
	u32 end = tp->size;
	u32 *dl;	/* Data (relative) location */

	for (i = 0; i < tp->nr_args; i++) {
		if (unlikely(tp->args[i].fetch_size.fn)) {
			/*
			 * First, we set the relative location and
			 * maximum data length to *dl
			 */
			dl = (u32 *)(data + tp->args[i].offset);
			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
			/* Then try to fetch string or dynamic array data */
			call_fetch(&tp->args[i].fetch, regs, dl);
			/* Reduce maximum length */
			end += get_rloc_len(*dl);
			maxlen -= get_rloc_len(*dl);
			/* Trick here, convert data_rloc to data_loc */
			*dl = convert_rloc_to_loc(*dl,
				 ent_size + tp->args[i].offset);
		} else
			/* Just fetching data normally */
			call_fetch(&tp->args[i].fetch, regs,
				   data + tp->args[i].offset);
	}
}
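
/*
 * Sketch of the resulting record layout (assuming one word-sized argument
 * followed by one string argument):
 *
 *   [entry header][long value][u32 data_loc][string bytes...]
 *
 * The u32 slot for a dynamic argument packs the data length in its high
 * half and the data offset (relative to the entry) in its low half, so the
 * string bytes themselves live past the fixed-size argument area.
 */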

/* Kprobe handler */
static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	tp->nhit++;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
						  size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)kp->addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit_regs(buffer, event,
						       irq_flags, pc, regs);
}

/* Kretprobe handler */
static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
					   struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(tp, regs);
	size = sizeof(*entry) + tp->size + dsize;

	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
						  size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit_regs(buffer, event,
						       irq_flags, pc, regs);
}

/* Event entry printers */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++) {
		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
					 tp->args[i].name,
					 sizeof(field) + tp->args[i].offset,
					 tp->args[i].type->size,
					 tp->args[i].type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
{
	int i;
	int pos = 0;

	const char *fmt, *arg;

	if (!trace_probe_is_return(tp)) {
		fmt = "(%lx)";
		arg = "REC->" FIELD_STRING_IP;
	} else {
		fmt = "(%lx <- %lx)";
		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
	}

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);

	for (i = 0; i < tp->nr_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
				tp->args[i].name, tp->args[i].type->fmt);
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);

	for (i = 0; i < tp->nr_args; i++) {
		if (strcmp(tp->args[i].type->name, "string") == 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", __get_str(%s)",
					tp->args[i].name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
					tp->args[i].name);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int set_print_fmt(struct trace_probe *tp)
{
	int len;
	char *print_fmt;

	/* First: called with 0 length to calculate the needed length */
	len = __set_print_fmt(tp, NULL, 0);
	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_print_fmt(tp, print_fmt, len + 1);
	tp->call.print_fmt = print_fmt;

	return 0;
}
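
/*
 * For instance, a kprobe event with a single argument named "count" of the
 * default type would end up with a print_fmt along the lines of (the exact
 * per-argument format depends on the fetch type table):
 *
 *   "(%lx) count=%lx", REC->ip, REC->count
 */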

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static __kprobes void kprobe_perf_func(struct kprobe *kp,
				       struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
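	/*
	 * Round the record up so that, together with the u32 size word that
	 * perf prepends, the total stays u64-aligned (see perf's trace
	 * buffer handling); the u32 is then subtracted back out.
	 */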
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)kp->addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
}

/* Kretprobe profile handler */
static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	dsize = __get_data_size(tp, regs);
	__size = sizeof(*entry) + tp->size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "profile buffer not large enough"))
		return;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);

	head = this_cpu_ptr(call->perf_events);
	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
}
#endif	/* CONFIG_PERF_EVENTS */

static __kprobes
int kprobe_register(struct ftrace_event_call *event,
		    enum trace_reg type, void *data)
{
	struct trace_probe *tp = (struct trace_probe *)event->data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_probe(tp, TP_FLAG_TRACE);
	case TRACE_REG_UNREGISTER:
		disable_trace_probe(tp, TP_FLAG_TRACE);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_probe(tp, TP_FLAG_PROFILE);
	case TRACE_REG_PERF_UNREGISTER:
		disable_trace_probe(tp, TP_FLAG_PROFILE);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(kp, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_perf_func(kp, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static struct trace_event_functions kretprobe_funcs = {
	.trace = print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace = print_kprobe_event
};

static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_probe_is_return(tp)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(tp) < 0)
		return -ENOMEM;
	ret = register_ftrace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = 0;
	call->class->reg = kprobe_register;
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		kfree(call->print_fmt);
		unregister_ftrace_event(&call->event);
	}
	return ret;
}

static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
	kfree(tp->call.print_fmt);
}

/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_probe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warning("Could not create debugfs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);

#ifdef CONFIG_FTRACE_STARTUP_TEST

/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table.
 */
static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
					       int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_probe *tp;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				 "$stack $stack0 +0($stack)",
				 create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warning("error on getting new probe.\n");
			warn++;
		} else
			enable_trace_probe(tp, TP_FLAG_TRACE);
	}

	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				 "$retval", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tp == NULL)) {
			pr_warning("error on getting new probe.\n");
			warn++;
		} else
			enable_trace_probe(tp, TP_FLAG_TRACE);
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/* Disable trace points before removing them */
	tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warning("error on getting test probe.\n");
		warn++;
	} else
		disable_trace_probe(tp, TP_FLAG_TRACE);

	tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tp == NULL)) {
		pr_warning("error on getting 2nd test probe.\n");
		warn++;
	} else
		disable_trace_probe(tp, TP_FLAG_TRACE);

	ret = traceprobe_command("-:testprobe", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_probe);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_probes();
	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif