1/*
2 * uprobes-based tracing events
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 *
17 * Copyright (C) IBM Corporation, 2010-2012
18 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
19 */
20
21#include <linux/module.h>
22#include <linux/uaccess.h>
23#include <linux/uprobes.h>
24#include <linux/namei.h>
25#include <linux/string.h>
26
27#include "trace_probe.h"
28
29#define UPROBE_EVENT_SYSTEM "uprobes"
30
31struct uprobe_trace_entry_head {
32 struct trace_entry ent;
33 unsigned long vaddr[];
34};
35
36#define SIZEOF_TRACE_ENTRY(is_return) \
37 (sizeof(struct uprobe_trace_entry_head) + \
38 sizeof(unsigned long) * (is_return ? 2 : 1))
39
40#define DATAOF_TRACE_ENTRY(entry, is_return) \
41 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
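
/*
 * A minimal sketch of the record layout these macros assume (the data
 * portion holds the fetched probe arguments):
 *
 *   [ trace_entry ent | vaddr[0] (, vaddr[1]) | probe argument data ]
 *                                              ^
 *        SIZEOF_TRACE_ENTRY() bytes in; DATAOF_TRACE_ENTRY() points here
 *
 * A return probe records two addresses (the probed function and the
 * return site); a regular probe records only the instruction pointer.
 */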
42
43struct trace_uprobe_filter {
44 rwlock_t rwlock;
45 int nr_systemwide;
46 struct list_head perf_events;
47};
48
49/*
50 * uprobe event core functions
51 */
52struct trace_uprobe {
53 struct list_head list;
54 struct trace_uprobe_filter filter;
55 struct uprobe_consumer consumer;
56 struct inode *inode;
57 char *filename;
58 unsigned long offset;
59 unsigned long nhit;
60 struct trace_probe tp;
61};
62
63#define SIZEOF_TRACE_UPROBE(n) \
64 (offsetof(struct trace_uprobe, tp.args) + \
65 (sizeof(struct probe_arg) * (n)))
66
67static int register_uprobe_event(struct trace_uprobe *tu);
68static int unregister_uprobe_event(struct trace_uprobe *tu);
69
70static DEFINE_MUTEX(uprobe_lock);
71static LIST_HEAD(uprobe_list);
72
73struct uprobe_dispatch_data {
74 struct trace_uprobe *tu;
75 unsigned long bp_addr;
76};
77
78static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
79static int uretprobe_dispatcher(struct uprobe_consumer *con,
80 unsigned long func, struct pt_regs *regs);
81
82#ifdef CONFIG_STACK_GROWSUP
83static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
84{
85 return addr - (n * sizeof(long));
86}
87#else
88static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
89{
90 return addr + (n * sizeof(long));
91}
92#endif
93
94static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
95{
96 unsigned long ret;
97 unsigned long addr = user_stack_pointer(regs);
98
99 addr = adjust_stack_addr(addr, n);
100
101 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
102 return 0;
103
104 return ret;
105}
106
107/*
108 * Uprobes-specific fetch functions
109 */
110#define DEFINE_FETCH_stack(type) \
111static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs, \
112 void *offset, void *dest) \
113{ \
114 *(type *)dest = (type)get_user_stack_nth(regs, \
115 ((unsigned long)offset)); \
116}
117DEFINE_BASIC_FETCH_FUNCS(stack)
118/* No string on the stack entry */
119#define fetch_stack_string NULL
120#define fetch_stack_string_size NULL
121
122#define DEFINE_FETCH_memory(type) \
123static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs, \
124 void *addr, void *dest) \
125{ \
126 type retval; \
127 void __user *vaddr = (void __force __user *) addr; \
128 \
129 if (copy_from_user(&retval, vaddr, sizeof(type))) \
130 *(type *)dest = 0; \
131 else \
132 *(type *) dest = retval; \
133}
134DEFINE_BASIC_FETCH_FUNCS(memory)
135/*
136 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
137 * length and relative data location.
138 */
139static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
140 void *addr, void *dest)
141{
142 long ret;
143 u32 rloc = *(u32 *)dest;
144 int maxlen = get_rloc_len(rloc);
145 u8 *dst = get_rloc_data(dest);
146 void __user *src = (void __force __user *) addr;
147
148 if (!maxlen)
149 return;
150
151 ret = strncpy_from_user(dst, src, maxlen);
152
153 if (ret < 0) { /* Failed to fetch string */
154 ((u8 *)get_rloc_data(dest))[0] = '\0';
155 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
156 } else {
157 *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
158 }
159}
160
161static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
162 void *addr, void *dest)
163{
164 int len;
165 void __user *vaddr = (void __force __user *) addr;
166
167 len = strnlen_user(vaddr, MAX_STRING_SIZE);
168
169 if (len == 0 || len > MAX_STRING_SIZE) /* Failed to check length */
170 *(u32 *)dest = 0;
171 else
172 *(u32 *)dest = len;
173}
174
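/*
 * "@+OFFSET" fetch arguments are file offsets within the probed binary.
 * udd->bp_addr is the virtual address where the breakpoint hit and
 * tu->offset is its file offset, so their difference is the runtime load
 * base of the object; adding the requested file offset to that base
 * yields the user virtual address to read.
 */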
175static unsigned long translate_user_vaddr(void *file_offset)
176{
177 unsigned long base_addr;
178 struct uprobe_dispatch_data *udd;
179
180 udd = (void *) current->utask->vaddr;
181
182 base_addr = udd->bp_addr - udd->tu->offset;
183 return base_addr + (unsigned long)file_offset;
184}
185
186#define DEFINE_FETCH_file_offset(type) \
187static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs, \
188 void *offset, void *dest)\
189{ \
190 void *vaddr = (void *)translate_user_vaddr(offset); \
191 \
192 FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest); \
193}
194DEFINE_BASIC_FETCH_FUNCS(file_offset)
195DEFINE_FETCH_file_offset(string)
196DEFINE_FETCH_file_offset(string_size)
197
198/* Fetch type information table */
199static const struct fetch_type uprobes_fetch_type_table[] = {
200 /* Special types */
201 [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
202 sizeof(u32), 1, "__data_loc char[]"),
203 [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
204 string_size, sizeof(u32), 0, "u32"),
205 /* Basic types */
206 ASSIGN_FETCH_TYPE(u8, u8, 0),
207 ASSIGN_FETCH_TYPE(u16, u16, 0),
208 ASSIGN_FETCH_TYPE(u32, u32, 0),
209 ASSIGN_FETCH_TYPE(u64, u64, 0),
210 ASSIGN_FETCH_TYPE(s8, u8, 1),
211 ASSIGN_FETCH_TYPE(s16, u16, 1),
212 ASSIGN_FETCH_TYPE(s32, u32, 1),
213 ASSIGN_FETCH_TYPE(s64, u64, 1),
214
215 ASSIGN_FETCH_TYPE_END
216};
217
218static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
219{
220 rwlock_init(&filter->rwlock);
221 filter->nr_systemwide = 0;
222 INIT_LIST_HEAD(&filter->perf_events);
223}
224
225static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
226{
227 return !filter->nr_systemwide && list_empty(&filter->perf_events);
228}
229
230static inline bool is_ret_probe(struct trace_uprobe *tu)
231{
232 return tu->consumer.ret_handler != NULL;
233}
234
235/*
236 * Allocate new trace_uprobe and initialize it (including uprobes).
237 */
238static struct trace_uprobe *
239alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
240{
241 struct trace_uprobe *tu;
242
243 if (!event || !is_good_name(event))
244 return ERR_PTR(-EINVAL);
245
246 if (!group || !is_good_name(group))
247 return ERR_PTR(-EINVAL);
248
249 tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
250 if (!tu)
251 return ERR_PTR(-ENOMEM);
252
253 tu->tp.call.class = &tu->tp.class;
254 tu->tp.call.name = kstrdup(event, GFP_KERNEL);
255 if (!tu->tp.call.name)
256 goto error;
257
258 tu->tp.class.system = kstrdup(group, GFP_KERNEL);
259 if (!tu->tp.class.system)
260 goto error;
261
262 INIT_LIST_HEAD(&tu->list);
263 INIT_LIST_HEAD(&tu->tp.files);
264 tu->consumer.handler = uprobe_dispatcher;
265 if (is_ret)
266 tu->consumer.ret_handler = uretprobe_dispatcher;
267 init_trace_uprobe_filter(&tu->filter);
268 return tu;
269
270error:
271 kfree(tu->tp.call.name);
272 kfree(tu);
273
274 return ERR_PTR(-ENOMEM);
275}
276
277static void free_trace_uprobe(struct trace_uprobe *tu)
278{
279 int i;
280
281 for (i = 0; i < tu->tp.nr_args; i++)
282 traceprobe_free_probe_arg(&tu->tp.args[i]);
283
284 iput(tu->inode);
285 kfree(tu->tp.call.class->system);
286 kfree(tu->tp.call.name);
287 kfree(tu->filename);
288 kfree(tu);
289}
290
291static struct trace_uprobe *find_probe_event(const char *event, const char *group)
292{
293 struct trace_uprobe *tu;
294
295 list_for_each_entry(tu, &uprobe_list, list)
296 if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
297 strcmp(tu->tp.call.class->system, group) == 0)
298 return tu;
299
300 return NULL;
301}
302
303/* Unregister a trace_uprobe and probe_event; must be called with uprobe_lock held */
304static int unregister_trace_uprobe(struct trace_uprobe *tu)
305{
306 int ret;
307
308 ret = unregister_uprobe_event(tu);
309 if (ret)
310 return ret;
311
312 list_del(&tu->list);
313 free_trace_uprobe(tu);
314 return 0;
315}
316
317/* Register a trace_uprobe and probe_event */
318static int register_trace_uprobe(struct trace_uprobe *tu)
319{
320 struct trace_uprobe *old_tu;
321 int ret;
322
323 mutex_lock(&uprobe_lock);
324
325 /* register as an event */
326 old_tu = find_probe_event(trace_event_name(&tu->tp.call),
327 tu->tp.call.class->system);
328 if (old_tu) {
329 /* delete old event */
330 ret = unregister_trace_uprobe(old_tu);
331 if (ret)
332 goto end;
333 }
334
335 ret = register_uprobe_event(tu);
336 if (ret) {
337 pr_warn("Failed to register probe event(%d)\n", ret);
338 goto end;
339 }
340
341 list_add_tail(&tu->list, &uprobe_list);
342
343end:
344 mutex_unlock(&uprobe_lock);
345
346 return ret;
347}
348
349/*
350 * Argument syntax:
351 * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
352 *
353 * - Remove uprobe: -:[GRP/]EVENT
354 */
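/*
 * For example, via tracefs (path, offset and registers are illustrative
 * only; register names are architecture specific):
 *
 *   echo 'p:bash_probe /bin/bash:0x4245c0 %ip %ax' > uprobe_events
 *   echo 'r:bash_ret /bin/bash:0x4245c0 $retval' >> uprobe_events
 *   echo '-:bash_probe' >> uprobe_events
 */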
355static int create_trace_uprobe(int argc, char **argv)
356{
357 struct trace_uprobe *tu;
358 struct inode *inode;
359 char *arg, *event, *group, *filename;
360 char buf[MAX_EVENT_NAME_LEN];
361 struct path path;
362 unsigned long offset;
363 bool is_delete, is_return;
364 int i, ret;
365
366 inode = NULL;
367 ret = 0;
368 is_delete = false;
369 is_return = false;
370 event = NULL;
371 group = NULL;
372
373 /* argc must be >= 1 */
374 if (argv[0][0] == '-')
375 is_delete = true;
376 else if (argv[0][0] == 'r')
377 is_return = true;
378 else if (argv[0][0] != 'p') {
379 pr_info("Probe definition must be started with 'p', 'r' or '-'.\n");
380 return -EINVAL;
381 }
382
383 if (argv[0][1] == ':') {
384 event = &argv[0][2];
385 arg = strchr(event, '/');
386
387 if (arg) {
388 group = event;
389 event = arg + 1;
390 event[-1] = '\0';
391
392 if (strlen(group) == 0) {
393 pr_info("Group name is not specified\n");
394 return -EINVAL;
395 }
396 }
397 if (strlen(event) == 0) {
398 pr_info("Event name is not specified\n");
399 return -EINVAL;
400 }
401 }
402 if (!group)
403 group = UPROBE_EVENT_SYSTEM;
404
405 if (is_delete) {
406 int ret;
407
408 if (!event) {
409 pr_info("Delete command needs an event name.\n");
410 return -EINVAL;
411 }
412 mutex_lock(&uprobe_lock);
413 tu = find_probe_event(event, group);
414
415 if (!tu) {
416 mutex_unlock(&uprobe_lock);
417 pr_info("Event %s/%s doesn't exist.\n", group, event);
418 return -ENOENT;
419 }
420 /* delete an event */
421 ret = unregister_trace_uprobe(tu);
422 mutex_unlock(&uprobe_lock);
423 return ret;
424 }
425
426 if (argc < 2) {
427 pr_info("Probe point is not specified.\n");
428 return -EINVAL;
429 }
430 if (isdigit(argv[1][0])) {
431 pr_info("probe point must be have a filename.\n");
432 return -EINVAL;
433 }
434 arg = strchr(argv[1], ':');
435 if (!arg) {
436 ret = -EINVAL;
437 goto fail_address_parse;
438 }
439
440 *arg++ = '\0';
441 filename = argv[1];
442 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
443 if (ret)
444 goto fail_address_parse;
445
446 inode = igrab(d_inode(path.dentry));
447 path_put(&path);
448
449 if (!inode || !S_ISREG(inode->i_mode)) {
450 ret = -EINVAL;
451 goto fail_address_parse;
452 }
453
454 ret = kstrtoul(arg, 0, &offset);
455 if (ret)
456 goto fail_address_parse;
457
458 argc -= 2;
459 argv += 2;
460
461 /* setup a probe */
462 if (!event) {
463 char *tail;
464 char *ptr;
465
466 tail = kstrdup(kbasename(filename), GFP_KERNEL);
467 if (!tail) {
468 ret = -ENOMEM;
469 goto fail_address_parse;
470 }
471
472 ptr = strpbrk(tail, ".-_");
473 if (ptr)
474 *ptr = '\0';
475
476 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
477 event = buf;
478 kfree(tail);
479 }
480
481 tu = alloc_trace_uprobe(group, event, argc, is_return);
482 if (IS_ERR(tu)) {
483 pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
484 ret = PTR_ERR(tu);
485 goto fail_address_parse;
486 }
487 tu->offset = offset;
488 tu->inode = inode;
489 tu->filename = kstrdup(filename, GFP_KERNEL);
490
491 if (!tu->filename) {
492 pr_info("Failed to allocate filename.\n");
493 ret = -ENOMEM;
494 goto error;
495 }
496
497 /* parse arguments */
498 ret = 0;
499 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
500 struct probe_arg *parg = &tu->tp.args[i];
501
502 /* Increment count for freeing args in error case */
503 tu->tp.nr_args++;
504
505 /* Parse argument name */
506 arg = strchr(argv[i], '=');
507 if (arg) {
508 *arg++ = '\0';
509 parg->name = kstrdup(argv[i], GFP_KERNEL);
510 } else {
511 arg = argv[i];
512 /* If argument name is omitted, set "argN" */
513 snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
514 parg->name = kstrdup(buf, GFP_KERNEL);
515 }
516
517 if (!parg->name) {
518 pr_info("Failed to allocate argument[%d] name.\n", i);
519 ret = -ENOMEM;
520 goto error;
521 }
522
523 if (!is_good_name(parg->name)) {
524 pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
525 ret = -EINVAL;
526 goto error;
527 }
528
529 if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
530 pr_info("Argument[%d] name '%s' conflicts with "
531 "another field.\n", i, argv[i]);
532 ret = -EINVAL;
533 goto error;
534 }
535
536 /* Parse fetch argument */
537 ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
538 is_return, false,
539 uprobes_fetch_type_table);
540 if (ret) {
541 pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
542 goto error;
543 }
544 }
545
546 ret = register_trace_uprobe(tu);
547 if (ret)
548 goto error;
549 return 0;
550
551error:
552 free_trace_uprobe(tu);
553 return ret;
554
555fail_address_parse:
556 iput(inode);
557
558 pr_info("Failed to parse address or file.\n");
559
560 return ret;
561}
562
563static int cleanup_all_probes(void)
564{
565 struct trace_uprobe *tu;
566 int ret = 0;
567
568 mutex_lock(&uprobe_lock);
569 while (!list_empty(&uprobe_list)) {
570 tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
571 ret = unregister_trace_uprobe(tu);
572 if (ret)
573 break;
574 }
575 mutex_unlock(&uprobe_lock);
576 return ret;
577}
578
579/* Probes listing interfaces */
580static void *probes_seq_start(struct seq_file *m, loff_t *pos)
581{
582 mutex_lock(&uprobe_lock);
583 return seq_list_start(&uprobe_list, *pos);
584}
585
586static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
587{
588 return seq_list_next(v, &uprobe_list, pos);
589}
590
591static void probes_seq_stop(struct seq_file *m, void *v)
592{
593 mutex_unlock(&uprobe_lock);
594}
595
596static int probes_seq_show(struct seq_file *m, void *v)
597{
598 struct trace_uprobe *tu = v;
599 char c = is_ret_probe(tu) ? 'r' : 'p';
600 int i;
601
602 seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
603 trace_event_name(&tu->tp.call));
604 seq_printf(m, " %s:", tu->filename);
605
606 /* Don't print "0x (null)" when offset is 0 */
607 if (tu->offset) {
608 seq_printf(m, "0x%p", (void *)tu->offset);
609 } else {
610 switch (sizeof(void *)) {
611 case 4:
612 seq_printf(m, "0x00000000");
613 break;
614 case 8:
615 default:
616 seq_printf(m, "0x0000000000000000");
617 break;
618 }
619 }
620
621 for (i = 0; i < tu->tp.nr_args; i++)
622 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
623
624 seq_putc(m, '\n');
625 return 0;
626}
627
628static const struct seq_operations probes_seq_op = {
629 .start = probes_seq_start,
630 .next = probes_seq_next,
631 .stop = probes_seq_stop,
632 .show = probes_seq_show
633};
634
635static int probes_open(struct inode *inode, struct file *file)
636{
637 int ret;
638
639 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
640 ret = cleanup_all_probes();
641 if (ret)
642 return ret;
643 }
644
645 return seq_open(file, &probes_seq_op);
646}
647
648static ssize_t probes_write(struct file *file, const char __user *buffer,
649 size_t count, loff_t *ppos)
650{
651 return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
652}
653
654static const struct file_operations uprobe_events_ops = {
655 .owner = THIS_MODULE,
656 .open = probes_open,
657 .read = seq_read,
658 .llseek = seq_lseek,
659 .release = seq_release,
660 .write = probes_write,
661};
662
663/* Probes profiling interfaces */
664static int probes_profile_seq_show(struct seq_file *m, void *v)
665{
666 struct trace_uprobe *tu = v;
667
668 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
669 trace_event_name(&tu->tp.call), tu->nhit);
670 return 0;
671}
672
673static const struct seq_operations profile_seq_op = {
674 .start = probes_seq_start,
675 .next = probes_seq_next,
676 .stop = probes_seq_stop,
677 .show = probes_profile_seq_show
678};
679
680static int profile_open(struct inode *inode, struct file *file)
681{
682 return seq_open(file, &profile_seq_op);
683}
684
685static const struct file_operations uprobe_profile_ops = {
686 .owner = THIS_MODULE,
687 .open = profile_open,
688 .read = seq_read,
689 .llseek = seq_lseek,
690 .release = seq_release,
691};
692
693struct uprobe_cpu_buffer {
694 struct mutex mutex;
695 void *buf;
696};
697static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
698static int uprobe_buffer_refcnt;
699
700static int uprobe_buffer_init(void)
701{
702 int cpu, err_cpu;
703
704 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
705 if (uprobe_cpu_buffer == NULL)
706 return -ENOMEM;
707
708 for_each_possible_cpu(cpu) {
709 struct page *p = alloc_pages_node(cpu_to_node(cpu),
710 GFP_KERNEL, 0);
711 if (p == NULL) {
712 err_cpu = cpu;
713 goto err;
714 }
715 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
716 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
717 }
718
719 return 0;
720
721err:
722 for_each_possible_cpu(cpu) {
723 if (cpu == err_cpu)
724 break;
725 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
726 }
727
728 free_percpu(uprobe_cpu_buffer);
729 return -ENOMEM;
730}
731
732static int uprobe_buffer_enable(void)
733{
734 int ret = 0;
735
736 BUG_ON(!mutex_is_locked(&event_mutex));
737
738 if (uprobe_buffer_refcnt++ == 0) {
739 ret = uprobe_buffer_init();
740 if (ret < 0)
741 uprobe_buffer_refcnt--;
742 }
743
744 return ret;
745}
746
747static void uprobe_buffer_disable(void)
748{
749 int cpu;
750
751 BUG_ON(!mutex_is_locked(&event_mutex));
752
753 if (--uprobe_buffer_refcnt == 0) {
754 for_each_possible_cpu(cpu)
755 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
756 cpu)->buf);
757
758 free_percpu(uprobe_cpu_buffer);
759 uprobe_cpu_buffer = NULL;
760 }
761}
762
763static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
764{
765 struct uprobe_cpu_buffer *ucb;
766 int cpu;
767
768 cpu = raw_smp_processor_id();
769 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
770
771 /*
772 * Use per-cpu buffers for fastest access, but we might migrate
773 * so the mutex makes sure we have sole access to it.
774 */
775 mutex_lock(&ucb->mutex);
776
777 return ucb;
778}
779
780static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
781{
782 mutex_unlock(&ucb->mutex);
783}
784
785static void __uprobe_trace_func(struct trace_uprobe *tu,
786 unsigned long func, struct pt_regs *regs,
787 struct uprobe_cpu_buffer *ucb, int dsize,
788 struct trace_event_file *trace_file)
789{
790 struct uprobe_trace_entry_head *entry;
791 struct ring_buffer_event *event;
792 struct ring_buffer *buffer;
793 void *data;
794 int size, esize;
795 struct trace_event_call *call = &tu->tp.call;
796
797 WARN_ON(call != trace_file->event_call);
798
799 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
800 return;
801
802 if (trace_trigger_soft_disabled(trace_file))
803 return;
804
805 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
806 size = esize + tu->tp.size + dsize;
807 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
808 call->event.type, size, 0, 0);
809 if (!event)
810 return;
811
812 entry = ring_buffer_event_data(event);
813 if (is_ret_probe(tu)) {
814 entry->vaddr[0] = func;
815 entry->vaddr[1] = instruction_pointer(regs);
816 data = DATAOF_TRACE_ENTRY(entry, true);
817 } else {
818 entry->vaddr[0] = instruction_pointer(regs);
819 data = DATAOF_TRACE_ENTRY(entry, false);
820 }
821
822 memcpy(data, ucb->buf, tu->tp.size + dsize);
823
824 event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
825}
826
827/* uprobe handler */
828static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
829 struct uprobe_cpu_buffer *ucb, int dsize)
830{
831 struct event_file_link *link;
832
833 if (is_ret_probe(tu))
834 return 0;
835
836 rcu_read_lock();
837 list_for_each_entry_rcu(link, &tu->tp.files, list)
838 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
839 rcu_read_unlock();
840
841 return 0;
842}
843
844static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
845 struct pt_regs *regs,
846 struct uprobe_cpu_buffer *ucb, int dsize)
847{
848 struct event_file_link *link;
849
850 rcu_read_lock();
851 list_for_each_entry_rcu(link, &tu->tp.files, list)
852 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
853 rcu_read_unlock();
854}
855
856/* Event entry printers */
857static enum print_line_t
858print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
859{
860 struct uprobe_trace_entry_head *entry;
861 struct trace_seq *s = &iter->seq;
862 struct trace_uprobe *tu;
863 u8 *data;
864 int i;
865
866 entry = (struct uprobe_trace_entry_head *)iter->ent;
867 tu = container_of(event, struct trace_uprobe, tp.call.event);
868
869 if (is_ret_probe(tu)) {
870 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
871 trace_event_name(&tu->tp.call),
872 entry->vaddr[1], entry->vaddr[0]);
873 data = DATAOF_TRACE_ENTRY(entry, true);
874 } else {
875 trace_seq_printf(s, "%s: (0x%lx)",
876 trace_event_name(&tu->tp.call),
877 entry->vaddr[0]);
878 data = DATAOF_TRACE_ENTRY(entry, false);
879 }
880
881 for (i = 0; i < tu->tp.nr_args; i++) {
882 struct probe_arg *parg = &tu->tp.args[i];
883
884 if (!parg->type->print(s, parg->name, data + parg->offset, entry))
885 goto out;
886 }
887
888 trace_seq_putc(s, '\n');
889
890 out:
891 return trace_handle_return(s);
892}
893
894typedef bool (*filter_func_t)(struct uprobe_consumer *self,
895 enum uprobe_filter_ctx ctx,
896 struct mm_struct *mm);
897
898static int
899probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
900 filter_func_t filter)
901{
902 bool enabled = trace_probe_is_enabled(&tu->tp);
903 struct event_file_link *link = NULL;
904 int ret;
905
906 if (file) {
907 if (tu->tp.flags & TP_FLAG_PROFILE)
908 return -EINTR;
909
910 link = kmalloc(sizeof(*link), GFP_KERNEL);
911 if (!link)
912 return -ENOMEM;
913
914 link->file = file;
915 list_add_tail_rcu(&link->list, &tu->tp.files);
916
917 tu->tp.flags |= TP_FLAG_TRACE;
918 } else {
919 if (tu->tp.flags & TP_FLAG_TRACE)
920 return -EINTR;
921
922 tu->tp.flags |= TP_FLAG_PROFILE;
923 }
924
925 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
926
927 if (enabled)
928 return 0;
929
930 ret = uprobe_buffer_enable();
931 if (ret)
932 goto err_flags;
933
934 tu->consumer.filter = filter;
935 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
936 if (ret)
937 goto err_buffer;
938
939 return 0;
940
941 err_buffer:
942 uprobe_buffer_disable();
943
944 err_flags:
945 if (file) {
946 list_del(&link->list);
947 kfree(link);
948 tu->tp.flags &= ~TP_FLAG_TRACE;
949 } else {
950 tu->tp.flags &= ~TP_FLAG_PROFILE;
951 }
952 return ret;
953}
954
955static void
956probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
957{
958 if (!trace_probe_is_enabled(&tu->tp))
959 return;
960
961 if (file) {
962 struct event_file_link *link;
963
964 link = find_event_file_link(&tu->tp, file);
965 if (!link)
966 return;
967
968 list_del_rcu(&link->list);
969 /* synchronize with u{,ret}probe_trace_func */
970 synchronize_sched();
971 kfree(link);
972
973 if (!list_empty(&tu->tp.files))
974 return;
975 }
976
977 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
978
979 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
980 tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
981
982 uprobe_buffer_disable();
983}
984
985static int uprobe_event_define_fields(struct trace_event_call *event_call)
986{
987 int ret, i, size;
988 struct uprobe_trace_entry_head field;
989 struct trace_uprobe *tu = event_call->data;
990
991 if (is_ret_probe(tu)) {
992 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
993 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
994 size = SIZEOF_TRACE_ENTRY(true);
995 } else {
996 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
997 size = SIZEOF_TRACE_ENTRY(false);
998 }
999 /* Set argument names as fields */
1000 for (i = 0; i < tu->tp.nr_args; i++) {
1001 struct probe_arg *parg = &tu->tp.args[i];
1002
1003 ret = trace_define_field(event_call, parg->type->fmttype,
1004 parg->name, size + parg->offset,
1005 parg->type->size, parg->type->is_signed,
1006 FILTER_OTHER);
1007
1008 if (ret)
1009 return ret;
1010 }
1011 return 0;
1012}
1013
1014#ifdef CONFIG_PERF_EVENTS
1015static bool
1016__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1017{
1018 struct perf_event *event;
1019
1020 if (filter->nr_systemwide)
1021 return true;
1022
1023 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1024 if (event->hw.target->mm == mm)
1025 return true;
1026 }
1027
1028 return false;
1029}
1030
1031static inline bool
1032uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1033{
1034 return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
1035}
1036
1037static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1038{
1039 bool done;
1040
1041 write_lock(&tu->filter.rwlock);
1042 if (event->hw.target) {
1043 list_del(&event->hw.tp_list);
1044 done = tu->filter.nr_systemwide ||
1045 (event->hw.target->flags & PF_EXITING) ||
1046 uprobe_filter_event(tu, event);
1047 } else {
1048 tu->filter.nr_systemwide--;
1049 done = tu->filter.nr_systemwide;
1050 }
1051 write_unlock(&tu->filter.rwlock);
1052
1053 if (!done)
1054 return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1055
1056 return 0;
1057}
1058
1059static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1060{
1061 bool done;
1062 int err;
1063
1064 write_lock(&tu->filter.rwlock);
1065 if (event->hw.target) {
1066 /*
1067 * event->parent != NULL means copy_process(); we can avoid
1068 * uprobe_apply() because current->mm must already be probed and
1069 * we can rely on dup_mmap(), which preserves the installed bp's.
1070 *
1071 * attr.enable_on_exec means that exec/mmap will install the
1072 * breakpoints we need.
1073 */
1074 done = tu->filter.nr_systemwide ||
1075 event->parent || event->attr.enable_on_exec ||
1076 uprobe_filter_event(tu, event);
1077 list_add(&event->hw.tp_list, &tu->filter.perf_events);
1078 } else {
1079 done = tu->filter.nr_systemwide;
1080 tu->filter.nr_systemwide++;
1081 }
1082 write_unlock(&tu->filter.rwlock);
1083
1084 err = 0;
1085 if (!done) {
1086 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1087 if (err)
1088 uprobe_perf_close(tu, event);
1089 }
1090 return err;
1091}
1092
1093static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1094 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1095{
1096 struct trace_uprobe *tu;
1097 int ret;
1098
1099 tu = container_of(uc, struct trace_uprobe, consumer);
1100 read_lock(&tu->filter.rwlock);
1101 ret = __uprobe_perf_filter(&tu->filter, mm);
1102 read_unlock(&tu->filter.rwlock);
1103
1104 return ret;
1105}
1106
1107static void __uprobe_perf_func(struct trace_uprobe *tu,
1108 unsigned long func, struct pt_regs *regs,
1109 struct uprobe_cpu_buffer *ucb, int dsize)
1110{
1111 struct trace_event_call *call = &tu->tp.call;
1112 struct uprobe_trace_entry_head *entry;
1113 struct bpf_prog *prog = call->prog;
1114 struct hlist_head *head;
1115 void *data;
1116 int size, esize;
1117 int rctx;
1118
1119 if (prog && !trace_call_bpf(prog, regs))
1120 return;
1121
1122 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1123
1124 size = esize + tu->tp.size + dsize;
1125 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1126 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1127 return;
1128
1129 preempt_disable();
1130 head = this_cpu_ptr(call->perf_events);
1131 if (hlist_empty(head))
1132 goto out;
1133
1134 entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1135 if (!entry)
1136 goto out;
1137
1138 if (is_ret_probe(tu)) {
1139 entry->vaddr[0] = func;
1140 entry->vaddr[1] = instruction_pointer(regs);
1141 data = DATAOF_TRACE_ENTRY(entry, true);
1142 } else {
1143 entry->vaddr[0] = instruction_pointer(regs);
1144 data = DATAOF_TRACE_ENTRY(entry, false);
1145 }
1146
1147 memcpy(data, ucb->buf, tu->tp.size + dsize);
1148
1149 if (size - esize > tu->tp.size + dsize) {
1150 int len = tu->tp.size + dsize;
1151
1152 memset(data + len, 0, size - esize - len);
1153 }
1154
1155 perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1156 out:
1157 preempt_enable();
1158}
1159
1160/* uprobe profile handler */
1161static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1162 struct uprobe_cpu_buffer *ucb, int dsize)
1163{
1164 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1165 return UPROBE_HANDLER_REMOVE;
1166
1167 if (!is_ret_probe(tu))
1168 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
1169 return 0;
1170}
1171
1172static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1173 struct pt_regs *regs,
1174 struct uprobe_cpu_buffer *ucb, int dsize)
1175{
1176 __uprobe_perf_func(tu, func, regs, ucb, dsize);
1177}
1178#endif /* CONFIG_PERF_EVENTS */
1179
1180static int
1181trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1182 void *data)
1183{
1184 struct trace_uprobe *tu = event->data;
1185 struct trace_event_file *file = data;
1186
1187 switch (type) {
1188 case TRACE_REG_REGISTER:
1189 return probe_event_enable(tu, file, NULL);
1190
1191 case TRACE_REG_UNREGISTER:
1192 probe_event_disable(tu, file);
1193 return 0;
1194
1195#ifdef CONFIG_PERF_EVENTS
1196 case TRACE_REG_PERF_REGISTER:
1197 return probe_event_enable(tu, NULL, uprobe_perf_filter);
1198
1199 case TRACE_REG_PERF_UNREGISTER:
1200 probe_event_disable(tu, NULL);
1201 return 0;
1202
1203 case TRACE_REG_PERF_OPEN:
1204 return uprobe_perf_open(tu, data);
1205
1206 case TRACE_REG_PERF_CLOSE:
1207 return uprobe_perf_close(tu, data);
1208
1209#endif
1210 default:
1211 return 0;
1212 }
1213 return 0;
1214}
1215
1216static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1217{
1218 struct trace_uprobe *tu;
1219 struct uprobe_dispatch_data udd;
1220 struct uprobe_cpu_buffer *ucb;
1221 int dsize, esize;
1222 int ret = 0;
1223
1224
1225 tu = container_of(con, struct trace_uprobe, consumer);
1226 tu->nhit++;
1227
1228 udd.tu = tu;
1229 udd.bp_addr = instruction_pointer(regs);
1230
1231 current->utask->vaddr = (unsigned long) &udd;
1232
1233 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1234 return 0;
1235
1236 dsize = __get_data_size(&tu->tp, regs);
1237 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1238
1239 ucb = uprobe_buffer_get();
1240 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1241
1242 if (tu->tp.flags & TP_FLAG_TRACE)
1243 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1244
1245#ifdef CONFIG_PERF_EVENTS
1246 if (tu->tp.flags & TP_FLAG_PROFILE)
1247 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1248#endif
1249 uprobe_buffer_put(ucb);
1250 return ret;
1251}
1252
1253static int uretprobe_dispatcher(struct uprobe_consumer *con,
1254 unsigned long func, struct pt_regs *regs)
1255{
1256 struct trace_uprobe *tu;
1257 struct uprobe_dispatch_data udd;
1258 struct uprobe_cpu_buffer *ucb;
1259 int dsize, esize;
1260
1261 tu = container_of(con, struct trace_uprobe, consumer);
1262
1263 udd.tu = tu;
1264 udd.bp_addr = func;
1265
1266 current->utask->vaddr = (unsigned long) &udd;
1267
1268 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1269 return 0;
1270
1271 dsize = __get_data_size(&tu->tp, regs);
1272 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1273
1274 ucb = uprobe_buffer_get();
1275 store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1276
1277 if (tu->tp.flags & TP_FLAG_TRACE)
1278 uretprobe_trace_func(tu, func, regs, ucb, dsize);
1279
1280#ifdef CONFIG_PERF_EVENTS
1281 if (tu->tp.flags & TP_FLAG_PROFILE)
1282 uretprobe_perf_func(tu, func, regs, ucb, dsize);
1283#endif
1284 uprobe_buffer_put(ucb);
1285 return 0;
1286}
1287
1288static struct trace_event_functions uprobe_funcs = {
1289 .trace = print_uprobe_event
1290};
1291
1292static int register_uprobe_event(struct trace_uprobe *tu)
1293{
1294 struct trace_event_call *call = &tu->tp.call;
1295 int ret;
1296
1297 /* Initialize trace_event_call */
1298 INIT_LIST_HEAD(&call->class->fields);
1299 call->event.funcs = &uprobe_funcs;
1300 call->class->define_fields = uprobe_event_define_fields;
1301
1302 if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
1303 return -ENOMEM;
1304
1305 ret = register_trace_event(&call->event);
1306 if (!ret) {
1307 kfree(call->print_fmt);
1308 return -ENODEV;
1309 }
1310
1311 call->flags = TRACE_EVENT_FL_UPROBE;
1312 call->class->reg = trace_uprobe_register;
1313 call->data = tu;
1314 ret = trace_add_event_call(call);
1315
1316 if (ret) {
1317 pr_info("Failed to register uprobe event: %s\n",
1318 trace_event_name(call));
1319 kfree(call->print_fmt);
1320 unregister_trace_event(&call->event);
1321 }
1322
1323 return ret;
1324}
1325
1326static int unregister_uprobe_event(struct trace_uprobe *tu)
1327{
1328 int ret;
1329
1330 /* tu->event is unregistered in trace_remove_event_call() */
1331 ret = trace_remove_event_call(&tu->tp.call);
1332 if (ret)
1333 return ret;
1334 kfree(tu->tp.call.print_fmt);
1335 tu->tp.call.print_fmt = NULL;
1336 return 0;
1337}
1338
1339/* Make a trace interface for controlling probe points */
1340static __init int init_uprobe_trace(void)
1341{
1342 struct dentry *d_tracer;
1343
1344 d_tracer = tracing_init_dentry();
1345 if (IS_ERR(d_tracer))
1346 return 0;
1347
1348 trace_create_file("uprobe_events", 0644, d_tracer,
1349 NULL, &uprobe_events_ops);
1350 /* Profile interface */
1351 trace_create_file("uprobe_profile", 0444, d_tracer,
1352 NULL, &uprobe_profile_ops);
1353 return 0;
1354}
1355
1356fs_initcall(init_uprobe_trace);
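
/*
 * With these files in place, probes are typically managed from the
 * tracefs mount point, e.g. (illustrative):
 *
 *   echo 'p /bin/bash:0x4245c0' >> /sys/kernel/debug/tracing/uprobe_events
 *   cat /sys/kernel/debug/tracing/uprobe_profile
 *
 * uprobe_profile lists one "<filename> <event-name> <hit-count>" line per
 * probe, matching probes_profile_seq_show() above.
 */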
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * uprobes-based tracing events
4 *
5 * Copyright (C) IBM Corporation, 2010-2012
6 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
7 */
8#define pr_fmt(fmt) "trace_uprobe: " fmt
9
10#include <linux/security.h>
11#include <linux/ctype.h>
12#include <linux/module.h>
13#include <linux/uaccess.h>
14#include <linux/uprobes.h>
15#include <linux/namei.h>
16#include <linux/string.h>
17#include <linux/rculist.h>
18
19#include "trace_dynevent.h"
20#include "trace_probe.h"
21#include "trace_probe_tmpl.h"
22
23#define UPROBE_EVENT_SYSTEM "uprobes"
24
25struct uprobe_trace_entry_head {
26 struct trace_entry ent;
27 unsigned long vaddr[];
28};
29
30#define SIZEOF_TRACE_ENTRY(is_return) \
31 (sizeof(struct uprobe_trace_entry_head) + \
32 sizeof(unsigned long) * (is_return ? 2 : 1))
33
34#define DATAOF_TRACE_ENTRY(entry, is_return) \
35 ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
36
37static int trace_uprobe_create(const char *raw_command);
38static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
39static int trace_uprobe_release(struct dyn_event *ev);
40static bool trace_uprobe_is_busy(struct dyn_event *ev);
41static bool trace_uprobe_match(const char *system, const char *event,
42 int argc, const char **argv, struct dyn_event *ev);
43
44static struct dyn_event_operations trace_uprobe_ops = {
45 .create = trace_uprobe_create,
46 .show = trace_uprobe_show,
47 .is_busy = trace_uprobe_is_busy,
48 .free = trace_uprobe_release,
49 .match = trace_uprobe_match,
50};
51
52/*
53 * uprobe event core functions
54 */
55struct trace_uprobe {
56 struct dyn_event devent;
57 struct uprobe_consumer consumer;
58 struct path path;
59 struct inode *inode;
60 char *filename;
61 unsigned long offset;
62 unsigned long ref_ctr_offset;
63 unsigned long nhit;
64 struct trace_probe tp;
65};
66
67static bool is_trace_uprobe(struct dyn_event *ev)
68{
69 return ev->ops == &trace_uprobe_ops;
70}
71
72static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
73{
74 return container_of(ev, struct trace_uprobe, devent);
75}
76
77/**
78 * for_each_trace_uprobe - iterate over the trace_uprobe list
79 * @pos: the struct trace_uprobe * for each entry
80 * @dpos: the struct dyn_event * to use as a loop cursor
81 */
82#define for_each_trace_uprobe(pos, dpos) \
83 for_each_dyn_event(dpos) \
84 if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
85
86#define SIZEOF_TRACE_UPROBE(n) \
87 (offsetof(struct trace_uprobe, tp.args) + \
88 (sizeof(struct probe_arg) * (n)))
89
90static int register_uprobe_event(struct trace_uprobe *tu);
91static int unregister_uprobe_event(struct trace_uprobe *tu);
92
93struct uprobe_dispatch_data {
94 struct trace_uprobe *tu;
95 unsigned long bp_addr;
96};
97
98static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
99static int uretprobe_dispatcher(struct uprobe_consumer *con,
100 unsigned long func, struct pt_regs *regs);
101
102#ifdef CONFIG_STACK_GROWSUP
103static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
104{
105 return addr - (n * sizeof(long));
106}
107#else
108static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
109{
110 return addr + (n * sizeof(long));
111}
112#endif
113
114static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
115{
116 unsigned long ret;
117 unsigned long addr = user_stack_pointer(regs);
118
119 addr = adjust_stack_addr(addr, n);
120
121 if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
122 return 0;
123
124 return ret;
125}
126
127/*
128 * Uprobes-specific fetch functions
129 */
130static nokprobe_inline int
131probe_mem_read(void *dest, void *src, size_t size)
132{
133 void __user *vaddr = (void __force __user *)src;
134
135 return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
136}
137
138static nokprobe_inline int
139probe_mem_read_user(void *dest, void *src, size_t size)
140{
141 return probe_mem_read(dest, src, size);
142}
143
144/*
145 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
146 * length and relative data location.
147 */
148static nokprobe_inline int
149fetch_store_string(unsigned long addr, void *dest, void *base)
150{
151 long ret;
152 u32 loc = *(u32 *)dest;
153 int maxlen = get_loc_len(loc);
154 u8 *dst = get_loc_data(dest, base);
155 void __user *src = (void __force __user *) addr;
156
157 if (unlikely(!maxlen))
158 return -ENOMEM;
159
160 if (addr == FETCH_TOKEN_COMM)
161 ret = strlcpy(dst, current->comm, maxlen);
162 else
163 ret = strncpy_from_user(dst, src, maxlen);
164 if (ret >= 0) {
165 if (ret == maxlen)
166 dst[ret - 1] = '\0';
167 else
168 /*
169 * Include the terminating null byte. In this case it
170 * was copied by strncpy_from_user but not accounted
171 * for in ret.
172 */
173 ret++;
174 *(u32 *)dest = make_data_loc(ret, (void *)dst - base);
175 }
176
177 return ret;
178}
179
180static nokprobe_inline int
181fetch_store_string_user(unsigned long addr, void *dest, void *base)
182{
183 return fetch_store_string(addr, dest, base);
184}
185
186/* Return the length of the string -- including the terminating null byte */
187static nokprobe_inline int
188fetch_store_strlen(unsigned long addr)
189{
190 int len;
191 void __user *vaddr = (void __force __user *) addr;
192
193 if (addr == FETCH_TOKEN_COMM)
194 len = strlen(current->comm) + 1;
195 else
196 len = strnlen_user(vaddr, MAX_STRING_SIZE);
197
198 return (len > MAX_STRING_SIZE) ? 0 : len;
199}
200
201static nokprobe_inline int
202fetch_store_strlen_user(unsigned long addr)
203{
204 return fetch_store_strlen(addr);
205}
206
207static unsigned long translate_user_vaddr(unsigned long file_offset)
208{
209 unsigned long base_addr;
210 struct uprobe_dispatch_data *udd;
211
212 udd = (void *) current->utask->vaddr;
213
214 base_addr = udd->bp_addr - udd->tu->offset;
215 return base_addr + file_offset;
216}
217
218/* Note that we don't verify it, since the code does not come from user space */
219static int
220process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
221 void *base)
222{
223 unsigned long val;
224
225 /* 1st stage: get value from context */
226 switch (code->op) {
227 case FETCH_OP_REG:
228 val = regs_get_register(regs, code->param);
229 break;
230 case FETCH_OP_STACK:
231 val = get_user_stack_nth(regs, code->param);
232 break;
233 case FETCH_OP_STACKP:
234 val = user_stack_pointer(regs);
235 break;
236 case FETCH_OP_RETVAL:
237 val = regs_return_value(regs);
238 break;
239 case FETCH_OP_IMM:
240 val = code->immediate;
241 break;
242 case FETCH_OP_COMM:
243 val = FETCH_TOKEN_COMM;
244 break;
245 case FETCH_OP_DATA:
246 val = (unsigned long)code->data;
247 break;
248 case FETCH_OP_FOFFS:
249 val = translate_user_vaddr(code->immediate);
250 break;
251 default:
252 return -EILSEQ;
253 }
254 code++;
255
256 return process_fetch_insn_bottom(code, val, dest, base);
257}
258NOKPROBE_SYMBOL(process_fetch_insn)
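
/*
 * As a rough illustration, an argument spec such as "+8($stack):u32" is
 * compiled by the trace_probe parser into a small fetch program like:
 *
 *   FETCH_OP_STACKP -> FETCH_OP_DEREF(+8) -> FETCH_OP_ST_MEM(4) -> FETCH_OP_END
 *
 * Only the first stage (getting the base value from a register, the
 * stack, or a file offset) is handled here; dereferencing and storing
 * are done by process_fetch_insn_bottom() in trace_probe_tmpl.h.
 */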
259
260static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
261{
262 rwlock_init(&filter->rwlock);
263 filter->nr_systemwide = 0;
264 INIT_LIST_HEAD(&filter->perf_events);
265}
266
267static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
268{
269 return !filter->nr_systemwide && list_empty(&filter->perf_events);
270}
271
272static inline bool is_ret_probe(struct trace_uprobe *tu)
273{
274 return tu->consumer.ret_handler != NULL;
275}
276
277static bool trace_uprobe_is_busy(struct dyn_event *ev)
278{
279 struct trace_uprobe *tu = to_trace_uprobe(ev);
280
281 return trace_probe_is_enabled(&tu->tp);
282}
283
284static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
285 int argc, const char **argv)
286{
287 char buf[MAX_ARGSTR_LEN + 1];
288 int len;
289
290 if (!argc)
291 return true;
292
293 len = strlen(tu->filename);
294 if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
295 return false;
296
297 if (tu->ref_ctr_offset == 0)
298 snprintf(buf, sizeof(buf), "0x%0*lx",
299 (int)(sizeof(void *) * 2), tu->offset);
300 else
301 snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
302 (int)(sizeof(void *) * 2), tu->offset,
303 tu->ref_ctr_offset);
304 if (strcmp(buf, &argv[0][len + 1]))
305 return false;
306
307 argc--; argv++;
308
309 return trace_probe_match_command_args(&tu->tp, argc, argv);
310}
311
312static bool trace_uprobe_match(const char *system, const char *event,
313 int argc, const char **argv, struct dyn_event *ev)
314{
315 struct trace_uprobe *tu = to_trace_uprobe(ev);
316
317 return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
318 (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
319 trace_uprobe_match_command_head(tu, argc, argv);
320}
321
322static nokprobe_inline struct trace_uprobe *
323trace_uprobe_primary_from_call(struct trace_event_call *call)
324{
325 struct trace_probe *tp;
326
327 tp = trace_probe_primary_from_call(call);
328 if (WARN_ON_ONCE(!tp))
329 return NULL;
330
331 return container_of(tp, struct trace_uprobe, tp);
332}
333
334/*
335 * Allocate new trace_uprobe and initialize it (including uprobes).
336 */
337static struct trace_uprobe *
338alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
339{
340 struct trace_uprobe *tu;
341 int ret;
342
343 tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
344 if (!tu)
345 return ERR_PTR(-ENOMEM);
346
347 ret = trace_probe_init(&tu->tp, event, group, true);
348 if (ret < 0)
349 goto error;
350
351 dyn_event_init(&tu->devent, &trace_uprobe_ops);
352 tu->consumer.handler = uprobe_dispatcher;
353 if (is_ret)
354 tu->consumer.ret_handler = uretprobe_dispatcher;
355 init_trace_uprobe_filter(tu->tp.event->filter);
356 return tu;
357
358error:
359 kfree(tu);
360
361 return ERR_PTR(ret);
362}
363
364static void free_trace_uprobe(struct trace_uprobe *tu)
365{
366 if (!tu)
367 return;
368
369 path_put(&tu->path);
370 trace_probe_cleanup(&tu->tp);
371 kfree(tu->filename);
372 kfree(tu);
373}
374
375static struct trace_uprobe *find_probe_event(const char *event, const char *group)
376{
377 struct dyn_event *pos;
378 struct trace_uprobe *tu;
379
380 for_each_trace_uprobe(tu, pos)
381 if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
382 strcmp(trace_probe_group_name(&tu->tp), group) == 0)
383 return tu;
384
385 return NULL;
386}
387
388/* Unregister a trace_uprobe and probe_event */
389static int unregister_trace_uprobe(struct trace_uprobe *tu)
390{
391 int ret;
392
393 if (trace_probe_has_sibling(&tu->tp))
394 goto unreg;
395
396 ret = unregister_uprobe_event(tu);
397 if (ret)
398 return ret;
399
400unreg:
401 dyn_event_remove(&tu->devent);
402 trace_probe_unlink(&tu->tp);
403 free_trace_uprobe(tu);
404 return 0;
405}
406
407static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
408 struct trace_uprobe *comp)
409{
410 struct trace_probe_event *tpe = orig->tp.event;
411 struct trace_probe *pos;
412 struct inode *comp_inode = d_real_inode(comp->path.dentry);
413 int i;
414
415 list_for_each_entry(pos, &tpe->probes, list) {
416 orig = container_of(pos, struct trace_uprobe, tp);
417 if (comp_inode != d_real_inode(orig->path.dentry) ||
418 comp->offset != orig->offset)
419 continue;
420
421 /*
422 * trace_probe_compare_arg_type() ensured that nr_args and each
423 * argument's name and type are the same. Let's compare comm.
424 */
425 for (i = 0; i < orig->tp.nr_args; i++) {
426 if (strcmp(orig->tp.args[i].comm,
427 comp->tp.args[i].comm))
428 break;
429 }
430
431 if (i == orig->tp.nr_args)
432 return true;
433 }
434
435 return false;
436}
437
438static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
439{
440 int ret;
441
442 ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
443 if (ret) {
444 /* Note that arguments start at index 2 */
445 trace_probe_log_set_index(ret + 1);
446 trace_probe_log_err(0, DIFF_ARG_TYPE);
447 return -EEXIST;
448 }
449 if (trace_uprobe_has_same_uprobe(to, tu)) {
450 trace_probe_log_set_index(0);
451 trace_probe_log_err(0, SAME_PROBE);
452 return -EEXIST;
453 }
454
455 /* Append to existing event */
456 ret = trace_probe_append(&tu->tp, &to->tp);
457 if (!ret)
458 dyn_event_add(&tu->devent);
459
460 return ret;
461}
462
463/*
464 * A uprobe with multiple reference counters is not allowed, i.e.
465 * if the inode and offset match, the reference counter offset *must*
466 * match as well. There is one exception: if the user is
467 * replacing an old trace_uprobe with a new one (same group/event),
468 * then we allow the same uprobe with a new reference counter as
469 * long as the new one does not conflict with any other existing
470 * ones.
471 */
472static int validate_ref_ctr_offset(struct trace_uprobe *new)
473{
474 struct dyn_event *pos;
475 struct trace_uprobe *tmp;
476 struct inode *new_inode = d_real_inode(new->path.dentry);
477
478 for_each_trace_uprobe(tmp, pos) {
479 if (new_inode == d_real_inode(tmp->path.dentry) &&
480 new->offset == tmp->offset &&
481 new->ref_ctr_offset != tmp->ref_ctr_offset) {
482 pr_warn("Reference counter offset mismatch.\n");
483 return -EINVAL;
484 }
485 }
486 return 0;
487}
488
489/* Register a trace_uprobe and probe_event */
490static int register_trace_uprobe(struct trace_uprobe *tu)
491{
492 struct trace_uprobe *old_tu;
493 int ret;
494
495 mutex_lock(&event_mutex);
496
497 ret = validate_ref_ctr_offset(tu);
498 if (ret)
499 goto end;
500
501 /* register as an event */
502 old_tu = find_probe_event(trace_probe_name(&tu->tp),
503 trace_probe_group_name(&tu->tp));
504 if (old_tu) {
505 if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
506 trace_probe_log_set_index(0);
507 trace_probe_log_err(0, DIFF_PROBE_TYPE);
508 ret = -EEXIST;
509 } else {
510 ret = append_trace_uprobe(tu, old_tu);
511 }
512 goto end;
513 }
514
515 ret = register_uprobe_event(tu);
516 if (ret) {
517 if (ret == -EEXIST) {
518 trace_probe_log_set_index(0);
519 trace_probe_log_err(0, EVENT_EXIST);
520 } else
521 pr_warn("Failed to register probe event(%d)\n", ret);
522 goto end;
523 }
524
525 dyn_event_add(&tu->devent);
526
527end:
528 mutex_unlock(&event_mutex);
529
530 return ret;
531}
532
533/*
534 * Argument syntax:
535 * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET[%return][(REF)] [FETCHARGS]
536 */
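/*
 * For example (paths and offsets are illustrative only):
 *
 *   p:sdt/ev1 /usr/lib/libfoo.so:0x4245c0(0x10036)  - probe with an SDT
 *                                                     reference counter
 *   r:bash_ret /bin/bash:0x4245c0 $retval           - return probe
 *   p:bash_ret2 /bin/bash:0x4245c0%return $retval   - return probe via %return
 */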
537static int __trace_uprobe_create(int argc, const char **argv)
538{
539 struct trace_uprobe *tu;
540 const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
541 char *arg, *filename, *rctr, *rctr_end, *tmp;
542 char buf[MAX_EVENT_NAME_LEN];
543 struct path path;
544 unsigned long offset, ref_ctr_offset;
545 bool is_return = false;
546 int i, ret;
547
548 ret = 0;
549 ref_ctr_offset = 0;
550
551 switch (argv[0][0]) {
552 case 'r':
553 is_return = true;
554 break;
555 case 'p':
556 break;
557 default:
558 return -ECANCELED;
559 }
560
561 if (argc < 2)
562 return -ECANCELED;
563
564 if (argv[0][1] == ':')
565 event = &argv[0][2];
566
567 if (!strchr(argv[1], '/'))
568 return -ECANCELED;
569
570 filename = kstrdup(argv[1], GFP_KERNEL);
571 if (!filename)
572 return -ENOMEM;
573
574 /* Find the last occurrence, in case the path contains ':' too. */
575 arg = strrchr(filename, ':');
576 if (!arg || !isdigit(arg[1])) {
577 kfree(filename);
578 return -ECANCELED;
579 }
580
581 trace_probe_log_init("trace_uprobe", argc, argv);
582 trace_probe_log_set_index(1); /* filename is the 2nd argument */
583
584 *arg++ = '\0';
585 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
586 if (ret) {
587 trace_probe_log_err(0, FILE_NOT_FOUND);
588 kfree(filename);
589 trace_probe_log_clear();
590 return ret;
591 }
592 if (!d_is_reg(path.dentry)) {
593 trace_probe_log_err(0, NO_REGULAR_FILE);
594 ret = -EINVAL;
595 goto fail_address_parse;
596 }
597
598 /* Parse reference counter offset if specified. */
599 rctr = strchr(arg, '(');
600 if (rctr) {
601 rctr_end = strchr(rctr, ')');
602 if (!rctr_end) {
603 ret = -EINVAL;
604 rctr_end = rctr + strlen(rctr);
605 trace_probe_log_err(rctr_end - filename,
606 REFCNT_OPEN_BRACE);
607 goto fail_address_parse;
608 } else if (rctr_end[1] != '\0') {
609 ret = -EINVAL;
610 trace_probe_log_err(rctr_end + 1 - filename,
611 BAD_REFCNT_SUFFIX);
612 goto fail_address_parse;
613 }
614
615 *rctr++ = '\0';
616 *rctr_end = '\0';
617 ret = kstrtoul(rctr, 0, &ref_ctr_offset);
618 if (ret) {
619 trace_probe_log_err(rctr - filename, BAD_REFCNT);
620 goto fail_address_parse;
621 }
622 }
623
624 /* Check if there is %return suffix */
625 tmp = strchr(arg, '%');
626 if (tmp) {
627 if (!strcmp(tmp, "%return")) {
628 *tmp = '\0';
629 is_return = true;
630 } else {
631 trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
632 ret = -EINVAL;
633 goto fail_address_parse;
634 }
635 }
636
637 /* Parse uprobe offset. */
638 ret = kstrtoul(arg, 0, &offset);
639 if (ret) {
640 trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
641 goto fail_address_parse;
642 }
643
644 /* setup a probe */
645 trace_probe_log_set_index(0);
646 if (event) {
647 ret = traceprobe_parse_event_name(&event, &group, buf,
648 event - argv[0]);
649 if (ret)
650 goto fail_address_parse;
651 } else {
652 char *tail;
653 char *ptr;
654
655 tail = kstrdup(kbasename(filename), GFP_KERNEL);
656 if (!tail) {
657 ret = -ENOMEM;
658 goto fail_address_parse;
659 }
660
661 ptr = strpbrk(tail, ".-_");
662 if (ptr)
663 *ptr = '\0';
664
665 snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
666 event = buf;
667 kfree(tail);
668 }
669
670 argc -= 2;
671 argv += 2;
672
673 tu = alloc_trace_uprobe(group, event, argc, is_return);
674 if (IS_ERR(tu)) {
675 ret = PTR_ERR(tu);
676 /* This must return -ENOMEM otherwise there is a bug */
677 WARN_ON_ONCE(ret != -ENOMEM);
678 goto fail_address_parse;
679 }
680 tu->offset = offset;
681 tu->ref_ctr_offset = ref_ctr_offset;
682 tu->path = path;
683 tu->filename = filename;
684
685 /* parse arguments */
686 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
687 tmp = kstrdup(argv[i], GFP_KERNEL);
688 if (!tmp) {
689 ret = -ENOMEM;
690 goto error;
691 }
692
693 trace_probe_log_set_index(i + 2);
694 ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
695 is_return ? TPARG_FL_RETURN : 0);
696 kfree(tmp);
697 if (ret)
698 goto error;
699 }
700
701 ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
702 if (ret < 0)
703 goto error;
704
705 ret = register_trace_uprobe(tu);
706 if (!ret)
707 goto out;
708
709error:
710 free_trace_uprobe(tu);
711out:
712 trace_probe_log_clear();
713 return ret;
714
715fail_address_parse:
716 trace_probe_log_clear();
717 path_put(&path);
718 kfree(filename);
719
720 return ret;
721}
722
723int trace_uprobe_create(const char *raw_command)
724{
725 return trace_probe_create(raw_command, __trace_uprobe_create);
726}
727
728static int create_or_delete_trace_uprobe(const char *raw_command)
729{
730 int ret;
731
732 if (raw_command[0] == '-')
733 return dyn_event_release(raw_command, &trace_uprobe_ops);
734
735 ret = trace_uprobe_create(raw_command);
736 return ret == -ECANCELED ? -EINVAL : ret;
737}
738
739static int trace_uprobe_release(struct dyn_event *ev)
740{
741 struct trace_uprobe *tu = to_trace_uprobe(ev);
742
743 return unregister_trace_uprobe(tu);
744}
745
746/* Probes listing interfaces */
747static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
748{
749 struct trace_uprobe *tu = to_trace_uprobe(ev);
750 char c = is_ret_probe(tu) ? 'r' : 'p';
751 int i;
752
753 seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
754 trace_probe_name(&tu->tp), tu->filename,
755 (int)(sizeof(void *) * 2), tu->offset);
756
757 if (tu->ref_ctr_offset)
758 seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
759
760 for (i = 0; i < tu->tp.nr_args; i++)
761 seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
762
763 seq_putc(m, '\n');
764 return 0;
765}
766
767static int probes_seq_show(struct seq_file *m, void *v)
768{
769 struct dyn_event *ev = v;
770
771 if (!is_trace_uprobe(ev))
772 return 0;
773
774 return trace_uprobe_show(m, ev);
775}
776
777static const struct seq_operations probes_seq_op = {
778 .start = dyn_event_seq_start,
779 .next = dyn_event_seq_next,
780 .stop = dyn_event_seq_stop,
781 .show = probes_seq_show
782};
783
784static int probes_open(struct inode *inode, struct file *file)
785{
786 int ret;
787
788 ret = security_locked_down(LOCKDOWN_TRACEFS);
789 if (ret)
790 return ret;
791
792 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
793 ret = dyn_events_release_all(&trace_uprobe_ops);
794 if (ret)
795 return ret;
796 }
797
798 return seq_open(file, &probes_seq_op);
799}
800
801static ssize_t probes_write(struct file *file, const char __user *buffer,
802 size_t count, loff_t *ppos)
803{
804 return trace_parse_run_command(file, buffer, count, ppos,
805 create_or_delete_trace_uprobe);
806}
807
808static const struct file_operations uprobe_events_ops = {
809 .owner = THIS_MODULE,
810 .open = probes_open,
811 .read = seq_read,
812 .llseek = seq_lseek,
813 .release = seq_release,
814 .write = probes_write,
815};
816
817/* Probes profiling interfaces */
818static int probes_profile_seq_show(struct seq_file *m, void *v)
819{
820 struct dyn_event *ev = v;
821 struct trace_uprobe *tu;
822
823 if (!is_trace_uprobe(ev))
824 return 0;
825
826 tu = to_trace_uprobe(ev);
827 seq_printf(m, " %s %-44s %15lu\n", tu->filename,
828 trace_probe_name(&tu->tp), tu->nhit);
829 return 0;
830}
831
832static const struct seq_operations profile_seq_op = {
833 .start = dyn_event_seq_start,
834 .next = dyn_event_seq_next,
835 .stop = dyn_event_seq_stop,
836 .show = probes_profile_seq_show
837};
838
839static int profile_open(struct inode *inode, struct file *file)
840{
841 int ret;
842
843 ret = security_locked_down(LOCKDOWN_TRACEFS);
844 if (ret)
845 return ret;
846
847 return seq_open(file, &profile_seq_op);
848}
849
850static const struct file_operations uprobe_profile_ops = {
851 .owner = THIS_MODULE,
852 .open = profile_open,
853 .read = seq_read,
854 .llseek = seq_lseek,
855 .release = seq_release,
856};
857
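/*
 * Per-cpu scratch buffer (one page, set up in uprobe_buffer_init()) used
 * to fetch the probe arguments once per hit before they are copied into
 * the ftrace ring buffer and/or a perf sample.
 */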
858struct uprobe_cpu_buffer {
859 struct mutex mutex;
860 void *buf;
861};
862static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
863static int uprobe_buffer_refcnt;
864
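/*
 * Allocate one page per possible CPU, rolling back everything allocated so
 * far on failure.  The buffers only exist while at least one uprobe event
 * is enabled (see uprobe_buffer_enable()/uprobe_buffer_disable()).
 */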
865static int uprobe_buffer_init(void)
866{
867 int cpu, err_cpu;
868
869 uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
870 if (uprobe_cpu_buffer == NULL)
871 return -ENOMEM;
872
873 for_each_possible_cpu(cpu) {
874 struct page *p = alloc_pages_node(cpu_to_node(cpu),
875 GFP_KERNEL, 0);
876 if (p == NULL) {
877 err_cpu = cpu;
878 goto err;
879 }
880 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
881 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
882 }
883
884 return 0;
885
886err:
887 for_each_possible_cpu(cpu) {
888 if (cpu == err_cpu)
889 break;
890 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
891 }
892
893 free_percpu(uprobe_cpu_buffer);
894 return -ENOMEM;
895}
896
897static int uprobe_buffer_enable(void)
898{
899 int ret = 0;
900
901 BUG_ON(!mutex_is_locked(&event_mutex));
902
903 if (uprobe_buffer_refcnt++ == 0) {
904 ret = uprobe_buffer_init();
905 if (ret < 0)
906 uprobe_buffer_refcnt--;
907 }
908
909 return ret;
910}
911
912static void uprobe_buffer_disable(void)
913{
914 int cpu;
915
916 BUG_ON(!mutex_is_locked(&event_mutex));
917
918 if (--uprobe_buffer_refcnt == 0) {
919 for_each_possible_cpu(cpu)
920 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
921 cpu)->buf);
922
923 free_percpu(uprobe_cpu_buffer);
924 uprobe_cpu_buffer = NULL;
925 }
926}
927
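/*
 * uprobe_buffer_get()/uprobe_buffer_put() bracket a single probe hit in
 * the dispatchers below: the arguments are stored into the buffer once and
 * then handed to both the ftrace and the perf output paths.
 */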
928static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
929{
930 struct uprobe_cpu_buffer *ucb;
931 int cpu;
932
933 cpu = raw_smp_processor_id();
934 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
935
936 /*
937 * Use per-cpu buffers for the fastest access, but the task might migrate
938 * between CPUs, so the mutex ensures we have sole access to the buffer.
939 */
940 mutex_lock(&ucb->mutex);
941
942 return ucb;
943}
944
945static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
946{
947 mutex_unlock(&ucb->mutex);
948}
949
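/*
 * Write one event into the ring buffer of @trace_file.  The entry begins
 * with the probed address (for return probes: the function address and the
 * return address), followed by the fetched arguments copied from @ucb.
 */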
950static void __uprobe_trace_func(struct trace_uprobe *tu,
951 unsigned long func, struct pt_regs *regs,
952 struct uprobe_cpu_buffer *ucb, int dsize,
953 struct trace_event_file *trace_file)
954{
955 struct uprobe_trace_entry_head *entry;
956 struct trace_buffer *buffer;
957 struct ring_buffer_event *event;
958 void *data;
959 int size, esize;
960 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
961
962 WARN_ON(call != trace_file->event_call);
963
964 if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
965 return;
966
967 if (trace_trigger_soft_disabled(trace_file))
968 return;
969
970 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
971 size = esize + tu->tp.size + dsize;
972 event = trace_event_buffer_lock_reserve(&buffer, trace_file,
973 call->event.type, size, 0);
974 if (!event)
975 return;
976
977 entry = ring_buffer_event_data(event);
978 if (is_ret_probe(tu)) {
979 entry->vaddr[0] = func;
980 entry->vaddr[1] = instruction_pointer(regs);
981 data = DATAOF_TRACE_ENTRY(entry, true);
982 } else {
983 entry->vaddr[0] = instruction_pointer(regs);
984 data = DATAOF_TRACE_ENTRY(entry, false);
985 }
986
987 memcpy(data, ucb->buf, tu->tp.size + dsize);
988
989 event_trigger_unlock_commit(trace_file, buffer, event, entry, 0);
990}
991
992/* uprobe handler */
993static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
994 struct uprobe_cpu_buffer *ucb, int dsize)
995{
996 struct event_file_link *link;
997
998 if (is_ret_probe(tu))
999 return 0;
1000
1001 rcu_read_lock();
1002 trace_probe_for_each_link_rcu(link, &tu->tp)
1003 __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
1004 rcu_read_unlock();
1005
1006 return 0;
1007}
1008
1009static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
1010 struct pt_regs *regs,
1011 struct uprobe_cpu_buffer *ucb, int dsize)
1012{
1013 struct event_file_link *link;
1014
1015 rcu_read_lock();
1016 trace_probe_for_each_link_rcu(link, &tu->tp)
1017 __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
1018 rcu_read_unlock();
1019}
1020
1021/* Event entry printers */
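/*
 * Format one entry for the trace output, e.g. (illustrative values)
 * "myprobe: (0x4245c0)" for a probe hit or
 * "myretprobe: (0x4243e0 <- 0x4245c0)" for a return probe,
 * followed by the decoded arguments.
 */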
1022static enum print_line_t
1023print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1024{
1025 struct uprobe_trace_entry_head *entry;
1026 struct trace_seq *s = &iter->seq;
1027 struct trace_uprobe *tu;
1028 u8 *data;
1029
1030 entry = (struct uprobe_trace_entry_head *)iter->ent;
1031 tu = trace_uprobe_primary_from_call(
1032 container_of(event, struct trace_event_call, event));
1033 if (unlikely(!tu))
1034 goto out;
1035
1036 if (is_ret_probe(tu)) {
1037 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1038 trace_probe_name(&tu->tp),
1039 entry->vaddr[1], entry->vaddr[0]);
1040 data = DATAOF_TRACE_ENTRY(entry, true);
1041 } else {
1042 trace_seq_printf(s, "%s: (0x%lx)",
1043 trace_probe_name(&tu->tp),
1044 entry->vaddr[0]);
1045 data = DATAOF_TRACE_ENTRY(entry, false);
1046 }
1047
1048 if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1049 goto out;
1050
1051 trace_seq_putc(s, '\n');
1052
1053 out:
1054 return trace_handle_return(s);
1055}
1056
1057typedef bool (*filter_func_t)(struct uprobe_consumer *self,
1058 enum uprobe_filter_ctx ctx,
1059 struct mm_struct *mm);
1060
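/*
 * Hook the consumer into the uprobes core.  When a reference counter
 * offset was supplied, the refctr variant is used so the counter in the
 * target (e.g. an SDT semaphore) is updated along with the breakpoint.
 */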
1061static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1062{
1063 int ret;
1064
1065 tu->consumer.filter = filter;
1066 tu->inode = d_real_inode(tu->path.dentry);
1067
1068 if (tu->ref_ctr_offset)
1069 ret = uprobe_register_refctr(tu->inode, tu->offset,
1070 tu->ref_ctr_offset, &tu->consumer);
1071 else
1072 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
1073
1074 if (ret)
1075 tu->inode = NULL;
1076
1077 return ret;
1078}
1079
1080static void __probe_event_disable(struct trace_probe *tp)
1081{
1082 struct trace_probe *pos;
1083 struct trace_uprobe *tu;
1084
1085 tu = container_of(tp, struct trace_uprobe, tp);
1086 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1087
1088 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1089 tu = container_of(pos, struct trace_uprobe, tp);
1090 if (!tu->inode)
1091 continue;
1092
1093 uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
1094 tu->inode = NULL;
1095 }
1096}
1097
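/*
 * Enable the event either for ftrace (@file != NULL) or for perf
 * (@file == NULL, with a perf-specific filter).  The two users are
 * mutually exclusive; the first enabler sets up the per-cpu buffers and
 * registers the uprobes, and any failure rolls both steps back.
 */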
1098static int probe_event_enable(struct trace_event_call *call,
1099 struct trace_event_file *file, filter_func_t filter)
1100{
1101 struct trace_probe *pos, *tp;
1102 struct trace_uprobe *tu;
1103 bool enabled;
1104 int ret;
1105
1106 tp = trace_probe_primary_from_call(call);
1107 if (WARN_ON_ONCE(!tp))
1108 return -ENODEV;
1109 enabled = trace_probe_is_enabled(tp);
1110
1111 /* This may also change the "enabled" state */
1112 if (file) {
1113 if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1114 return -EINTR;
1115
1116 ret = trace_probe_add_file(tp, file);
1117 if (ret < 0)
1118 return ret;
1119 } else {
1120 if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1121 return -EINTR;
1122
1123 trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1124 }
1125
1126 tu = container_of(tp, struct trace_uprobe, tp);
1127 WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1128
1129 if (enabled)
1130 return 0;
1131
1132 ret = uprobe_buffer_enable();
1133 if (ret)
1134 goto err_flags;
1135
1136 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1137 tu = container_of(pos, struct trace_uprobe, tp);
1138 ret = trace_uprobe_enable(tu, filter);
1139 if (ret) {
1140 __probe_event_disable(tp);
1141 goto err_buffer;
1142 }
1143 }
1144
1145 return 0;
1146
1147 err_buffer:
1148 uprobe_buffer_disable();
1149
1150 err_flags:
1151 if (file)
1152 trace_probe_remove_file(tp, file);
1153 else
1154 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1155
1156 return ret;
1157}
1158
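/*
 * Counterpart of probe_event_enable(): drop the file link or the PROFILE
 * flag and, once no user is left, unregister the uprobes and release the
 * per-cpu buffers.
 */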
1159static void probe_event_disable(struct trace_event_call *call,
1160 struct trace_event_file *file)
1161{
1162 struct trace_probe *tp;
1163
1164 tp = trace_probe_primary_from_call(call);
1165 if (WARN_ON_ONCE(!tp))
1166 return;
1167
1168 if (!trace_probe_is_enabled(tp))
1169 return;
1170
1171 if (file) {
1172 if (trace_probe_remove_file(tp, file) < 0)
1173 return;
1174
1175 if (trace_probe_is_enabled(tp))
1176 return;
1177 } else
1178 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1179
1180 __probe_event_disable(tp);
1181 uprobe_buffer_disable();
1182}
1183
1184static int uprobe_event_define_fields(struct trace_event_call *event_call)
1185{
1186 int ret, size;
1187 struct uprobe_trace_entry_head field;
1188 struct trace_uprobe *tu;
1189
1190 tu = trace_uprobe_primary_from_call(event_call);
1191 if (unlikely(!tu))
1192 return -ENODEV;
1193
1194 if (is_ret_probe(tu)) {
1195 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1196 DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1197 size = SIZEOF_TRACE_ENTRY(true);
1198 } else {
1199 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1200 size = SIZEOF_TRACE_ENTRY(false);
1201 }
1202
1203 return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1204}
1205
1206#ifdef CONFIG_PERF_EVENTS
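/*
 * Return true if the probe must stay installed in @mm: either a
 * system-wide perf user exists or one of the attached per-task events
 * targets a task whose mm is @mm.  Callers hold filter->rwlock.
 */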
1207static bool
1208__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1209{
1210 struct perf_event *event;
1211
1212 if (filter->nr_systemwide)
1213 return true;
1214
1215 list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1216 if (event->hw.target->mm == mm)
1217 return true;
1218 }
1219
1220 return false;
1221}
1222
1223static inline bool
1224trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1225 struct perf_event *event)
1226{
1227 return __uprobe_perf_filter(filter, event->hw.target->mm);
1228}
1229
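/*
 * Returns true if nothing needs to be done for this event's mm, i.e. the
 * remaining filter still covers it (or the task is exiting), so that
 * uprobe_perf_close() can skip uprobe_apply().
 */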
1230static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1231 struct perf_event *event)
1232{
1233 bool done;
1234
1235 write_lock(&filter->rwlock);
1236 if (event->hw.target) {
1237 list_del(&event->hw.tp_list);
1238 done = filter->nr_systemwide ||
1239 (event->hw.target->flags & PF_EXITING) ||
1240 trace_uprobe_filter_event(filter, event);
1241 } else {
1242 filter->nr_systemwide--;
1243 done = filter->nr_systemwide;
1244 }
1245 write_unlock(&filter->rwlock);
1246
1247 return done;
1248}
1249
1250/* This returns true if the filter always covers the target mm */
1251static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1252 struct perf_event *event)
1253{
1254 bool done;
1255
1256 write_lock(&filter->rwlock);
1257 if (event->hw.target) {
1258 /*
1259 * event->parent != NULL means copy_process(), we can avoid
1260 * uprobe_apply(). current->mm must be probed and we can rely
1261 * on dup_mmap() which preserves the already installed bp's.
1262 *
1263 * attr.enable_on_exec means that exec/mmap will install the
1264 * breakpoints we need.
1265 */
1266 done = filter->nr_systemwide ||
1267 event->parent || event->attr.enable_on_exec ||
1268 trace_uprobe_filter_event(filter, event);
1269 list_add(&event->hw.tp_list, &filter->perf_events);
1270 } else {
1271 done = filter->nr_systemwide;
1272 filter->nr_systemwide++;
1273 }
1274 write_unlock(&filter->rwlock);
1275
1276 return done;
1277}
1278
1279static int uprobe_perf_close(struct trace_event_call *call,
1280 struct perf_event *event)
1281{
1282 struct trace_probe *pos, *tp;
1283 struct trace_uprobe *tu;
1284 int ret = 0;
1285
1286 tp = trace_probe_primary_from_call(call);
1287 if (WARN_ON_ONCE(!tp))
1288 return -ENODEV;
1289
1290 tu = container_of(tp, struct trace_uprobe, tp);
1291 if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
1292 return 0;
1293
1294 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1295 tu = container_of(pos, struct trace_uprobe, tp);
1296 ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1297 if (ret)
1298 break;
1299 }
1300
1301 return ret;
1302}
1303
1304static int uprobe_perf_open(struct trace_event_call *call,
1305 struct perf_event *event)
1306{
1307 struct trace_probe *pos, *tp;
1308 struct trace_uprobe *tu;
1309 int err = 0;
1310
1311 tp = trace_probe_primary_from_call(call);
1312 if (WARN_ON_ONCE(!tp))
1313 return -ENODEV;
1314
1315 tu = container_of(tp, struct trace_uprobe, tp);
1316 if (trace_uprobe_filter_add(tu->tp.event->filter, event))
1317 return 0;
1318
1319 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
	tu = container_of(pos, struct trace_uprobe, tp);
1320 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1321 if (err) {
1322 uprobe_perf_close(call, event);
1323 break;
1324 }
1325 }
1326
1327 return err;
1328}
1329
1330static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1331 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1332{
1333 struct trace_uprobe_filter *filter;
1334 struct trace_uprobe *tu;
1335 int ret;
1336
1337 tu = container_of(uc, struct trace_uprobe, consumer);
1338 filter = tu->tp.event->filter;
1339
1340 read_lock(&filter->rwlock);
1341 ret = __uprobe_perf_filter(filter, mm);
1342 read_unlock(&filter->rwlock);
1343
1344 return ret;
1345}
1346
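/*
 * Emit one perf sample.  Attached BPF programs run first and may drop the
 * event; the record is padded to a u64 boundary and zero-filled beyond the
 * fetched data so no stale buffer contents are copied out.
 */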
1347static void __uprobe_perf_func(struct trace_uprobe *tu,
1348 unsigned long func, struct pt_regs *regs,
1349 struct uprobe_cpu_buffer *ucb, int dsize)
1350{
1351 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1352 struct uprobe_trace_entry_head *entry;
1353 struct hlist_head *head;
1354 void *data;
1355 int size, esize;
1356 int rctx;
1357
1358 if (bpf_prog_array_valid(call)) {
1359 u32 ret;
1360
1361 preempt_disable();
1362 ret = trace_call_bpf(call, regs);
1363 preempt_enable();
1364 if (!ret)
1365 return;
1366 }
1367
1368 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1369
1370 size = esize + tu->tp.size + dsize;
1371 size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1372 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1373 return;
1374
1375 preempt_disable();
1376 head = this_cpu_ptr(call->perf_events);
1377 if (hlist_empty(head))
1378 goto out;
1379
1380 entry = perf_trace_buf_alloc(size, NULL, &rctx);
1381 if (!entry)
1382 goto out;
1383
1384 if (is_ret_probe(tu)) {
1385 entry->vaddr[0] = func;
1386 entry->vaddr[1] = instruction_pointer(regs);
1387 data = DATAOF_TRACE_ENTRY(entry, true);
1388 } else {
1389 entry->vaddr[0] = instruction_pointer(regs);
1390 data = DATAOF_TRACE_ENTRY(entry, false);
1391 }
1392
1393 memcpy(data, ucb->buf, tu->tp.size + dsize);
1394
1395 if (size - esize > tu->tp.size + dsize) {
1396 int len = tu->tp.size + dsize;
1397
1398 memset(data + len, 0, size - esize - len);
1399 }
1400
1401 perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1402 head, NULL);
1403 out:
1404 preempt_enable();
1405}
1406
1407/* uprobe profile handler */
1408static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1409 struct uprobe_cpu_buffer *ucb, int dsize)
1410{
1411 if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1412 return UPROBE_HANDLER_REMOVE;
1413
1414 if (!is_ret_probe(tu))
1415 __uprobe_perf_func(tu, 0, regs, ucb, dsize);
1416 return 0;
1417}
1418
1419static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1420 struct pt_regs *regs,
1421 struct uprobe_cpu_buffer *ucb, int dsize)
1422{
1423 __uprobe_perf_func(tu, func, regs, ucb, dsize);
1424}
1425
1426int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1427 const char **filename, u64 *probe_offset,
1428 bool perf_type_tracepoint)
1429{
1430 const char *pevent = trace_event_name(event->tp_event);
1431 const char *group = event->tp_event->class->system;
1432 struct trace_uprobe *tu;
1433
1434 if (perf_type_tracepoint)
1435 tu = find_probe_event(pevent, group);
1436 else
1437 tu = trace_uprobe_primary_from_call(event->tp_event);
1438 if (!tu)
1439 return -EINVAL;
1440
1441 *fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1442 : BPF_FD_TYPE_UPROBE;
1443 *filename = tu->filename;
1444 *probe_offset = tu->offset;
1445 return 0;
1446}
1447#endif /* CONFIG_PERF_EVENTS */
1448
1449static int
1450trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1451 void *data)
1452{
1453 struct trace_event_file *file = data;
1454
1455 switch (type) {
1456 case TRACE_REG_REGISTER:
1457 return probe_event_enable(event, file, NULL);
1458
1459 case TRACE_REG_UNREGISTER:
1460 probe_event_disable(event, file);
1461 return 0;
1462
1463#ifdef CONFIG_PERF_EVENTS
1464 case TRACE_REG_PERF_REGISTER:
1465 return probe_event_enable(event, NULL, uprobe_perf_filter);
1466
1467 case TRACE_REG_PERF_UNREGISTER:
1468 probe_event_disable(event, NULL);
1469 return 0;
1470
1471 case TRACE_REG_PERF_OPEN:
1472 return uprobe_perf_open(event, data);
1473
1474 case TRACE_REG_PERF_CLOSE:
1475 return uprobe_perf_close(event, data);
1476
1477#endif
1478 default:
1479 return 0;
1480 }
1481}
1482
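/*
 * Breakpoint handler, invoked by the uprobes core when a probed
 * instruction is hit.  The dispatch data is parked in current->utask->vaddr
 * so that argument fetching which needs the probe's base address (file
 * offset based arguments) can reach it; the arguments are stored once into
 * the per-cpu buffer and then fed to ftrace and/or perf depending on the
 * probe's flags.  A UPROBE_HANDLER_REMOVE return value (from the perf
 * filter) asks the core to remove the breakpoint from this mm.
 */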
1483static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1484{
1485 struct trace_uprobe *tu;
1486 struct uprobe_dispatch_data udd;
1487 struct uprobe_cpu_buffer *ucb;
1488 int dsize, esize;
1489 int ret = 0;
1490
1491
1492 tu = container_of(con, struct trace_uprobe, consumer);
1493 tu->nhit++;
1494
1495 udd.tu = tu;
1496 udd.bp_addr = instruction_pointer(regs);
1497
1498 current->utask->vaddr = (unsigned long) &udd;
1499
1500 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1501 return 0;
1502
1503 dsize = __get_data_size(&tu->tp, regs);
1504 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1505
1506 ucb = uprobe_buffer_get();
1507 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1508
1509 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1510 ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1511
1512#ifdef CONFIG_PERF_EVENTS
1513 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1514 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1515#endif
1516 uprobe_buffer_put(ucb);
1517 return ret;
1518}
1519
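/*
 * Return-probe counterpart of uprobe_dispatcher(): @func is the entry
 * address of the probed function, while regs already describe the return
 * site.
 */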
1520static int uretprobe_dispatcher(struct uprobe_consumer *con,
1521 unsigned long func, struct pt_regs *regs)
1522{
1523 struct trace_uprobe *tu;
1524 struct uprobe_dispatch_data udd;
1525 struct uprobe_cpu_buffer *ucb;
1526 int dsize, esize;
1527
1528 tu = container_of(con, struct trace_uprobe, consumer);
1529
1530 udd.tu = tu;
1531 udd.bp_addr = func;
1532
1533 current->utask->vaddr = (unsigned long) &udd;
1534
1535 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1536 return 0;
1537
1538 dsize = __get_data_size(&tu->tp, regs);
1539 esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1540
1541 ucb = uprobe_buffer_get();
1542 store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1543
1544 if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1545 uretprobe_trace_func(tu, func, regs, ucb, dsize);
1546
1547#ifdef CONFIG_PERF_EVENTS
1548 if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1549 uretprobe_perf_func(tu, func, regs, ucb, dsize);
1550#endif
1551 uprobe_buffer_put(ucb);
1552 return 0;
1553}
1554
1555static struct trace_event_functions uprobe_funcs = {
1556 .trace = print_uprobe_event
1557};
1558
1559static struct trace_event_fields uprobe_fields_array[] = {
1560 { .type = TRACE_FUNCTION_TYPE,
1561 .define_fields = uprobe_event_define_fields },
1562 {}
1563};
1564
1565static inline void init_trace_event_call(struct trace_uprobe *tu)
1566{
1567 struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1568 call->event.funcs = &uprobe_funcs;
1569 call->class->fields_array = uprobe_fields_array;
1570
1571 call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1572 call->class->reg = trace_uprobe_register;
1573}
1574
1575static int register_uprobe_event(struct trace_uprobe *tu)
1576{
1577 init_trace_event_call(tu);
1578
1579 return trace_probe_register_event_call(&tu->tp);
1580}
1581
1582static int unregister_uprobe_event(struct trace_uprobe *tu)
1583{
1584 return trace_probe_unregister_event_call(&tu->tp);
1585}
1586
1587#ifdef CONFIG_PERF_EVENTS
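/*
 * Create an event call that is neither listed in uprobe_events nor added
 * to dyn_event; used by the perf core for uprobes created directly through
 * perf_event_open().
 */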
1588struct trace_event_call *
1589create_local_trace_uprobe(char *name, unsigned long offs,
1590 unsigned long ref_ctr_offset, bool is_return)
1591{
1592 struct trace_uprobe *tu;
1593 struct path path;
1594 int ret;
1595
1596 ret = kern_path(name, LOOKUP_FOLLOW, &path);
1597 if (ret)
1598 return ERR_PTR(ret);
1599
1600 if (!d_is_reg(path.dentry)) {
1601 path_put(&path);
1602 return ERR_PTR(-EINVAL);
1603 }
1604
1605 /*
1606 * local trace_uprobes are not added to dyn_event, so they are never
1607 * searched in find_probe_event(). Therefore, there is no concern of
1608 * duplicated name "DUMMY_EVENT" here.
1609 */
1610 tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1611 is_return);
1612
1613 if (IS_ERR(tu)) {
1614 pr_info("Failed to allocate trace_uprobe.(%d)\n",
1615 (int)PTR_ERR(tu));
1616 path_put(&path);
1617 return ERR_CAST(tu);
1618 }
1619
1620 tu->offset = offs;
1621 tu->path = path;
1622 tu->ref_ctr_offset = ref_ctr_offset;
1623 tu->filename = kstrdup(name, GFP_KERNEL);
1624 init_trace_event_call(tu);
1625
1626 if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
1627 ret = -ENOMEM;
1628 goto error;
1629 }
1630
1631 return trace_probe_event_call(&tu->tp);
1632error:
1633 free_trace_uprobe(tu);
1634 return ERR_PTR(ret);
1635}
1636
1637void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1638{
1639 struct trace_uprobe *tu;
1640
1641 tu = trace_uprobe_primary_from_call(event_call);
1642
1643 free_trace_uprobe(tu);
1644}
1645#endif /* CONFIG_PERF_EVENTS */
1646
1647/* Make a trace interface for controlling probe points */
1648static __init int init_uprobe_trace(void)
1649{
1650 int ret;
1651
1652 ret = dyn_event_register(&trace_uprobe_ops);
1653 if (ret)
1654 return ret;
1655
1656 ret = tracing_init_dentry();
1657 if (ret)
1658 return 0;
1659
1660 trace_create_file("uprobe_events", 0644, NULL,
1661 NULL, &uprobe_events_ops);
1662 /* Profile interface */
1663 trace_create_file("uprobe_profile", 0444, NULL,
1664 NULL, &uprobe_profile_ops);
1665 return 0;
1666}
1667
1668fs_initcall(init_uprobe_trace);