v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * uprobes-based tracing events
   4 *
   5 * Copyright (C) IBM Corporation, 2010-2012
   6 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
   7 */
   8#define pr_fmt(fmt)	"trace_uprobe: " fmt
   9
  10#include <linux/bpf-cgroup.h>
  11#include <linux/security.h>
  12#include <linux/ctype.h>
  13#include <linux/module.h>
  14#include <linux/uaccess.h>
  15#include <linux/uprobes.h>
  16#include <linux/namei.h>
  17#include <linux/string.h>
  18#include <linux/rculist.h>
  19#include <linux/filter.h>
  20
  21#include "trace_dynevent.h"
  22#include "trace_probe.h"
  23#include "trace_probe_tmpl.h"
  24
  25#define UPROBE_EVENT_SYSTEM	"uprobes"
  26
  27struct uprobe_trace_entry_head {
  28	struct trace_entry	ent;
  29	unsigned long		vaddr[];
  30};
  31
  32#define SIZEOF_TRACE_ENTRY(is_return)			\
  33	(sizeof(struct uprobe_trace_entry_head) +	\
  34	 sizeof(unsigned long) * (is_return ? 2 : 1))
  35
  36#define DATAOF_TRACE_ENTRY(entry, is_return)		\
  37	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
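/*
 * Layout note: behind the common trace_entry header, an entry probe
 * record carries one vaddr slot (the probed instruction pointer) while
 * a return probe carries two (probed function and return site).
 * DATAOF_TRACE_ENTRY() points at the fetched argument data packed
 * immediately after those slots.
 */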
  38
  39static int trace_uprobe_create(const char *raw_command);
  40static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
  41static int trace_uprobe_release(struct dyn_event *ev);
  42static bool trace_uprobe_is_busy(struct dyn_event *ev);
  43static bool trace_uprobe_match(const char *system, const char *event,
  44			int argc, const char **argv, struct dyn_event *ev);
  45
  46static struct dyn_event_operations trace_uprobe_ops = {
  47	.create = trace_uprobe_create,
  48	.show = trace_uprobe_show,
  49	.is_busy = trace_uprobe_is_busy,
  50	.free = trace_uprobe_release,
  51	.match = trace_uprobe_match,
  52};
  53
  54/*
  55 * uprobe event core functions
  56 */
  57struct trace_uprobe {
  58	struct dyn_event		devent;
  59	struct uprobe_consumer		consumer;
  60	struct path			path;
  61	struct inode			*inode;
  62	char				*filename;
  63	unsigned long			offset;
  64	unsigned long			ref_ctr_offset;
  65	unsigned long			nhit;
  66	struct trace_probe		tp;
  67};
  68
  69static bool is_trace_uprobe(struct dyn_event *ev)
  70{
  71	return ev->ops == &trace_uprobe_ops;
  72}
  73
  74static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
  75{
  76	return container_of(ev, struct trace_uprobe, devent);
  77}
  78
  79/**
  80 * for_each_trace_uprobe - iterate over the trace_uprobe list
  81 * @pos:	the struct trace_uprobe * for each entry
  82 * @dpos:	the struct dyn_event * to use as a loop cursor
  83 */
  84#define for_each_trace_uprobe(pos, dpos)	\
  85	for_each_dyn_event(dpos)		\
  86		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
  87
  88static int register_uprobe_event(struct trace_uprobe *tu);
  89static int unregister_uprobe_event(struct trace_uprobe *tu);
  90
  91static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
  92static int uretprobe_dispatcher(struct uprobe_consumer *con,
  93				unsigned long func, struct pt_regs *regs);
  94
  95#ifdef CONFIG_STACK_GROWSUP
  96static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
  97{
  98	return addr - (n * sizeof(long));
  99}
 100#else
 101static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
 102{
 103	return addr + (n * sizeof(long));
 104}
 105#endif
 106
 107static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
 108{
 109	unsigned long ret;
 110	unsigned long addr = user_stack_pointer(regs);
 111
 112	addr = adjust_stack_addr(addr, n);
 113
 114	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
 115		return 0;
 116
 117	return ret;
 118}
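/*
 * For example, on x86-64 (stack grows down) get_user_stack_nth(regs, 2)
 * reads the word at sp + 16, while on a CONFIG_STACK_GROWSUP arch the
 * same call reads sp - 16.  A faulting access simply yields 0.
 */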
 119
 120/*
 121 * Uprobes-specific fetch functions
 122 */
 123static nokprobe_inline int
 124probe_mem_read(void *dest, void *src, size_t size)
 125{
 126	void __user *vaddr = (void __force __user *)src;
 127
 128	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
 129}
 130
 131static nokprobe_inline int
 132probe_mem_read_user(void *dest, void *src, size_t size)
 133{
 134	return probe_mem_read(dest, src, size);
 135}
 136
 137/*
 138 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 139 * length and relative data location.
 140 */
 141static nokprobe_inline int
 142fetch_store_string(unsigned long addr, void *dest, void *base)
 143{
 144	long ret;
 145	u32 loc = *(u32 *)dest;
 146	int maxlen  = get_loc_len(loc);
 147	u8 *dst = get_loc_data(dest, base);
 148	void __user *src = (void __force __user *) addr;
 149
 150	if (unlikely(!maxlen))
 151		return -ENOMEM;
 152
 153	if (addr == FETCH_TOKEN_COMM)
 154		ret = strscpy(dst, current->comm, maxlen);
 155	else
 156		ret = strncpy_from_user(dst, src, maxlen);
 157	if (ret >= 0) {
 158		if (ret == maxlen)
 159			dst[ret - 1] = '\0';
 160		else
 161			/*
 162			 * Include the terminating null byte. In this case it
 163			 * was copied by strncpy_from_user but not accounted
 164			 * for in ret.
 165			 */
 166			ret++;
 167		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
 168	} else
 169		*(u32 *)dest = make_data_loc(0, (void *)dst - base);
 170
 171	return ret;
 172}
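/*
 * The u32 written back above is a "data location" word: the string
 * length is packed into the upper 16 bits and the offset of the string
 * data relative to the entry base into the lower 16 bits (see
 * make_data_loc()).
 */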
 173
 174static nokprobe_inline int
 175fetch_store_string_user(unsigned long addr, void *dest, void *base)
 176{
 177	return fetch_store_string(addr, dest, base);
 178}
 179
  180/* Return the length of the string, including the terminating null byte */
 181static nokprobe_inline int
 182fetch_store_strlen(unsigned long addr)
 183{
 184	int len;
 185	void __user *vaddr = (void __force __user *) addr;
 186
 187	if (addr == FETCH_TOKEN_COMM)
 188		len = strlen(current->comm) + 1;
 189	else
 190		len = strnlen_user(vaddr, MAX_STRING_SIZE);
 191
 192	return (len > MAX_STRING_SIZE) ? 0 : len;
 193}
 194
 195static nokprobe_inline int
 196fetch_store_strlen_user(unsigned long addr)
 197{
 198	return fetch_store_strlen(addr);
 199}
 200
 201static unsigned long translate_user_vaddr(unsigned long file_offset)
 202{
 203	unsigned long base_addr;
 204	struct uprobe_dispatch_data *udd;
 205
 206	udd = (void *) current->utask->vaddr;
 207
 208	base_addr = udd->bp_addr - udd->tu->offset;
 209	return base_addr + file_offset;
 210}
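/*
 * Worked example (illustrative numbers): if the breakpoint for a probe
 * at file offset 0x4710 fired at vaddr 0x7f0000004710, the load base is
 * 0x7f0000000000, so a fetch of file offset 0x5000 translates to vaddr
 * 0x7f0000005000.
 */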
 211
 212/* Note that we don't verify it, since the code does not come from user space */
 213static int
 214process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
 215		   void *base)
 216{
 217	struct pt_regs *regs = rec;
 218	unsigned long val;
 219	int ret;
 220
 221	/* 1st stage: get value from context */
 222	switch (code->op) {
 223	case FETCH_OP_REG:
 224		val = regs_get_register(regs, code->param);
 225		break;
 226	case FETCH_OP_STACK:
 227		val = get_user_stack_nth(regs, code->param);
 228		break;
 229	case FETCH_OP_STACKP:
 230		val = user_stack_pointer(regs);
 231		break;
 232	case FETCH_OP_RETVAL:
 233		val = regs_return_value(regs);
 234		break;
 235	case FETCH_OP_COMM:
 236		val = FETCH_TOKEN_COMM;
 237		break;
 238	case FETCH_OP_FOFFS:
 239		val = translate_user_vaddr(code->immediate);
 240		break;
 241	default:
 242		ret = process_common_fetch_insn(code, &val);
 243		if (ret < 0)
 244			return ret;
 245	}
 246	code++;
 247
 248	return process_fetch_insn_bottom(code, val, dest, base);
 249}
 250NOKPROBE_SYMBOL(process_fetch_insn)
 251
 252static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
 253{
 254	rwlock_init(&filter->rwlock);
 255	filter->nr_systemwide = 0;
 256	INIT_LIST_HEAD(&filter->perf_events);
 257}
 258
 259static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
 260{
 261	return !filter->nr_systemwide && list_empty(&filter->perf_events);
 262}
 263
 264static inline bool is_ret_probe(struct trace_uprobe *tu)
 265{
 266	return tu->consumer.ret_handler != NULL;
 267}
 268
 269static bool trace_uprobe_is_busy(struct dyn_event *ev)
 270{
 271	struct trace_uprobe *tu = to_trace_uprobe(ev);
 272
 273	return trace_probe_is_enabled(&tu->tp);
 274}
 275
 276static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
 277					    int argc, const char **argv)
 278{
 279	char buf[MAX_ARGSTR_LEN + 1];
 280	int len;
 281
 282	if (!argc)
 283		return true;
 284
 285	len = strlen(tu->filename);
 286	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
 287		return false;
 288
 289	if (tu->ref_ctr_offset == 0)
 290		snprintf(buf, sizeof(buf), "0x%0*lx",
 291				(int)(sizeof(void *) * 2), tu->offset);
 292	else
 293		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
 294				(int)(sizeof(void *) * 2), tu->offset,
 295				tu->ref_ctr_offset);
 296	if (strcmp(buf, &argv[0][len + 1]))
 297		return false;
 298
 299	argc--; argv++;
 300
 301	return trace_probe_match_command_args(&tu->tp, argc, argv);
 302}
 303
 304static bool trace_uprobe_match(const char *system, const char *event,
 305			int argc, const char **argv, struct dyn_event *ev)
 306{
 307	struct trace_uprobe *tu = to_trace_uprobe(ev);
 308
 309	return (event[0] == '\0' ||
 310		strcmp(trace_probe_name(&tu->tp), event) == 0) &&
 311	   (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
 312	   trace_uprobe_match_command_head(tu, argc, argv);
 313}
 314
 315static nokprobe_inline struct trace_uprobe *
 316trace_uprobe_primary_from_call(struct trace_event_call *call)
 317{
 318	struct trace_probe *tp;
 319
 320	tp = trace_probe_primary_from_call(call);
 321	if (WARN_ON_ONCE(!tp))
 322		return NULL;
 323
 324	return container_of(tp, struct trace_uprobe, tp);
 325}
 326
 327/*
 328 * Allocate new trace_uprobe and initialize it (including uprobes).
 329 */
 330static struct trace_uprobe *
 331alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 332{
 333	struct trace_uprobe *tu;
 334	int ret;
 335
 336	tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
 337	if (!tu)
 338		return ERR_PTR(-ENOMEM);
 339
 340	ret = trace_probe_init(&tu->tp, event, group, true);
 341	if (ret < 0)
 342		goto error;
 343
 344	dyn_event_init(&tu->devent, &trace_uprobe_ops);
 345	tu->consumer.handler = uprobe_dispatcher;
 346	if (is_ret)
 347		tu->consumer.ret_handler = uretprobe_dispatcher;
 348	init_trace_uprobe_filter(tu->tp.event->filter);
 349	return tu;
 350
 351error:
 352	kfree(tu);
 353
 354	return ERR_PTR(ret);
 355}
 356
 357static void free_trace_uprobe(struct trace_uprobe *tu)
 358{
 359	if (!tu)
 360		return;
 361
 362	path_put(&tu->path);
 363	trace_probe_cleanup(&tu->tp);
 364	kfree(tu->filename);
 365	kfree(tu);
 366}
 367
 368static struct trace_uprobe *find_probe_event(const char *event, const char *group)
 369{
 370	struct dyn_event *pos;
 371	struct trace_uprobe *tu;
 372
 373	for_each_trace_uprobe(tu, pos)
 374		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
 375		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
 376			return tu;
 377
 378	return NULL;
 379}
 380
 381/* Unregister a trace_uprobe and probe_event */
 382static int unregister_trace_uprobe(struct trace_uprobe *tu)
 383{
 384	int ret;
 385
 386	if (trace_probe_has_sibling(&tu->tp))
 387		goto unreg;
 388
 389	/* If there's a reference to the dynamic event */
 390	if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
 391		return -EBUSY;
 392
 393	ret = unregister_uprobe_event(tu);
 394	if (ret)
 395		return ret;
 396
 397unreg:
 398	dyn_event_remove(&tu->devent);
 399	trace_probe_unlink(&tu->tp);
 400	free_trace_uprobe(tu);
 401	return 0;
 402}
 403
 404static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
 405					 struct trace_uprobe *comp)
 406{
 407	struct trace_probe_event *tpe = orig->tp.event;
 408	struct inode *comp_inode = d_real_inode(comp->path.dentry);
 409	int i;
 410
 411	list_for_each_entry(orig, &tpe->probes, tp.list) {
 412		if (comp_inode != d_real_inode(orig->path.dentry) ||
 413		    comp->offset != orig->offset)
 414			continue;
 415
 416		/*
 417		 * trace_probe_compare_arg_type() ensured that nr_args and
 418		 * each argument name and type are same. Let's compare comm.
 419		 */
 420		for (i = 0; i < orig->tp.nr_args; i++) {
 421			if (strcmp(orig->tp.args[i].comm,
 422				   comp->tp.args[i].comm))
 423				break;
 424		}
 425
 426		if (i == orig->tp.nr_args)
 427			return true;
 428	}
 429
 430	return false;
 431}
 432
 433static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
 434{
 435	int ret;
 436
 437	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
 438	if (ret) {
 439		/* Note that argument starts index = 2 */
 440		trace_probe_log_set_index(ret + 1);
 441		trace_probe_log_err(0, DIFF_ARG_TYPE);
 442		return -EEXIST;
 443	}
 444	if (trace_uprobe_has_same_uprobe(to, tu)) {
 445		trace_probe_log_set_index(0);
 446		trace_probe_log_err(0, SAME_PROBE);
 447		return -EEXIST;
 448	}
 449
 450	/* Append to existing event */
 451	ret = trace_probe_append(&tu->tp, &to->tp);
 452	if (!ret)
 453		dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
 454
 455	return ret;
 456}
 457
 458/*
  459 * A uprobe with multiple reference counters is not allowed, i.e.
  460 * if the inode and offset match, the reference counter offset
  461 * *must* match as well. There is one exception, though: if the
  462 * user is replacing an old trace_uprobe with a new one (same
  463 * group/event), then we allow the same uprobe with a new reference
  464 * counter, as long as the new one does not conflict with any other
  465 * existing ones.
 466 */
 467static int validate_ref_ctr_offset(struct trace_uprobe *new)
 468{
 469	struct dyn_event *pos;
 470	struct trace_uprobe *tmp;
 471	struct inode *new_inode = d_real_inode(new->path.dentry);
 472
 473	for_each_trace_uprobe(tmp, pos) {
 474		if (new_inode == d_real_inode(tmp->path.dentry) &&
 475		    new->offset == tmp->offset &&
 476		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
  477			pr_warn("Reference counter offset mismatch.\n");
 478			return -EINVAL;
 479		}
 480	}
 481	return 0;
 482}
 483
 484/* Register a trace_uprobe and probe_event */
 485static int register_trace_uprobe(struct trace_uprobe *tu)
 486{
 487	struct trace_uprobe *old_tu;
 488	int ret;
 489
 490	mutex_lock(&event_mutex);
 491
 492	ret = validate_ref_ctr_offset(tu);
 493	if (ret)
 494		goto end;
 495
 496	/* register as an event */
 497	old_tu = find_probe_event(trace_probe_name(&tu->tp),
 498				  trace_probe_group_name(&tu->tp));
 499	if (old_tu) {
 500		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
 501			trace_probe_log_set_index(0);
 502			trace_probe_log_err(0, DIFF_PROBE_TYPE);
 503			ret = -EEXIST;
 504		} else {
 505			ret = append_trace_uprobe(tu, old_tu);
 506		}
 507		goto end;
 508	}
 509
 510	ret = register_uprobe_event(tu);
 511	if (ret) {
 512		if (ret == -EEXIST) {
 513			trace_probe_log_set_index(0);
 514			trace_probe_log_err(0, EVENT_EXIST);
 515		} else
 516			pr_warn("Failed to register probe event(%d)\n", ret);
 517		goto end;
 518	}
 519
 520	dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
 521
 522end:
 523	mutex_unlock(&event_mutex);
 524
 525	return ret;
 526}
 527
 528/*
 529 * Argument syntax:
 530 *  - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS]
 531 */
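/*
 * For example (illustrative path and offset), writing the following
 * creates an entry probe recording the top-of-stack word and a return
 * probe recording the return value:
 *
 *   p:mygrp/myevent /bin/bash:0x4245c0 arg1=$stack0
 *   r:mygrp/myret /bin/bash:0x4245c0 ret=$retval
 */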
 532static int __trace_uprobe_create(int argc, const char **argv)
 533{
 534	struct trace_uprobe *tu;
 535	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
 536	char *arg, *filename, *rctr, *rctr_end, *tmp;
 537	char buf[MAX_EVENT_NAME_LEN];
 538	char gbuf[MAX_EVENT_NAME_LEN];
 539	enum probe_print_type ptype;
 540	struct path path;
 541	unsigned long offset, ref_ctr_offset;
 542	bool is_return = false;
 543	int i, ret;
 544
 545	ref_ctr_offset = 0;
 546
 547	switch (argv[0][0]) {
 548	case 'r':
 549		is_return = true;
 550		break;
 551	case 'p':
 552		break;
 553	default:
 554		return -ECANCELED;
 555	}
 556
 557	if (argc < 2)
 558		return -ECANCELED;
 559
 560	if (argv[0][1] == ':')
 561		event = &argv[0][2];
 562
 563	if (!strchr(argv[1], '/'))
 564		return -ECANCELED;
 565
 566	filename = kstrdup(argv[1], GFP_KERNEL);
 567	if (!filename)
 568		return -ENOMEM;
 569
 570	/* Find the last occurrence, in case the path contains ':' too. */
 571	arg = strrchr(filename, ':');
 572	if (!arg || !isdigit(arg[1])) {
 573		kfree(filename);
 574		return -ECANCELED;
 575	}
 576
 577	trace_probe_log_init("trace_uprobe", argc, argv);
 578	trace_probe_log_set_index(1);	/* filename is the 2nd argument */
 579
 580	*arg++ = '\0';
 581	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
 582	if (ret) {
 583		trace_probe_log_err(0, FILE_NOT_FOUND);
 584		kfree(filename);
 585		trace_probe_log_clear();
 586		return ret;
 587	}
 588	if (!d_is_reg(path.dentry)) {
 589		trace_probe_log_err(0, NO_REGULAR_FILE);
 590		ret = -EINVAL;
 591		goto fail_address_parse;
 592	}
 593
 594	/* Parse reference counter offset if specified. */
 595	rctr = strchr(arg, '(');
 596	if (rctr) {
 597		rctr_end = strchr(rctr, ')');
 598		if (!rctr_end) {
 599			ret = -EINVAL;
 600			rctr_end = rctr + strlen(rctr);
 601			trace_probe_log_err(rctr_end - filename,
 602					    REFCNT_OPEN_BRACE);
 603			goto fail_address_parse;
 604		} else if (rctr_end[1] != '\0') {
 605			ret = -EINVAL;
 606			trace_probe_log_err(rctr_end + 1 - filename,
 607					    BAD_REFCNT_SUFFIX);
 608			goto fail_address_parse;
 609		}
 610
 611		*rctr++ = '\0';
 612		*rctr_end = '\0';
 613		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
 614		if (ret) {
 615			trace_probe_log_err(rctr - filename, BAD_REFCNT);
 616			goto fail_address_parse;
 617		}
 618	}
 619
 620	/* Check if there is %return suffix */
 621	tmp = strchr(arg, '%');
 622	if (tmp) {
 623		if (!strcmp(tmp, "%return")) {
 624			*tmp = '\0';
 625			is_return = true;
 626		} else {
 627			trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
 628			ret = -EINVAL;
 629			goto fail_address_parse;
 630		}
 631	}
 632
 633	/* Parse uprobe offset. */
 634	ret = kstrtoul(arg, 0, &offset);
 635	if (ret) {
 636		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
 637		goto fail_address_parse;
 638	}
 639
 640	/* setup a probe */
 641	trace_probe_log_set_index(0);
 642	if (event) {
 643		ret = traceprobe_parse_event_name(&event, &group, gbuf,
 644						  event - argv[0]);
 645		if (ret)
 646			goto fail_address_parse;
 647	}
 648
 649	if (!event) {
 650		char *tail;
 651		char *ptr;
 652
 653		tail = kstrdup(kbasename(filename), GFP_KERNEL);
 654		if (!tail) {
 655			ret = -ENOMEM;
 656			goto fail_address_parse;
 657		}
 658
 659		ptr = strpbrk(tail, ".-_");
 660		if (ptr)
 661			*ptr = '\0';
 662
 663		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
 664		event = buf;
 665		kfree(tail);
 666	}
 667
 668	argc -= 2;
 669	argv += 2;
 670
 671	tu = alloc_trace_uprobe(group, event, argc, is_return);
 672	if (IS_ERR(tu)) {
 673		ret = PTR_ERR(tu);
 674		/* This must return -ENOMEM otherwise there is a bug */
 675		WARN_ON_ONCE(ret != -ENOMEM);
 676		goto fail_address_parse;
 677	}
 678	tu->offset = offset;
 679	tu->ref_ctr_offset = ref_ctr_offset;
 680	tu->path = path;
 681	tu->filename = filename;
 682
 683	/* parse arguments */
 684	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
 685		struct traceprobe_parse_context ctx = {
 686			.flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER,
 687		};
 688
 689		trace_probe_log_set_index(i + 2);
 690		ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], &ctx);
 691		traceprobe_finish_parse(&ctx);
 692		if (ret)
 693			goto error;
 694	}
 695
 696	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
 697	ret = traceprobe_set_print_fmt(&tu->tp, ptype);
 698	if (ret < 0)
 699		goto error;
 700
 701	ret = register_trace_uprobe(tu);
 702	if (!ret)
 703		goto out;
 704
 705error:
 706	free_trace_uprobe(tu);
 707out:
 708	trace_probe_log_clear();
 709	return ret;
 710
 711fail_address_parse:
 712	trace_probe_log_clear();
 713	path_put(&path);
 714	kfree(filename);
 715
 716	return ret;
 717}
 718
 719int trace_uprobe_create(const char *raw_command)
 720{
 721	return trace_probe_create(raw_command, __trace_uprobe_create);
 722}
 723
 724static int create_or_delete_trace_uprobe(const char *raw_command)
 725{
 726	int ret;
 727
 728	if (raw_command[0] == '-')
 729		return dyn_event_release(raw_command, &trace_uprobe_ops);
 730
 731	ret = trace_uprobe_create(raw_command);
 732	return ret == -ECANCELED ? -EINVAL : ret;
 733}
 734
 735static int trace_uprobe_release(struct dyn_event *ev)
 736{
 737	struct trace_uprobe *tu = to_trace_uprobe(ev);
 738
 739	return unregister_trace_uprobe(tu);
 740}
 741
 742/* Probes listing interfaces */
 743static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
 744{
 745	struct trace_uprobe *tu = to_trace_uprobe(ev);
 746	char c = is_ret_probe(tu) ? 'r' : 'p';
 747	int i;
 748
 749	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
 750			trace_probe_name(&tu->tp), tu->filename,
 751			(int)(sizeof(void *) * 2), tu->offset);
 752
 753	if (tu->ref_ctr_offset)
 754		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
 755
 756	for (i = 0; i < tu->tp.nr_args; i++)
 757		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
 758
 759	seq_putc(m, '\n');
 760	return 0;
 761}
 762
 763static int probes_seq_show(struct seq_file *m, void *v)
 764{
 765	struct dyn_event *ev = v;
 766
 767	if (!is_trace_uprobe(ev))
 768		return 0;
 769
 770	return trace_uprobe_show(m, ev);
 771}
 772
 773static const struct seq_operations probes_seq_op = {
 774	.start  = dyn_event_seq_start,
 775	.next   = dyn_event_seq_next,
 776	.stop   = dyn_event_seq_stop,
 777	.show   = probes_seq_show
 778};
 779
 780static int probes_open(struct inode *inode, struct file *file)
 781{
 782	int ret;
 783
 784	ret = security_locked_down(LOCKDOWN_TRACEFS);
 785	if (ret)
 786		return ret;
 787
 788	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 789		ret = dyn_events_release_all(&trace_uprobe_ops);
 790		if (ret)
 791			return ret;
 792	}
 793
 794	return seq_open(file, &probes_seq_op);
 795}
 796
 797static ssize_t probes_write(struct file *file, const char __user *buffer,
 798			    size_t count, loff_t *ppos)
 799{
 800	return trace_parse_run_command(file, buffer, count, ppos,
 801					create_or_delete_trace_uprobe);
 802}
 803
 804static const struct file_operations uprobe_events_ops = {
 805	.owner		= THIS_MODULE,
 806	.open		= probes_open,
 807	.read		= seq_read,
 808	.llseek		= seq_lseek,
 809	.release	= seq_release,
 810	.write		= probes_write,
 811};
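/*
 * Typical shell use of this interface (illustrative offset):
 *
 *   echo 'p:myprobe /bin/bash:0x4245c0' >> /sys/kernel/tracing/uprobe_events
 *   echo 1 > /sys/kernel/tracing/events/uprobes/myprobe/enable
 *   cat /sys/kernel/tracing/trace
 *
 * Opening the file O_TRUNC for writing ('>' instead of '>>') releases
 * all existing uprobe events, as probes_open() implements above.
 */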
 812
 813/* Probes profiling interfaces */
 814static int probes_profile_seq_show(struct seq_file *m, void *v)
 815{
 816	struct dyn_event *ev = v;
 817	struct trace_uprobe *tu;
 818
 819	if (!is_trace_uprobe(ev))
 820		return 0;
 821
 822	tu = to_trace_uprobe(ev);
 823	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
 824			trace_probe_name(&tu->tp), tu->nhit);
 825	return 0;
 826}
 827
 828static const struct seq_operations profile_seq_op = {
 829	.start  = dyn_event_seq_start,
 830	.next   = dyn_event_seq_next,
 831	.stop   = dyn_event_seq_stop,
 832	.show	= probes_profile_seq_show
 833};
 834
 835static int profile_open(struct inode *inode, struct file *file)
 836{
 837	int ret;
 838
 839	ret = security_locked_down(LOCKDOWN_TRACEFS);
 840	if (ret)
 841		return ret;
 842
 843	return seq_open(file, &profile_seq_op);
 844}
 845
 846static const struct file_operations uprobe_profile_ops = {
 847	.owner		= THIS_MODULE,
 848	.open		= profile_open,
 849	.read		= seq_read,
 850	.llseek		= seq_lseek,
 851	.release	= seq_release,
 852};
 853
 854struct uprobe_cpu_buffer {
 855	struct mutex mutex;
 856	void *buf;
 857};
 858static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
 859static int uprobe_buffer_refcnt;
 860
 861static int uprobe_buffer_init(void)
 862{
 863	int cpu, err_cpu;
 864
 865	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
 866	if (uprobe_cpu_buffer == NULL)
 867		return -ENOMEM;
 868
 869	for_each_possible_cpu(cpu) {
 870		struct page *p = alloc_pages_node(cpu_to_node(cpu),
 871						  GFP_KERNEL, 0);
 872		if (p == NULL) {
 873			err_cpu = cpu;
 874			goto err;
 875		}
 876		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
 877		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
 878	}
 879
 880	return 0;
 881
 882err:
 883	for_each_possible_cpu(cpu) {
 884		if (cpu == err_cpu)
 885			break;
 886		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
 887	}
 888
 889	free_percpu(uprobe_cpu_buffer);
 890	return -ENOMEM;
 891}
 892
 893static int uprobe_buffer_enable(void)
 894{
 895	int ret = 0;
 896
 897	BUG_ON(!mutex_is_locked(&event_mutex));
 898
 899	if (uprobe_buffer_refcnt++ == 0) {
 900		ret = uprobe_buffer_init();
 901		if (ret < 0)
 902			uprobe_buffer_refcnt--;
 903	}
 904
 905	return ret;
 906}
 907
 908static void uprobe_buffer_disable(void)
 909{
 910	int cpu;
 911
 912	BUG_ON(!mutex_is_locked(&event_mutex));
 913
 914	if (--uprobe_buffer_refcnt == 0) {
 915		for_each_possible_cpu(cpu)
 916			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
 917							     cpu)->buf);
 918
 919		free_percpu(uprobe_cpu_buffer);
 920		uprobe_cpu_buffer = NULL;
 921	}
 922}
 923
 924static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
 925{
 926	struct uprobe_cpu_buffer *ucb;
 927	int cpu;
 928
 929	cpu = raw_smp_processor_id();
 930	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
 931
 932	/*
  933	 * Use per-cpu buffers for fastest access, but the task might migrate
  934	 * to another CPU, so the mutex makes sure we have sole access to it.
 935	 */
 936	mutex_lock(&ucb->mutex);
 937
 938	return ucb;
 939}
 940
 941static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
 942{
 943	mutex_unlock(&ucb->mutex);
 944}
 945
 946static void __uprobe_trace_func(struct trace_uprobe *tu,
 947				unsigned long func, struct pt_regs *regs,
 948				struct uprobe_cpu_buffer *ucb, int dsize,
 949				struct trace_event_file *trace_file)
 950{
 951	struct uprobe_trace_entry_head *entry;
 952	struct trace_event_buffer fbuffer;
 953	void *data;
 954	int size, esize;
 955	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
 956
 957	WARN_ON(call != trace_file->event_call);
 958
 959	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
 960		return;
 961
 962	if (trace_trigger_soft_disabled(trace_file))
 963		return;
 964
 965	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 966	size = esize + tu->tp.size + dsize;
 967	entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
 968	if (!entry)
 969		return;
 970
 971	if (is_ret_probe(tu)) {
 972		entry->vaddr[0] = func;
 973		entry->vaddr[1] = instruction_pointer(regs);
 974		data = DATAOF_TRACE_ENTRY(entry, true);
 975	} else {
 976		entry->vaddr[0] = instruction_pointer(regs);
 977		data = DATAOF_TRACE_ENTRY(entry, false);
 978	}
 979
 980	memcpy(data, ucb->buf, tu->tp.size + dsize);
 981
 982	trace_event_buffer_commit(&fbuffer);
 983}
 984
 985/* uprobe handler */
 986static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
 987			     struct uprobe_cpu_buffer *ucb, int dsize)
 988{
 989	struct event_file_link *link;
 990
 991	if (is_ret_probe(tu))
 992		return 0;
 993
 994	rcu_read_lock();
 995	trace_probe_for_each_link_rcu(link, &tu->tp)
 996		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
 997	rcu_read_unlock();
 998
 999	return 0;
1000}
1001
1002static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
1003				 struct pt_regs *regs,
1004				 struct uprobe_cpu_buffer *ucb, int dsize)
1005{
1006	struct event_file_link *link;
1007
1008	rcu_read_lock();
1009	trace_probe_for_each_link_rcu(link, &tu->tp)
1010		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
1011	rcu_read_unlock();
1012}
1013
1014/* Event entry printers */
1015static enum print_line_t
1016print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1017{
1018	struct uprobe_trace_entry_head *entry;
1019	struct trace_seq *s = &iter->seq;
1020	struct trace_uprobe *tu;
1021	u8 *data;
1022
1023	entry = (struct uprobe_trace_entry_head *)iter->ent;
1024	tu = trace_uprobe_primary_from_call(
1025		container_of(event, struct trace_event_call, event));
1026	if (unlikely(!tu))
1027		goto out;
1028
1029	if (is_ret_probe(tu)) {
1030		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1031				 trace_probe_name(&tu->tp),
1032				 entry->vaddr[1], entry->vaddr[0]);
1033		data = DATAOF_TRACE_ENTRY(entry, true);
1034	} else {
1035		trace_seq_printf(s, "%s: (0x%lx)",
1036				 trace_probe_name(&tu->tp),
1037				 entry->vaddr[0]);
1038		data = DATAOF_TRACE_ENTRY(entry, false);
1039	}
1040
1041	if (trace_probe_print_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1042		goto out;
1043
1044	trace_seq_putc(s, '\n');
1045
1046 out:
1047	return trace_handle_return(s);
1048}
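/*
 * Sample output produced above (illustrative names and addresses):
 *
 *   entry probe:   myevent: (0x4245c0) arg1=0x1
 *   return probe:  myret: (0x401234 <- 0x4245c0) ret=0x0
 */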
1049
1050typedef bool (*filter_func_t)(struct uprobe_consumer *self,
1051				enum uprobe_filter_ctx ctx,
1052				struct mm_struct *mm);
1053
1054static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1055{
1056	int ret;
1057
1058	tu->consumer.filter = filter;
1059	tu->inode = d_real_inode(tu->path.dentry);
1060
1061	if (tu->ref_ctr_offset)
1062		ret = uprobe_register_refctr(tu->inode, tu->offset,
1063				tu->ref_ctr_offset, &tu->consumer);
1064	else
1065		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
1066
1067	if (ret)
1068		tu->inode = NULL;
1069
1070	return ret;
1071}
1072
1073static void __probe_event_disable(struct trace_probe *tp)
1074{
1075	struct trace_uprobe *tu;
1076
1077	tu = container_of(tp, struct trace_uprobe, tp);
1078	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1079
1080	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1081		if (!tu->inode)
1082			continue;
1083
1084		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
1085		tu->inode = NULL;
1086	}
1087}
1088
1089static int probe_event_enable(struct trace_event_call *call,
1090			struct trace_event_file *file, filter_func_t filter)
1091{
1092	struct trace_probe *tp;
1093	struct trace_uprobe *tu;
1094	bool enabled;
1095	int ret;
1096
1097	tp = trace_probe_primary_from_call(call);
1098	if (WARN_ON_ONCE(!tp))
1099		return -ENODEV;
1100	enabled = trace_probe_is_enabled(tp);
1101
1102	/* This may also change "enabled" state */
1103	if (file) {
1104		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1105			return -EINTR;
1106
1107		ret = trace_probe_add_file(tp, file);
1108		if (ret < 0)
1109			return ret;
1110	} else {
1111		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1112			return -EINTR;
1113
1114		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1115	}
1116
1117	tu = container_of(tp, struct trace_uprobe, tp);
1118	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1119
1120	if (enabled)
1121		return 0;
1122
1123	ret = uprobe_buffer_enable();
1124	if (ret)
1125		goto err_flags;
1126
1127	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1128		ret = trace_uprobe_enable(tu, filter);
1129		if (ret) {
1130			__probe_event_disable(tp);
1131			goto err_buffer;
1132		}
1133	}
1134
1135	return 0;
1136
1137 err_buffer:
1138	uprobe_buffer_disable();
1139
1140 err_flags:
1141	if (file)
1142		trace_probe_remove_file(tp, file);
1143	else
1144		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1145
1146	return ret;
1147}
1148
1149static void probe_event_disable(struct trace_event_call *call,
1150				struct trace_event_file *file)
1151{
1152	struct trace_probe *tp;
1153
1154	tp = trace_probe_primary_from_call(call);
1155	if (WARN_ON_ONCE(!tp))
1156		return;
1157
1158	if (!trace_probe_is_enabled(tp))
1159		return;
1160
1161	if (file) {
1162		if (trace_probe_remove_file(tp, file) < 0)
1163			return;
1164
1165		if (trace_probe_is_enabled(tp))
1166			return;
1167	} else
1168		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1169
1170	__probe_event_disable(tp);
1171	uprobe_buffer_disable();
1172}
1173
1174static int uprobe_event_define_fields(struct trace_event_call *event_call)
1175{
1176	int ret, size;
1177	struct uprobe_trace_entry_head field;
1178	struct trace_uprobe *tu;
1179
1180	tu = trace_uprobe_primary_from_call(event_call);
1181	if (unlikely(!tu))
1182		return -ENODEV;
1183
1184	if (is_ret_probe(tu)) {
1185		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1186		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1187		size = SIZEOF_TRACE_ENTRY(true);
1188	} else {
1189		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1190		size = SIZEOF_TRACE_ENTRY(false);
1191	}
1192
1193	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1194}
1195
1196#ifdef CONFIG_PERF_EVENTS
1197static bool
1198__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1199{
1200	struct perf_event *event;
1201
1202	if (filter->nr_systemwide)
1203		return true;
1204
1205	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1206		if (event->hw.target->mm == mm)
1207			return true;
1208	}
1209
1210	return false;
1211}
1212
1213static inline bool
1214trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1215			  struct perf_event *event)
1216{
1217	return __uprobe_perf_filter(filter, event->hw.target->mm);
1218}
1219
1220static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1221				       struct perf_event *event)
1222{
1223	bool done;
1224
1225	write_lock(&filter->rwlock);
1226	if (event->hw.target) {
1227		list_del(&event->hw.tp_list);
1228		done = filter->nr_systemwide ||
1229			(event->hw.target->flags & PF_EXITING) ||
1230			trace_uprobe_filter_event(filter, event);
1231	} else {
1232		filter->nr_systemwide--;
1233		done = filter->nr_systemwide;
1234	}
1235	write_unlock(&filter->rwlock);
1236
1237	return done;
1238}
1239
1240/* This returns true if the filter always covers target mm */
1241static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1242				    struct perf_event *event)
1243{
1244	bool done;
1245
1246	write_lock(&filter->rwlock);
1247	if (event->hw.target) {
1248		/*
1249		 * event->parent != NULL means copy_process(), we can avoid
1250		 * uprobe_apply(). current->mm must be probed and we can rely
1251		 * on dup_mmap() which preserves the already installed bp's.
1252		 *
1253		 * attr.enable_on_exec means that exec/mmap will install the
1254		 * breakpoints we need.
1255		 */
1256		done = filter->nr_systemwide ||
1257			event->parent || event->attr.enable_on_exec ||
1258			trace_uprobe_filter_event(filter, event);
1259		list_add(&event->hw.tp_list, &filter->perf_events);
1260	} else {
1261		done = filter->nr_systemwide;
1262		filter->nr_systemwide++;
1263	}
1264	write_unlock(&filter->rwlock);
1265
1266	return done;
1267}
1268
1269static int uprobe_perf_close(struct trace_event_call *call,
1270			     struct perf_event *event)
1271{
1272	struct trace_probe *tp;
1273	struct trace_uprobe *tu;
1274	int ret = 0;
1275
1276	tp = trace_probe_primary_from_call(call);
1277	if (WARN_ON_ONCE(!tp))
1278		return -ENODEV;
1279
1280	tu = container_of(tp, struct trace_uprobe, tp);
1281	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
1282		return 0;
1283
1284	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1285		ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1286		if (ret)
1287			break;
1288	}
1289
1290	return ret;
1291}
1292
1293static int uprobe_perf_open(struct trace_event_call *call,
1294			    struct perf_event *event)
1295{
1296	struct trace_probe *tp;
1297	struct trace_uprobe *tu;
1298	int err = 0;
1299
1300	tp = trace_probe_primary_from_call(call);
1301	if (WARN_ON_ONCE(!tp))
1302		return -ENODEV;
1303
1304	tu = container_of(tp, struct trace_uprobe, tp);
1305	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
1306		return 0;
1307
1308	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1309		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1310		if (err) {
1311			uprobe_perf_close(call, event);
1312			break;
1313		}
1314	}
1315
1316	return err;
1317}
1318
1319static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1320				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1321{
1322	struct trace_uprobe_filter *filter;
1323	struct trace_uprobe *tu;
1324	int ret;
1325
1326	tu = container_of(uc, struct trace_uprobe, consumer);
1327	filter = tu->tp.event->filter;
1328
1329	read_lock(&filter->rwlock);
1330	ret = __uprobe_perf_filter(filter, mm);
1331	read_unlock(&filter->rwlock);
1332
1333	return ret;
1334}
1335
1336static void __uprobe_perf_func(struct trace_uprobe *tu,
1337			       unsigned long func, struct pt_regs *regs,
1338			       struct uprobe_cpu_buffer *ucb, int dsize)
1339{
1340	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1341	struct uprobe_trace_entry_head *entry;
1342	struct hlist_head *head;
1343	void *data;
1344	int size, esize;
1345	int rctx;
1346
1347#ifdef CONFIG_BPF_EVENTS
1348	if (bpf_prog_array_valid(call)) {
1349		u32 ret;
1350
1351		ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run);
1352		if (!ret)
1353			return;
1354	}
1355#endif /* CONFIG_BPF_EVENTS */
1356
1357	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1358
1359	size = esize + tu->tp.size + dsize;
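	/*
	 * Round up so that this record plus the u32 size header that perf
	 * prepends stays u64-aligned: e.g. a 30-byte payload becomes 36
	 * bytes here (ALIGN(34, 8) - 4).
	 */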
1360	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1361	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1362		return;
1363
1364	preempt_disable();
1365	head = this_cpu_ptr(call->perf_events);
1366	if (hlist_empty(head))
1367		goto out;
1368
1369	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1370	if (!entry)
1371		goto out;
1372
1373	if (is_ret_probe(tu)) {
1374		entry->vaddr[0] = func;
1375		entry->vaddr[1] = instruction_pointer(regs);
1376		data = DATAOF_TRACE_ENTRY(entry, true);
1377	} else {
1378		entry->vaddr[0] = instruction_pointer(regs);
1379		data = DATAOF_TRACE_ENTRY(entry, false);
1380	}
1381
1382	memcpy(data, ucb->buf, tu->tp.size + dsize);
1383
1384	if (size - esize > tu->tp.size + dsize) {
1385		int len = tu->tp.size + dsize;
1386
1387		memset(data + len, 0, size - esize - len);
1388	}
1389
1390	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1391			      head, NULL);
1392 out:
1393	preempt_enable();
1394}
1395
1396/* uprobe profile handler */
1397static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1398			    struct uprobe_cpu_buffer *ucb, int dsize)
1399{
1400	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1401		return UPROBE_HANDLER_REMOVE;
1402
1403	if (!is_ret_probe(tu))
1404		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
1405	return 0;
1406}
1407
1408static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1409				struct pt_regs *regs,
1410				struct uprobe_cpu_buffer *ucb, int dsize)
1411{
1412	__uprobe_perf_func(tu, func, regs, ucb, dsize);
1413}
1414
1415int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1416			const char **filename, u64 *probe_offset,
1417			u64 *probe_addr, bool perf_type_tracepoint)
1418{
1419	const char *pevent = trace_event_name(event->tp_event);
1420	const char *group = event->tp_event->class->system;
1421	struct trace_uprobe *tu;
1422
1423	if (perf_type_tracepoint)
1424		tu = find_probe_event(pevent, group);
1425	else
1426		tu = trace_uprobe_primary_from_call(event->tp_event);
1427	if (!tu)
1428		return -EINVAL;
1429
1430	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1431				    : BPF_FD_TYPE_UPROBE;
1432	*filename = tu->filename;
1433	*probe_offset = tu->offset;
1434	*probe_addr = 0;
1435	return 0;
1436}
1437#endif	/* CONFIG_PERF_EVENTS */
1438
1439static int
1440trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1441		      void *data)
1442{
1443	struct trace_event_file *file = data;
1444
1445	switch (type) {
1446	case TRACE_REG_REGISTER:
1447		return probe_event_enable(event, file, NULL);
1448
1449	case TRACE_REG_UNREGISTER:
1450		probe_event_disable(event, file);
1451		return 0;
1452
1453#ifdef CONFIG_PERF_EVENTS
1454	case TRACE_REG_PERF_REGISTER:
1455		return probe_event_enable(event, NULL, uprobe_perf_filter);
1456
1457	case TRACE_REG_PERF_UNREGISTER:
1458		probe_event_disable(event, NULL);
1459		return 0;
1460
1461	case TRACE_REG_PERF_OPEN:
1462		return uprobe_perf_open(event, data);
1463
1464	case TRACE_REG_PERF_CLOSE:
1465		return uprobe_perf_close(event, data);
1466
1467#endif
1468	default:
1469		return 0;
1470	}
1471}
1472
1473static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1474{
1475	struct trace_uprobe *tu;
1476	struct uprobe_dispatch_data udd;
1477	struct uprobe_cpu_buffer *ucb;
1478	int dsize, esize;
1479	int ret = 0;
1480
1481
1482	tu = container_of(con, struct trace_uprobe, consumer);
1483	tu->nhit++;
1484
1485	udd.tu = tu;
1486	udd.bp_addr = instruction_pointer(regs);
1487
1488	current->utask->vaddr = (unsigned long) &udd;
1489
1490	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1491		return 0;
1492
1493	dsize = __get_data_size(&tu->tp, regs);
1494	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1495
1496	ucb = uprobe_buffer_get();
1497	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1498
1499	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1500		ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1501
1502#ifdef CONFIG_PERF_EVENTS
1503	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1504		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1505#endif
1506	uprobe_buffer_put(ucb);
1507	return ret;
1508}
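/*
 * The OR-combined return value matters: a UPROBE_HANDLER_REMOVE result
 * from the perf path asks the uprobe core to remove the breakpoint from
 * the current task's mm (honored when all consumers agree).
 */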
1509
1510static int uretprobe_dispatcher(struct uprobe_consumer *con,
1511				unsigned long func, struct pt_regs *regs)
1512{
1513	struct trace_uprobe *tu;
1514	struct uprobe_dispatch_data udd;
1515	struct uprobe_cpu_buffer *ucb;
1516	int dsize, esize;
1517
1518	tu = container_of(con, struct trace_uprobe, consumer);
1519
1520	udd.tu = tu;
1521	udd.bp_addr = func;
1522
1523	current->utask->vaddr = (unsigned long) &udd;
1524
1525	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1526		return 0;
1527
1528	dsize = __get_data_size(&tu->tp, regs);
1529	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1530
1531	ucb = uprobe_buffer_get();
1532	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1533
1534	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1535		uretprobe_trace_func(tu, func, regs, ucb, dsize);
1536
1537#ifdef CONFIG_PERF_EVENTS
1538	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1539		uretprobe_perf_func(tu, func, regs, ucb, dsize);
1540#endif
1541	uprobe_buffer_put(ucb);
1542	return 0;
1543}
1544
1545static struct trace_event_functions uprobe_funcs = {
1546	.trace		= print_uprobe_event
1547};
1548
1549static struct trace_event_fields uprobe_fields_array[] = {
1550	{ .type = TRACE_FUNCTION_TYPE,
1551	  .define_fields = uprobe_event_define_fields },
1552	{}
1553};
1554
1555static inline void init_trace_event_call(struct trace_uprobe *tu)
1556{
1557	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1558	call->event.funcs = &uprobe_funcs;
1559	call->class->fields_array = uprobe_fields_array;
1560
1561	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1562	call->class->reg = trace_uprobe_register;
1563}
1564
1565static int register_uprobe_event(struct trace_uprobe *tu)
1566{
1567	init_trace_event_call(tu);
1568
1569	return trace_probe_register_event_call(&tu->tp);
1570}
1571
1572static int unregister_uprobe_event(struct trace_uprobe *tu)
1573{
1574	return trace_probe_unregister_event_call(&tu->tp);
1575}
1576
1577#ifdef CONFIG_PERF_EVENTS
1578struct trace_event_call *
1579create_local_trace_uprobe(char *name, unsigned long offs,
1580			  unsigned long ref_ctr_offset, bool is_return)
1581{
1582	enum probe_print_type ptype;
1583	struct trace_uprobe *tu;
1584	struct path path;
1585	int ret;
1586
1587	ret = kern_path(name, LOOKUP_FOLLOW, &path);
1588	if (ret)
1589		return ERR_PTR(ret);
1590
1591	if (!d_is_reg(path.dentry)) {
1592		path_put(&path);
1593		return ERR_PTR(-EINVAL);
1594	}
1595
1596	/*
 1597	 * Local trace_uprobes are not added to dyn_event, so they are never
 1598	 * found by find_probe_event(). Therefore, there is no concern about
 1599	 * the duplicated name "DUMMY_EVENT" here.
1600	 */
1601	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1602				is_return);
1603
1604	if (IS_ERR(tu)) {
1605		pr_info("Failed to allocate trace_uprobe.(%d)\n",
1606			(int)PTR_ERR(tu));
1607		path_put(&path);
1608		return ERR_CAST(tu);
1609	}
1610
1611	tu->offset = offs;
1612	tu->path = path;
1613	tu->ref_ctr_offset = ref_ctr_offset;
1614	tu->filename = kstrdup(name, GFP_KERNEL);
1615	if (!tu->filename) {
1616		ret = -ENOMEM;
1617		goto error;
1618	}
1619
1620	init_trace_event_call(tu);
1621
1622	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1623	if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
1624		ret = -ENOMEM;
1625		goto error;
1626	}
1627
1628	return trace_probe_event_call(&tu->tp);
1629error:
1630	free_trace_uprobe(tu);
1631	return ERR_PTR(ret);
1632}
1633
1634void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1635{
1636	struct trace_uprobe *tu;
1637
1638	tu = trace_uprobe_primary_from_call(event_call);
1639
1640	free_trace_uprobe(tu);
1641}
1642#endif /* CONFIG_PERF_EVENTS */
1643
1644/* Make a trace interface for controlling probe points */
1645static __init int init_uprobe_trace(void)
1646{
1647	int ret;
1648
1649	ret = dyn_event_register(&trace_uprobe_ops);
1650	if (ret)
1651		return ret;
1652
1653	ret = tracing_init_dentry();
1654	if (ret)
1655		return 0;
1656
1657	trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
1658				    NULL, &uprobe_events_ops);
1659	/* Profile interface */
1660	trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
1661				    NULL, &uprobe_profile_ops);
1662	return 0;
1663}
1664
1665fs_initcall(init_uprobe_trace);
v4.17
   1/*
   2 * uprobes-based tracing events
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License version 2 as
   6 * published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  11 * GNU General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public License
  14 * along with this program; if not, write to the Free Software
  15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  16 *
  17 * Copyright (C) IBM Corporation, 2010-2012
  18 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
  19 */
   20#define pr_fmt(fmt)	"trace_uprobe: " fmt
  21
  22#include <linux/module.h>
  23#include <linux/uaccess.h>
  24#include <linux/uprobes.h>
  25#include <linux/namei.h>
  26#include <linux/string.h>
  27#include <linux/rculist.h>
  28
  29#include "trace_probe.h"
  30
  31#define UPROBE_EVENT_SYSTEM	"uprobes"
  32
  33struct uprobe_trace_entry_head {
  34	struct trace_entry	ent;
  35	unsigned long		vaddr[];
  36};
  37
  38#define SIZEOF_TRACE_ENTRY(is_return)			\
  39	(sizeof(struct uprobe_trace_entry_head) +	\
  40	 sizeof(unsigned long) * (is_return ? 2 : 1))
  41
  42#define DATAOF_TRACE_ENTRY(entry, is_return)		\
  43	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
  44
  45struct trace_uprobe_filter {
  46	rwlock_t		rwlock;
  47	int			nr_systemwide;
  48	struct list_head	perf_events;
  49};
  50
  51/*
  52 * uprobe event core functions
  53 */
  54struct trace_uprobe {
  55	struct list_head		list;
  56	struct trace_uprobe_filter	filter;
  57	struct uprobe_consumer		consumer;
  58	struct path			path;
  59	struct inode			*inode;
  60	char				*filename;
  61	unsigned long			offset;
  62	unsigned long			nhit;
  63	struct trace_probe		tp;
  64};
  65
  66#define SIZEOF_TRACE_UPROBE(n)				\
  67	(offsetof(struct trace_uprobe, tp.args) +	\
  68	(sizeof(struct probe_arg) * (n)))
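/*
 * E.g. SIZEOF_TRACE_UPROBE(2) sizes a trace_uprobe whose trailing
 * tp.args[] flexible array holds two probe_arg entries; the v6.8 code
 * above computes the same thing with struct_size().
 */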
  69
  70static int register_uprobe_event(struct trace_uprobe *tu);
  71static int unregister_uprobe_event(struct trace_uprobe *tu);
  72
  73static DEFINE_MUTEX(uprobe_lock);
  74static LIST_HEAD(uprobe_list);
  75
  76struct uprobe_dispatch_data {
  77	struct trace_uprobe	*tu;
  78	unsigned long		bp_addr;
  79};
  80
  81static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
  82static int uretprobe_dispatcher(struct uprobe_consumer *con,
  83				unsigned long func, struct pt_regs *regs);
  84
  85#ifdef CONFIG_STACK_GROWSUP
  86static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
  87{
  88	return addr - (n * sizeof(long));
  89}
  90#else
  91static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
  92{
  93	return addr + (n * sizeof(long));
  94}
  95#endif
  96
  97static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
  98{
  99	unsigned long ret;
 100	unsigned long addr = user_stack_pointer(regs);
 101
 102	addr = adjust_stack_addr(addr, n);
 103
 104	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
 105		return 0;
 106
 107	return ret;
 108}
 109
 110/*
 111 * Uprobes-specific fetch functions
 112 */
 113#define DEFINE_FETCH_stack(type)					\
 114static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
 115					 void *offset, void *dest)	\
 116{									\
 117	*(type *)dest = (type)get_user_stack_nth(regs,			\
 118					      ((unsigned long)offset)); \
 119}
 120DEFINE_BASIC_FETCH_FUNCS(stack)
 121/* No string on the stack entry */
 122#define fetch_stack_string	NULL
 123#define fetch_stack_string_size	NULL
 124
 125#define DEFINE_FETCH_memory(type)					\
 126static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,		\
 127					  void *addr, void *dest)	\
 128{									\
 129	type retval;							\
 130	void __user *vaddr = (void __force __user *) addr;		\
 131									\
 132	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
 133		*(type *)dest = 0;					\
 134	else								\
 135		*(type *) dest = retval;				\
 136}
 137DEFINE_BASIC_FETCH_FUNCS(memory)
 138/*
 139 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 140 * length and relative data location.
 141 */
 142static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
 143					    void *addr, void *dest)
 144{
 145	long ret;
 146	u32 rloc = *(u32 *)dest;
 147	int maxlen  = get_rloc_len(rloc);
 148	u8 *dst = get_rloc_data(dest);
 149	void __user *src = (void __force __user *) addr;
 150
 151	if (!maxlen)
 152		return;
 153
 154	ret = strncpy_from_user(dst, src, maxlen);
 155	if (ret == maxlen)
 156		dst[--ret] = '\0';
 157
 158	if (ret < 0) {	/* Failed to fetch string */
 159		((u8 *)get_rloc_data(dest))[0] = '\0';
 160		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
 161	} else {
 162		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
 163	}
 164}
 165
 166static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
 167						 void *addr, void *dest)
 168{
 169	int len;
 170	void __user *vaddr = (void __force __user *) addr;
 171
 172	len = strnlen_user(vaddr, MAX_STRING_SIZE);
 173
 174	if (len == 0 || len > MAX_STRING_SIZE)  /* Failed to check length */
 175		*(u32 *)dest = 0;
 176	else
 177		*(u32 *)dest = len;
 178}
 179
 180static unsigned long translate_user_vaddr(void *file_offset)
 181{
 182	unsigned long base_addr;
 183	struct uprobe_dispatch_data *udd;
 184
 185	udd = (void *) current->utask->vaddr;
 186
 187	base_addr = udd->bp_addr - udd->tu->offset;
 188	return base_addr + (unsigned long)file_offset;
 189}
 190
 191#define DEFINE_FETCH_file_offset(type)					\
 192static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
 193					       void *offset, void *dest)\
 194{									\
 195	void *vaddr = (void *)translate_user_vaddr(offset);		\
 196									\
 197	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
 198}
 199DEFINE_BASIC_FETCH_FUNCS(file_offset)
 200DEFINE_FETCH_file_offset(string)
 201DEFINE_FETCH_file_offset(string_size)
 202
 203/* Fetch type information table */
 204static const struct fetch_type uprobes_fetch_type_table[] = {
 205	/* Special types */
 206	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
 207					sizeof(u32), 1, "__data_loc char[]"),
 208	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
 209					string_size, sizeof(u32), 0, "u32"),
 210	/* Basic types */
 211	ASSIGN_FETCH_TYPE(u8,  u8,  0),
 212	ASSIGN_FETCH_TYPE(u16, u16, 0),
 213	ASSIGN_FETCH_TYPE(u32, u32, 0),
 214	ASSIGN_FETCH_TYPE(u64, u64, 0),
 215	ASSIGN_FETCH_TYPE(s8,  u8,  1),
 216	ASSIGN_FETCH_TYPE(s16, u16, 1),
 217	ASSIGN_FETCH_TYPE(s32, u32, 1),
 218	ASSIGN_FETCH_TYPE(s64, u64, 1),
 219	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
 220	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
 221	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
 222	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),
 223
 224	ASSIGN_FETCH_TYPE_END
 225};
 226
 227static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
 228{
 229	rwlock_init(&filter->rwlock);
 230	filter->nr_systemwide = 0;
 231	INIT_LIST_HEAD(&filter->perf_events);
 232}
 233
 234static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
 235{
 236	return !filter->nr_systemwide && list_empty(&filter->perf_events);
 237}
 238
 239static inline bool is_ret_probe(struct trace_uprobe *tu)
 240{
 241	return tu->consumer.ret_handler != NULL;
 242}
 243
 244/*
 245 * Allocate new trace_uprobe and initialize it (including uprobes).
 246 */
 247static struct trace_uprobe *
 248alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 249{
 250	struct trace_uprobe *tu;
 251
 252	if (!event || !is_good_name(event))
 253		return ERR_PTR(-EINVAL);
 254
 255	if (!group || !is_good_name(group))
 256		return ERR_PTR(-EINVAL);
 257
 258	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
 259	if (!tu)
 260		return ERR_PTR(-ENOMEM);
 261
 262	tu->tp.call.class = &tu->tp.class;
 263	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
 264	if (!tu->tp.call.name)
 265		goto error;
 266
 267	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
 268	if (!tu->tp.class.system)
 269		goto error;
 270
 271	INIT_LIST_HEAD(&tu->list);
 272	INIT_LIST_HEAD(&tu->tp.files);
 273	tu->consumer.handler = uprobe_dispatcher;
 274	if (is_ret)
 275		tu->consumer.ret_handler = uretprobe_dispatcher;
 276	init_trace_uprobe_filter(&tu->filter);
 277	return tu;
 278
 279error:
 280	kfree(tu->tp.call.name);
 281	kfree(tu);
 282
 283	return ERR_PTR(-ENOMEM);
 284}
 285
 286static void free_trace_uprobe(struct trace_uprobe *tu)
 287{
 288	int i;
 289
 290	for (i = 0; i < tu->tp.nr_args; i++)
 291		traceprobe_free_probe_arg(&tu->tp.args[i]);
 292
 293	path_put(&tu->path);
 294	kfree(tu->tp.call.class->system);
 295	kfree(tu->tp.call.name);
 296	kfree(tu->filename);
 297	kfree(tu);
 298}
 299
 300static struct trace_uprobe *find_probe_event(const char *event, const char *group)
 301{
 302	struct trace_uprobe *tu;
 303
 304	list_for_each_entry(tu, &uprobe_list, list)
 305		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
 306		    strcmp(tu->tp.call.class->system, group) == 0)
 307			return tu;
 308
 309	return NULL;
 310}
 311
  312/* Unregister a trace_uprobe and probe_event: must be called with uprobe_lock held */
 313static int unregister_trace_uprobe(struct trace_uprobe *tu)
 314{
 315	int ret;
 316
 317	ret = unregister_uprobe_event(tu);
 318	if (ret)
 319		return ret;
 320
 321	list_del(&tu->list);
 322	free_trace_uprobe(tu);
 323	return 0;
 324}
 325
 326/* Register a trace_uprobe and probe_event */
 327static int register_trace_uprobe(struct trace_uprobe *tu)
 328{
 329	struct trace_uprobe *old_tu;
 330	int ret;
 331
 332	mutex_lock(&uprobe_lock);
 333
 334	/* register as an event */
 335	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
 336			tu->tp.call.class->system);
 337	if (old_tu) {
 338		/* delete old event */
 339		ret = unregister_trace_uprobe(old_tu);
 340		if (ret)
 341			goto end;
 342	}
 343
 344	ret = register_uprobe_event(tu);
 345	if (ret) {
 346		pr_warn("Failed to register probe event(%d)\n", ret);
 347		goto end;
 348	}
 349
 350	list_add_tail(&tu->list, &uprobe_list);
 351
 352end:
 353	mutex_unlock(&uprobe_lock);
 354
 355	return ret;
 356}
 357
 358/*
 359 * Argument syntax:
 360 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 361 *
 362 *  - Remove uprobe: -:[GRP/]EVENT
 363 */
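/*
 * Illustrative usage (hedged: the binary, offset and fetch args below are
 * hypothetical examples, not taken from this file):
 *
 *   echo 'p:myprobe /bin/bash:0x4245c0 %ip %ax' >> uprobe_events
 *   echo 'r:myretprobe /bin/bash:0x4245c0 $retval' >> uprobe_events
 *   echo '-:myprobe' >> uprobe_events
 *
 * If EVENT is omitted, a name of the form p_<basename>_0x<offset>
 * (e.g. "p_bash_0x4245c0") is generated by the code below.
 */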
 364static int create_trace_uprobe(int argc, char **argv)
 365{
 366	struct trace_uprobe *tu;
 367	char *arg, *event, *group, *filename;
 368	char buf[MAX_EVENT_NAME_LEN];
 369	struct path path;
 370	unsigned long offset;
 371	bool is_delete, is_return;
 372	int i, ret;
 373
 374	ret = 0;
 375	is_delete = false;
 376	is_return = false;
 377	event = NULL;
 378	group = NULL;
 379
 380	/* argc must be >= 1 */
 381	if (argv[0][0] == '-')
 382		is_delete = true;
 383	else if (argv[0][0] == 'r')
 384		is_return = true;
 385	else if (argv[0][0] != 'p') {
 386		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
 387		return -EINVAL;
 388	}
 389
 390	if (argv[0][1] == ':') {
 391		event = &argv[0][2];
 392		arg = strchr(event, '/');
 393
 394		if (arg) {
 395			group = event;
 396			event = arg + 1;
 397			event[-1] = '\0';
 398
 399			if (strlen(group) == 0) {
 400				pr_info("Group name is not specified\n");
 401				return -EINVAL;
 402			}
 403		}
 404		if (strlen(event) == 0) {
 405			pr_info("Event name is not specified\n");
 406			return -EINVAL;
 407		}
 408	}
 409	if (!group)
 410		group = UPROBE_EVENT_SYSTEM;
 411
 412	if (is_delete) {
 413		int ret;
 414
 415		if (!event) {
 416			pr_info("Delete command needs an event name.\n");
 417			return -EINVAL;
 418		}
 419		mutex_lock(&uprobe_lock);
 420		tu = find_probe_event(event, group);
 421
 422		if (!tu) {
 423			mutex_unlock(&uprobe_lock);
 424			pr_info("Event %s/%s doesn't exist.\n", group, event);
 425			return -ENOENT;
 426		}
 427		/* delete an event */
 428		ret = unregister_trace_uprobe(tu);
 429		mutex_unlock(&uprobe_lock);
 430		return ret;
 431	}
 432
 433	if (argc < 2) {
 434		pr_info("Probe point is not specified.\n");
 435		return -EINVAL;
 436	}
 437	/* Find the last occurrence, in case the path contains ':' too. */
 438	arg = strrchr(argv[1], ':');
 439	if (!arg)
 440		return -EINVAL;
 441
 442	*arg++ = '\0';
 443	filename = argv[1];
 444	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
 445	if (ret)
 446		return ret;
 447
 448	if (!d_is_reg(path.dentry)) {
 449		ret = -EINVAL;
 450		goto fail_address_parse;
 451	}
 452
 453	ret = kstrtoul(arg, 0, &offset);
 454	if (ret)
 455		goto fail_address_parse;
 456
 457	argc -= 2;
 458	argv += 2;
 459
 460	/* setup a probe */
 461	if (!event) {
 462		char *tail;
 463		char *ptr;
 464
 465		tail = kstrdup(kbasename(filename), GFP_KERNEL);
 466		if (!tail) {
 467			ret = -ENOMEM;
 468			goto fail_address_parse;
 469		}
 470
 471		ptr = strpbrk(tail, ".-_");
 472		if (ptr)
 473			*ptr = '\0';
 474
 475		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
 476		event = buf;
 477		kfree(tail);
 478	}
 479
 480	tu = alloc_trace_uprobe(group, event, argc, is_return);
 481	if (IS_ERR(tu)) {
 482		pr_info("Failed to allocate trace_uprobe (%d)\n", (int)PTR_ERR(tu));
 483		ret = PTR_ERR(tu);
 484		goto fail_address_parse;
 485	}
 486	tu->offset = offset;
 487	tu->path = path;
 488	tu->filename = kstrdup(filename, GFP_KERNEL);
 489
 490	if (!tu->filename) {
 491		pr_info("Failed to allocate filename.\n");
 492		ret = -ENOMEM;
 493		goto error;
 494	}
 495
 496	/* parse arguments */
 497	ret = 0;
 498	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
 499		struct probe_arg *parg = &tu->tp.args[i];
 500
 501		/* Increment count for freeing args in error case */
 502		tu->tp.nr_args++;
 503
 504		/* Parse argument name */
 505		arg = strchr(argv[i], '=');
 506		if (arg) {
 507			*arg++ = '\0';
 508			parg->name = kstrdup(argv[i], GFP_KERNEL);
 509		} else {
 510			arg = argv[i];
 511			/* If argument name is omitted, set "argN" */
 512			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
 513			parg->name = kstrdup(buf, GFP_KERNEL);
 514		}
 515
 516		if (!parg->name) {
 517			pr_info("Failed to allocate argument[%d] name.\n", i);
 518			ret = -ENOMEM;
 519			goto error;
 520		}
 521
 522		if (!is_good_name(parg->name)) {
 523			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
 524			ret = -EINVAL;
 525			goto error;
 526		}
 527
 528		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
 529			pr_info("Argument[%d] name '%s' conflicts with "
 530				"another field.\n", i, argv[i]);
 531			ret = -EINVAL;
 532			goto error;
 533		}
 534
 535		/* Parse fetch argument */
 536		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
 537						 is_return, false,
 538						 uprobes_fetch_type_table);
 539		if (ret) {
 540			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
 541			goto error;
 542		}
 543	}
 544
 545	ret = register_trace_uprobe(tu);
 546	if (ret)
 547		goto error;
 548	return 0;
 549
 550error:
 551	free_trace_uprobe(tu);
 552	return ret;
 553
 554fail_address_parse:
 555	path_put(&path);
 556
 557	pr_info("Failed to parse address or file.\n");
 558
 559	return ret;
 560}
 561
 562static int cleanup_all_probes(void)
 563{
 564	struct trace_uprobe *tu;
 565	int ret = 0;
 566
 567	mutex_lock(&uprobe_lock);
 568	while (!list_empty(&uprobe_list)) {
 569		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
 570		ret = unregister_trace_uprobe(tu);
 571		if (ret)
 572			break;
 573	}
 574	mutex_unlock(&uprobe_lock);
 575	return ret;
 576}
 577
 578/* Probes listing interfaces */
 579static void *probes_seq_start(struct seq_file *m, loff_t *pos)
 580{
 581	mutex_lock(&uprobe_lock);
 582	return seq_list_start(&uprobe_list, *pos);
 583}
 584
 585static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
 586{
 587	return seq_list_next(v, &uprobe_list, pos);
 588}
 589
 590static void probes_seq_stop(struct seq_file *m, void *v)
 591{
 592	mutex_unlock(&uprobe_lock);
 593}
 594
 595static int probes_seq_show(struct seq_file *m, void *v)
 596{
 597	struct trace_uprobe *tu = v;
 598	char c = is_ret_probe(tu) ? 'r' : 'p';
 599	int i;
 600
 601	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system,
 602			trace_event_name(&tu->tp.call), tu->filename,
 603			(int)(sizeof(void *) * 2), tu->offset);
 604
 605	for (i = 0; i < tu->tp.nr_args; i++)
 606		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
 607
 608	seq_putc(m, '\n');
 609	return 0;
 610}
 611
 612static const struct seq_operations probes_seq_op = {
 613	.start	= probes_seq_start,
 614	.next	= probes_seq_next,
 615	.stop	= probes_seq_stop,
 616	.show	= probes_seq_show
 617};
 618
 619static int probes_open(struct inode *inode, struct file *file)
 620{
 621	int ret;
 622
 623	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 624		ret = cleanup_all_probes();
 625		if (ret)
 626			return ret;
 627	}
 628
 629	return seq_open(file, &probes_seq_op);
 630}
 631
 632static ssize_t probes_write(struct file *file, const char __user *buffer,
 633			    size_t count, loff_t *ppos)
 634{
 635	return trace_parse_run_command(file, buffer, count, ppos, create_trace_uprobe);
 636}
 637
 638static const struct file_operations uprobe_events_ops = {
 639	.owner		= THIS_MODULE,
 640	.open		= probes_open,
 641	.read		= seq_read,
 642	.llseek		= seq_lseek,
 643	.release	= seq_release,
 644	.write		= probes_write,
 645};
 646
 647/* Probes profiling interfaces */
 648static int probes_profile_seq_show(struct seq_file *m, void *v)
 649{
 650	struct trace_uprobe *tu = v;
 651
 652	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
 653			trace_event_name(&tu->tp.call), tu->nhit);
 654	return 0;
 655}
 656
 657static const struct seq_operations profile_seq_op = {
 658	.start	= probes_seq_start,
 659	.next	= probes_seq_next,
 660	.stop	= probes_seq_stop,
 661	.show	= probes_profile_seq_show
 662};
 663
 664static int profile_open(struct inode *inode, struct file *file)
 665{
 666	return seq_open(file, &profile_seq_op);
 667}
 668
 669static const struct file_operations uprobe_profile_ops = {
 670	.owner		= THIS_MODULE,
 671	.open		= profile_open,
 672	.read		= seq_read,
 673	.llseek		= seq_lseek,
 674	.release	= seq_release,
 675};
 676
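/*
 * Per-cpu scratch buffers: probe arguments are fetched into these pages
 * before being copied into the ftrace ring buffer or a perf record. The
 * mutex serializes users whose task migrates between CPUs mid-record
 * (see uprobe_buffer_get() below).
 */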
 677struct uprobe_cpu_buffer {
 678	struct mutex mutex;
 679	void *buf;
 680};
 681static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
 682static int uprobe_buffer_refcnt;
 683
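/*
 * Allocate one page of scratch space per possible CPU. On failure the
 * pages already allocated are unwound before the percpu area is freed.
 */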
 684static int uprobe_buffer_init(void)
 685{
 686	int cpu, err_cpu;
 687
 688	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
 689	if (uprobe_cpu_buffer == NULL)
 690		return -ENOMEM;
 691
 692	for_each_possible_cpu(cpu) {
 693		struct page *p = alloc_pages_node(cpu_to_node(cpu),
 694						  GFP_KERNEL, 0);
 695		if (p == NULL) {
 696			err_cpu = cpu;
 697			goto err;
 698		}
 699		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
 700		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
 701	}
 702
 703	return 0;
 704
 705err:
 706	for_each_possible_cpu(cpu) {
 707		if (cpu == err_cpu)
 708			break;
 709		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
 710	}
 711
 712	free_percpu(uprobe_cpu_buffer);
 713	return -ENOMEM;
 714}
 715
 716static int uprobe_buffer_enable(void)
 717{
 718	int ret = 0;
 719
 720	BUG_ON(!mutex_is_locked(&event_mutex));
 721
 722	if (uprobe_buffer_refcnt++ == 0) {
 723		ret = uprobe_buffer_init();
 724		if (ret < 0)
 725			uprobe_buffer_refcnt--;
 726	}
 727
 728	return ret;
 729}
 730
 731static void uprobe_buffer_disable(void)
 732{
 733	int cpu;
 734
 735	BUG_ON(!mutex_is_locked(&event_mutex));
 736
 737	if (--uprobe_buffer_refcnt == 0) {
 738		for_each_possible_cpu(cpu)
 739			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
 740							     cpu)->buf);
 741
 742		free_percpu(uprobe_cpu_buffer);
 743		uprobe_cpu_buffer = NULL;
 744	}
 745}
 746
 747static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
 748{
 749	struct uprobe_cpu_buffer *ucb;
 750	int cpu;
 751
 752	cpu = raw_smp_processor_id();
 753	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
 754
 755	/*
 756	 * Use per-cpu buffers for fastest access, but the task might migrate
 757	 * to another CPU, so the mutex makes sure we have sole access to the buffer.
 758	 */
 759	mutex_lock(&ucb->mutex);
 760
 761	return ucb;
 762}
 763
 764static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
 765{
 766	mutex_unlock(&ucb->mutex);
 767}
 768
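/*
 * Commit one record to the ftrace ring buffer. Per the
 * SIZEOF_TRACE_ENTRY()/DATAOF_TRACE_ENTRY() layout, the record is:
 * the entry header, vaddr[0] (probe or function address), vaddr[1]
 * (return address, uretprobes only), then the fetched arguments copied
 * from the per-cpu buffer.
 */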
 769static void __uprobe_trace_func(struct trace_uprobe *tu,
 770				unsigned long func, struct pt_regs *regs,
 771				struct uprobe_cpu_buffer *ucb, int dsize,
 772				struct trace_event_file *trace_file)
 773{
 774	struct uprobe_trace_entry_head *entry;
 775	struct ring_buffer_event *event;
 776	struct ring_buffer *buffer;
 777	void *data;
 778	int size, esize;
 779	struct trace_event_call *call = &tu->tp.call;
 780
 781	WARN_ON(call != trace_file->event_call);
 782
 783	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
 784		return;
 785
 786	if (trace_trigger_soft_disabled(trace_file))
 787		return;
 788
 789	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 790	size = esize + tu->tp.size + dsize;
 791	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 792						call->event.type, size, 0, 0);
 793	if (!event)
 794		return;
 795
 796	entry = ring_buffer_event_data(event);
 797	if (is_ret_probe(tu)) {
 798		entry->vaddr[0] = func;
 799		entry->vaddr[1] = instruction_pointer(regs);
 800		data = DATAOF_TRACE_ENTRY(entry, true);
 801	} else {
 802		entry->vaddr[0] = instruction_pointer(regs);
 803		data = DATAOF_TRACE_ENTRY(entry, false);
 804	}
 805
 806	memcpy(data, ucb->buf, tu->tp.size + dsize);
 807
 808	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
 809}
 810
 811/* uprobe handler */
 812static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
 813			     struct uprobe_cpu_buffer *ucb, int dsize)
 814{
 815	struct event_file_link *link;
 816
 817	if (is_ret_probe(tu))
 818		return 0;
 819
 820	rcu_read_lock();
 821	list_for_each_entry_rcu(link, &tu->tp.files, list)
 822		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
 823	rcu_read_unlock();
 824
 825	return 0;
 826}
 827
 828static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
 829				 struct pt_regs *regs,
 830				 struct uprobe_cpu_buffer *ucb, int dsize)
 831{
 832	struct event_file_link *link;
 833
 834	rcu_read_lock();
 835	list_for_each_entry_rcu(link, &tu->tp.files, list)
 836		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
 837	rcu_read_unlock();
 838}
 839
 840/* Event entry printers */
 841static enum print_line_t
 842print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
 843{
 844	struct uprobe_trace_entry_head *entry;
 845	struct trace_seq *s = &iter->seq;
 846	struct trace_uprobe *tu;
 847	u8 *data;
 848	int i;
 849
 850	entry = (struct uprobe_trace_entry_head *)iter->ent;
 851	tu = container_of(event, struct trace_uprobe, tp.call.event);
 852
 853	if (is_ret_probe(tu)) {
 854		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
 855				 trace_event_name(&tu->tp.call),
 856				 entry->vaddr[1], entry->vaddr[0]);
 857		data = DATAOF_TRACE_ENTRY(entry, true);
 858	} else {
 859		trace_seq_printf(s, "%s: (0x%lx)",
 860				 trace_event_name(&tu->tp.call),
 861				 entry->vaddr[0]);
 862		data = DATAOF_TRACE_ENTRY(entry, false);
 863	}
 864
 865	for (i = 0; i < tu->tp.nr_args; i++) {
 866		struct probe_arg *parg = &tu->tp.args[i];
 867
 868		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
 869			goto out;
 870	}
 871
 872	trace_seq_putc(s, '\n');
 873
 874 out:
 875	return trace_handle_return(s);
 876}
 877
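/*
 * A filter callback decides, per mm, whether breakpoints should be (or
 * remain) installed; the perf path plugs in uprobe_perf_filter() below.
 */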
 878typedef bool (*filter_func_t)(struct uprobe_consumer *self,
 879				enum uprobe_filter_ctx ctx,
 880				struct mm_struct *mm);
 881
 882static int
 883probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
 884		   filter_func_t filter)
 885{
 886	bool enabled = trace_probe_is_enabled(&tu->tp);
 887	struct event_file_link *link = NULL;
 888	int ret;
 889
 890	if (file) {
 891		if (tu->tp.flags & TP_FLAG_PROFILE)
 892			return -EINTR;
 893
 894		link = kmalloc(sizeof(*link), GFP_KERNEL);
 895		if (!link)
 896			return -ENOMEM;
 897
 898		link->file = file;
 899		list_add_tail_rcu(&link->list, &tu->tp.files);
 900
 901		tu->tp.flags |= TP_FLAG_TRACE;
 902	} else {
 903		if (tu->tp.flags & TP_FLAG_TRACE)
 904			return -EINTR;
 905
 906		tu->tp.flags |= TP_FLAG_PROFILE;
 907	}
 908
 909	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 910
 911	if (enabled)
 912		return 0;
 913
 914	ret = uprobe_buffer_enable();
 915	if (ret)
 916		goto err_flags;
 917
 918	tu->consumer.filter = filter;
 919	tu->inode = d_real_inode(tu->path.dentry);
 920	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
 921	if (ret)
 922		goto err_buffer;
 923
 924	return 0;
 925
 926 err_buffer:
 927	uprobe_buffer_disable();
 928
 929 err_flags:
 930	if (file) {
 931		list_del(&link->list);
 932		kfree(link);
 933		tu->tp.flags &= ~TP_FLAG_TRACE;
 934	} else {
 935		tu->tp.flags &= ~TP_FLAG_PROFILE;
 936	}
 937	return ret;
 938}
 939
 940static void
 941probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
 942{
 943	if (!trace_probe_is_enabled(&tu->tp))
 944		return;
 945
 946	if (file) {
 947		struct event_file_link *link;
 948
 949		link = find_event_file_link(&tu->tp, file);
 950		if (!link)
 951			return;
 952
 953		list_del_rcu(&link->list);
 954		/* synchronize with u{,ret}probe_trace_func */
 955		synchronize_sched();
 956		kfree(link);
 957
 958		if (!list_empty(&tu->tp.files))
 959			return;
 960	}
 961
 962	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 963
 964	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
 965	tu->inode = NULL;
 966	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
 967
 968	uprobe_buffer_disable();
 969}
 970
 971static int uprobe_event_define_fields(struct trace_event_call *event_call)
 972{
 973	int ret, i, size;
 974	struct uprobe_trace_entry_head field;
 975	struct trace_uprobe *tu = event_call->data;
 976
 977	if (is_ret_probe(tu)) {
 978		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
 979		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
 980		size = SIZEOF_TRACE_ENTRY(true);
 981	} else {
 982		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
 983		size = SIZEOF_TRACE_ENTRY(false);
 984	}
 985	/* Set argument names as fields */
 986	for (i = 0; i < tu->tp.nr_args; i++) {
 987		struct probe_arg *parg = &tu->tp.args[i];
 988
 989		ret = trace_define_field(event_call, parg->type->fmttype,
 990					 parg->name, size + parg->offset,
 991					 parg->type->size, parg->type->is_signed,
 992					 FILTER_OTHER);
 993
 994		if (ret)
 995			return ret;
 996	}
 997	return 0;
 998}
 999
1000#ifdef CONFIG_PERF_EVENTS
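/*
 * True if @mm should be probed: either a systemwide perf event is
 * active, or some event on the filter list targets this mm.
 */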
1001static bool
1002__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1003{
1004	struct perf_event *event;
1005
1006	if (filter->nr_systemwide)
1007		return true;
1008
1009	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1010		if (event->hw.target->mm == mm)
1011			return true;
1012	}
1013
1014	return false;
1015}
1016
1017static inline bool
1018uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1019{
1020	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
1021}
1022
1023static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1024{
1025	bool done;
1026
1027	write_lock(&tu->filter.rwlock);
1028	if (event->hw.target) {
1029		list_del(&event->hw.tp_list);
1030		done = tu->filter.nr_systemwide ||
1031			(event->hw.target->flags & PF_EXITING) ||
1032			uprobe_filter_event(tu, event);
1033	} else {
1034		tu->filter.nr_systemwide--;
1035		done = tu->filter.nr_systemwide;
1036	}
1037	write_unlock(&tu->filter.rwlock);
1038
1039	if (!done)
1040		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1041
1042	return 0;
1043}
1044
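/*
 * Add @event to the filter and, unless the breakpoints are known to be
 * in place already (systemwide, fork, or enable_on_exec), install them
 * via uprobe_apply(); on failure, roll back through uprobe_perf_close().
 */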
1045static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1046{
1047	bool done;
1048	int err;
1049
1050	write_lock(&tu->filter.rwlock);
1051	if (event->hw.target) {
1052		/*
1053		 * event->parent != NULL means copy_process(), we can avoid
1054		 * uprobe_apply(). current->mm must be probed and we can rely
1055		 * on dup_mmap() which preserves the already installed bp's.
1056		 *
1057		 * attr.enable_on_exec means that exec/mmap will install the
1058		 * breakpoints we need.
1059		 */
1060		done = tu->filter.nr_systemwide ||
1061			event->parent || event->attr.enable_on_exec ||
1062			uprobe_filter_event(tu, event);
1063		list_add(&event->hw.tp_list, &tu->filter.perf_events);
1064	} else {
1065		done = tu->filter.nr_systemwide;
1066		tu->filter.nr_systemwide++;
1067	}
1068	write_unlock(&tu->filter.rwlock);
1069
1070	err = 0;
1071	if (!done) {
1072		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1073		if (err)
1074			uprobe_perf_close(tu, event);
1075	}
1076	return err;
1077}
1078
1079static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1080				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1081{
1082	struct trace_uprobe *tu;
1083	int ret;
1084
1085	tu = container_of(uc, struct trace_uprobe, consumer);
1086	read_lock(&tu->filter.rwlock);
1087	ret = __uprobe_perf_filter(&tu->filter, mm);
1088	read_unlock(&tu->filter.rwlock);
1089
1090	return ret;
1091}
1092
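/*
 * Build and submit one perf sample. The size is padded to u64 alignment
 * (less the u32 perf size header), and the padding tail is zeroed so no
 * uninitialized bytes reach userspace.
 */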
1093static void __uprobe_perf_func(struct trace_uprobe *tu,
1094			       unsigned long func, struct pt_regs *regs,
1095			       struct uprobe_cpu_buffer *ucb, int dsize)
1096{
1097	struct trace_event_call *call = &tu->tp.call;
1098	struct uprobe_trace_entry_head *entry;
1099	struct hlist_head *head;
1100	void *data;
1101	int size, esize;
1102	int rctx;
1103
1104	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1105		return;
1106
1107	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1108
1109	size = esize + tu->tp.size + dsize;
1110	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1111	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1112		return;
1113
1114	preempt_disable();
1115	head = this_cpu_ptr(call->perf_events);
1116	if (hlist_empty(head))
1117		goto out;
1118
1119	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1120	if (!entry)
1121		goto out;
1122
1123	if (is_ret_probe(tu)) {
1124		entry->vaddr[0] = func;
1125		entry->vaddr[1] = instruction_pointer(regs);
1126		data = DATAOF_TRACE_ENTRY(entry, true);
1127	} else {
1128		entry->vaddr[0] = instruction_pointer(regs);
1129		data = DATAOF_TRACE_ENTRY(entry, false);
1130	}
1131
1132	memcpy(data, ucb->buf, tu->tp.size + dsize);
1133
1134	if (size - esize > tu->tp.size + dsize) {
1135		int len = tu->tp.size + dsize;
1136
1137		memset(data + len, 0, size - esize - len);
1138	}
1139
1140	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1141			      head, NULL);
1142 out:
1143	preempt_enable();
1144}
1145
1146/* uprobe profile handler */
1147static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1148			    struct uprobe_cpu_buffer *ucb, int dsize)
1149{
1150	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1151		return UPROBE_HANDLER_REMOVE;
1152
1153	if (!is_ret_probe(tu))
1154		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
1155	return 0;
1156}
1157
1158static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1159				struct pt_regs *regs,
1160				struct uprobe_cpu_buffer *ucb, int dsize)
1161{
1162	__uprobe_perf_func(tu, func, regs, ucb, dsize);
1163}
1164#endif	/* CONFIG_PERF_EVENTS */
1165
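/*
 * ->reg() callback: multiplexes ftrace (TRACE_REG_(UN)REGISTER) and perf
 * (TRACE_REG_PERF_*) requests onto probe_event_enable()/_disable() and
 * the perf open/close filter hooks.
 */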
1166static int
1167trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1168		      void *data)
1169{
1170	struct trace_uprobe *tu = event->data;
1171	struct trace_event_file *file = data;
1172
1173	switch (type) {
1174	case TRACE_REG_REGISTER:
1175		return probe_event_enable(tu, file, NULL);
1176
1177	case TRACE_REG_UNREGISTER:
1178		probe_event_disable(tu, file);
1179		return 0;
1180
1181#ifdef CONFIG_PERF_EVENTS
1182	case TRACE_REG_PERF_REGISTER:
1183		return probe_event_enable(tu, NULL, uprobe_perf_filter);
1184
1185	case TRACE_REG_PERF_UNREGISTER:
1186		probe_event_disable(tu, NULL);
1187		return 0;
1188
1189	case TRACE_REG_PERF_OPEN:
1190		return uprobe_perf_open(tu, data);
1191
1192	case TRACE_REG_PERF_CLOSE:
1193		return uprobe_perf_close(tu, data);
1194
1195#endif
1196	default:
1197		return 0;
1198	}
1199	return 0;
1200}
1201
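/*
 * Breakpoint handler: fetches the arguments once into a per-cpu buffer,
 * then hands the hit to whichever of the ftrace and perf consumers are
 * enabled.
 */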
1202static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1203{
1204	struct trace_uprobe *tu;
1205	struct uprobe_dispatch_data udd;
1206	struct uprobe_cpu_buffer *ucb;
1207	int dsize, esize;
1208	int ret = 0;
1209
1210
1211	tu = container_of(con, struct trace_uprobe, consumer);
1212	tu->nhit++;
1213
1214	udd.tu = tu;
1215	udd.bp_addr = instruction_pointer(regs);
1216
1217	current->utask->vaddr = (unsigned long) &udd;
1218
1219	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1220		return 0;
1221
1222	dsize = __get_data_size(&tu->tp, regs);
1223	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1224
1225	ucb = uprobe_buffer_get();
1226	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1227
1228	if (tu->tp.flags & TP_FLAG_TRACE)
1229		ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1230
1231#ifdef CONFIG_PERF_EVENTS
1232	if (tu->tp.flags & TP_FLAG_PROFILE)
1233		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1234#endif
1235	uprobe_buffer_put(ucb);
1236	return ret;
1237}
1238
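/*
 * Return-probe handler: like uprobe_dispatcher(), but passes @func (the
 * probed function's entry address) through so it can be recorded next to
 * the return address.
 */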
1239static int uretprobe_dispatcher(struct uprobe_consumer *con,
1240				unsigned long func, struct pt_regs *regs)
1241{
1242	struct trace_uprobe *tu;
1243	struct uprobe_dispatch_data udd;
1244	struct uprobe_cpu_buffer *ucb;
1245	int dsize, esize;
1246
1247	tu = container_of(con, struct trace_uprobe, consumer);
1248
1249	udd.tu = tu;
1250	udd.bp_addr = func;
1251
1252	current->utask->vaddr = (unsigned long) &udd;
1253
1254	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1255		return 0;
1256
1257	dsize = __get_data_size(&tu->tp, regs);
1258	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1259
1260	ucb = uprobe_buffer_get();
1261	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1262
1263	if (tu->tp.flags & TP_FLAG_TRACE)
1264		uretprobe_trace_func(tu, func, regs, ucb, dsize);
1265
1266#ifdef CONFIG_PERF_EVENTS
1267	if (tu->tp.flags & TP_FLAG_PROFILE)
1268		uretprobe_perf_func(tu, func, regs, ucb, dsize);
1269#endif
1270	uprobe_buffer_put(ucb);
1271	return 0;
1272}
1273
1274static struct trace_event_functions uprobe_funcs = {
1275	.trace		= print_uprobe_event
1276};
1277
1278static inline void init_trace_event_call(struct trace_uprobe *tu,
1279					 struct trace_event_call *call)
1280{
1281	INIT_LIST_HEAD(&call->class->fields);
1282	call->event.funcs = &uprobe_funcs;
1283	call->class->define_fields = uprobe_event_define_fields;
1284
1285	call->flags = TRACE_EVENT_FL_UPROBE;
1286	call->class->reg = trace_uprobe_register;
1287	call->data = tu;
1288}
1289
1290static int register_uprobe_event(struct trace_uprobe *tu)
1291{
1292	struct trace_event_call *call = &tu->tp.call;
1293	int ret = 0;
1294
1295	init_trace_event_call(tu, call);
1296
1297	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
1298		return -ENOMEM;
1299
1300	ret = register_trace_event(&call->event);
1301	if (!ret) {
1302		kfree(call->print_fmt);
1303		return -ENODEV;
1304	}
1305
1306	ret = trace_add_event_call(call);
1307
1308	if (ret) {
1309		pr_info("Failed to register uprobe event: %s\n",
1310			trace_event_name(call));
1311		kfree(call->print_fmt);
1312		unregister_trace_event(&call->event);
1313	}
1314
1315	return ret;
1316}
1317
1318static int unregister_uprobe_event(struct trace_uprobe *tu)
1319{
1320	int ret;
1321
1322	/* tu->event is unregistered in trace_remove_event_call() */
1323	ret = trace_remove_event_call(&tu->tp.call);
1324	if (ret)
1325		return ret;
1326	kfree(tu->tp.call.print_fmt);
1327	tu->tp.call.print_fmt = NULL;
1328	return 0;
1329}
1330
1331#ifdef CONFIG_PERF_EVENTS
1332struct trace_event_call *
1333create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
1334{
1335	struct trace_uprobe *tu;
1336	struct path path;
1337	int ret;
1338
1339	ret = kern_path(name, LOOKUP_FOLLOW, &path);
1340	if (ret)
1341		return ERR_PTR(ret);
1342
1343	if (!d_is_reg(path.dentry)) {
1344		path_put(&path);
1345		return ERR_PTR(-EINVAL);
1346	}
1347
1348	/*
1349	 * local trace_uprobes are not added to uprobe_list, so they are never
1350	 * found by find_probe_event(). Therefore, there is no concern of a
1351	 * duplicated name "DUMMY_EVENT" here.
1352	 */
1353	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1354				is_return);
1355
1356	if (IS_ERR(tu)) {
1357		pr_info("Failed to allocate trace_uprobe (%d)\n",
1358			(int)PTR_ERR(tu));
1359		path_put(&path);
1360		return ERR_CAST(tu);
1361	}
1362
1363	tu->offset = offs;
1364	tu->path = path;
1365	tu->filename = kstrdup(name, GFP_KERNEL);
1366	init_trace_event_call(tu, &tu->tp.call);
1367
1368	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
1369		ret = -ENOMEM;
1370		goto error;
1371	}
1372
1373	return &tu->tp.call;
1374error:
1375	free_trace_uprobe(tu);
1376	return ERR_PTR(ret);
1377}
1378
1379void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1380{
1381	struct trace_uprobe *tu;
1382
1383	tu = container_of(event_call, struct trace_uprobe, tp.call);
1384
1385	kfree(tu->tp.call.print_fmt);
1386	tu->tp.call.print_fmt = NULL;
1387
1388	free_trace_uprobe(tu);
1389}
1390#endif /* CONFIG_PERF_EVENTS */
1391
1392/* Make a trace interface for controlling probe points */
1393static __init int init_uprobe_trace(void)
1394{
1395	struct dentry *d_tracer;
1396
1397	d_tracer = tracing_init_dentry();
1398	if (IS_ERR(d_tracer))
1399		return 0;
1400
1401	trace_create_file("uprobe_events", 0644, d_tracer,
1402				    NULL, &uprobe_events_ops);
1403	/* Profile interface */
1404	trace_create_file("uprobe_profile", 0444, d_tracer,
1405				    NULL, &uprobe_profile_ops);
1406	return 0;
1407}
1408
1409fs_initcall(init_uprobe_trace);