v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * uprobes-based tracing events
   4 *
   5 * Copyright (C) IBM Corporation, 2010-2012
   6 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
   7 */
   8#define pr_fmt(fmt)	"trace_uprobe: " fmt
   9
  10#include <linux/security.h>
  11#include <linux/ctype.h>
  12#include <linux/module.h>
  13#include <linux/uaccess.h>
  14#include <linux/uprobes.h>
  15#include <linux/namei.h>
  16#include <linux/string.h>
  17#include <linux/rculist.h>
  18
  19#include "trace_dynevent.h"
  20#include "trace_probe.h"
  21#include "trace_probe_tmpl.h"
  22
  23#define UPROBE_EVENT_SYSTEM	"uprobes"
  24
  25struct uprobe_trace_entry_head {
  26	struct trace_entry	ent;
  27	unsigned long		vaddr[];
  28};
  29
  30#define SIZEOF_TRACE_ENTRY(is_return)			\
  31	(sizeof(struct uprobe_trace_entry_head) +	\
  32	 sizeof(unsigned long) * (is_return ? 2 : 1))
  33
  34#define DATAOF_TRACE_ENTRY(entry, is_return)		\
  35	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
  36
  37struct trace_uprobe_filter {
  38	rwlock_t		rwlock;
  39	int			nr_systemwide;
  40	struct list_head	perf_events;
  41};
  42
  43static int trace_uprobe_create(int argc, const char **argv);
  44static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
  45static int trace_uprobe_release(struct dyn_event *ev);
  46static bool trace_uprobe_is_busy(struct dyn_event *ev);
  47static bool trace_uprobe_match(const char *system, const char *event,
  48			int argc, const char **argv, struct dyn_event *ev);
  49
  50static struct dyn_event_operations trace_uprobe_ops = {
  51	.create = trace_uprobe_create,
  52	.show = trace_uprobe_show,
  53	.is_busy = trace_uprobe_is_busy,
  54	.free = trace_uprobe_release,
  55	.match = trace_uprobe_match,
  56};
  57
  58/*
  59 * uprobe event core functions
  60 */
  61struct trace_uprobe {
  62	struct dyn_event		devent;
  63	struct trace_uprobe_filter	filter;
  64	struct uprobe_consumer		consumer;
  65	struct path			path;
  66	struct inode			*inode;
  67	char				*filename;
  68	unsigned long			offset;
  69	unsigned long			ref_ctr_offset;
  70	unsigned long			nhit;
  71	struct trace_probe		tp;
  72};
  73
  74static bool is_trace_uprobe(struct dyn_event *ev)
  75{
  76	return ev->ops == &trace_uprobe_ops;
  77}
  78
  79static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
  80{
  81	return container_of(ev, struct trace_uprobe, devent);
  82}
  83
  84/**
  85 * for_each_trace_uprobe - iterate over the trace_uprobe list
  86 * @pos:	the struct trace_uprobe * for each entry
  87 * @dpos:	the struct dyn_event * to use as a loop cursor
  88 */
  89#define for_each_trace_uprobe(pos, dpos)	\
  90	for_each_dyn_event(dpos)		\
  91		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
  92
  93#define SIZEOF_TRACE_UPROBE(n)				\
  94	(offsetof(struct trace_uprobe, tp.args) +	\
  95	(sizeof(struct probe_arg) * (n)))
  96
  97static int register_uprobe_event(struct trace_uprobe *tu);
  98static int unregister_uprobe_event(struct trace_uprobe *tu);
  99
 100struct uprobe_dispatch_data {
 101	struct trace_uprobe	*tu;
 102	unsigned long		bp_addr;
 103};
 104
 105static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
 106static int uretprobe_dispatcher(struct uprobe_consumer *con,
 107				unsigned long func, struct pt_regs *regs);
 108
 109#ifdef CONFIG_STACK_GROWSUP
 110static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
 111{
 112	return addr - (n * sizeof(long));
 113}
 114#else
 115static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
 116{
 117	return addr + (n * sizeof(long));
 118}
 119#endif
 120
 121static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
 122{
 123	unsigned long ret;
 124	unsigned long addr = user_stack_pointer(regs);
 125
 126	addr = adjust_stack_addr(addr, n);
 127
 128	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
 129		return 0;
 130
 131	return ret;
 132}
 133
 134/*
 135 * Uprobes-specific fetch functions
 136 */
 137static nokprobe_inline int
 138probe_mem_read(void *dest, void *src, size_t size)
 139{
 140	void __user *vaddr = (void __force __user *)src;
 141
 142	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
 143}
 144
 145static nokprobe_inline int
 146probe_mem_read_user(void *dest, void *src, size_t size)
 147{
 148	return probe_mem_read(dest, src, size);
 149}
 150
 151/*
 152 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 153 * length and relative data location.
 154 */
 155static nokprobe_inline int
 156fetch_store_string(unsigned long addr, void *dest, void *base)
 157{
 158	long ret;
 159	u32 loc = *(u32 *)dest;
 160	int maxlen  = get_loc_len(loc);
 161	u8 *dst = get_loc_data(dest, base);
 162	void __user *src = (void __force __user *) addr;
 163
 164	if (unlikely(!maxlen))
 165		return -ENOMEM;
 166
 167	if (addr == FETCH_TOKEN_COMM)
 168		ret = strlcpy(dst, current->comm, maxlen);
 169	else
 170		ret = strncpy_from_user(dst, src, maxlen);
 171	if (ret >= 0) {
 172		if (ret == maxlen)
 173			dst[ret - 1] = '\0';
 174		else
 175			/*
 176			 * Include the terminating null byte. In this case it
 177			 * was copied by strncpy_from_user but not accounted
 178			 * for in ret.
 179			 */
 180			ret++;
 181		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
 182	}
 183
 184	return ret;
 185}
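The comment above refers to the data_loc convention shared with the other probe events: the u32 at *dest packs the string length into the upper 16 bits and the offset from the trace entry into the lower 16 bits. A minimal illustrative sketch (not part of this file), assuming the make_data_loc()/get_loc_len()/get_loc_offs() helpers from trace_probe.h behave that way:

	u32 loc  = make_data_loc(6, 24);  /* 6-byte string stored 24 bytes past the entry */
	int len  = get_loc_len(loc);      /* 6  == loc >> 16    */
	int offs = get_loc_offs(loc);     /* 24 == loc & 0xffff */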
 186
 187static nokprobe_inline int
 188fetch_store_string_user(unsigned long addr, void *dest, void *base)
 189{
 190	return fetch_store_string(addr, dest, base);
 191}
 192
  193/* Return the length of the string, including the terminating null byte */
 194static nokprobe_inline int
 195fetch_store_strlen(unsigned long addr)
 196{
 197	int len;
 198	void __user *vaddr = (void __force __user *) addr;
 199
 200	if (addr == FETCH_TOKEN_COMM)
 201		len = strlen(current->comm) + 1;
 202	else
 203		len = strnlen_user(vaddr, MAX_STRING_SIZE);
 204
 205	return (len > MAX_STRING_SIZE) ? 0 : len;
 206}
 207
 208static nokprobe_inline int
 209fetch_store_strlen_user(unsigned long addr)
 210{
 211	return fetch_store_strlen(addr);
 212}
 213
 214static unsigned long translate_user_vaddr(unsigned long file_offset)
 215{
 216	unsigned long base_addr;
 217	struct uprobe_dispatch_data *udd;
 218
 219	udd = (void *) current->utask->vaddr;
 220
 221	base_addr = udd->bp_addr - udd->tu->offset;
 222	return base_addr + file_offset;
 223}
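As a worked example with hypothetical addresses: if the probed instruction lives at file offset 0x710 and the breakpoint fires at virtual address 0x555555554710, then

	base_addr  = 0x555555554710 - 0x710 = 0x555555554000
	user vaddr = 0x555555554000 + 0x9a0 = 0x5555555549a0

so a fetch argument written as file offset 0x9a0 resolves to the corresponding address inside the probed mapping.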
 224
 225/* Note that we don't verify it, since the code does not come from user space */
 226static int
 227process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
 228		   void *base)
 229{
 230	unsigned long val;
 231
 232	/* 1st stage: get value from context */
 233	switch (code->op) {
 234	case FETCH_OP_REG:
 235		val = regs_get_register(regs, code->param);
 236		break;
 237	case FETCH_OP_STACK:
 238		val = get_user_stack_nth(regs, code->param);
 239		break;
 240	case FETCH_OP_STACKP:
 241		val = user_stack_pointer(regs);
 242		break;
 243	case FETCH_OP_RETVAL:
 244		val = regs_return_value(regs);
 245		break;
 246	case FETCH_OP_IMM:
 247		val = code->immediate;
 248		break;
 249	case FETCH_OP_COMM:
 250		val = FETCH_TOKEN_COMM;
 251		break;
 252	case FETCH_OP_DATA:
 253		val = (unsigned long)code->data;
 254		break;
 255	case FETCH_OP_FOFFS:
 256		val = translate_user_vaddr(code->immediate);
 257		break;
 258	default:
 259		return -EILSEQ;
 260	}
 261	code++;
 262
 263	return process_fetch_insn_bottom(code, val, dest, base);
 264}
 265NOKPROBE_SYMBOL(process_fetch_insn)
 266
 267static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
 268{
 269	rwlock_init(&filter->rwlock);
 270	filter->nr_systemwide = 0;
 271	INIT_LIST_HEAD(&filter->perf_events);
 272}
 273
 274static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
 275{
 276	return !filter->nr_systemwide && list_empty(&filter->perf_events);
 277}
 278
 279static inline bool is_ret_probe(struct trace_uprobe *tu)
 280{
 281	return tu->consumer.ret_handler != NULL;
 282}
 283
 284static bool trace_uprobe_is_busy(struct dyn_event *ev)
 285{
 286	struct trace_uprobe *tu = to_trace_uprobe(ev);
 287
 288	return trace_probe_is_enabled(&tu->tp);
 289}
 290
 291static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
 292					    int argc, const char **argv)
 293{
 294	char buf[MAX_ARGSTR_LEN + 1];
 295	int len;
 296
 297	if (!argc)
 298		return true;
 299
 300	len = strlen(tu->filename);
 301	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
 302		return false;
 303
 304	if (tu->ref_ctr_offset == 0)
 305		snprintf(buf, sizeof(buf), "0x%0*lx",
 306				(int)(sizeof(void *) * 2), tu->offset);
 307	else
 308		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
 309				(int)(sizeof(void *) * 2), tu->offset,
 310				tu->ref_ctr_offset);
 311	if (strcmp(buf, &argv[0][len + 1]))
 312		return false;
 313
 314	argc--; argv++;
 315
 316	return trace_probe_match_command_args(&tu->tp, argc, argv);
 317}
 318
 319static bool trace_uprobe_match(const char *system, const char *event,
 320			int argc, const char **argv, struct dyn_event *ev)
 321{
 322	struct trace_uprobe *tu = to_trace_uprobe(ev);
 323
 324	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
 325	   (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
 326	   trace_uprobe_match_command_head(tu, argc, argv);
 327}
 328
 329static nokprobe_inline struct trace_uprobe *
 330trace_uprobe_primary_from_call(struct trace_event_call *call)
 331{
 332	struct trace_probe *tp;
 333
 334	tp = trace_probe_primary_from_call(call);
 335	if (WARN_ON_ONCE(!tp))
 336		return NULL;
 337
 338	return container_of(tp, struct trace_uprobe, tp);
 339}
 340
 341/*
 342 * Allocate new trace_uprobe and initialize it (including uprobes).
 343 */
 344static struct trace_uprobe *
 345alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 346{
 347	struct trace_uprobe *tu;
 348	int ret;
 349
 350	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
 351	if (!tu)
 352		return ERR_PTR(-ENOMEM);
 353
 354	ret = trace_probe_init(&tu->tp, event, group);
 355	if (ret < 0)
 356		goto error;
 357
 358	dyn_event_init(&tu->devent, &trace_uprobe_ops);
 359	tu->consumer.handler = uprobe_dispatcher;
 360	if (is_ret)
 361		tu->consumer.ret_handler = uretprobe_dispatcher;
 362	init_trace_uprobe_filter(&tu->filter);
 363	return tu;
 364
 365error:
 366	kfree(tu);
 367
 368	return ERR_PTR(ret);
 369}
 370
 371static void free_trace_uprobe(struct trace_uprobe *tu)
 372{
 373	if (!tu)
 374		return;
 375
 376	path_put(&tu->path);
 377	trace_probe_cleanup(&tu->tp);
 378	kfree(tu->filename);
 379	kfree(tu);
 380}
 381
 382static struct trace_uprobe *find_probe_event(const char *event, const char *group)
 383{
 384	struct dyn_event *pos;
 385	struct trace_uprobe *tu;
 386
 387	for_each_trace_uprobe(tu, pos)
 388		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
 389		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
 390			return tu;
 391
 392	return NULL;
 393}
 394
 395/* Unregister a trace_uprobe and probe_event */
 396static int unregister_trace_uprobe(struct trace_uprobe *tu)
 397{
 398	int ret;
 399
 400	if (trace_probe_has_sibling(&tu->tp))
 401		goto unreg;
 402
 403	ret = unregister_uprobe_event(tu);
 404	if (ret)
 405		return ret;
 406
 407unreg:
 408	dyn_event_remove(&tu->devent);
 409	trace_probe_unlink(&tu->tp);
 410	free_trace_uprobe(tu);
 411	return 0;
 412}
 413
 414static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
 415					 struct trace_uprobe *comp)
 416{
 417	struct trace_probe_event *tpe = orig->tp.event;
 418	struct trace_probe *pos;
 419	struct inode *comp_inode = d_real_inode(comp->path.dentry);
 420	int i;
 421
 422	list_for_each_entry(pos, &tpe->probes, list) {
 423		orig = container_of(pos, struct trace_uprobe, tp);
 424		if (comp_inode != d_real_inode(orig->path.dentry) ||
 425		    comp->offset != orig->offset)
 426			continue;
 427
 428		/*
 429		 * trace_probe_compare_arg_type() ensured that nr_args and
  430		 * each argument name and type are the same. Let's compare comm.
 431		 */
 432		for (i = 0; i < orig->tp.nr_args; i++) {
 433			if (strcmp(orig->tp.args[i].comm,
 434				   comp->tp.args[i].comm))
 435				break;
 436		}
 437
 438		if (i == orig->tp.nr_args)
 439			return true;
 440	}
 441
 442	return false;
 443}
 444
 445static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
 446{
 447	int ret;
 448
 449	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
 450	if (ret) {
  451		/* Note that argument numbering starts at index 2 */
 452		trace_probe_log_set_index(ret + 1);
 453		trace_probe_log_err(0, DIFF_ARG_TYPE);
 454		return -EEXIST;
 455	}
 456	if (trace_uprobe_has_same_uprobe(to, tu)) {
 457		trace_probe_log_set_index(0);
 458		trace_probe_log_err(0, SAME_PROBE);
 459		return -EEXIST;
 460	}
 461
 462	/* Append to existing event */
 463	ret = trace_probe_append(&tu->tp, &to->tp);
 464	if (!ret)
 465		dyn_event_add(&tu->devent);
 466
 467	return ret;
 468}
 469
 470/*
  471 * A uprobe with multiple reference counters is not allowed, i.e.
  472 * if the inode and offset match, the reference counter offset
  473 * *must* match as well. There is one exception: if the user is
  474 * replacing an old trace_uprobe with a new one (same group/event),
  475 * then we allow the same uprobe with a new reference counter, as
  476 * long as the new one does not conflict with any other existing
  477 * ones.
 478 */
 479static int validate_ref_ctr_offset(struct trace_uprobe *new)
 480{
 481	struct dyn_event *pos;
 482	struct trace_uprobe *tmp;
 483	struct inode *new_inode = d_real_inode(new->path.dentry);
 484
 485	for_each_trace_uprobe(tmp, pos) {
 486		if (new_inode == d_real_inode(tmp->path.dentry) &&
 487		    new->offset == tmp->offset &&
 488		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
 489			pr_warn("Reference counter offset mismatch.");
 490			return -EINVAL;
 491		}
 492	}
 493	return 0;
 494}
 495
 496/* Register a trace_uprobe and probe_event */
 497static int register_trace_uprobe(struct trace_uprobe *tu)
 498{
 499	struct trace_uprobe *old_tu;
 500	int ret;
 501
 502	mutex_lock(&event_mutex);
 503
 504	ret = validate_ref_ctr_offset(tu);
 505	if (ret)
 506		goto end;
 507
 508	/* register as an event */
 509	old_tu = find_probe_event(trace_probe_name(&tu->tp),
 510				  trace_probe_group_name(&tu->tp));
 511	if (old_tu) {
 512		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
 513			trace_probe_log_set_index(0);
 514			trace_probe_log_err(0, DIFF_PROBE_TYPE);
 515			ret = -EEXIST;
 516		} else {
 517			ret = append_trace_uprobe(tu, old_tu);
 518		}
 519		goto end;
 520	}
 521
 522	ret = register_uprobe_event(tu);
 523	if (ret) {
 524		pr_warn("Failed to register probe event(%d)\n", ret);
 525		goto end;
 526	}
 527
 528	dyn_event_add(&tu->devent);
 529
 530end:
 531	mutex_unlock(&event_mutex);
 532
 533	return ret;
 534}
 535
 536/*
 537 * Argument syntax:
 538 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 539 */
 540static int trace_uprobe_create(int argc, const char **argv)
 541{
 542	struct trace_uprobe *tu;
 543	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
 544	char *arg, *filename, *rctr, *rctr_end, *tmp;
 545	char buf[MAX_EVENT_NAME_LEN];
 546	struct path path;
 547	unsigned long offset, ref_ctr_offset;
 548	bool is_return = false;
 549	int i, ret;
 550
 551	ret = 0;
 552	ref_ctr_offset = 0;
 553
 554	switch (argv[0][0]) {
 555	case 'r':
 556		is_return = true;
 557		break;
 558	case 'p':
 559		break;
 560	default:
 561		return -ECANCELED;
 562	}
 563
 564	if (argc < 2)
 565		return -ECANCELED;
 566
 567	if (argv[0][1] == ':')
 568		event = &argv[0][2];
 569
 570	if (!strchr(argv[1], '/'))
 571		return -ECANCELED;
 572
 573	filename = kstrdup(argv[1], GFP_KERNEL);
 574	if (!filename)
 575		return -ENOMEM;
 576
 577	/* Find the last occurrence, in case the path contains ':' too. */
 578	arg = strrchr(filename, ':');
 579	if (!arg || !isdigit(arg[1])) {
 580		kfree(filename);
 581		return -ECANCELED;
 582	}
 583
 584	trace_probe_log_init("trace_uprobe", argc, argv);
 585	trace_probe_log_set_index(1);	/* filename is the 2nd argument */
 586
 587	*arg++ = '\0';
 588	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
 589	if (ret) {
 590		trace_probe_log_err(0, FILE_NOT_FOUND);
 591		kfree(filename);
 592		trace_probe_log_clear();
 593		return ret;
 594	}
 595	if (!d_is_reg(path.dentry)) {
 596		trace_probe_log_err(0, NO_REGULAR_FILE);
 597		ret = -EINVAL;
 598		goto fail_address_parse;
 599	}
 600
 601	/* Parse reference counter offset if specified. */
 602	rctr = strchr(arg, '(');
 603	if (rctr) {
 604		rctr_end = strchr(rctr, ')');
 605		if (!rctr_end) {
 606			ret = -EINVAL;
 607			rctr_end = rctr + strlen(rctr);
 608			trace_probe_log_err(rctr_end - filename,
 609					    REFCNT_OPEN_BRACE);
 610			goto fail_address_parse;
 611		} else if (rctr_end[1] != '\0') {
 612			ret = -EINVAL;
 613			trace_probe_log_err(rctr_end + 1 - filename,
 614					    BAD_REFCNT_SUFFIX);
 615			goto fail_address_parse;
 616		}
 617
 618		*rctr++ = '\0';
 619		*rctr_end = '\0';
 620		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
 621		if (ret) {
 622			trace_probe_log_err(rctr - filename, BAD_REFCNT);
 623			goto fail_address_parse;
 624		}
 625	}
 626
 627	/* Parse uprobe offset. */
 628	ret = kstrtoul(arg, 0, &offset);
 629	if (ret) {
 630		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
 631		goto fail_address_parse;
 632	}
 633
 634	/* setup a probe */
 635	trace_probe_log_set_index(0);
 636	if (event) {
 637		ret = traceprobe_parse_event_name(&event, &group, buf,
 638						  event - argv[0]);
 639		if (ret)
 640			goto fail_address_parse;
 641	} else {
 642		char *tail;
 643		char *ptr;
 644
 645		tail = kstrdup(kbasename(filename), GFP_KERNEL);
 646		if (!tail) {
 647			ret = -ENOMEM;
 648			goto fail_address_parse;
 649		}
 650
 651		ptr = strpbrk(tail, ".-_");
 652		if (ptr)
 653			*ptr = '\0';
 654
 655		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
 656		event = buf;
 657		kfree(tail);
 658	}
 659
 660	argc -= 2;
 661	argv += 2;
 662
 663	tu = alloc_trace_uprobe(group, event, argc, is_return);
 664	if (IS_ERR(tu)) {
 665		ret = PTR_ERR(tu);
 666		/* This must return -ENOMEM otherwise there is a bug */
 667		WARN_ON_ONCE(ret != -ENOMEM);
 668		goto fail_address_parse;
 669	}
 670	tu->offset = offset;
 671	tu->ref_ctr_offset = ref_ctr_offset;
 672	tu->path = path;
 673	tu->filename = filename;
 674
 675	/* parse arguments */
 676	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
 677		tmp = kstrdup(argv[i], GFP_KERNEL);
 678		if (!tmp) {
 679			ret = -ENOMEM;
 680			goto error;
 681		}
 682
 683		trace_probe_log_set_index(i + 2);
 684		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
 685					is_return ? TPARG_FL_RETURN : 0);
 686		kfree(tmp);
 687		if (ret)
 688			goto error;
 689	}
 690
 691	ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
 692	if (ret < 0)
 693		goto error;
 694
 695	ret = register_trace_uprobe(tu);
 696	if (!ret)
 697		goto out;
 698
 699error:
 700	free_trace_uprobe(tu);
 701out:
 702	trace_probe_log_clear();
 703	return ret;
 704
 705fail_address_parse:
 706	trace_probe_log_clear();
 707	path_put(&path);
 708	kfree(filename);
 709
 710	return ret;
 711}
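For reference, the p|r command syntax parsed above normally reaches this function through writes to the tracefs uprobe_events file (see probes_write() further down). A minimal user-space sketch, assuming tracefs is mounted at /sys/kernel/tracing and using a purely hypothetical binary path, offset and event name:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/tracing/uprobe_events", "a");

		if (!f)
			return 1;
		/* p[:EVENT] PATH:OFFSET -- "r:" instead of "p:" would request a return probe */
		fprintf(f, "p:my_bash_probe /bin/bash:0x4245c0\n");
		/* writing "-:my_bash_probe" later removes the event again */
		fclose(f);
		return 0;
	}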
 712
 713static int create_or_delete_trace_uprobe(int argc, char **argv)
 714{
 715	int ret;
 716
 717	if (argv[0][0] == '-')
 718		return dyn_event_release(argc, argv, &trace_uprobe_ops);
 719
 720	ret = trace_uprobe_create(argc, (const char **)argv);
 721	return ret == -ECANCELED ? -EINVAL : ret;
 722}
 723
 724static int trace_uprobe_release(struct dyn_event *ev)
 725{
 726	struct trace_uprobe *tu = to_trace_uprobe(ev);
 727
 728	return unregister_trace_uprobe(tu);
 729}
 730
 731/* Probes listing interfaces */
 732static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
 733{
 734	struct trace_uprobe *tu = to_trace_uprobe(ev);
 735	char c = is_ret_probe(tu) ? 'r' : 'p';
 736	int i;
 737
 738	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
 739			trace_probe_name(&tu->tp), tu->filename,
 740			(int)(sizeof(void *) * 2), tu->offset);
 741
 742	if (tu->ref_ctr_offset)
 743		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
 744
 745	for (i = 0; i < tu->tp.nr_args; i++)
 746		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
 747
 748	seq_putc(m, '\n');
 749	return 0;
 750}
 751
 752static int probes_seq_show(struct seq_file *m, void *v)
 753{
 754	struct dyn_event *ev = v;
 755
 756	if (!is_trace_uprobe(ev))
 757		return 0;
 758
 759	return trace_uprobe_show(m, ev);
 760}
 761
 762static const struct seq_operations probes_seq_op = {
 763	.start  = dyn_event_seq_start,
 764	.next   = dyn_event_seq_next,
 765	.stop   = dyn_event_seq_stop,
 766	.show   = probes_seq_show
 767};
 768
 769static int probes_open(struct inode *inode, struct file *file)
 770{
 771	int ret;
 772
 773	ret = security_locked_down(LOCKDOWN_TRACEFS);
 774	if (ret)
 775		return ret;
 776
 777	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 778		ret = dyn_events_release_all(&trace_uprobe_ops);
 779		if (ret)
 780			return ret;
 781	}
 782
 783	return seq_open(file, &probes_seq_op);
 784}
 785
 786static ssize_t probes_write(struct file *file, const char __user *buffer,
 787			    size_t count, loff_t *ppos)
 788{
 789	return trace_parse_run_command(file, buffer, count, ppos,
 790					create_or_delete_trace_uprobe);
 791}
 792
 793static const struct file_operations uprobe_events_ops = {
 794	.owner		= THIS_MODULE,
 795	.open		= probes_open,
 796	.read		= seq_read,
 797	.llseek		= seq_lseek,
 798	.release	= seq_release,
 799	.write		= probes_write,
 800};
 801
 802/* Probes profiling interfaces */
 803static int probes_profile_seq_show(struct seq_file *m, void *v)
 804{
 805	struct dyn_event *ev = v;
 806	struct trace_uprobe *tu;
 807
 808	if (!is_trace_uprobe(ev))
 809		return 0;
 810
 811	tu = to_trace_uprobe(ev);
 812	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
 813			trace_probe_name(&tu->tp), tu->nhit);
 814	return 0;
 815}
 816
 817static const struct seq_operations profile_seq_op = {
 818	.start  = dyn_event_seq_start,
 819	.next   = dyn_event_seq_next,
 820	.stop   = dyn_event_seq_stop,
 821	.show	= probes_profile_seq_show
 822};
 823
 824static int profile_open(struct inode *inode, struct file *file)
 825{
 826	int ret;
 827
 828	ret = security_locked_down(LOCKDOWN_TRACEFS);
 829	if (ret)
 830		return ret;
 831
 832	return seq_open(file, &profile_seq_op);
 833}
 834
 835static const struct file_operations uprobe_profile_ops = {
 836	.owner		= THIS_MODULE,
 837	.open		= profile_open,
 838	.read		= seq_read,
 839	.llseek		= seq_lseek,
 840	.release	= seq_release,
 841};
 842
 843struct uprobe_cpu_buffer {
 844	struct mutex mutex;
 845	void *buf;
 846};
 847static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
 848static int uprobe_buffer_refcnt;
 849
 850static int uprobe_buffer_init(void)
 851{
 852	int cpu, err_cpu;
 853
 854	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
 855	if (uprobe_cpu_buffer == NULL)
 856		return -ENOMEM;
 857
 858	for_each_possible_cpu(cpu) {
 859		struct page *p = alloc_pages_node(cpu_to_node(cpu),
 860						  GFP_KERNEL, 0);
 861		if (p == NULL) {
 862			err_cpu = cpu;
 863			goto err;
 864		}
 865		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
 866		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
 867	}
 868
 869	return 0;
 870
 871err:
 872	for_each_possible_cpu(cpu) {
 873		if (cpu == err_cpu)
 874			break;
 875		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
 876	}
 877
 878	free_percpu(uprobe_cpu_buffer);
 879	return -ENOMEM;
 880}
 881
 882static int uprobe_buffer_enable(void)
 883{
 884	int ret = 0;
 885
 886	BUG_ON(!mutex_is_locked(&event_mutex));
 887
 888	if (uprobe_buffer_refcnt++ == 0) {
 889		ret = uprobe_buffer_init();
 890		if (ret < 0)
 891			uprobe_buffer_refcnt--;
 892	}
 893
 894	return ret;
 895}
 896
 897static void uprobe_buffer_disable(void)
 898{
 899	int cpu;
 900
 901	BUG_ON(!mutex_is_locked(&event_mutex));
 902
 903	if (--uprobe_buffer_refcnt == 0) {
 904		for_each_possible_cpu(cpu)
 905			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
 906							     cpu)->buf);
 907
 908		free_percpu(uprobe_cpu_buffer);
 909		uprobe_cpu_buffer = NULL;
 910	}
 911}
 912
 913static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
 914{
 915	struct uprobe_cpu_buffer *ucb;
 916	int cpu;
 917
 918	cpu = raw_smp_processor_id();
 919	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
 920
 921	/*
 922	 * Use per-cpu buffers for fastest access, but we might migrate
 923	 * so the mutex makes sure we have sole access to it.
 924	 */
 925	mutex_lock(&ucb->mutex);
 926
 927	return ucb;
 928}
 929
 930static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
 931{
 932	mutex_unlock(&ucb->mutex);
 933}
 934
 935static void __uprobe_trace_func(struct trace_uprobe *tu,
 936				unsigned long func, struct pt_regs *regs,
 937				struct uprobe_cpu_buffer *ucb, int dsize,
 938				struct trace_event_file *trace_file)
 939{
 940	struct uprobe_trace_entry_head *entry;
 941	struct ring_buffer_event *event;
 942	struct ring_buffer *buffer;
 943	void *data;
 944	int size, esize;
 945	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
 946
 947	WARN_ON(call != trace_file->event_call);
 948
 949	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
 950		return;
 951
 952	if (trace_trigger_soft_disabled(trace_file))
 953		return;
 954
 955	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 956	size = esize + tu->tp.size + dsize;
 957	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 958						call->event.type, size, 0, 0);
 959	if (!event)
 960		return;
 961
 962	entry = ring_buffer_event_data(event);
 963	if (is_ret_probe(tu)) {
 964		entry->vaddr[0] = func;
 965		entry->vaddr[1] = instruction_pointer(regs);
 966		data = DATAOF_TRACE_ENTRY(entry, true);
 967	} else {
 968		entry->vaddr[0] = instruction_pointer(regs);
 969		data = DATAOF_TRACE_ENTRY(entry, false);
 970	}
 971
 972	memcpy(data, ucb->buf, tu->tp.size + dsize);
 973
 974	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
 975}
 976
 977/* uprobe handler */
 978static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
 979			     struct uprobe_cpu_buffer *ucb, int dsize)
 980{
 981	struct event_file_link *link;
 982
 983	if (is_ret_probe(tu))
 984		return 0;
 985
 986	rcu_read_lock();
 987	trace_probe_for_each_link_rcu(link, &tu->tp)
 988		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
 989	rcu_read_unlock();
 990
 991	return 0;
 992}
 993
 994static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
 995				 struct pt_regs *regs,
 996				 struct uprobe_cpu_buffer *ucb, int dsize)
 997{
 998	struct event_file_link *link;
 999
1000	rcu_read_lock();
1001	trace_probe_for_each_link_rcu(link, &tu->tp)
1002		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
1003	rcu_read_unlock();
1004}
1005
1006/* Event entry printers */
1007static enum print_line_t
1008print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1009{
1010	struct uprobe_trace_entry_head *entry;
1011	struct trace_seq *s = &iter->seq;
1012	struct trace_uprobe *tu;
1013	u8 *data;
1014
1015	entry = (struct uprobe_trace_entry_head *)iter->ent;
1016	tu = trace_uprobe_primary_from_call(
1017		container_of(event, struct trace_event_call, event));
1018	if (unlikely(!tu))
1019		goto out;
1020
1021	if (is_ret_probe(tu)) {
1022		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1023				 trace_probe_name(&tu->tp),
1024				 entry->vaddr[1], entry->vaddr[0]);
1025		data = DATAOF_TRACE_ENTRY(entry, true);
1026	} else {
1027		trace_seq_printf(s, "%s: (0x%lx)",
1028				 trace_probe_name(&tu->tp),
1029				 entry->vaddr[0]);
1030		data = DATAOF_TRACE_ENTRY(entry, false);
1031	}
1032
1033	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1034		goto out;
1035
1036	trace_seq_putc(s, '\n');
1037
1038 out:
1039	return trace_handle_return(s);
1040}
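With the format strings above, a rendered trace line looks roughly like the following (event names, addresses and argument values are hypothetical); the single-address form is an entry probe, while the return-probe form shows the return site followed by the probed function address:

	my_probe: (0x401156) arg1=0x2a
	my_rprobe: (0x401156 <- 0x4011b0) arg1=0x2a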
1041
1042typedef bool (*filter_func_t)(struct uprobe_consumer *self,
1043				enum uprobe_filter_ctx ctx,
1044				struct mm_struct *mm);
1045
1046static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1047{
1048	int ret;
1049
1050	tu->consumer.filter = filter;
1051	tu->inode = d_real_inode(tu->path.dentry);
1052
1053	if (tu->ref_ctr_offset)
1054		ret = uprobe_register_refctr(tu->inode, tu->offset,
1055				tu->ref_ctr_offset, &tu->consumer);
1056	else
1057		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
1058
1059	if (ret)
1060		tu->inode = NULL;
1061
1062	return ret;
1063}
1064
1065static void __probe_event_disable(struct trace_probe *tp)
1066{
1067	struct trace_probe *pos;
1068	struct trace_uprobe *tu;
1069
1070	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1071		tu = container_of(pos, struct trace_uprobe, tp);
1072		if (!tu->inode)
1073			continue;
1074
1075		WARN_ON(!uprobe_filter_is_empty(&tu->filter));
1076
1077		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
1078		tu->inode = NULL;
1079	}
1080}
1081
1082static int probe_event_enable(struct trace_event_call *call,
1083			struct trace_event_file *file, filter_func_t filter)
1084{
1085	struct trace_probe *pos, *tp;
1086	struct trace_uprobe *tu;
1087	bool enabled;
1088	int ret;
1089
1090	tp = trace_probe_primary_from_call(call);
1091	if (WARN_ON_ONCE(!tp))
1092		return -ENODEV;
1093	enabled = trace_probe_is_enabled(tp);
1094
1095	/* This may also change "enabled" state */
1096	if (file) {
1097		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1098			return -EINTR;
1099
1100		ret = trace_probe_add_file(tp, file);
1101		if (ret < 0)
1102			return ret;
1103	} else {
1104		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1105			return -EINTR;
1106
1107		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1108	}
1109
1110	tu = container_of(tp, struct trace_uprobe, tp);
1111	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
1112
1113	if (enabled)
1114		return 0;
1115
1116	ret = uprobe_buffer_enable();
1117	if (ret)
1118		goto err_flags;
1119
1120	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1121		tu = container_of(pos, struct trace_uprobe, tp);
1122		ret = trace_uprobe_enable(tu, filter);
1123		if (ret) {
1124			__probe_event_disable(tp);
1125			goto err_buffer;
1126		}
1127	}
1128
1129	return 0;
1130
1131 err_buffer:
1132	uprobe_buffer_disable();
1133
1134 err_flags:
1135	if (file)
1136		trace_probe_remove_file(tp, file);
1137	else
1138		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1139
1140	return ret;
1141}
1142
1143static void probe_event_disable(struct trace_event_call *call,
1144				struct trace_event_file *file)
1145{
1146	struct trace_probe *tp;
1147
1148	tp = trace_probe_primary_from_call(call);
1149	if (WARN_ON_ONCE(!tp))
1150		return;
1151
1152	if (!trace_probe_is_enabled(tp))
1153		return;
1154
1155	if (file) {
1156		if (trace_probe_remove_file(tp, file) < 0)
1157			return;
1158
1159		if (trace_probe_is_enabled(tp))
1160			return;
1161	} else
1162		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1163
1164	__probe_event_disable(tp);
1165	uprobe_buffer_disable();
1166}
1167
1168static int uprobe_event_define_fields(struct trace_event_call *event_call)
1169{
1170	int ret, size;
1171	struct uprobe_trace_entry_head field;
1172	struct trace_uprobe *tu;
1173
1174	tu = trace_uprobe_primary_from_call(event_call);
1175	if (unlikely(!tu))
1176		return -ENODEV;
1177
1178	if (is_ret_probe(tu)) {
1179		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1180		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1181		size = SIZEOF_TRACE_ENTRY(true);
1182	} else {
1183		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1184		size = SIZEOF_TRACE_ENTRY(false);
1185	}
1186
1187	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1188}
1189
1190#ifdef CONFIG_PERF_EVENTS
1191static bool
1192__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1193{
1194	struct perf_event *event;
1195
1196	if (filter->nr_systemwide)
1197		return true;
1198
1199	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1200		if (event->hw.target->mm == mm)
1201			return true;
1202	}
1203
1204	return false;
1205}
1206
1207static inline bool
1208uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1209{
1210	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
1211}
1212
1213static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1214{
1215	bool done;
1216
1217	write_lock(&tu->filter.rwlock);
1218	if (event->hw.target) {
1219		list_del(&event->hw.tp_list);
1220		done = tu->filter.nr_systemwide ||
1221			(event->hw.target->flags & PF_EXITING) ||
1222			uprobe_filter_event(tu, event);
1223	} else {
1224		tu->filter.nr_systemwide--;
1225		done = tu->filter.nr_systemwide;
1226	}
1227	write_unlock(&tu->filter.rwlock);
1228
1229	if (!done)
1230		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1231
1232	return 0;
1233}
1234
1235static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1236{
1237	bool done;
1238	int err;
1239
1240	write_lock(&tu->filter.rwlock);
1241	if (event->hw.target) {
1242		/*
1243		 * event->parent != NULL means copy_process(), we can avoid
1244		 * uprobe_apply(). current->mm must be probed and we can rely
1245		 * on dup_mmap() which preserves the already installed bp's.
1246		 *
1247		 * attr.enable_on_exec means that exec/mmap will install the
1248		 * breakpoints we need.
1249		 */
1250		done = tu->filter.nr_systemwide ||
1251			event->parent || event->attr.enable_on_exec ||
1252			uprobe_filter_event(tu, event);
1253		list_add(&event->hw.tp_list, &tu->filter.perf_events);
1254	} else {
1255		done = tu->filter.nr_systemwide;
1256		tu->filter.nr_systemwide++;
1257	}
1258	write_unlock(&tu->filter.rwlock);
1259
1260	err = 0;
1261	if (!done) {
1262		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1263		if (err)
1264			uprobe_perf_close(tu, event);
1265	}
1266	return err;
1267}
1268
1269static int uprobe_perf_multi_call(struct trace_event_call *call,
1270				  struct perf_event *event,
1271		int (*op)(struct trace_uprobe *tu, struct perf_event *event))
1272{
1273	struct trace_probe *pos, *tp;
1274	struct trace_uprobe *tu;
1275	int ret = 0;
1276
1277	tp = trace_probe_primary_from_call(call);
1278	if (WARN_ON_ONCE(!tp))
1279		return -ENODEV;
1280
1281	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1282		tu = container_of(pos, struct trace_uprobe, tp);
1283		ret = op(tu, event);
1284		if (ret)
1285			break;
1286	}
1287
1288	return ret;
1289}
1290static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1291				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1292{
1293	struct trace_uprobe *tu;
1294	int ret;
1295
1296	tu = container_of(uc, struct trace_uprobe, consumer);
1297	read_lock(&tu->filter.rwlock);
1298	ret = __uprobe_perf_filter(&tu->filter, mm);
1299	read_unlock(&tu->filter.rwlock);
1300
1301	return ret;
1302}
1303
1304static void __uprobe_perf_func(struct trace_uprobe *tu,
1305			       unsigned long func, struct pt_regs *regs,
1306			       struct uprobe_cpu_buffer *ucb, int dsize)
1307{
1308	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1309	struct uprobe_trace_entry_head *entry;
1310	struct hlist_head *head;
1311	void *data;
1312	int size, esize;
1313	int rctx;
1314
1315	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1316		return;
1317
1318	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1319
1320	size = esize + tu->tp.size + dsize;
1321	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1322	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1323		return;
1324
1325	preempt_disable();
1326	head = this_cpu_ptr(call->perf_events);
1327	if (hlist_empty(head))
1328		goto out;
1329
1330	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1331	if (!entry)
1332		goto out;
1333
1334	if (is_ret_probe(tu)) {
1335		entry->vaddr[0] = func;
1336		entry->vaddr[1] = instruction_pointer(regs);
1337		data = DATAOF_TRACE_ENTRY(entry, true);
1338	} else {
1339		entry->vaddr[0] = instruction_pointer(regs);
1340		data = DATAOF_TRACE_ENTRY(entry, false);
1341	}
1342
1343	memcpy(data, ucb->buf, tu->tp.size + dsize);
1344
1345	if (size - esize > tu->tp.size + dsize) {
1346		int len = tu->tp.size + dsize;
1347
1348		memset(data + len, 0, size - esize - len);
1349	}
1350
1351	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1352			      head, NULL);
1353 out:
1354	preempt_enable();
1355}
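The size computation above rounds the record up so that the record plus perf's u32 size header stays u64-aligned. A worked example with hypothetical sizes: if esize + tu->tp.size + dsize = 22, then

	size = ALIGN(22 + sizeof(u32), sizeof(u64)) - sizeof(u32)
	     = ALIGN(26, 8) - 4 = 32 - 4 = 28

and 28 plus the 4-byte header is 32, a multiple of 8.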
1356
1357/* uprobe profile handler */
1358static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1359			    struct uprobe_cpu_buffer *ucb, int dsize)
1360{
1361	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1362		return UPROBE_HANDLER_REMOVE;
1363
1364	if (!is_ret_probe(tu))
1365		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
1366	return 0;
1367}
1368
1369static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1370				struct pt_regs *regs,
1371				struct uprobe_cpu_buffer *ucb, int dsize)
1372{
1373	__uprobe_perf_func(tu, func, regs, ucb, dsize);
1374}
1375
1376int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1377			const char **filename, u64 *probe_offset,
1378			bool perf_type_tracepoint)
1379{
1380	const char *pevent = trace_event_name(event->tp_event);
1381	const char *group = event->tp_event->class->system;
1382	struct trace_uprobe *tu;
1383
1384	if (perf_type_tracepoint)
1385		tu = find_probe_event(pevent, group);
1386	else
1387		tu = event->tp_event->data;
1388	if (!tu)
1389		return -EINVAL;
1390
1391	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1392				    : BPF_FD_TYPE_UPROBE;
1393	*filename = tu->filename;
1394	*probe_offset = tu->offset;
1395	return 0;
1396}
1397#endif	/* CONFIG_PERF_EVENTS */
1398
1399static int
1400trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1401		      void *data)
1402{
1403	struct trace_event_file *file = data;
1404
1405	switch (type) {
1406	case TRACE_REG_REGISTER:
1407		return probe_event_enable(event, file, NULL);
1408
1409	case TRACE_REG_UNREGISTER:
1410		probe_event_disable(event, file);
1411		return 0;
1412
1413#ifdef CONFIG_PERF_EVENTS
1414	case TRACE_REG_PERF_REGISTER:
1415		return probe_event_enable(event, NULL, uprobe_perf_filter);
1416
1417	case TRACE_REG_PERF_UNREGISTER:
1418		probe_event_disable(event, NULL);
1419		return 0;
1420
1421	case TRACE_REG_PERF_OPEN:
1422		return uprobe_perf_multi_call(event, data, uprobe_perf_open);
1423
1424	case TRACE_REG_PERF_CLOSE:
1425		return uprobe_perf_multi_call(event, data, uprobe_perf_close);
1426
1427#endif
1428	default:
1429		return 0;
1430	}
1431	return 0;
1432}
1433
1434static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1435{
1436	struct trace_uprobe *tu;
1437	struct uprobe_dispatch_data udd;
1438	struct uprobe_cpu_buffer *ucb;
1439	int dsize, esize;
1440	int ret = 0;
1441
1442
1443	tu = container_of(con, struct trace_uprobe, consumer);
1444	tu->nhit++;
1445
1446	udd.tu = tu;
1447	udd.bp_addr = instruction_pointer(regs);
1448
1449	current->utask->vaddr = (unsigned long) &udd;
1450
1451	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1452		return 0;
1453
1454	dsize = __get_data_size(&tu->tp, regs);
1455	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1456
1457	ucb = uprobe_buffer_get();
1458	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1459
1460	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1461		ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1462
1463#ifdef CONFIG_PERF_EVENTS
1464	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1465		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1466#endif
1467	uprobe_buffer_put(ucb);
1468	return ret;
1469}
1470
1471static int uretprobe_dispatcher(struct uprobe_consumer *con,
1472				unsigned long func, struct pt_regs *regs)
1473{
1474	struct trace_uprobe *tu;
1475	struct uprobe_dispatch_data udd;
1476	struct uprobe_cpu_buffer *ucb;
1477	int dsize, esize;
1478
1479	tu = container_of(con, struct trace_uprobe, consumer);
1480
1481	udd.tu = tu;
1482	udd.bp_addr = func;
1483
1484	current->utask->vaddr = (unsigned long) &udd;
1485
1486	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1487		return 0;
1488
1489	dsize = __get_data_size(&tu->tp, regs);
1490	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1491
1492	ucb = uprobe_buffer_get();
1493	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1494
1495	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1496		uretprobe_trace_func(tu, func, regs, ucb, dsize);
1497
1498#ifdef CONFIG_PERF_EVENTS
1499	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1500		uretprobe_perf_func(tu, func, regs, ucb, dsize);
1501#endif
1502	uprobe_buffer_put(ucb);
1503	return 0;
1504}
1505
1506static struct trace_event_functions uprobe_funcs = {
1507	.trace		= print_uprobe_event
1508};
1509
1510static inline void init_trace_event_call(struct trace_uprobe *tu)
1511{
1512	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1513
1514	call->event.funcs = &uprobe_funcs;
1515	call->class->define_fields = uprobe_event_define_fields;
1516
1517	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1518	call->class->reg = trace_uprobe_register;
1519}
1520
1521static int register_uprobe_event(struct trace_uprobe *tu)
1522{
1523	init_trace_event_call(tu);
1524
1525	return trace_probe_register_event_call(&tu->tp);
1526}
1527
1528static int unregister_uprobe_event(struct trace_uprobe *tu)
1529{
1530	return trace_probe_unregister_event_call(&tu->tp);
1531}
1532
1533#ifdef CONFIG_PERF_EVENTS
1534struct trace_event_call *
1535create_local_trace_uprobe(char *name, unsigned long offs,
1536			  unsigned long ref_ctr_offset, bool is_return)
1537{
1538	struct trace_uprobe *tu;
1539	struct path path;
1540	int ret;
1541
1542	ret = kern_path(name, LOOKUP_FOLLOW, &path);
1543	if (ret)
1544		return ERR_PTR(ret);
1545
1546	if (!d_is_reg(path.dentry)) {
1547		path_put(&path);
1548		return ERR_PTR(-EINVAL);
1549	}
1550
1551	/*
 1552	 * local trace_uprobes are not added to dyn_event, so they are never
 1553	 * searched in find_probe_event(). Therefore, there is no concern of
1554	 * duplicated name "DUMMY_EVENT" here.
1555	 */
1556	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1557				is_return);
1558
1559	if (IS_ERR(tu)) {
1560		pr_info("Failed to allocate trace_uprobe.(%d)\n",
1561			(int)PTR_ERR(tu));
1562		path_put(&path);
1563		return ERR_CAST(tu);
1564	}
1565
1566	tu->offset = offs;
1567	tu->path = path;
1568	tu->ref_ctr_offset = ref_ctr_offset;
1569	tu->filename = kstrdup(name, GFP_KERNEL);
1570	init_trace_event_call(tu);
1571
1572	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
1573		ret = -ENOMEM;
1574		goto error;
1575	}
1576
1577	return trace_probe_event_call(&tu->tp);
1578error:
1579	free_trace_uprobe(tu);
1580	return ERR_PTR(ret);
1581}
1582
1583void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1584{
1585	struct trace_uprobe *tu;
1586
1587	tu = trace_uprobe_primary_from_call(event_call);
1588
1589	free_trace_uprobe(tu);
1590}
1591#endif /* CONFIG_PERF_EVENTS */
1592
 1593/* Make a trace interface for controlling probe points */
1594static __init int init_uprobe_trace(void)
1595{
1596	struct dentry *d_tracer;
1597	int ret;
1598
1599	ret = dyn_event_register(&trace_uprobe_ops);
1600	if (ret)
1601		return ret;
1602
1603	d_tracer = tracing_init_dentry();
1604	if (IS_ERR(d_tracer))
1605		return 0;
1606
1607	trace_create_file("uprobe_events", 0644, d_tracer,
1608				    NULL, &uprobe_events_ops);
1609	/* Profile interface */
1610	trace_create_file("uprobe_profile", 0444, d_tracer,
1611				    NULL, &uprobe_profile_ops);
1612	return 0;
1613}
1614
1615fs_initcall(init_uprobe_trace);
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * uprobes-based tracing events
   4 *
   5 * Copyright (C) IBM Corporation, 2010-2012
   6 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
   7 */
   8#define pr_fmt(fmt)	"trace_uprobe: " fmt
   9
  10#include <linux/security.h>
  11#include <linux/ctype.h>
  12#include <linux/module.h>
  13#include <linux/uaccess.h>
  14#include <linux/uprobes.h>
  15#include <linux/namei.h>
  16#include <linux/string.h>
  17#include <linux/rculist.h>
  18
  19#include "trace_dynevent.h"
  20#include "trace_probe.h"
  21#include "trace_probe_tmpl.h"
  22
  23#define UPROBE_EVENT_SYSTEM	"uprobes"
  24
  25struct uprobe_trace_entry_head {
  26	struct trace_entry	ent;
  27	unsigned long		vaddr[];
  28};
  29
  30#define SIZEOF_TRACE_ENTRY(is_return)			\
  31	(sizeof(struct uprobe_trace_entry_head) +	\
  32	 sizeof(unsigned long) * (is_return ? 2 : 1))
  33
  34#define DATAOF_TRACE_ENTRY(entry, is_return)		\
  35	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
  36
  37static int trace_uprobe_create(int argc, const char **argv);
  38static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
  39static int trace_uprobe_release(struct dyn_event *ev);
  40static bool trace_uprobe_is_busy(struct dyn_event *ev);
  41static bool trace_uprobe_match(const char *system, const char *event,
  42			int argc, const char **argv, struct dyn_event *ev);
  43
  44static struct dyn_event_operations trace_uprobe_ops = {
  45	.create = trace_uprobe_create,
  46	.show = trace_uprobe_show,
  47	.is_busy = trace_uprobe_is_busy,
  48	.free = trace_uprobe_release,
  49	.match = trace_uprobe_match,
  50};
  51
  52/*
  53 * uprobe event core functions
  54 */
  55struct trace_uprobe {
  56	struct dyn_event		devent;
  57	struct uprobe_consumer		consumer;
  58	struct path			path;
  59	struct inode			*inode;
  60	char				*filename;
  61	unsigned long			offset;
  62	unsigned long			ref_ctr_offset;
  63	unsigned long			nhit;
  64	struct trace_probe		tp;
  65};
  66
  67static bool is_trace_uprobe(struct dyn_event *ev)
  68{
  69	return ev->ops == &trace_uprobe_ops;
  70}
  71
  72static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
  73{
  74	return container_of(ev, struct trace_uprobe, devent);
  75}
  76
  77/**
  78 * for_each_trace_uprobe - iterate over the trace_uprobe list
  79 * @pos:	the struct trace_uprobe * for each entry
  80 * @dpos:	the struct dyn_event * to use as a loop cursor
  81 */
  82#define for_each_trace_uprobe(pos, dpos)	\
  83	for_each_dyn_event(dpos)		\
  84		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
  85
  86#define SIZEOF_TRACE_UPROBE(n)				\
  87	(offsetof(struct trace_uprobe, tp.args) +	\
  88	(sizeof(struct probe_arg) * (n)))
  89
  90static int register_uprobe_event(struct trace_uprobe *tu);
  91static int unregister_uprobe_event(struct trace_uprobe *tu);
  92
  93struct uprobe_dispatch_data {
  94	struct trace_uprobe	*tu;
  95	unsigned long		bp_addr;
  96};
  97
  98static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
  99static int uretprobe_dispatcher(struct uprobe_consumer *con,
 100				unsigned long func, struct pt_regs *regs);
 101
 102#ifdef CONFIG_STACK_GROWSUP
 103static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
 104{
 105	return addr - (n * sizeof(long));
 106}
 107#else
 108static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
 109{
 110	return addr + (n * sizeof(long));
 111}
 112#endif
 113
 114static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
 115{
 116	unsigned long ret;
 117	unsigned long addr = user_stack_pointer(regs);
 118
 119	addr = adjust_stack_addr(addr, n);
 120
 121	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
 122		return 0;
 123
 124	return ret;
 125}
 126
 127/*
 128 * Uprobes-specific fetch functions
 129 */
 130static nokprobe_inline int
 131probe_mem_read(void *dest, void *src, size_t size)
 132{
 133	void __user *vaddr = (void __force __user *)src;
 134
 135	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
 136}
 137
 138static nokprobe_inline int
 139probe_mem_read_user(void *dest, void *src, size_t size)
 140{
 141	return probe_mem_read(dest, src, size);
 142}
 143
 144/*
 145 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 146 * length and relative data location.
 147 */
 148static nokprobe_inline int
 149fetch_store_string(unsigned long addr, void *dest, void *base)
 150{
 151	long ret;
 152	u32 loc = *(u32 *)dest;
 153	int maxlen  = get_loc_len(loc);
 154	u8 *dst = get_loc_data(dest, base);
 155	void __user *src = (void __force __user *) addr;
 156
 157	if (unlikely(!maxlen))
 158		return -ENOMEM;
 159
 160	if (addr == FETCH_TOKEN_COMM)
 161		ret = strlcpy(dst, current->comm, maxlen);
 162	else
 163		ret = strncpy_from_user(dst, src, maxlen);
 164	if (ret >= 0) {
 165		if (ret == maxlen)
 166			dst[ret - 1] = '\0';
 167		else
 168			/*
 169			 * Include the terminating null byte. In this case it
 170			 * was copied by strncpy_from_user but not accounted
 171			 * for in ret.
 172			 */
 173			ret++;
 174		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
 175	}
 176
 177	return ret;
 178}
 179
 180static nokprobe_inline int
 181fetch_store_string_user(unsigned long addr, void *dest, void *base)
 182{
 183	return fetch_store_string(addr, dest, base);
 184}
 185
  186/* Return the length of the string, including the terminating null byte */
 187static nokprobe_inline int
 188fetch_store_strlen(unsigned long addr)
 189{
 190	int len;
 191	void __user *vaddr = (void __force __user *) addr;
 192
 193	if (addr == FETCH_TOKEN_COMM)
 194		len = strlen(current->comm) + 1;
 195	else
 196		len = strnlen_user(vaddr, MAX_STRING_SIZE);
 197
 198	return (len > MAX_STRING_SIZE) ? 0 : len;
 199}
 200
 201static nokprobe_inline int
 202fetch_store_strlen_user(unsigned long addr)
 203{
 204	return fetch_store_strlen(addr);
 205}
 206
 207static unsigned long translate_user_vaddr(unsigned long file_offset)
 208{
 209	unsigned long base_addr;
 210	struct uprobe_dispatch_data *udd;
 211
 212	udd = (void *) current->utask->vaddr;
 213
 214	base_addr = udd->bp_addr - udd->tu->offset;
 215	return base_addr + file_offset;
 216}
 217
 218/* Note that we don't verify it, since the code does not come from user space */
 219static int
 220process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
 221		   void *base)
 222{
 223	unsigned long val;
 224
 225	/* 1st stage: get value from context */
 226	switch (code->op) {
 227	case FETCH_OP_REG:
 228		val = regs_get_register(regs, code->param);
 229		break;
 230	case FETCH_OP_STACK:
 231		val = get_user_stack_nth(regs, code->param);
 232		break;
 233	case FETCH_OP_STACKP:
 234		val = user_stack_pointer(regs);
 235		break;
 236	case FETCH_OP_RETVAL:
 237		val = regs_return_value(regs);
 238		break;
 239	case FETCH_OP_IMM:
 240		val = code->immediate;
 241		break;
 242	case FETCH_OP_COMM:
 243		val = FETCH_TOKEN_COMM;
 244		break;
 245	case FETCH_OP_DATA:
 246		val = (unsigned long)code->data;
 247		break;
 248	case FETCH_OP_FOFFS:
 249		val = translate_user_vaddr(code->immediate);
 250		break;
 251	default:
 252		return -EILSEQ;
 253	}
 254	code++;
 255
 256	return process_fetch_insn_bottom(code, val, dest, base);
 257}
 258NOKPROBE_SYMBOL(process_fetch_insn)
 259
 260static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
 261{
 262	rwlock_init(&filter->rwlock);
 263	filter->nr_systemwide = 0;
 264	INIT_LIST_HEAD(&filter->perf_events);
 265}
 266
 267static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
 268{
 269	return !filter->nr_systemwide && list_empty(&filter->perf_events);
 270}
 271
 272static inline bool is_ret_probe(struct trace_uprobe *tu)
 273{
 274	return tu->consumer.ret_handler != NULL;
 275}
 276
 277static bool trace_uprobe_is_busy(struct dyn_event *ev)
 278{
 279	struct trace_uprobe *tu = to_trace_uprobe(ev);
 280
 281	return trace_probe_is_enabled(&tu->tp);
 282}
 283
 284static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
 285					    int argc, const char **argv)
 286{
 287	char buf[MAX_ARGSTR_LEN + 1];
 288	int len;
 289
 290	if (!argc)
 291		return true;
 292
 293	len = strlen(tu->filename);
 294	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
 295		return false;
 296
 297	if (tu->ref_ctr_offset == 0)
 298		snprintf(buf, sizeof(buf), "0x%0*lx",
 299				(int)(sizeof(void *) * 2), tu->offset);
 300	else
 301		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
 302				(int)(sizeof(void *) * 2), tu->offset,
 303				tu->ref_ctr_offset);
 304	if (strcmp(buf, &argv[0][len + 1]))
 305		return false;
 306
 307	argc--; argv++;
 308
 309	return trace_probe_match_command_args(&tu->tp, argc, argv);
 310}
 311
 312static bool trace_uprobe_match(const char *system, const char *event,
 313			int argc, const char **argv, struct dyn_event *ev)
 314{
 315	struct trace_uprobe *tu = to_trace_uprobe(ev);
 316
 317	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
 318	   (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
 319	   trace_uprobe_match_command_head(tu, argc, argv);
 320}
 321
 322static nokprobe_inline struct trace_uprobe *
 323trace_uprobe_primary_from_call(struct trace_event_call *call)
 324{
 325	struct trace_probe *tp;
 326
 327	tp = trace_probe_primary_from_call(call);
 328	if (WARN_ON_ONCE(!tp))
 329		return NULL;
 330
 331	return container_of(tp, struct trace_uprobe, tp);
 332}
 333
 334/*
 335 * Allocate new trace_uprobe and initialize it (including uprobes).
 336 */
 337static struct trace_uprobe *
 338alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 339{
 340	struct trace_uprobe *tu;
 341	int ret;
 342
 343	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
 344	if (!tu)
 345		return ERR_PTR(-ENOMEM);
 346
 347	ret = trace_probe_init(&tu->tp, event, group, true);
 348	if (ret < 0)
 349		goto error;
 350
 351	dyn_event_init(&tu->devent, &trace_uprobe_ops);
 352	tu->consumer.handler = uprobe_dispatcher;
 353	if (is_ret)
 354		tu->consumer.ret_handler = uretprobe_dispatcher;
 355	init_trace_uprobe_filter(tu->tp.event->filter);
 356	return tu;
 357
 358error:
 359	kfree(tu);
 360
 361	return ERR_PTR(ret);
 362}
 363
 364static void free_trace_uprobe(struct trace_uprobe *tu)
 365{
 366	if (!tu)
 367		return;
 368
 369	path_put(&tu->path);
 370	trace_probe_cleanup(&tu->tp);
 371	kfree(tu->filename);
 372	kfree(tu);
 373}
 374
 375static struct trace_uprobe *find_probe_event(const char *event, const char *group)
 376{
 377	struct dyn_event *pos;
 378	struct trace_uprobe *tu;
 379
 380	for_each_trace_uprobe(tu, pos)
 381		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
 382		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
 383			return tu;
 384
 385	return NULL;
 386}
 387
 388/* Unregister a trace_uprobe and probe_event */
 389static int unregister_trace_uprobe(struct trace_uprobe *tu)
 390{
 391	int ret;
 392
 393	if (trace_probe_has_sibling(&tu->tp))
 394		goto unreg;
 395
 396	ret = unregister_uprobe_event(tu);
 397	if (ret)
 398		return ret;
 399
 400unreg:
 401	dyn_event_remove(&tu->devent);
 402	trace_probe_unlink(&tu->tp);
 403	free_trace_uprobe(tu);
 404	return 0;
 405}
 406
 407static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
 408					 struct trace_uprobe *comp)
 409{
 410	struct trace_probe_event *tpe = orig->tp.event;
 411	struct trace_probe *pos;
 412	struct inode *comp_inode = d_real_inode(comp->path.dentry);
 413	int i;
 414
 415	list_for_each_entry(pos, &tpe->probes, list) {
 416		orig = container_of(pos, struct trace_uprobe, tp);
 417		if (comp_inode != d_real_inode(orig->path.dentry) ||
 418		    comp->offset != orig->offset)
 419			continue;
 420
 421		/*
 422		 * trace_probe_compare_arg_type() ensured that nr_args and
  423		 * each argument name and type are the same. Let's compare comm.
 424		 */
 425		for (i = 0; i < orig->tp.nr_args; i++) {
 426			if (strcmp(orig->tp.args[i].comm,
 427				   comp->tp.args[i].comm))
 428				break;
 429		}
 430
 431		if (i == orig->tp.nr_args)
 432			return true;
 433	}
 434
 435	return false;
 436}
 437
 438static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
 439{
 440	int ret;
 441
 442	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
 443	if (ret) {
  444		/* Note that argument numbering starts at index 2 */
 445		trace_probe_log_set_index(ret + 1);
 446		trace_probe_log_err(0, DIFF_ARG_TYPE);
 447		return -EEXIST;
 448	}
 449	if (trace_uprobe_has_same_uprobe(to, tu)) {
 450		trace_probe_log_set_index(0);
 451		trace_probe_log_err(0, SAME_PROBE);
 452		return -EEXIST;
 453	}
 454
 455	/* Append to existing event */
 456	ret = trace_probe_append(&tu->tp, &to->tp);
 457	if (!ret)
 458		dyn_event_add(&tu->devent);
 459
 460	return ret;
 461}
 462
 463/*
 464 * A uprobe with multiple reference counters is not allowed, i.e.
 465 * if the inode and offset match, the reference counter offset
 466 * *must* match as well. There is one exception, though: if the
 467 * user is replacing an old trace_uprobe with a new one (same
 468 * group/event), then we allow the same uprobe with a new
 469 * reference counter as long as the new one does not conflict
 470 * with any other existing ones.
 471 */
 472static int validate_ref_ctr_offset(struct trace_uprobe *new)
 473{
 474	struct dyn_event *pos;
 475	struct trace_uprobe *tmp;
 476	struct inode *new_inode = d_real_inode(new->path.dentry);
 477
 478	for_each_trace_uprobe(tmp, pos) {
 479		if (new_inode == d_real_inode(tmp->path.dentry) &&
 480		    new->offset == tmp->offset &&
 481		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
 482			pr_warn("Reference counter offset mismatch.\n");
 483			return -EINVAL;
 484		}
 485	}
 486	return 0;
 487}
 488
 489/* Register a trace_uprobe and probe_event */
 490static int register_trace_uprobe(struct trace_uprobe *tu)
 491{
 492	struct trace_uprobe *old_tu;
 493	int ret;
 494
 495	mutex_lock(&event_mutex);
 496
 497	ret = validate_ref_ctr_offset(tu);
 498	if (ret)
 499		goto end;
 500
 501	/* register as an event */
 502	old_tu = find_probe_event(trace_probe_name(&tu->tp),
 503				  trace_probe_group_name(&tu->tp));
 504	if (old_tu) {
 505		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
 506			trace_probe_log_set_index(0);
 507			trace_probe_log_err(0, DIFF_PROBE_TYPE);
 508			ret = -EEXIST;
 509		} else {
 510			ret = append_trace_uprobe(tu, old_tu);
 511		}
 512		goto end;
 513	}
 514
 515	ret = register_uprobe_event(tu);
 516	if (ret) {
 517		pr_warn("Failed to register probe event(%d)\n", ret);
 518		goto end;
 519	}
 520
 521	dyn_event_add(&tu->devent);
 522
 523end:
 524	mutex_unlock(&event_mutex);
 525
 526	return ret;
 527}
 528
 529/*
 530 * Argument syntax:
 531 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 532 */
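/*
 * Examples (illustrative only; the paths, offsets and fetch args below
 * are made up, not taken from this file):
 *
 *   p:myuprobe /bin/bash:0x4245c0 %ip %ax     - probe at offset 0x4245c0
 *   r:myuretprobe /bin/bash:0x4245c0 $retval  - return probe at the same offset
 *   p /usr/lib/libfoo.so:0x1000(0x2000)       - 0x2000 is a reference
 *                                               counter (semaphore) offset
 */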
 533static int trace_uprobe_create(int argc, const char **argv)
 534{
 535	struct trace_uprobe *tu;
 536	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
 537	char *arg, *filename, *rctr, *rctr_end, *tmp;
 538	char buf[MAX_EVENT_NAME_LEN];
 539	struct path path;
 540	unsigned long offset, ref_ctr_offset;
 541	bool is_return = false;
 542	int i, ret;
 543
 544	ret = 0;
 545	ref_ctr_offset = 0;
 546
 547	switch (argv[0][0]) {
 548	case 'r':
 549		is_return = true;
 550		break;
 551	case 'p':
 552		break;
 553	default:
 554		return -ECANCELED;
 555	}
 556
 557	if (argc < 2)
 558		return -ECANCELED;
 559
 560	if (argv[0][1] == ':')
 561		event = &argv[0][2];
 562
 563	if (!strchr(argv[1], '/'))
 564		return -ECANCELED;
 565
 566	filename = kstrdup(argv[1], GFP_KERNEL);
 567	if (!filename)
 568		return -ENOMEM;
 569
 570	/* Find the last occurrence, in case the path contains ':' too. */
 571	arg = strrchr(filename, ':');
 572	if (!arg || !isdigit(arg[1])) {
 573		kfree(filename);
 574		return -ECANCELED;
 575	}
 576
 577	trace_probe_log_init("trace_uprobe", argc, argv);
 578	trace_probe_log_set_index(1);	/* filename is the 2nd argument */
 579
 580	*arg++ = '\0';
 581	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
 582	if (ret) {
 583		trace_probe_log_err(0, FILE_NOT_FOUND);
 584		kfree(filename);
 585		trace_probe_log_clear();
 586		return ret;
 587	}
 588	if (!d_is_reg(path.dentry)) {
 589		trace_probe_log_err(0, NO_REGULAR_FILE);
 590		ret = -EINVAL;
 591		goto fail_address_parse;
 592	}
 593
 594	/* Parse reference counter offset if specified. */
 595	rctr = strchr(arg, '(');
 596	if (rctr) {
 597		rctr_end = strchr(rctr, ')');
 598		if (!rctr_end) {
 599			ret = -EINVAL;
 600			rctr_end = rctr + strlen(rctr);
 601			trace_probe_log_err(rctr_end - filename,
 602					    REFCNT_OPEN_BRACE);
 603			goto fail_address_parse;
 604		} else if (rctr_end[1] != '\0') {
 605			ret = -EINVAL;
 606			trace_probe_log_err(rctr_end + 1 - filename,
 607					    BAD_REFCNT_SUFFIX);
 608			goto fail_address_parse;
 609		}
 610
 611		*rctr++ = '\0';
 612		*rctr_end = '\0';
 613		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
 614		if (ret) {
 615			trace_probe_log_err(rctr - filename, BAD_REFCNT);
 616			goto fail_address_parse;
 617		}
 618	}
 619
 620	/* Parse uprobe offset. */
 621	ret = kstrtoul(arg, 0, &offset);
 622	if (ret) {
 623		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
 624		goto fail_address_parse;
 625	}
 626
 627	/* setup a probe */
 628	trace_probe_log_set_index(0);
 629	if (event) {
 630		ret = traceprobe_parse_event_name(&event, &group, buf,
 631						  event - argv[0]);
 632		if (ret)
 633			goto fail_address_parse;
 634	} else {
 635		char *tail;
 636		char *ptr;
 637
 638		tail = kstrdup(kbasename(filename), GFP_KERNEL);
 639		if (!tail) {
 640			ret = -ENOMEM;
 641			goto fail_address_parse;
 642		}
 643
 644		ptr = strpbrk(tail, ".-_");
 645		if (ptr)
 646			*ptr = '\0';
 647
 648		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
 649		event = buf;
 650		kfree(tail);
 651	}
 652
 653	argc -= 2;
 654	argv += 2;
 655
 656	tu = alloc_trace_uprobe(group, event, argc, is_return);
 657	if (IS_ERR(tu)) {
 658		ret = PTR_ERR(tu);
 659		/* This must return -ENOMEM otherwise there is a bug */
 660		WARN_ON_ONCE(ret != -ENOMEM);
 661		goto fail_address_parse;
 662	}
 663	tu->offset = offset;
 664	tu->ref_ctr_offset = ref_ctr_offset;
 665	tu->path = path;
 666	tu->filename = filename;
 667
 668	/* parse arguments */
 669	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
 670		tmp = kstrdup(argv[i], GFP_KERNEL);
 671		if (!tmp) {
 672			ret = -ENOMEM;
 673			goto error;
 674		}
 675
 676		trace_probe_log_set_index(i + 2);
 677		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
 678					is_return ? TPARG_FL_RETURN : 0);
 679		kfree(tmp);
 680		if (ret)
 681			goto error;
 682	}
 683
 684	ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
 685	if (ret < 0)
 686		goto error;
 687
 688	ret = register_trace_uprobe(tu);
 689	if (!ret)
 690		goto out;
 691
 692error:
 693	free_trace_uprobe(tu);
 694out:
 695	trace_probe_log_clear();
 696	return ret;
 697
 698fail_address_parse:
 699	trace_probe_log_clear();
 700	path_put(&path);
 701	kfree(filename);
 702
 703	return ret;
 704}
 705
 706static int create_or_delete_trace_uprobe(int argc, char **argv)
 707{
 708	int ret;
 709
 710	if (argv[0][0] == '-')
 711		return dyn_event_release(argc, argv, &trace_uprobe_ops);
 712
 713	ret = trace_uprobe_create(argc, (const char **)argv);
 714	return ret == -ECANCELED ? -EINVAL : ret;
 715}
 716
 717static int trace_uprobe_release(struct dyn_event *ev)
 718{
 719	struct trace_uprobe *tu = to_trace_uprobe(ev);
 720
 721	return unregister_trace_uprobe(tu);
 722}
 723
 724/* Probes listing interfaces */
 725static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
 726{
 727	struct trace_uprobe *tu = to_trace_uprobe(ev);
 728	char c = is_ret_probe(tu) ? 'r' : 'p';
 729	int i;
 730
 731	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
 732			trace_probe_name(&tu->tp), tu->filename,
 733			(int)(sizeof(void *) * 2), tu->offset);
 734
 735	if (tu->ref_ctr_offset)
 736		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
 737
 738	for (i = 0; i < tu->tp.nr_args; i++)
 739		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
 740
 741	seq_putc(m, '\n');
 742	return 0;
 743}
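/*
 * With the format above, one listed probe looks roughly like this
 * (illustrative values):
 *
 *   p:uprobes/myuprobe /bin/bash:0x00000000004245c0 arg1=%ax
 */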
 744
 745static int probes_seq_show(struct seq_file *m, void *v)
 746{
 747	struct dyn_event *ev = v;
 748
 749	if (!is_trace_uprobe(ev))
 750		return 0;
 751
 752	return trace_uprobe_show(m, ev);
 753}
 754
 755static const struct seq_operations probes_seq_op = {
 756	.start  = dyn_event_seq_start,
 757	.next   = dyn_event_seq_next,
 758	.stop   = dyn_event_seq_stop,
 759	.show   = probes_seq_show
 760};
 761
 762static int probes_open(struct inode *inode, struct file *file)
 763{
 764	int ret;
 765
 766	ret = security_locked_down(LOCKDOWN_TRACEFS);
 767	if (ret)
 768		return ret;
 769
 770	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 771		ret = dyn_events_release_all(&trace_uprobe_ops);
 772		if (ret)
 773			return ret;
 774	}
 775
 776	return seq_open(file, &probes_seq_op);
 777}
 778
 779static ssize_t probes_write(struct file *file, const char __user *buffer,
 780			    size_t count, loff_t *ppos)
 781{
 782	return trace_parse_run_command(file, buffer, count, ppos,
 783					create_or_delete_trace_uprobe);
 784}
 785
 786static const struct file_operations uprobe_events_ops = {
 787	.owner		= THIS_MODULE,
 788	.open		= probes_open,
 789	.read		= seq_read,
 790	.llseek		= seq_lseek,
 791	.release	= seq_release,
 792	.write		= probes_write,
 793};
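/*
 * The uprobe_events file is driven by the ops above: reads list the probes
 * through the seq interface, every written line is passed to
 * create_or_delete_trace_uprobe() (lines starting with '-' delete the named
 * event), and opening the file for write with O_TRUNC (e.g.
 * "echo > uprobe_events") first removes all existing uprobe events.
 */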
 794
 795/* Probes profiling interfaces */
 796static int probes_profile_seq_show(struct seq_file *m, void *v)
 797{
 798	struct dyn_event *ev = v;
 799	struct trace_uprobe *tu;
 800
 801	if (!is_trace_uprobe(ev))
 802		return 0;
 803
 804	tu = to_trace_uprobe(ev);
 805	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
 806			trace_probe_name(&tu->tp), tu->nhit);
 807	return 0;
 808}
 809
 810static const struct seq_operations profile_seq_op = {
 811	.start  = dyn_event_seq_start,
 812	.next   = dyn_event_seq_next,
 813	.stop   = dyn_event_seq_stop,
 814	.show	= probes_profile_seq_show
 815};
 816
 817static int profile_open(struct inode *inode, struct file *file)
 818{
 819	int ret;
 820
 821	ret = security_locked_down(LOCKDOWN_TRACEFS);
 822	if (ret)
 823		return ret;
 824
 825	return seq_open(file, &profile_seq_op);
 826}
 827
 828static const struct file_operations uprobe_profile_ops = {
 829	.owner		= THIS_MODULE,
 830	.open		= profile_open,
 831	.read		= seq_read,
 832	.llseek		= seq_lseek,
 833	.release	= seq_release,
 834};
 835
 836struct uprobe_cpu_buffer {
 837	struct mutex mutex;
 838	void *buf;
 839};
 840static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
 841static int uprobe_buffer_refcnt;
 842
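/*
 * Each possible CPU gets a one-page scratch buffer where the fetched probe
 * arguments are staged before being copied into the trace ring buffer or
 * the perf buffer. The buffers are allocated on the first
 * probe_event_enable() and freed on the last probe_event_disable();
 * uprobe_buffer_refcnt tracks this under event_mutex.
 */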
 843static int uprobe_buffer_init(void)
 844{
 845	int cpu, err_cpu;
 846
 847	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
 848	if (uprobe_cpu_buffer == NULL)
 849		return -ENOMEM;
 850
 851	for_each_possible_cpu(cpu) {
 852		struct page *p = alloc_pages_node(cpu_to_node(cpu),
 853						  GFP_KERNEL, 0);
 854		if (p == NULL) {
 855			err_cpu = cpu;
 856			goto err;
 857		}
 858		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
 859		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
 860	}
 861
 862	return 0;
 863
 864err:
 865	for_each_possible_cpu(cpu) {
 866		if (cpu == err_cpu)
 867			break;
 868		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
 869	}
 870
 871	free_percpu(uprobe_cpu_buffer);
 872	return -ENOMEM;
 873}
 874
 875static int uprobe_buffer_enable(void)
 876{
 877	int ret = 0;
 878
 879	BUG_ON(!mutex_is_locked(&event_mutex));
 880
 881	if (uprobe_buffer_refcnt++ == 0) {
 882		ret = uprobe_buffer_init();
 883		if (ret < 0)
 884			uprobe_buffer_refcnt--;
 885	}
 886
 887	return ret;
 888}
 889
 890static void uprobe_buffer_disable(void)
 891{
 892	int cpu;
 893
 894	BUG_ON(!mutex_is_locked(&event_mutex));
 895
 896	if (--uprobe_buffer_refcnt == 0) {
 897		for_each_possible_cpu(cpu)
 898			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
 899							     cpu)->buf);
 900
 901		free_percpu(uprobe_cpu_buffer);
 902		uprobe_cpu_buffer = NULL;
 903	}
 904}
 905
 906static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
 907{
 908	struct uprobe_cpu_buffer *ucb;
 909	int cpu;
 910
 911	cpu = raw_smp_processor_id();
 912	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
 913
 914	/*
 915	 * Use per-cpu buffers for fastest access, but we might migrate
 916	 * so the mutex makes sure we have sole access to it.
 917	 */
 918	mutex_lock(&ucb->mutex);
 919
 920	return ucb;
 921}
 922
 923static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
 924{
 925	mutex_unlock(&ucb->mutex);
 926}
 927
 928static void __uprobe_trace_func(struct trace_uprobe *tu,
 929				unsigned long func, struct pt_regs *regs,
 930				struct uprobe_cpu_buffer *ucb, int dsize,
 931				struct trace_event_file *trace_file)
 932{
 933	struct uprobe_trace_entry_head *entry;
 934	struct trace_buffer *buffer;
 935	struct ring_buffer_event *event;
 936	void *data;
 937	int size, esize;
 938	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
 939
 940	WARN_ON(call != trace_file->event_call);
 941
 942	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
 943		return;
 944
 945	if (trace_trigger_soft_disabled(trace_file))
 946		return;
 947
 948	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 949	size = esize + tu->tp.size + dsize;
 950	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 951						call->event.type, size, 0, 0);
 952	if (!event)
 953		return;
 954
 955	entry = ring_buffer_event_data(event);
 956	if (is_ret_probe(tu)) {
 957		entry->vaddr[0] = func;
 958		entry->vaddr[1] = instruction_pointer(regs);
 959		data = DATAOF_TRACE_ENTRY(entry, true);
 960	} else {
 961		entry->vaddr[0] = instruction_pointer(regs);
 962		data = DATAOF_TRACE_ENTRY(entry, false);
 963	}
 964
 965	memcpy(data, ucb->buf, tu->tp.size + dsize);
 966
 967	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
 968}
 969
 970/* uprobe handler */
 971static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
 972			     struct uprobe_cpu_buffer *ucb, int dsize)
 973{
 974	struct event_file_link *link;
 975
 976	if (is_ret_probe(tu))
 977		return 0;
 978
 979	rcu_read_lock();
 980	trace_probe_for_each_link_rcu(link, &tu->tp)
 981		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
 982	rcu_read_unlock();
 983
 984	return 0;
 985}
 986
 987static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
 988				 struct pt_regs *regs,
 989				 struct uprobe_cpu_buffer *ucb, int dsize)
 990{
 991	struct event_file_link *link;
 992
 993	rcu_read_lock();
 994	trace_probe_for_each_link_rcu(link, &tu->tp)
 995		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
 996	rcu_read_unlock();
 997}
 998
 999/* Event entry printers */
1000static enum print_line_t
1001print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1002{
1003	struct uprobe_trace_entry_head *entry;
1004	struct trace_seq *s = &iter->seq;
1005	struct trace_uprobe *tu;
1006	u8 *data;
1007
1008	entry = (struct uprobe_trace_entry_head *)iter->ent;
1009	tu = trace_uprobe_primary_from_call(
1010		container_of(event, struct trace_event_call, event));
1011	if (unlikely(!tu))
1012		goto out;
1013
1014	if (is_ret_probe(tu)) {
1015		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1016				 trace_probe_name(&tu->tp),
1017				 entry->vaddr[1], entry->vaddr[0]);
1018		data = DATAOF_TRACE_ENTRY(entry, true);
1019	} else {
1020		trace_seq_printf(s, "%s: (0x%lx)",
1021				 trace_probe_name(&tu->tp),
1022				 entry->vaddr[0]);
1023		data = DATAOF_TRACE_ENTRY(entry, false);
1024	}
1025
1026	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1027		goto out;
1028
1029	trace_seq_putc(s, '\n');
1030
1031 out:
1032	return trace_handle_return(s);
1033}
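/*
 * Rendered with the format strings above, the event body looks roughly
 * like this (illustrative addresses):
 *
 *   myuprobe: (0x4245c0) arg1=0x1
 *   myuretprobe: (0x401234 <- 0x4245c0) arg1=0x0
 */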
1034
1035typedef bool (*filter_func_t)(struct uprobe_consumer *self,
1036				enum uprobe_filter_ctx ctx,
1037				struct mm_struct *mm);
1038
1039static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1040{
1041	int ret;
1042
1043	tu->consumer.filter = filter;
1044	tu->inode = d_real_inode(tu->path.dentry);
1045
1046	if (tu->ref_ctr_offset)
1047		ret = uprobe_register_refctr(tu->inode, tu->offset,
1048				tu->ref_ctr_offset, &tu->consumer);
1049	else
1050		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
1051
1052	if (ret)
1053		tu->inode = NULL;
1054
1055	return ret;
1056}
1057
1058static void __probe_event_disable(struct trace_probe *tp)
1059{
1060	struct trace_probe *pos;
1061	struct trace_uprobe *tu;
1062
1063	tu = container_of(tp, struct trace_uprobe, tp);
1064	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1065
1066	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1067		tu = container_of(pos, struct trace_uprobe, tp);
1068		if (!tu->inode)
1069			continue;
1070
1071		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
1072		tu->inode = NULL;
1073	}
1074}
1075
1076static int probe_event_enable(struct trace_event_call *call,
1077			struct trace_event_file *file, filter_func_t filter)
1078{
1079	struct trace_probe *pos, *tp;
1080	struct trace_uprobe *tu;
1081	bool enabled;
1082	int ret;
1083
1084	tp = trace_probe_primary_from_call(call);
1085	if (WARN_ON_ONCE(!tp))
1086		return -ENODEV;
1087	enabled = trace_probe_is_enabled(tp);
1088
1089	/* This may also change "enabled" state */
1090	if (file) {
1091		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1092			return -EINTR;
1093
1094		ret = trace_probe_add_file(tp, file);
1095		if (ret < 0)
1096			return ret;
1097	} else {
1098		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1099			return -EINTR;
1100
1101		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1102	}
1103
1104	tu = container_of(tp, struct trace_uprobe, tp);
1105	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1106
1107	if (enabled)
1108		return 0;
1109
1110	ret = uprobe_buffer_enable();
1111	if (ret)
1112		goto err_flags;
1113
1114	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1115		tu = container_of(pos, struct trace_uprobe, tp);
1116		ret = trace_uprobe_enable(tu, filter);
1117		if (ret) {
1118			__probe_event_disable(tp);
1119			goto err_buffer;
1120		}
1121	}
1122
1123	return 0;
1124
1125 err_buffer:
1126	uprobe_buffer_disable();
1127
1128 err_flags:
1129	if (file)
1130		trace_probe_remove_file(tp, file);
1131	else
1132		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1133
1134	return ret;
1135}
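/*
 * Note on probe_event_enable()/probe_event_disable() above: TP_FLAG_TRACE
 * (tracefs users) and TP_FLAG_PROFILE (perf users) are treated as mutually
 * exclusive for a given probe; enabling it from one interface while the
 * other has it in use fails with -EINTR.
 */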
1136
1137static void probe_event_disable(struct trace_event_call *call,
1138				struct trace_event_file *file)
1139{
1140	struct trace_probe *tp;
1141
1142	tp = trace_probe_primary_from_call(call);
1143	if (WARN_ON_ONCE(!tp))
1144		return;
1145
1146	if (!trace_probe_is_enabled(tp))
1147		return;
1148
1149	if (file) {
1150		if (trace_probe_remove_file(tp, file) < 0)
1151			return;
1152
1153		if (trace_probe_is_enabled(tp))
1154			return;
1155	} else
1156		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1157
1158	__probe_event_disable(tp);
1159	uprobe_buffer_disable();
1160}
1161
1162static int uprobe_event_define_fields(struct trace_event_call *event_call)
1163{
1164	int ret, size;
1165	struct uprobe_trace_entry_head field;
1166	struct trace_uprobe *tu;
1167
1168	tu = trace_uprobe_primary_from_call(event_call);
1169	if (unlikely(!tu))
1170		return -ENODEV;
1171
1172	if (is_ret_probe(tu)) {
1173		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1174		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1175		size = SIZEOF_TRACE_ENTRY(true);
1176	} else {
1177		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1178		size = SIZEOF_TRACE_ENTRY(false);
1179	}
1180
1181	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1182}
1183
1184#ifdef CONFIG_PERF_EVENTS
1185static bool
1186__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1187{
1188	struct perf_event *event;
1189
1190	if (filter->nr_systemwide)
1191		return true;
1192
1193	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1194		if (event->hw.target->mm == mm)
1195			return true;
1196	}
1197
1198	return false;
1199}
1200
1201static inline bool
1202trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1203			  struct perf_event *event)
1204{
1205	return __uprobe_perf_filter(filter, event->hw.target->mm);
1206}
1207
1208static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1209				       struct perf_event *event)
1210{
1211	bool done;
1212
1213	write_lock(&filter->rwlock);
1214	if (event->hw.target) {
1215		list_del(&event->hw.tp_list);
1216		done = filter->nr_systemwide ||
1217			(event->hw.target->flags & PF_EXITING) ||
1218			trace_uprobe_filter_event(filter, event);
1219	} else {
1220		filter->nr_systemwide--;
1221		done = filter->nr_systemwide;
1222	}
1223	write_unlock(&filter->rwlock);
1224
1225	return done;
1226}
1227
1228/* This returns true if the filter always covers target mm */
1229static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1230				    struct perf_event *event)
1231{
1232	bool done;
1233
1234	write_lock(&filter->rwlock);
1235	if (event->hw.target) {
1236		/*
1237		 * event->parent != NULL means copy_process(), we can avoid
1238		 * uprobe_apply(). current->mm must be probed and we can rely
1239		 * on dup_mmap() which preserves the already installed bp's.
1240		 *
1241		 * attr.enable_on_exec means that exec/mmap will install the
1242		 * breakpoints we need.
1243		 */
1244		done = filter->nr_systemwide ||
1245			event->parent || event->attr.enable_on_exec ||
1246			trace_uprobe_filter_event(filter, event);
1247		list_add(&event->hw.tp_list, &filter->perf_events);
1248	} else {
1249		done = filter->nr_systemwide;
1250		filter->nr_systemwide++;
1251	}
1252	write_unlock(&filter->rwlock);
1253
1254	return done;
1255}
1256
1257static int uprobe_perf_close(struct trace_event_call *call,
1258			     struct perf_event *event)
1259{
1260	struct trace_probe *pos, *tp;
1261	struct trace_uprobe *tu;
1262	int ret = 0;
1263
1264	tp = trace_probe_primary_from_call(call);
1265	if (WARN_ON_ONCE(!tp))
1266		return -ENODEV;
1267
1268	tu = container_of(tp, struct trace_uprobe, tp);
1269	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
1270		return 0;
1271
1272	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
1273		tu = container_of(pos, struct trace_uprobe, tp);
1274		ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1275		if (ret)
1276			break;
1277	}
1278
1279	return ret;
1280}
1281
1282static int uprobe_perf_open(struct trace_event_call *call,
1283			    struct perf_event *event)
1284{
1285	struct trace_probe *pos, *tp;
1286	struct trace_uprobe *tu;
1287	int err = 0;
1288
1289	tp = trace_probe_primary_from_call(call);
1290	if (WARN_ON_ONCE(!tp))
1291		return -ENODEV;
1292
1293	tu = container_of(tp, struct trace_uprobe, tp);
1294	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
1295		return 0;
1296
1297	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
    		/* Advance tu to each sibling probe, as uprobe_perf_close() does. */
    		tu = container_of(pos, struct trace_uprobe, tp);
1298		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1299		if (err) {
1300			uprobe_perf_close(call, event);
1301			break;
1302		}
1303	}
1304
1305	return err;
1306}
1307
1308static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1309				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1310{
1311	struct trace_uprobe_filter *filter;
1312	struct trace_uprobe *tu;
1313	int ret;
1314
1315	tu = container_of(uc, struct trace_uprobe, consumer);
1316	filter = tu->tp.event->filter;
1317
1318	read_lock(&filter->rwlock);
1319	ret = __uprobe_perf_filter(filter, mm);
1320	read_unlock(&filter->rwlock);
1321
1322	return ret;
1323}
1324
1325static void __uprobe_perf_func(struct trace_uprobe *tu,
1326			       unsigned long func, struct pt_regs *regs,
1327			       struct uprobe_cpu_buffer *ucb, int dsize)
1328{
1329	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1330	struct uprobe_trace_entry_head *entry;
1331	struct hlist_head *head;
1332	void *data;
1333	int size, esize;
1334	int rctx;
1335
1336	if (bpf_prog_array_valid(call)) {
1337		u32 ret;
1338
1339		preempt_disable();
1340		ret = trace_call_bpf(call, regs);
1341		preempt_enable();
1342		if (!ret)
1343			return;
1344	}
1345
1346	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1347
1348	size = esize + tu->tp.size + dsize;
1349	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1350	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1351		return;
1352
1353	preempt_disable();
1354	head = this_cpu_ptr(call->perf_events);
1355	if (hlist_empty(head))
1356		goto out;
1357
1358	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1359	if (!entry)
1360		goto out;
1361
1362	if (is_ret_probe(tu)) {
1363		entry->vaddr[0] = func;
1364		entry->vaddr[1] = instruction_pointer(regs);
1365		data = DATAOF_TRACE_ENTRY(entry, true);
1366	} else {
1367		entry->vaddr[0] = instruction_pointer(regs);
1368		data = DATAOF_TRACE_ENTRY(entry, false);
1369	}
1370
1371	memcpy(data, ucb->buf, tu->tp.size + dsize);
1372
1373	if (size - esize > tu->tp.size + dsize) {
1374		int len = tu->tp.size + dsize;
1375
1376		memset(data + len, 0, size - esize - len);
1377	}
1378
1379	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1380			      head, NULL);
1381 out:
1382	preempt_enable();
1383}
1384
1385/* uprobe profile handler */
1386static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1387			    struct uprobe_cpu_buffer *ucb, int dsize)
1388{
1389	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1390		return UPROBE_HANDLER_REMOVE;
1391
1392	if (!is_ret_probe(tu))
1393		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
1394	return 0;
1395}
1396
1397static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1398				struct pt_regs *regs,
1399				struct uprobe_cpu_buffer *ucb, int dsize)
1400{
1401	__uprobe_perf_func(tu, func, regs, ucb, dsize);
1402}
1403
1404int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1405			const char **filename, u64 *probe_offset,
1406			bool perf_type_tracepoint)
1407{
1408	const char *pevent = trace_event_name(event->tp_event);
1409	const char *group = event->tp_event->class->system;
1410	struct trace_uprobe *tu;
1411
1412	if (perf_type_tracepoint)
1413		tu = find_probe_event(pevent, group);
1414	else
1415		tu = trace_uprobe_primary_from_call(event->tp_event);
1416	if (!tu)
1417		return -EINVAL;
1418
1419	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1420				    : BPF_FD_TYPE_UPROBE;
1421	*filename = tu->filename;
1422	*probe_offset = tu->offset;
1423	return 0;
1424}
1425#endif	/* CONFIG_PERF_EVENTS */
1426
1427static int
1428trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1429		      void *data)
1430{
1431	struct trace_event_file *file = data;
1432
1433	switch (type) {
1434	case TRACE_REG_REGISTER:
1435		return probe_event_enable(event, file, NULL);
1436
1437	case TRACE_REG_UNREGISTER:
1438		probe_event_disable(event, file);
1439		return 0;
1440
1441#ifdef CONFIG_PERF_EVENTS
1442	case TRACE_REG_PERF_REGISTER:
1443		return probe_event_enable(event, NULL, uprobe_perf_filter);
1444
1445	case TRACE_REG_PERF_UNREGISTER:
1446		probe_event_disable(event, NULL);
1447		return 0;
1448
1449	case TRACE_REG_PERF_OPEN:
1450		return uprobe_perf_open(event, data);
1451
1452	case TRACE_REG_PERF_CLOSE:
1453		return uprobe_perf_close(event, data);
1454
1455#endif
1456	default:
1457		return 0;
1458	}
1459}
1460
1461static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1462{
1463	struct trace_uprobe *tu;
1464	struct uprobe_dispatch_data udd;
1465	struct uprobe_cpu_buffer *ucb;
1466	int dsize, esize;
1467	int ret = 0;
1468
1469
1470	tu = container_of(con, struct trace_uprobe, consumer);
1471	tu->nhit++;
1472
1473	udd.tu = tu;
1474	udd.bp_addr = instruction_pointer(regs);
1475
1476	current->utask->vaddr = (unsigned long) &udd;
1477
1478	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1479		return 0;
1480
1481	dsize = __get_data_size(&tu->tp, regs);
1482	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1483
1484	ucb = uprobe_buffer_get();
1485	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1486
1487	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1488		ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1489
1490#ifdef CONFIG_PERF_EVENTS
1491	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1492		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1493#endif
1494	uprobe_buffer_put(ucb);
1495	return ret;
1496}
1497
1498static int uretprobe_dispatcher(struct uprobe_consumer *con,
1499				unsigned long func, struct pt_regs *regs)
1500{
1501	struct trace_uprobe *tu;
1502	struct uprobe_dispatch_data udd;
1503	struct uprobe_cpu_buffer *ucb;
1504	int dsize, esize;
1505
1506	tu = container_of(con, struct trace_uprobe, consumer);
1507
1508	udd.tu = tu;
1509	udd.bp_addr = func;
1510
1511	current->utask->vaddr = (unsigned long) &udd;
1512
1513	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1514		return 0;
1515
1516	dsize = __get_data_size(&tu->tp, regs);
1517	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1518
1519	ucb = uprobe_buffer_get();
1520	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
1521
1522	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1523		uretprobe_trace_func(tu, func, regs, ucb, dsize);
1524
1525#ifdef CONFIG_PERF_EVENTS
1526	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1527		uretprobe_perf_func(tu, func, regs, ucb, dsize);
1528#endif
1529	uprobe_buffer_put(ucb);
1530	return 0;
1531}
1532
1533static struct trace_event_functions uprobe_funcs = {
1534	.trace		= print_uprobe_event
1535};
1536
1537static struct trace_event_fields uprobe_fields_array[] = {
1538	{ .type = TRACE_FUNCTION_TYPE,
1539	  .define_fields = uprobe_event_define_fields },
1540	{}
1541};
1542
1543static inline void init_trace_event_call(struct trace_uprobe *tu)
1544{
1545	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1546	call->event.funcs = &uprobe_funcs;
1547	call->class->fields_array = uprobe_fields_array;
1548
1549	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1550	call->class->reg = trace_uprobe_register;
1551}
1552
1553static int register_uprobe_event(struct trace_uprobe *tu)
1554{
1555	init_trace_event_call(tu);
1556
1557	return trace_probe_register_event_call(&tu->tp);
1558}
1559
1560static int unregister_uprobe_event(struct trace_uprobe *tu)
1561{
1562	return trace_probe_unregister_event_call(&tu->tp);
1563}
1564
1565#ifdef CONFIG_PERF_EVENTS
1566struct trace_event_call *
1567create_local_trace_uprobe(char *name, unsigned long offs,
1568			  unsigned long ref_ctr_offset, bool is_return)
1569{
1570	struct trace_uprobe *tu;
1571	struct path path;
1572	int ret;
1573
1574	ret = kern_path(name, LOOKUP_FOLLOW, &path);
1575	if (ret)
1576		return ERR_PTR(ret);
1577
1578	if (!d_is_reg(path.dentry)) {
1579		path_put(&path);
1580		return ERR_PTR(-EINVAL);
1581	}
1582
1583	/*
 1584	 * Local trace_uprobes are not added to dyn_event, so they are never
 1585	 * found by find_probe_event(). Therefore, there is no concern of
1586	 * duplicated name "DUMMY_EVENT" here.
1587	 */
1588	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1589				is_return);
1590
1591	if (IS_ERR(tu)) {
1592		pr_info("Failed to allocate trace_uprobe.(%d)\n",
1593			(int)PTR_ERR(tu));
1594		path_put(&path);
1595		return ERR_CAST(tu);
1596	}
1597
1598	tu->offset = offs;
1599	tu->path = path;
1600	tu->ref_ctr_offset = ref_ctr_offset;
1601	tu->filename = kstrdup(name, GFP_KERNEL);
1602	init_trace_event_call(tu);
1603
1604	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
1605		ret = -ENOMEM;
1606		goto error;
1607	}
1608
1609	return trace_probe_event_call(&tu->tp);
1610error:
1611	free_trace_uprobe(tu);
1612	return ERR_PTR(ret);
1613}
1614
1615void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1616{
1617	struct trace_uprobe *tu;
1618
1619	tu = trace_uprobe_primary_from_call(event_call);
1620
1621	free_trace_uprobe(tu);
1622}
1623#endif /* CONFIG_PERF_EVENTS */
1624
 1625/* Make a trace interface for controlling probe points */
1626static __init int init_uprobe_trace(void)
1627{
1628	struct dentry *d_tracer;
1629	int ret;
1630
1631	ret = dyn_event_register(&trace_uprobe_ops);
1632	if (ret)
1633		return ret;
1634
1635	d_tracer = tracing_init_dentry();
1636	if (IS_ERR(d_tracer))
1637		return 0;
1638
1639	trace_create_file("uprobe_events", 0644, d_tracer,
1640				    NULL, &uprobe_events_ops);
1641	/* Profile interface */
1642	trace_create_file("uprobe_profile", 0444, d_tracer,
1643				    NULL, &uprobe_profile_ops);
1644	return 0;
1645}
1646
1647fs_initcall(init_uprobe_trace);