v4.6
 
   1/*
   2 * uprobes-based tracing events
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License version 2 as
   6 * published by the Free Software Foundation.
   7 *
   8 * This program is distributed in the hope that it will be useful,
   9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  11 * GNU General Public License for more details.
  12 *
  13 * You should have received a copy of the GNU General Public License
  14 * along with this program; if not, write to the Free Software
  15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  16 *
  17 * Copyright (C) IBM Corporation, 2010-2012
  18 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
  19 */
   20
  21#include <linux/module.h>
  22#include <linux/uaccess.h>
  23#include <linux/uprobes.h>
  24#include <linux/namei.h>
  25#include <linux/string.h>
   26
   27#include "trace_probe.h"
   28
  29#define UPROBE_EVENT_SYSTEM	"uprobes"
  30
  31struct uprobe_trace_entry_head {
  32	struct trace_entry	ent;
  33	unsigned long		vaddr[];
  34};
  35
  36#define SIZEOF_TRACE_ENTRY(is_return)			\
  37	(sizeof(struct uprobe_trace_entry_head) +	\
  38	 sizeof(unsigned long) * (is_return ? 2 : 1))
  39
  40#define DATAOF_TRACE_ENTRY(entry, is_return)		\
  41	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
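/*
 * Layout of a trace record: the common trace_entry header is followed
 * by one vaddr (the probe address) for uprobes, or two vaddrs (the
 * probed function and the return address) for uretprobes; the fetched
 * arguments are copied right after, at DATAOF_TRACE_ENTRY().
 */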
  42
  43struct trace_uprobe_filter {
  44	rwlock_t		rwlock;
  45	int			nr_systemwide;
  46	struct list_head	perf_events;
  47};
  48
  49/*
  50 * uprobe event core functions
  51 */
  52struct trace_uprobe {
  53	struct list_head		list;
  54	struct trace_uprobe_filter	filter;
  55	struct uprobe_consumer		consumer;
  56	struct inode			*inode;
  57	char				*filename;
  58	unsigned long			offset;
  59	unsigned long			nhit;
  60	struct trace_probe		tp;
  61};
  62
  63#define SIZEOF_TRACE_UPROBE(n)				\
  64	(offsetof(struct trace_uprobe, tp.args) +	\
  65	(sizeof(struct probe_arg) * (n)))
  66
  67static int register_uprobe_event(struct trace_uprobe *tu);
  68static int unregister_uprobe_event(struct trace_uprobe *tu);
  69
  70static DEFINE_MUTEX(uprobe_lock);
  71static LIST_HEAD(uprobe_list);
  72
  73struct uprobe_dispatch_data {
  74	struct trace_uprobe	*tu;
  75	unsigned long		bp_addr;
  76};
  77
  78static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
  79static int uretprobe_dispatcher(struct uprobe_consumer *con,
  80				unsigned long func, struct pt_regs *regs);
  81
  82#ifdef CONFIG_STACK_GROWSUP
  83static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
  84{
  85	return addr - (n * sizeof(long));
  86}
  87#else
  88static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
  89{
  90	return addr + (n * sizeof(long));
  91}
  92#endif
  93
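/*
 * Read the n-th word of the user stack, accounting for the stack
 * growth direction; a word that cannot be read yields 0.
 */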
  94static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
  95{
  96	unsigned long ret;
  97	unsigned long addr = user_stack_pointer(regs);
  98
  99	addr = adjust_stack_addr(addr, n);
 100
 101	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
 102		return 0;
 103
 104	return ret;
 105}
 106
 107/*
 108 * Uprobes-specific fetch functions
 109 */
 110#define DEFINE_FETCH_stack(type)					\
 111static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
 112					 void *offset, void *dest)	\
 113{									\
 114	*(type *)dest = (type)get_user_stack_nth(regs,			\
 115					      ((unsigned long)offset)); \
 116}
 117DEFINE_BASIC_FETCH_FUNCS(stack)
 118/* No string on the stack entry */
 119#define fetch_stack_string	NULL
 120#define fetch_stack_string_size	NULL
 121
 122#define DEFINE_FETCH_memory(type)					\
 123static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,		\
 124					  void *addr, void *dest)	\
 125{									\
 126	type retval;							\
 127	void __user *vaddr = (void __force __user *) addr;		\
 128									\
 129	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
 130		*(type *)dest = 0;					\
 131	else								\
 132		*(type *) dest = retval;				\
 133}
 134DEFINE_BASIC_FETCH_FUNCS(memory)
 135/*
 136 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 137 * length and relative data location.
 138 */
 139static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
 140					    void *addr, void *dest)
 141{
 142	long ret;
 143	u32 rloc = *(u32 *)dest;
 144	int maxlen  = get_rloc_len(rloc);
 145	u8 *dst = get_rloc_data(dest);
 146	void __user *src = (void __force __user *) addr;
 147
 148	if (!maxlen)
 149		return;
 150
 151	ret = strncpy_from_user(dst, src, maxlen);
 152
 153	if (ret < 0) {	/* Failed to fetch string */
 154		((u8 *)get_rloc_data(dest))[0] = '\0';
 155		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
 156	} else {
 157		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
 158	}
 159}
 160
 161static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
 162						 void *addr, void *dest)
 163{
 164	int len;
 165	void __user *vaddr = (void __force __user *) addr;
 166
 167	len = strnlen_user(vaddr, MAX_STRING_SIZE);
 168
 169	if (len == 0 || len > MAX_STRING_SIZE)  /* Failed to check length */
 170		*(u32 *)dest = 0;
 171	else
 172		*(u32 *)dest = len;
 173}
 174
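/*
 * The breakpoint hit at udd->bp_addr corresponds to file offset
 * udd->tu->offset, so their difference is the load base of the
 * executable/library; adding the requested file offset gives the
 * user virtual address to fetch from.
 */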
 175static unsigned long translate_user_vaddr(void *file_offset)
 176{
 177	unsigned long base_addr;
 178	struct uprobe_dispatch_data *udd;
 179
 180	udd = (void *) current->utask->vaddr;
 181
 182	base_addr = udd->bp_addr - udd->tu->offset;
 183	return base_addr + (unsigned long)file_offset;
 184}
 185
 186#define DEFINE_FETCH_file_offset(type)					\
 187static void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,	\
 188					       void *offset, void *dest)\
 189{									\
 190	void *vaddr = (void *)translate_user_vaddr(offset);		\
 191									\
 192	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
 193}
 194DEFINE_BASIC_FETCH_FUNCS(file_offset)
 195DEFINE_FETCH_file_offset(string)
 196DEFINE_FETCH_file_offset(string_size)
 197
 198/* Fetch type information table */
 199static const struct fetch_type uprobes_fetch_type_table[] = {
 200	/* Special types */
 201	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
 202					sizeof(u32), 1, "__data_loc char[]"),
 203	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
 204					string_size, sizeof(u32), 0, "u32"),
 205	/* Basic types */
 206	ASSIGN_FETCH_TYPE(u8,  u8,  0),
 207	ASSIGN_FETCH_TYPE(u16, u16, 0),
 208	ASSIGN_FETCH_TYPE(u32, u32, 0),
 209	ASSIGN_FETCH_TYPE(u64, u64, 0),
 210	ASSIGN_FETCH_TYPE(s8,  u8,  1),
 211	ASSIGN_FETCH_TYPE(s16, u16, 1),
 212	ASSIGN_FETCH_TYPE(s32, u32, 1),
 213	ASSIGN_FETCH_TYPE(s64, u64, 1),
 214
 215	ASSIGN_FETCH_TYPE_END
 216};
 217
 218static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
 219{
 220	rwlock_init(&filter->rwlock);
 221	filter->nr_systemwide = 0;
 222	INIT_LIST_HEAD(&filter->perf_events);
 223}
 224
 225static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
 226{
 227	return !filter->nr_systemwide && list_empty(&filter->perf_events);
 228}
 229
 230static inline bool is_ret_probe(struct trace_uprobe *tu)
 231{
 232	return tu->consumer.ret_handler != NULL;
 233}
 234
 235/*
 236 * Allocate new trace_uprobe and initialize it (including uprobes).
 237 */
 238static struct trace_uprobe *
 239alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 240{
 241	struct trace_uprobe *tu;
 242
 243	if (!event || !is_good_name(event))
 244		return ERR_PTR(-EINVAL);
 245
 246	if (!group || !is_good_name(group))
 247		return ERR_PTR(-EINVAL);
 248
 249	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
 250	if (!tu)
 251		return ERR_PTR(-ENOMEM);
 252
 253	tu->tp.call.class = &tu->tp.class;
 254	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
 255	if (!tu->tp.call.name)
 256		goto error;
 257
 258	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
 259	if (!tu->tp.class.system)
 260		goto error;
 261
 262	INIT_LIST_HEAD(&tu->list);
 263	INIT_LIST_HEAD(&tu->tp.files);
 264	tu->consumer.handler = uprobe_dispatcher;
 265	if (is_ret)
 266		tu->consumer.ret_handler = uretprobe_dispatcher;
 267	init_trace_uprobe_filter(&tu->filter);
 268	return tu;
 269
 270error:
 271	kfree(tu->tp.call.name);
 272	kfree(tu);
 273
 274	return ERR_PTR(-ENOMEM);
 275}
 276
 277static void free_trace_uprobe(struct trace_uprobe *tu)
 278{
 279	int i;
 280
 281	for (i = 0; i < tu->tp.nr_args; i++)
 282		traceprobe_free_probe_arg(&tu->tp.args[i]);
 283
 284	iput(tu->inode);
 285	kfree(tu->tp.call.class->system);
 286	kfree(tu->tp.call.name);
 287	kfree(tu->filename);
 288	kfree(tu);
 289}
 290
 291static struct trace_uprobe *find_probe_event(const char *event, const char *group)
 292{
 293	struct trace_uprobe *tu;
 294
 295	list_for_each_entry(tu, &uprobe_list, list)
 296		if (strcmp(trace_event_name(&tu->tp.call), event) == 0 &&
 297		    strcmp(tu->tp.call.class->system, group) == 0)
 298			return tu;
 299
 300	return NULL;
 301}
 302
  303/* Unregister a trace_uprobe and probe_event: must be called with uprobe_lock held */
 304static int unregister_trace_uprobe(struct trace_uprobe *tu)
 305{
 306	int ret;
 307
 308	ret = unregister_uprobe_event(tu);
 309	if (ret)
 310		return ret;
 311
 312	list_del(&tu->list);
 313	free_trace_uprobe(tu);
 314	return 0;
 315}
 316
 317/* Register a trace_uprobe and probe_event */
 318static int register_trace_uprobe(struct trace_uprobe *tu)
 319{
 320	struct trace_uprobe *old_tu;
 321	int ret;
 322
 323	mutex_lock(&uprobe_lock);
 324
 325	/* register as an event */
 326	old_tu = find_probe_event(trace_event_name(&tu->tp.call),
 327			tu->tp.call.class->system);
 328	if (old_tu) {
 329		/* delete old event */
 330		ret = unregister_trace_uprobe(old_tu);
 331		if (ret)
 332			goto end;
 333	}
 334
 335	ret = register_uprobe_event(tu);
 336	if (ret) {
 337		pr_warn("Failed to register probe event(%d)\n", ret);
 338		goto end;
 339	}
 340
 341	list_add_tail(&tu->list, &uprobe_list);
 342
 343end:
 344	mutex_unlock(&uprobe_lock);
 345
 346	return ret;
 347}
 348
 349/*
 350 * Argument syntax:
 351 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 352 *
 353 *  - Remove uprobe: -:[GRP/]EVENT
 354 */
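/*
 * For example (illustrative values; the offset depends on the probed
 * binary, see Documentation/trace/uprobetracer.txt):
 *
 *   echo 'p:zfree /bin/zsh:0x46420 %ip %ax' >> uprobe_events
 *   echo '-:zfree' >> uprobe_events
 */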
 355static int create_trace_uprobe(int argc, char **argv)
 356{
 357	struct trace_uprobe *tu;
 358	struct inode *inode;
 359	char *arg, *event, *group, *filename;
 360	char buf[MAX_EVENT_NAME_LEN];
 361	struct path path;
 362	unsigned long offset;
 363	bool is_delete, is_return;
 364	int i, ret;
 365
 366	inode = NULL;
 367	ret = 0;
 368	is_delete = false;
 369	is_return = false;
 370	event = NULL;
 371	group = NULL;
 372
 373	/* argc must be >= 1 */
 374	if (argv[0][0] == '-')
 375		is_delete = true;
 376	else if (argv[0][0] == 'r')
 377		is_return = true;
 378	else if (argv[0][0] != 'p') {
  379		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
 380		return -EINVAL;
 381	}
 382
 383	if (argv[0][1] == ':') {
 384		event = &argv[0][2];
 385		arg = strchr(event, '/');
 386
 387		if (arg) {
 388			group = event;
 389			event = arg + 1;
 390			event[-1] = '\0';
 391
 392			if (strlen(group) == 0) {
 393				pr_info("Group name is not specified\n");
 394				return -EINVAL;
 395			}
 396		}
 397		if (strlen(event) == 0) {
 398			pr_info("Event name is not specified\n");
 399			return -EINVAL;
 400		}
 401	}
 402	if (!group)
 403		group = UPROBE_EVENT_SYSTEM;
 404
 405	if (is_delete) {
 406		int ret;
 407
 408		if (!event) {
 409			pr_info("Delete command needs an event name.\n");
 410			return -EINVAL;
 411		}
 412		mutex_lock(&uprobe_lock);
 413		tu = find_probe_event(event, group);
 414
 415		if (!tu) {
 416			mutex_unlock(&uprobe_lock);
 417			pr_info("Event %s/%s doesn't exist.\n", group, event);
 418			return -ENOENT;
 419		}
 420		/* delete an event */
 421		ret = unregister_trace_uprobe(tu);
 422		mutex_unlock(&uprobe_lock);
 423		return ret;
 424	}
 425
 426	if (argc < 2) {
 427		pr_info("Probe point is not specified.\n");
 428		return -EINVAL;
 429	}
 430	if (isdigit(argv[1][0])) {
  431		pr_info("probe point must have a filename.\n");
 432		return -EINVAL;
 433	}
 434	arg = strchr(argv[1], ':');
 435	if (!arg) {
 436		ret = -EINVAL;
 437		goto fail_address_parse;
 438	}
 439
 440	*arg++ = '\0';
 441	filename = argv[1];
 442	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
 443	if (ret)
 444		goto fail_address_parse;
 445
 446	inode = igrab(d_inode(path.dentry));
 447	path_put(&path);
 448
 449	if (!inode || !S_ISREG(inode->i_mode)) {
 450		ret = -EINVAL;
 451		goto fail_address_parse;
 452	}
 453
 454	ret = kstrtoul(arg, 0, &offset);
 455	if (ret)
 456		goto fail_address_parse;
 457
 458	argc -= 2;
 459	argv += 2;
 460
 461	/* setup a probe */
 462	if (!event) {
 463		char *tail;
 464		char *ptr;
 465
 466		tail = kstrdup(kbasename(filename), GFP_KERNEL);
 467		if (!tail) {
 468			ret = -ENOMEM;
 469			goto fail_address_parse;
 470		}
 471
 472		ptr = strpbrk(tail, ".-_");
 473		if (ptr)
 474			*ptr = '\0';
 475
 476		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
 477		event = buf;
 478		kfree(tail);
 479	}
 480
 481	tu = alloc_trace_uprobe(group, event, argc, is_return);
 482	if (IS_ERR(tu)) {
 483		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
 484		ret = PTR_ERR(tu);
 485		goto fail_address_parse;
 486	}
 487	tu->offset = offset;
 488	tu->inode = inode;
 489	tu->filename = kstrdup(filename, GFP_KERNEL);
 490
 491	if (!tu->filename) {
 492		pr_info("Failed to allocate filename.\n");
 493		ret = -ENOMEM;
 494		goto error;
 495	}
 496
 497	/* parse arguments */
 498	ret = 0;
 499	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
 500		struct probe_arg *parg = &tu->tp.args[i];
 501
 502		/* Increment count for freeing args in error case */
 503		tu->tp.nr_args++;
 504
 505		/* Parse argument name */
 506		arg = strchr(argv[i], '=');
 507		if (arg) {
 508			*arg++ = '\0';
 509			parg->name = kstrdup(argv[i], GFP_KERNEL);
 510		} else {
 511			arg = argv[i];
 512			/* If argument name is omitted, set "argN" */
 513			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
 514			parg->name = kstrdup(buf, GFP_KERNEL);
 515		}
 516
 517		if (!parg->name) {
 518			pr_info("Failed to allocate argument[%d] name.\n", i);
 519			ret = -ENOMEM;
 520			goto error;
 521		}
 522
 523		if (!is_good_name(parg->name)) {
 524			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
 525			ret = -EINVAL;
 526			goto error;
 527		}
 528
 529		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
 530			pr_info("Argument[%d] name '%s' conflicts with "
 531				"another field.\n", i, argv[i]);
 532			ret = -EINVAL;
 533			goto error;
 534		}
 535
 536		/* Parse fetch argument */
 537		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
 538						 is_return, false,
 539						 uprobes_fetch_type_table);
 540		if (ret) {
 541			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
 542			goto error;
 543		}
 544	}
 545
 546	ret = register_trace_uprobe(tu);
 547	if (ret)
 548		goto error;
 549	return 0;
 550
 551error:
 552	free_trace_uprobe(tu);
 553	return ret;
 554
 555fail_address_parse:
 556	iput(inode);
 557
 558	pr_info("Failed to parse address or file.\n");
 559
 560	return ret;
 561}
 562
 563static int cleanup_all_probes(void)
 564{
 565	struct trace_uprobe *tu;
 566	int ret = 0;
 567
 568	mutex_lock(&uprobe_lock);
 569	while (!list_empty(&uprobe_list)) {
 570		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
 571		ret = unregister_trace_uprobe(tu);
 572		if (ret)
 573			break;
 574	}
 575	mutex_unlock(&uprobe_lock);
 576	return ret;
 577}
 578
 579/* Probes listing interfaces */
 580static void *probes_seq_start(struct seq_file *m, loff_t *pos)
 581{
 582	mutex_lock(&uprobe_lock);
 583	return seq_list_start(&uprobe_list, *pos);
 584}
 585
 586static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
 587{
 588	return seq_list_next(v, &uprobe_list, pos);
 589}
 590
 591static void probes_seq_stop(struct seq_file *m, void *v)
 592{
 593	mutex_unlock(&uprobe_lock);
 594}
 595
 596static int probes_seq_show(struct seq_file *m, void *v)
 597{
 598	struct trace_uprobe *tu = v;
 599	char c = is_ret_probe(tu) ? 'r' : 'p';
 600	int i;
 601
 602	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
 603			trace_event_name(&tu->tp.call));
 604	seq_printf(m, " %s:", tu->filename);
 605
 606	/* Don't print "0x  (null)" when offset is 0 */
 607	if (tu->offset) {
 608		seq_printf(m, "0x%p", (void *)tu->offset);
 609	} else {
 610		switch (sizeof(void *)) {
 611		case 4:
 612			seq_printf(m, "0x00000000");
 613			break;
 614		case 8:
 615		default:
 616			seq_printf(m, "0x0000000000000000");
 617			break;
 618		}
 619	}
 620
 621	for (i = 0; i < tu->tp.nr_args; i++)
 622		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
 623
 624	seq_putc(m, '\n');
 625	return 0;
 626}
 627
 628static const struct seq_operations probes_seq_op = {
 629	.start	= probes_seq_start,
 630	.next	= probes_seq_next,
 631	.stop	= probes_seq_stop,
 632	.show	= probes_seq_show
 633};
 634
 635static int probes_open(struct inode *inode, struct file *file)
 636{
 637	int ret;
 638
 639	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 640		ret = cleanup_all_probes();
 641		if (ret)
 642			return ret;
 643	}
 644
 645	return seq_open(file, &probes_seq_op);
 646}
 647
 648static ssize_t probes_write(struct file *file, const char __user *buffer,
 649			    size_t count, loff_t *ppos)
 650{
 651	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
 652}
 653
 654static const struct file_operations uprobe_events_ops = {
 655	.owner		= THIS_MODULE,
 656	.open		= probes_open,
 657	.read		= seq_read,
 658	.llseek		= seq_lseek,
 659	.release	= seq_release,
 660	.write		= probes_write,
 661};
 662
 663/* Probes profiling interfaces */
 664static int probes_profile_seq_show(struct seq_file *m, void *v)
 665{
 666	struct trace_uprobe *tu = v;
 667
 668	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
 669			trace_event_name(&tu->tp.call), tu->nhit);
 670	return 0;
 671}
 672
 673static const struct seq_operations profile_seq_op = {
 674	.start	= probes_seq_start,
 675	.next	= probes_seq_next,
 676	.stop	= probes_seq_stop,
 677	.show	= probes_profile_seq_show
 678};
 679
 680static int profile_open(struct inode *inode, struct file *file)
 681{
 682	return seq_open(file, &profile_seq_op);
 683}
 684
 685static const struct file_operations uprobe_profile_ops = {
 686	.owner		= THIS_MODULE,
 687	.open		= profile_open,
 688	.read		= seq_read,
 689	.llseek		= seq_lseek,
 690	.release	= seq_release,
 691};
 692
 693struct uprobe_cpu_buffer {
 694	struct mutex mutex;
 695	void *buf;
 696};
 697static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
 698static int uprobe_buffer_refcnt;
 699
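/*
 * One page per possible CPU is used to stage fetched argument data;
 * the buffers are allocated on first enable and freed again when the
 * last user disables them (see uprobe_buffer_enable/disable below).
 */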
 700static int uprobe_buffer_init(void)
 701{
 702	int cpu, err_cpu;
 703
 704	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
 705	if (uprobe_cpu_buffer == NULL)
 706		return -ENOMEM;
 707
 708	for_each_possible_cpu(cpu) {
 709		struct page *p = alloc_pages_node(cpu_to_node(cpu),
 710						  GFP_KERNEL, 0);
 711		if (p == NULL) {
 712			err_cpu = cpu;
 713			goto err;
 714		}
 715		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
 716		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
 717	}
 718
 719	return 0;
 720
 721err:
 722	for_each_possible_cpu(cpu) {
 723		if (cpu == err_cpu)
 724			break;
 725		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
 726	}
 727
 728	free_percpu(uprobe_cpu_buffer);
 729	return -ENOMEM;
 730}
 731
 732static int uprobe_buffer_enable(void)
 733{
 734	int ret = 0;
 735
 736	BUG_ON(!mutex_is_locked(&event_mutex));
 737
 738	if (uprobe_buffer_refcnt++ == 0) {
 739		ret = uprobe_buffer_init();
 740		if (ret < 0)
 741			uprobe_buffer_refcnt--;
 742	}
 743
 744	return ret;
 745}
 746
 747static void uprobe_buffer_disable(void)
 748{
 749	int cpu;
 750
 751	BUG_ON(!mutex_is_locked(&event_mutex));
 752
 753	if (--uprobe_buffer_refcnt == 0) {
 754		for_each_possible_cpu(cpu)
 755			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
 756							     cpu)->buf);
 757
 758		free_percpu(uprobe_cpu_buffer);
 759		uprobe_cpu_buffer = NULL;
 760	}
 761}
 762
 763static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
 764{
 765	struct uprobe_cpu_buffer *ucb;
 766	int cpu;
 767
 768	cpu = raw_smp_processor_id();
 769	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
 770
 771	/*
 772	 * Use per-cpu buffers for fastest access, but we might migrate
 773	 * so the mutex makes sure we have sole access to it.
 774	 */
 775	mutex_lock(&ucb->mutex);
 776
 777	return ucb;
 778}
 779
 780static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
 781{
 782	mutex_unlock(&ucb->mutex);
 783}
 784
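/*
 * Emit one event record into the ftrace ring buffer for a single
 * trace_event_file; the argument data was already collected into the
 * per-cpu buffer by the dispatcher.
 */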
 785static void __uprobe_trace_func(struct trace_uprobe *tu,
 786				unsigned long func, struct pt_regs *regs,
 787				struct uprobe_cpu_buffer *ucb, int dsize,
 788				struct trace_event_file *trace_file)
 789{
 790	struct uprobe_trace_entry_head *entry;
 791	struct ring_buffer_event *event;
 792	struct ring_buffer *buffer;
 793	void *data;
 794	int size, esize;
 795	struct trace_event_call *call = &tu->tp.call;
 796
 797	WARN_ON(call != trace_file->event_call);
 798
 799	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
 800		return;
 801
 802	if (trace_trigger_soft_disabled(trace_file))
 803		return;
 804
 805	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 806	size = esize + tu->tp.size + dsize;
 807	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 808						call->event.type, size, 0, 0);
 809	if (!event)
 810		return;
 811
 812	entry = ring_buffer_event_data(event);
 813	if (is_ret_probe(tu)) {
 814		entry->vaddr[0] = func;
 815		entry->vaddr[1] = instruction_pointer(regs);
 816		data = DATAOF_TRACE_ENTRY(entry, true);
 817	} else {
 818		entry->vaddr[0] = instruction_pointer(regs);
 819		data = DATAOF_TRACE_ENTRY(entry, false);
 820	}
 821
 822	memcpy(data, ucb->buf, tu->tp.size + dsize);
 823
 824	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
 825}
 826
 827/* uprobe handler */
 828static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
 829			     struct uprobe_cpu_buffer *ucb, int dsize)
 830{
 831	struct event_file_link *link;
 832
 833	if (is_ret_probe(tu))
 834		return 0;
 835
 836	rcu_read_lock();
 837	list_for_each_entry_rcu(link, &tu->tp.files, list)
 838		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
 839	rcu_read_unlock();
 840
 841	return 0;
 842}
 843
 844static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
 845				 struct pt_regs *regs,
 846				 struct uprobe_cpu_buffer *ucb, int dsize)
 847{
 848	struct event_file_link *link;
 849
 850	rcu_read_lock();
 851	list_for_each_entry_rcu(link, &tu->tp.files, list)
 852		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
 853	rcu_read_unlock();
 854}
 855
 856/* Event entry printers */
 857static enum print_line_t
 858print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
 859{
 860	struct uprobe_trace_entry_head *entry;
 861	struct trace_seq *s = &iter->seq;
 862	struct trace_uprobe *tu;
 863	u8 *data;
 864	int i;
 865
 866	entry = (struct uprobe_trace_entry_head *)iter->ent;
 867	tu = container_of(event, struct trace_uprobe, tp.call.event);
 868
 869	if (is_ret_probe(tu)) {
 870		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
 871				 trace_event_name(&tu->tp.call),
 872				 entry->vaddr[1], entry->vaddr[0]);
 873		data = DATAOF_TRACE_ENTRY(entry, true);
 874	} else {
 875		trace_seq_printf(s, "%s: (0x%lx)",
 876				 trace_event_name(&tu->tp.call),
 877				 entry->vaddr[0]);
 878		data = DATAOF_TRACE_ENTRY(entry, false);
 879	}
 880
 881	for (i = 0; i < tu->tp.nr_args; i++) {
 882		struct probe_arg *parg = &tu->tp.args[i];
 883
 884		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
 885			goto out;
 886	}
 887
 888	trace_seq_putc(s, '\n');
 889
 890 out:
 891	return trace_handle_return(s);
 892}
 893
 894typedef bool (*filter_func_t)(struct uprobe_consumer *self,
 895				enum uprobe_filter_ctx ctx,
 896				struct mm_struct *mm);
 897
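/*
 * Enable the probe for the ftrace path (@file != NULL) or the perf
 * path; the two are mutually exclusive.  The breakpoint itself is
 * registered only on the first enable.
 */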
 898static int
 899probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
 900		   filter_func_t filter)
 901{
 902	bool enabled = trace_probe_is_enabled(&tu->tp);
 903	struct event_file_link *link = NULL;
 904	int ret;
 905
 906	if (file) {
 907		if (tu->tp.flags & TP_FLAG_PROFILE)
 908			return -EINTR;
 909
 910		link = kmalloc(sizeof(*link), GFP_KERNEL);
 911		if (!link)
 912			return -ENOMEM;
 913
 914		link->file = file;
 915		list_add_tail_rcu(&link->list, &tu->tp.files);
 916
 917		tu->tp.flags |= TP_FLAG_TRACE;
 918	} else {
 919		if (tu->tp.flags & TP_FLAG_TRACE)
 920			return -EINTR;
 921
 922		tu->tp.flags |= TP_FLAG_PROFILE;
 923	}
 924
 925	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 926
 927	if (enabled)
 928		return 0;
 929
 930	ret = uprobe_buffer_enable();
 931	if (ret)
 932		goto err_flags;
 933
 934	tu->consumer.filter = filter;
 935	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
 936	if (ret)
 937		goto err_buffer;
 938
 939	return 0;
 940
 941 err_buffer:
 942	uprobe_buffer_disable();
 943
 944 err_flags:
 945	if (file) {
 946		list_del(&link->list);
 947		kfree(link);
 948		tu->tp.flags &= ~TP_FLAG_TRACE;
 949	} else {
 950		tu->tp.flags &= ~TP_FLAG_PROFILE;
 951	}
 952	return ret;
 953}
 954
 955static void
 956probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
 957{
 958	if (!trace_probe_is_enabled(&tu->tp))
 959		return;
 960
 961	if (file) {
 962		struct event_file_link *link;
 963
 964		link = find_event_file_link(&tu->tp, file);
 965		if (!link)
 966			return;
 967
 968		list_del_rcu(&link->list);
 969		/* synchronize with u{,ret}probe_trace_func */
 970		synchronize_sched();
 971		kfree(link);
 972
 973		if (!list_empty(&tu->tp.files))
 974			return;
 975	}
 976
 977	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 978
 979	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
 980	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
 981
 982	uprobe_buffer_disable();
 983}
 984
 985static int uprobe_event_define_fields(struct trace_event_call *event_call)
 986{
 987	int ret, i, size;
 988	struct uprobe_trace_entry_head field;
 989	struct trace_uprobe *tu = event_call->data;
 990
 991	if (is_ret_probe(tu)) {
 992		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
 993		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
 994		size = SIZEOF_TRACE_ENTRY(true);
 995	} else {
 996		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
 997		size = SIZEOF_TRACE_ENTRY(false);
 998	}
 999	/* Set argument names as fields */
1000	for (i = 0; i < tu->tp.nr_args; i++) {
1001		struct probe_arg *parg = &tu->tp.args[i];
1002
1003		ret = trace_define_field(event_call, parg->type->fmttype,
1004					 parg->name, size + parg->offset,
1005					 parg->type->size, parg->type->is_signed,
1006					 FILTER_OTHER);
1007
1008		if (ret)
1009			return ret;
1010	}
1011	return 0;
1012}
1013
1014#ifdef CONFIG_PERF_EVENTS
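/*
 * Return true if the probe should stay armed for @mm: either a
 * system-wide perf event is attached, or one of the per-task events
 * targets a task using this mm.
 */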
1015static bool
1016__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1017{
1018	struct perf_event *event;
1019
1020	if (filter->nr_systemwide)
1021		return true;
1022
1023	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1024		if (event->hw.target->mm == mm)
1025			return true;
1026	}
1027
1028	return false;
1029}
1030
1031static inline bool
1032uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
1033{
1034	return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
1035}
1036
1037static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
1038{
1039	bool done;
1040
1041	write_lock(&tu->filter.rwlock);
1042	if (event->hw.target) {
1043		list_del(&event->hw.tp_list);
1044		done = tu->filter.nr_systemwide ||
1045			(event->hw.target->flags & PF_EXITING) ||
1046			uprobe_filter_event(tu, event);
1047	} else {
1048		tu->filter.nr_systemwide--;
1049		done = tu->filter.nr_systemwide;
1050	}
1051	write_unlock(&tu->filter.rwlock);
1052
1053	if (!done)
1054		return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
1055
1056	return 0;
1057}
1058
1059static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
1060{
1061	bool done;
1062	int err;
1063
1064	write_lock(&tu->filter.rwlock);
1065	if (event->hw.target) {
1066		/*
1067		 * event->parent != NULL means copy_process(), we can avoid
1068		 * uprobe_apply(). current->mm must be probed and we can rely
1069		 * on dup_mmap() which preserves the already installed bp's.
1070		 *
1071		 * attr.enable_on_exec means that exec/mmap will install the
1072		 * breakpoints we need.
1073		 */
1074		done = tu->filter.nr_systemwide ||
1075			event->parent || event->attr.enable_on_exec ||
1076			uprobe_filter_event(tu, event);
1077		list_add(&event->hw.tp_list, &tu->filter.perf_events);
1078	} else {
1079		done = tu->filter.nr_systemwide;
1080		tu->filter.nr_systemwide++;
1081	}
1082	write_unlock(&tu->filter.rwlock);
1083
1084	err = 0;
1085	if (!done) {
1086		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
1087		if (err)
1088			uprobe_perf_close(tu, event);
1089	}
1090	return err;
1091}
1092
1093static bool uprobe_perf_filter(struct uprobe_consumer *uc,
1094				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
1095{
1096	struct trace_uprobe *tu;
1097	int ret;
1098
1099	tu = container_of(uc, struct trace_uprobe, consumer);
1100	read_lock(&tu->filter.rwlock);
1101	ret = __uprobe_perf_filter(&tu->filter, mm);
1102	read_unlock(&tu->filter.rwlock);
1103
1104	return ret;
1105}
1106
1107static void __uprobe_perf_func(struct trace_uprobe *tu,
1108			       unsigned long func, struct pt_regs *regs,
1109			       struct uprobe_cpu_buffer *ucb, int dsize)
1110{
1111	struct trace_event_call *call = &tu->tp.call;
1112	struct uprobe_trace_entry_head *entry;
1113	struct bpf_prog *prog = call->prog;
1114	struct hlist_head *head;
1115	void *data;
1116	int size, esize;
1117	int rctx;
1118
1119	if (prog && !trace_call_bpf(prog, regs))
1120		return;
1121
1122	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1123
1124	size = esize + tu->tp.size + dsize;
1125	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1126	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1127		return;
1128
1129	preempt_disable();
1130	head = this_cpu_ptr(call->perf_events);
1131	if (hlist_empty(head))
1132		goto out;
1133
1134	entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1135	if (!entry)
1136		goto out;
1137
1138	if (is_ret_probe(tu)) {
1139		entry->vaddr[0] = func;
1140		entry->vaddr[1] = instruction_pointer(regs);
1141		data = DATAOF_TRACE_ENTRY(entry, true);
1142	} else {
1143		entry->vaddr[0] = instruction_pointer(regs);
1144		data = DATAOF_TRACE_ENTRY(entry, false);
1145	}
1146
1147	memcpy(data, ucb->buf, tu->tp.size + dsize);
1148
1149	if (size - esize > tu->tp.size + dsize) {
1150		int len = tu->tp.size + dsize;
1151
1152		memset(data + len, 0, size - esize - len);
1153	}
1154
1155	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1156 out:
1157	preempt_enable();
1158}
1159
1160/* uprobe profile handler */
1161static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1162			    struct uprobe_cpu_buffer *ucb, int dsize)
1163{
1164	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
1165		return UPROBE_HANDLER_REMOVE;
1166
1167	if (!is_ret_probe(tu))
1168		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
1169	return 0;
1170}
1171
1172static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1173				struct pt_regs *regs,
1174				struct uprobe_cpu_buffer *ucb, int dsize)
1175{
1176	__uprobe_perf_func(tu, func, regs, ucb, dsize);
1177}
1178#endif	/* CONFIG_PERF_EVENTS */
1179
1180static int
1181trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1182		      void *data)
1183{
1184	struct trace_uprobe *tu = event->data;
1185	struct trace_event_file *file = data;
1186
1187	switch (type) {
1188	case TRACE_REG_REGISTER:
1189		return probe_event_enable(tu, file, NULL);
1190
1191	case TRACE_REG_UNREGISTER:
1192		probe_event_disable(tu, file);
1193		return 0;
1194
1195#ifdef CONFIG_PERF_EVENTS
1196	case TRACE_REG_PERF_REGISTER:
1197		return probe_event_enable(tu, NULL, uprobe_perf_filter);
1198
1199	case TRACE_REG_PERF_UNREGISTER:
1200		probe_event_disable(tu, NULL);
1201		return 0;
1202
1203	case TRACE_REG_PERF_OPEN:
1204		return uprobe_perf_open(tu, data);
1205
1206	case TRACE_REG_PERF_CLOSE:
1207		return uprobe_perf_close(tu, data);
1208
1209#endif
1210	default:
1211		return 0;
1212	}
1213	return 0;
1214}
1215
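/*
 * Breakpoint handler invoked by the uprobes core: fetch the arguments
 * once into a per-cpu buffer, then hand them to the ftrace and/or
 * perf paths depending on which flags are set.
 */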
1216static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1217{
1218	struct trace_uprobe *tu;
1219	struct uprobe_dispatch_data udd;
1220	struct uprobe_cpu_buffer *ucb;
1221	int dsize, esize;
1222	int ret = 0;
1223
1224
1225	tu = container_of(con, struct trace_uprobe, consumer);
1226	tu->nhit++;
1227
1228	udd.tu = tu;
1229	udd.bp_addr = instruction_pointer(regs);
1230
1231	current->utask->vaddr = (unsigned long) &udd;
1232
1233	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1234		return 0;
1235
1236	dsize = __get_data_size(&tu->tp, regs);
1237	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1238
1239	ucb = uprobe_buffer_get();
1240	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1241
1242	if (tu->tp.flags & TP_FLAG_TRACE)
1243		ret |= uprobe_trace_func(tu, regs, ucb, dsize);
1244
1245#ifdef CONFIG_PERF_EVENTS
1246	if (tu->tp.flags & TP_FLAG_PROFILE)
1247		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
1248#endif
1249	uprobe_buffer_put(ucb);
1250	return ret;
1251}
1252
1253static int uretprobe_dispatcher(struct uprobe_consumer *con,
1254				unsigned long func, struct pt_regs *regs)
1255{
1256	struct trace_uprobe *tu;
1257	struct uprobe_dispatch_data udd;
1258	struct uprobe_cpu_buffer *ucb;
1259	int dsize, esize;
1260
1261	tu = container_of(con, struct trace_uprobe, consumer);
1262
1263	udd.tu = tu;
1264	udd.bp_addr = func;
1265
1266	current->utask->vaddr = (unsigned long) &udd;
1267
1268	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1269		return 0;
1270
1271	dsize = __get_data_size(&tu->tp, regs);
1272	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1273
1274	ucb = uprobe_buffer_get();
1275	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
1276
1277	if (tu->tp.flags & TP_FLAG_TRACE)
1278		uretprobe_trace_func(tu, func, regs, ucb, dsize);
1279
1280#ifdef CONFIG_PERF_EVENTS
1281	if (tu->tp.flags & TP_FLAG_PROFILE)
1282		uretprobe_perf_func(tu, func, regs, ucb, dsize);
1283#endif
1284	uprobe_buffer_put(ucb);
1285	return 0;
1286}
1287
1288static struct trace_event_functions uprobe_funcs = {
1289	.trace		= print_uprobe_event
1290};
1291
1292static int register_uprobe_event(struct trace_uprobe *tu)
1293{
1294	struct trace_event_call *call = &tu->tp.call;
1295	int ret;
1296
1297	/* Initialize trace_event_call */
1298	INIT_LIST_HEAD(&call->class->fields);
1299	call->event.funcs = &uprobe_funcs;
1300	call->class->define_fields = uprobe_event_define_fields;
1301
1302	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
1303		return -ENOMEM;
1304
1305	ret = register_trace_event(&call->event);
1306	if (!ret) {
1307		kfree(call->print_fmt);
1308		return -ENODEV;
1309	}
1310
1311	call->flags = TRACE_EVENT_FL_UPROBE;
1312	call->class->reg = trace_uprobe_register;
1313	call->data = tu;
1314	ret = trace_add_event_call(call);
1315
1316	if (ret) {
1317		pr_info("Failed to register uprobe event: %s\n",
1318			trace_event_name(call));
1319		kfree(call->print_fmt);
1320		unregister_trace_event(&call->event);
1321	}
1322
1323	return ret;
1324}
1325
1326static int unregister_uprobe_event(struct trace_uprobe *tu)
1327{
1328	int ret;
1329
1330	/* tu->event is unregistered in trace_remove_event_call() */
1331	ret = trace_remove_event_call(&tu->tp.call);
1332	if (ret)
1333		return ret;
1334	kfree(tu->tp.call.print_fmt);
1335	tu->tp.call.print_fmt = NULL;
1336	return 0;
1337}
1338
 1339/* Make a trace interface for controlling probe points */
1340static __init int init_uprobe_trace(void)
1341{
1342	struct dentry *d_tracer;
1343
1344	d_tracer = tracing_init_dentry();
1345	if (IS_ERR(d_tracer))
1346		return 0;
1347
1348	trace_create_file("uprobe_events", 0644, d_tracer,
1349				    NULL, &uprobe_events_ops);
1350	/* Profile interface */
1351	trace_create_file("uprobe_profile", 0444, d_tracer,
1352				    NULL, &uprobe_profile_ops);
1353	return 0;
1354}
1355
1356fs_initcall(init_uprobe_trace);

v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * uprobes-based tracing events
   4 *
   5 * Copyright (C) IBM Corporation, 2010-2012
   6 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
   7 */
   8#define pr_fmt(fmt)	"trace_uprobe: " fmt
   9
  10#include <linux/bpf-cgroup.h>
  11#include <linux/security.h>
  12#include <linux/ctype.h>
  13#include <linux/module.h>
  14#include <linux/uaccess.h>
  15#include <linux/uprobes.h>
  16#include <linux/namei.h>
  17#include <linux/string.h>
  18#include <linux/rculist.h>
  19#include <linux/filter.h>
  20#include <linux/percpu.h>
  21
  22#include "trace_dynevent.h"
  23#include "trace_probe.h"
  24#include "trace_probe_tmpl.h"
  25
  26#define UPROBE_EVENT_SYSTEM	"uprobes"
  27
  28struct uprobe_trace_entry_head {
  29	struct trace_entry	ent;
  30	unsigned long		vaddr[];
  31};
  32
  33#define SIZEOF_TRACE_ENTRY(is_return)			\
  34	(sizeof(struct uprobe_trace_entry_head) +	\
  35	 sizeof(unsigned long) * (is_return ? 2 : 1))
  36
  37#define DATAOF_TRACE_ENTRY(entry, is_return)		\
  38	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
  39
  40static int trace_uprobe_create(const char *raw_command);
  41static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
  42static int trace_uprobe_release(struct dyn_event *ev);
  43static bool trace_uprobe_is_busy(struct dyn_event *ev);
  44static bool trace_uprobe_match(const char *system, const char *event,
  45			int argc, const char **argv, struct dyn_event *ev);
  46
  47static struct dyn_event_operations trace_uprobe_ops = {
  48	.create = trace_uprobe_create,
  49	.show = trace_uprobe_show,
  50	.is_busy = trace_uprobe_is_busy,
  51	.free = trace_uprobe_release,
  52	.match = trace_uprobe_match,
  53};
  54
  55/*
  56 * uprobe event core functions
  57 */
  58struct trace_uprobe {
  59	struct dyn_event		devent;
  60	struct uprobe_consumer		consumer;
  61	struct path			path;
  62	char				*filename;
  63	struct uprobe			*uprobe;
  64	unsigned long			offset;
  65	unsigned long			ref_ctr_offset;
  66	unsigned long __percpu		*nhits;
  67	struct trace_probe		tp;
  68};
  69
  70static bool is_trace_uprobe(struct dyn_event *ev)
  71{
  72	return ev->ops == &trace_uprobe_ops;
  73}
  74
  75static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
  76{
  77	return container_of(ev, struct trace_uprobe, devent);
  78}
  79
  80/**
  81 * for_each_trace_uprobe - iterate over the trace_uprobe list
  82 * @pos:	the struct trace_uprobe * for each entry
  83 * @dpos:	the struct dyn_event * to use as a loop cursor
  84 */
  85#define for_each_trace_uprobe(pos, dpos)	\
  86	for_each_dyn_event(dpos)		\
  87		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
  88
  89static int register_uprobe_event(struct trace_uprobe *tu);
  90static int unregister_uprobe_event(struct trace_uprobe *tu);
  91
  92static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
  93			     __u64 *data);
  94static int uretprobe_dispatcher(struct uprobe_consumer *con,
  95				unsigned long func, struct pt_regs *regs,
  96				__u64 *data);
  97
  98#ifdef CONFIG_STACK_GROWSUP
  99static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
 100{
 101	return addr - (n * sizeof(long));
 102}
 103#else
 104static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
 105{
 106	return addr + (n * sizeof(long));
 107}
 108#endif
 109
 110static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
 111{
 112	unsigned long ret;
 113	unsigned long addr = user_stack_pointer(regs);
 114
 115	addr = adjust_stack_addr(addr, n);
 116
 117	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
 118		return 0;
 119
 120	return ret;
 121}
 122
 123/*
 124 * Uprobes-specific fetch functions
 125 */
 126static nokprobe_inline int
 127probe_mem_read(void *dest, void *src, size_t size)
 128{
 129	void __user *vaddr = (void __force __user *)src;
 130
 131	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
 132}
 133
 134static nokprobe_inline int
 135probe_mem_read_user(void *dest, void *src, size_t size)
 136{
 137	return probe_mem_read(dest, src, size);
 138}
 139
 140/*
 141 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 142 * length and relative data location.
 143 */
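/*
 * On return, the u32 slot at *dest encodes (via make_data_loc) the
 * number of bytes copied, including the terminating null, and the
 * offset of the string data relative to @base; a failed copy is
 * recorded as a zero-length string.
 */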
 144static nokprobe_inline int
 145fetch_store_string(unsigned long addr, void *dest, void *base)
 146{
 147	long ret;
 148	u32 loc = *(u32 *)dest;
 149	int maxlen  = get_loc_len(loc);
 150	u8 *dst = get_loc_data(dest, base);
 151	void __user *src = (void __force __user *) addr;
 152
 153	if (unlikely(!maxlen))
 154		return -ENOMEM;
 155
 156	if (addr == FETCH_TOKEN_COMM)
 157		ret = strscpy(dst, current->comm, maxlen);
 158	else
 159		ret = strncpy_from_user(dst, src, maxlen);
 160	if (ret >= 0) {
 161		if (ret == maxlen)
 162			dst[ret - 1] = '\0';
 163		else
 164			/*
 165			 * Include the terminating null byte. In this case it
 166			 * was copied by strncpy_from_user but not accounted
 167			 * for in ret.
 168			 */
 169			ret++;
 170		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
 171	} else
 172		*(u32 *)dest = make_data_loc(0, (void *)dst - base);
 173
 174	return ret;
 175}
 176
 177static nokprobe_inline int
 178fetch_store_string_user(unsigned long addr, void *dest, void *base)
 179{
 180	return fetch_store_string(addr, dest, base);
 181}
 182
  183/* Return the length of the string, including the terminating null byte */
 184static nokprobe_inline int
 185fetch_store_strlen(unsigned long addr)
 186{
 187	int len;
 188	void __user *vaddr = (void __force __user *) addr;
 189
 190	if (addr == FETCH_TOKEN_COMM)
 191		len = strlen(current->comm) + 1;
 192	else
 193		len = strnlen_user(vaddr, MAX_STRING_SIZE);
 194
 195	return (len > MAX_STRING_SIZE) ? 0 : len;
 196}
 197
 198static nokprobe_inline int
 199fetch_store_strlen_user(unsigned long addr)
 200{
 201	return fetch_store_strlen(addr);
 202}
 203
 204static unsigned long translate_user_vaddr(unsigned long file_offset)
 205{
 206	unsigned long base_addr;
 207	struct uprobe_dispatch_data *udd;
 208
 209	udd = (void *) current->utask->vaddr;
 210
 211	base_addr = udd->bp_addr - udd->tu->offset;
 212	return base_addr + file_offset;
 213}
 214
 215/* Note that we don't verify it, since the code does not come from user space */
 216static int
 217process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
 218		   void *dest, void *base)
 219{
 220	struct pt_regs *regs = rec;
 221	unsigned long val;
 222	int ret;
 223
 224	/* 1st stage: get value from context */
 225	switch (code->op) {
 226	case FETCH_OP_REG:
 227		val = regs_get_register(regs, code->param);
 228		break;
 229	case FETCH_OP_STACK:
 230		val = get_user_stack_nth(regs, code->param);
 231		break;
 232	case FETCH_OP_STACKP:
 233		val = user_stack_pointer(regs);
 234		break;
 235	case FETCH_OP_RETVAL:
 236		val = regs_return_value(regs);
 237		break;
 238	case FETCH_OP_COMM:
 239		val = FETCH_TOKEN_COMM;
 240		break;
 241	case FETCH_OP_FOFFS:
 242		val = translate_user_vaddr(code->immediate);
 243		break;
 244	default:
 245		ret = process_common_fetch_insn(code, &val);
 246		if (ret < 0)
 247			return ret;
 248	}
 249	code++;
 250
 251	return process_fetch_insn_bottom(code, val, dest, base);
 252}
 253NOKPROBE_SYMBOL(process_fetch_insn)
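/*
 * Stage 1 above only resolves the argument's base value (register,
 * stack slot, return value, file offset, ...); the common
 * process_fetch_insn_bottom() then applies any dereference/memory
 * operations and stores the result.
 */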
 254
 255static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
 256{
 257	rwlock_init(&filter->rwlock);
 258	filter->nr_systemwide = 0;
 259	INIT_LIST_HEAD(&filter->perf_events);
 260}
 261
 262static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
 263{
 264	return !filter->nr_systemwide && list_empty(&filter->perf_events);
 265}
 266
 267static inline bool is_ret_probe(struct trace_uprobe *tu)
 268{
 269	return tu->consumer.ret_handler != NULL;
 270}
 271
 272static bool trace_uprobe_is_busy(struct dyn_event *ev)
 273{
 274	struct trace_uprobe *tu = to_trace_uprobe(ev);
 275
 276	return trace_probe_is_enabled(&tu->tp);
 277}
 278
 279static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
 280					    int argc, const char **argv)
 281{
 282	char buf[MAX_ARGSTR_LEN + 1];
 283	int len;
 284
 285	if (!argc)
 286		return true;
 287
 288	len = strlen(tu->filename);
 289	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
 290		return false;
 291
 292	if (tu->ref_ctr_offset == 0)
 293		snprintf(buf, sizeof(buf), "0x%0*lx",
 294				(int)(sizeof(void *) * 2), tu->offset);
 295	else
 296		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
 297				(int)(sizeof(void *) * 2), tu->offset,
 298				tu->ref_ctr_offset);
 299	if (strcmp(buf, &argv[0][len + 1]))
 300		return false;
 301
 302	argc--; argv++;
 303
 304	return trace_probe_match_command_args(&tu->tp, argc, argv);
 305}
 306
 307static bool trace_uprobe_match(const char *system, const char *event,
 308			int argc, const char **argv, struct dyn_event *ev)
 309{
 310	struct trace_uprobe *tu = to_trace_uprobe(ev);
 311
 312	return (event[0] == '\0' ||
 313		strcmp(trace_probe_name(&tu->tp), event) == 0) &&
 314	   (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
 315	   trace_uprobe_match_command_head(tu, argc, argv);
 316}
 317
 318static nokprobe_inline struct trace_uprobe *
 319trace_uprobe_primary_from_call(struct trace_event_call *call)
 320{
 321	struct trace_probe *tp;
 322
 323	tp = trace_probe_primary_from_call(call);
 324	if (WARN_ON_ONCE(!tp))
 325		return NULL;
 326
 327	return container_of(tp, struct trace_uprobe, tp);
 328}
 329
 330/*
 331 * Allocate new trace_uprobe and initialize it (including uprobes).
 332 */
 333static struct trace_uprobe *
 334alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 335{
 336	struct trace_uprobe *tu;
 337	int ret;
 338
 339	tu = kzalloc(struct_size(tu, tp.args, nargs), GFP_KERNEL);
 340	if (!tu)
 341		return ERR_PTR(-ENOMEM);
 342
 343	tu->nhits = alloc_percpu(unsigned long);
 344	if (!tu->nhits) {
 345		ret = -ENOMEM;
 346		goto error;
 347	}
 348
 349	ret = trace_probe_init(&tu->tp, event, group, true, nargs);
 350	if (ret < 0)
 351		goto error;
 352
 353	dyn_event_init(&tu->devent, &trace_uprobe_ops);
 354	tu->consumer.handler = uprobe_dispatcher;
 355	if (is_ret)
 356		tu->consumer.ret_handler = uretprobe_dispatcher;
 357	init_trace_uprobe_filter(tu->tp.event->filter);
 358	return tu;
 359
 360error:
 361	free_percpu(tu->nhits);
 362	kfree(tu);
 363
 364	return ERR_PTR(ret);
 365}
 366
 367static void free_trace_uprobe(struct trace_uprobe *tu)
 368{
 369	if (!tu)
 370		return;
 371
 372	path_put(&tu->path);
 373	trace_probe_cleanup(&tu->tp);
 374	kfree(tu->filename);
 375	free_percpu(tu->nhits);
 376	kfree(tu);
 377}
 378
 379static struct trace_uprobe *find_probe_event(const char *event, const char *group)
 380{
 381	struct dyn_event *pos;
 382	struct trace_uprobe *tu;
 383
 384	for_each_trace_uprobe(tu, pos)
 385		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
 386		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
 387			return tu;
 388
 389	return NULL;
 390}
 391
 392/* Unregister a trace_uprobe and probe_event */
 393static int unregister_trace_uprobe(struct trace_uprobe *tu)
 394{
 395	int ret;
 396
 397	if (trace_probe_has_sibling(&tu->tp))
 398		goto unreg;
 399
 400	/* If there's a reference to the dynamic event */
 401	if (trace_event_dyn_busy(trace_probe_event_call(&tu->tp)))
 402		return -EBUSY;
 403
 404	ret = unregister_uprobe_event(tu);
 405	if (ret)
 406		return ret;
 407
 408unreg:
 409	dyn_event_remove(&tu->devent);
 410	trace_probe_unlink(&tu->tp);
 411	free_trace_uprobe(tu);
 412	return 0;
 413}
 414
 415static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
 416					 struct trace_uprobe *comp)
 417{
 418	struct trace_probe_event *tpe = orig->tp.event;
 419	struct inode *comp_inode = d_real_inode(comp->path.dentry);
 420	int i;
 421
 422	list_for_each_entry(orig, &tpe->probes, tp.list) {
 423		if (comp_inode != d_real_inode(orig->path.dentry) ||
 424		    comp->offset != orig->offset)
 425			continue;
 426
 427		/*
 428		 * trace_probe_compare_arg_type() ensured that nr_args and
  429		 * each argument name and type are the same. Let's compare comm.
 430		 */
 431		for (i = 0; i < orig->tp.nr_args; i++) {
 432			if (strcmp(orig->tp.args[i].comm,
 433				   comp->tp.args[i].comm))
 434				break;
 435		}
 436
 437		if (i == orig->tp.nr_args)
 438			return true;
 439	}
 440
 441	return false;
 442}
 443
 444static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
 445{
 446	int ret;
 447
 448	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
 449	if (ret) {
  450		/* Note that arguments start at index 2 */
 451		trace_probe_log_set_index(ret + 1);
 452		trace_probe_log_err(0, DIFF_ARG_TYPE);
 453		return -EEXIST;
 454	}
 455	if (trace_uprobe_has_same_uprobe(to, tu)) {
 456		trace_probe_log_set_index(0);
 457		trace_probe_log_err(0, SAME_PROBE);
 458		return -EEXIST;
 459	}
 460
 461	/* Append to existing event */
 462	ret = trace_probe_append(&tu->tp, &to->tp);
 463	if (!ret)
 464		dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
 465
 466	return ret;
 467}
 468
 469/*
  470 * A uprobe with multiple reference counters is not allowed, i.e.
  471 * if the inode and offset match, the reference counter offset
  472 * *must* match as well. There is one exception though: if the
  473 * user is replacing an old trace_uprobe with a new one (same
  474 * group/event), we allow the same uprobe with a new reference
  475 * counter, as long as the new one does not conflict with any
  476 * other existing ones.
 477 */
 478static int validate_ref_ctr_offset(struct trace_uprobe *new)
 479{
 480	struct dyn_event *pos;
 481	struct trace_uprobe *tmp;
 482	struct inode *new_inode = d_real_inode(new->path.dentry);
 483
 484	for_each_trace_uprobe(tmp, pos) {
 485		if (new_inode == d_real_inode(tmp->path.dentry) &&
 486		    new->offset == tmp->offset &&
 487		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
 488			pr_warn("Reference counter offset mismatch.");
 489			return -EINVAL;
 490		}
 491	}
 492	return 0;
 493}
 494
 495/* Register a trace_uprobe and probe_event */
 496static int register_trace_uprobe(struct trace_uprobe *tu)
 497{
 498	struct trace_uprobe *old_tu;
 499	int ret;
 500
 501	mutex_lock(&event_mutex);
 502
 503	ret = validate_ref_ctr_offset(tu);
 504	if (ret)
 505		goto end;
 506
 507	/* register as an event */
 508	old_tu = find_probe_event(trace_probe_name(&tu->tp),
 509				  trace_probe_group_name(&tu->tp));
 510	if (old_tu) {
 511		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
 512			trace_probe_log_set_index(0);
 513			trace_probe_log_err(0, DIFF_PROBE_TYPE);
 514			ret = -EEXIST;
 515		} else {
 516			ret = append_trace_uprobe(tu, old_tu);
 517		}
 518		goto end;
 519	}
 520
 521	ret = register_uprobe_event(tu);
 522	if (ret) {
 523		if (ret == -EEXIST) {
 524			trace_probe_log_set_index(0);
 525			trace_probe_log_err(0, EVENT_EXIST);
 526		} else
 527			pr_warn("Failed to register probe event(%d)\n", ret);
 528		goto end;
 529	}
 530
 531	dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
 532
 533end:
 534	mutex_unlock(&event_mutex);
 535
 536	return ret;
 537}
 538
 539/*
 540 * Argument syntax:
 541 *  - Add uprobe: p|r[:[GRP/][EVENT]] PATH:OFFSET[%return][(REF)] [FETCHARGS]
 542 */
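/*
 * For example (illustrative values; the offsets and the optional
 * reference-counter offset depend on the probed binary):
 *
 *   p:my_entry /usr/bin/prog:0x4710
 *   r:my_exit  /usr/bin/prog:0x4710
 *   p:my_sdt   /usr/bin/prog:0x4710(0x10036)
 */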
 543static int __trace_uprobe_create(int argc, const char **argv)
 544{
 545	struct trace_uprobe *tu;
 546	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
 547	char *arg, *filename, *rctr, *rctr_end, *tmp;
 548	char buf[MAX_EVENT_NAME_LEN];
 549	char gbuf[MAX_EVENT_NAME_LEN];
 550	enum probe_print_type ptype;
 551	struct path path;
 552	unsigned long offset, ref_ctr_offset;
 553	bool is_return = false;
 554	int i, ret;
 555
 556	ref_ctr_offset = 0;
 557
 558	switch (argv[0][0]) {
 559	case 'r':
 560		is_return = true;
 561		break;
 562	case 'p':
 563		break;
 564	default:
 565		return -ECANCELED;
 566	}
 567
 568	if (argc < 2)
 569		return -ECANCELED;
 570	if (argc - 2 > MAX_TRACE_ARGS)
 571		return -E2BIG;
 572
 573	if (argv[0][1] == ':')
 574		event = &argv[0][2];
 575
 576	if (!strchr(argv[1], '/'))
 577		return -ECANCELED;
 578
 579	filename = kstrdup(argv[1], GFP_KERNEL);
 580	if (!filename)
 581		return -ENOMEM;
 582
 583	/* Find the last occurrence, in case the path contains ':' too. */
 584	arg = strrchr(filename, ':');
 585	if (!arg || !isdigit(arg[1])) {
 586		kfree(filename);
 587		return -ECANCELED;
 588	}
 589
 590	trace_probe_log_init("trace_uprobe", argc, argv);
 591	trace_probe_log_set_index(1);	/* filename is the 2nd argument */
 592
 593	*arg++ = '\0';
 594	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
 595	if (ret) {
 596		trace_probe_log_err(0, FILE_NOT_FOUND);
 597		kfree(filename);
 598		trace_probe_log_clear();
 599		return ret;
 600	}
 601	if (!d_is_reg(path.dentry)) {
 602		trace_probe_log_err(0, NO_REGULAR_FILE);
 603		ret = -EINVAL;
 604		goto fail_address_parse;
 605	}
 606
 607	/* Parse reference counter offset if specified. */
 608	rctr = strchr(arg, '(');
 609	if (rctr) {
 610		rctr_end = strchr(rctr, ')');
 611		if (!rctr_end) {
 612			ret = -EINVAL;
 613			rctr_end = rctr + strlen(rctr);
 614			trace_probe_log_err(rctr_end - filename,
 615					    REFCNT_OPEN_BRACE);
 616			goto fail_address_parse;
 617		} else if (rctr_end[1] != '\0') {
 618			ret = -EINVAL;
 619			trace_probe_log_err(rctr_end + 1 - filename,
 620					    BAD_REFCNT_SUFFIX);
 621			goto fail_address_parse;
 622		}
 623
 624		*rctr++ = '\0';
 625		*rctr_end = '\0';
 626		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
 627		if (ret) {
 628			trace_probe_log_err(rctr - filename, BAD_REFCNT);
 629			goto fail_address_parse;
 630		}
 631	}
 632
 633	/* Check if there is %return suffix */
 634	tmp = strchr(arg, '%');
 635	if (tmp) {
 636		if (!strcmp(tmp, "%return")) {
 637			*tmp = '\0';
 638			is_return = true;
 639		} else {
 640			trace_probe_log_err(tmp - filename, BAD_ADDR_SUFFIX);
 641			ret = -EINVAL;
 642			goto fail_address_parse;
 643		}
 644	}
 645
 646	/* Parse uprobe offset. */
 647	ret = kstrtoul(arg, 0, &offset);
 648	if (ret) {
 649		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
 650		goto fail_address_parse;
 651	}
 652
 653	/* setup a probe */
 654	trace_probe_log_set_index(0);
 655	if (event) {
 656		ret = traceprobe_parse_event_name(&event, &group, gbuf,
 657						  event - argv[0]);
 658		if (ret)
 659			goto fail_address_parse;
 660	}
 661
 662	if (!event) {
 663		char *tail;
 664		char *ptr;
 665
 666		tail = kstrdup(kbasename(filename), GFP_KERNEL);
 667		if (!tail) {
 668			ret = -ENOMEM;
 669			goto fail_address_parse;
 670		}
 671
 672		ptr = strpbrk(tail, ".-_");
 673		if (ptr)
 674			*ptr = '\0';
 675
 676		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
 677		event = buf;
 678		kfree(tail);
 679	}
 680
 681	argc -= 2;
 682	argv += 2;
 683
 684	tu = alloc_trace_uprobe(group, event, argc, is_return);
 685	if (IS_ERR(tu)) {
 686		ret = PTR_ERR(tu);
 687		/* This can only fail with -ENOMEM; anything else is a bug */
 688		WARN_ON_ONCE(ret != -ENOMEM);
 689		goto fail_address_parse;
 690	}
 691	tu->offset = offset;
 692	tu->ref_ctr_offset = ref_ctr_offset;
 693	tu->path = path;
 694	tu->filename = filename;
 695
 696	/* parse arguments */
 697	for (i = 0; i < argc; i++) {
 698		struct traceprobe_parse_context ctx = {
 699			.flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER,
 700		};
 701
 702		trace_probe_log_set_index(i + 2);
 703		ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], &ctx);
 704		traceprobe_finish_parse(&ctx);
 705		if (ret)
 706			goto error;
 707	}
 708
 709	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
 710	ret = traceprobe_set_print_fmt(&tu->tp, ptype);
 711	if (ret < 0)
 712		goto error;
 713
 714	ret = register_trace_uprobe(tu);
 715	if (!ret)
 716		goto out;
 717
 718error:
 719	free_trace_uprobe(tu);
 720out:
 721	trace_probe_log_clear();
 722	return ret;
 723
 724fail_address_parse:
 725	trace_probe_log_clear();
 726	path_put(&path);
 727	kfree(filename);
 728
 729	return ret;
 730}
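/*
 * This parser is normally reached by writing a definition to the tracefs
 * uprobe_events file, e.g. (path and offset are hypothetical):
 *
 *   echo 'p:mygrp/myevent /bin/bash:0x4245c0' >> /sys/kernel/tracing/uprobe_events
 */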
 731
 732int trace_uprobe_create(const char *raw_command)
 733{
 734	return trace_probe_create(raw_command, __trace_uprobe_create);
 735}
 736
 737static int create_or_delete_trace_uprobe(const char *raw_command)
 738{
 739	int ret;
 740
 741	if (raw_command[0] == '-')
 742		return dyn_event_release(raw_command, &trace_uprobe_ops);
 743
 744	ret = trace_uprobe_create(raw_command);
 745	return ret == -ECANCELED ? -EINVAL : ret;
 746}
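/*
 * A leading '-' requests deletion instead of creation, e.g.
 * (event name hypothetical):
 *
 *   echo '-:mygrp/myevent' >> /sys/kernel/tracing/uprobe_events
 */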
 747
 748static int trace_uprobe_release(struct dyn_event *ev)
 749{
 750	struct trace_uprobe *tu = to_trace_uprobe(ev);
 751
 752	return unregister_trace_uprobe(tu);
 753}
 754
 755/* Probes listing interfaces */
 756static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
 757{
 758	struct trace_uprobe *tu = to_trace_uprobe(ev);
 759	char c = is_ret_probe(tu) ? 'r' : 'p';
 760	int i;
 761
 762	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
 763			trace_probe_name(&tu->tp), tu->filename,
 764			(int)(sizeof(void *) * 2), tu->offset);
 765
 766	if (tu->ref_ctr_offset)
 767		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);
 768
 769	for (i = 0; i < tu->tp.nr_args; i++)
 770		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
 771
 772	seq_putc(m, '\n');
 773	return 0;
 774}
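/*
 * Reading uprobe_events lists each probe back in creation syntax, e.g.
 * (values hypothetical):
 *
 *   p:mygrp/myevent /bin/bash:0x00000000004245c0(0x10203) arg1=%ax
 */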
 775
 776static int probes_seq_show(struct seq_file *m, void *v)
 777{
 778	struct dyn_event *ev = v;
 779
 780	if (!is_trace_uprobe(ev))
 781		return 0;
 782
 783	return trace_uprobe_show(m, ev);
 784}
 785
 786static const struct seq_operations probes_seq_op = {
 787	.start  = dyn_event_seq_start,
 788	.next   = dyn_event_seq_next,
 789	.stop   = dyn_event_seq_stop,
 790	.show   = probes_seq_show
 791};
 792
 793static int probes_open(struct inode *inode, struct file *file)
 794{
 795	int ret;
 796
 797	ret = security_locked_down(LOCKDOWN_TRACEFS);
 798	if (ret)
 799		return ret;
 800
 801	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 802		ret = dyn_events_release_all(&trace_uprobe_ops);
 803		if (ret)
 804			return ret;
 805	}
 806
 807	return seq_open(file, &probes_seq_op);
 808}
 809
 810static ssize_t probes_write(struct file *file, const char __user *buffer,
 811			    size_t count, loff_t *ppos)
 812{
 813	return trace_parse_run_command(file, buffer, count, ppos,
 814					create_or_delete_trace_uprobe);
 815}
 816
 817static const struct file_operations uprobe_events_ops = {
 818	.owner		= THIS_MODULE,
 819	.open		= probes_open,
 820	.read		= seq_read,
 821	.llseek		= seq_lseek,
 822	.release	= seq_release,
 823	.write		= probes_write,
 824};
 825
 826/* Probes profiling interfaces */
 827static int probes_profile_seq_show(struct seq_file *m, void *v)
 828{
 829	struct dyn_event *ev = v;
 830	struct trace_uprobe *tu;
 831	unsigned long nhits;
 832	int cpu;
 833
 834	if (!is_trace_uprobe(ev))
 835		return 0;
 836
 837	tu = to_trace_uprobe(ev);
 838
 839	nhits = 0;
 840	for_each_possible_cpu(cpu) {
 841		nhits += per_cpu(*tu->nhits, cpu);
 842	}
 843
 844	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
 845		   trace_probe_name(&tu->tp), nhits);
 846	return 0;
 847}
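/*
 * Each uprobe_profile line shows the probed file, the event name and the
 * hit count summed over all possible CPUs, e.g. (values hypothetical):
 *
 *   /bin/bash myevent                                           127
 */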
 848
 849static const struct seq_operations profile_seq_op = {
 850	.start  = dyn_event_seq_start,
 851	.next   = dyn_event_seq_next,
 852	.stop   = dyn_event_seq_stop,
 853	.show	= probes_profile_seq_show
 854};
 855
 856static int profile_open(struct inode *inode, struct file *file)
 857{
 858	int ret;
 859
 860	ret = security_locked_down(LOCKDOWN_TRACEFS);
 861	if (ret)
 862		return ret;
 863
 864	return seq_open(file, &profile_seq_op);
 865}
 866
 867static const struct file_operations uprobe_profile_ops = {
 868	.owner		= THIS_MODULE,
 869	.open		= profile_open,
 870	.read		= seq_read,
 871	.llseek		= seq_lseek,
 872	.release	= seq_release,
 873};
 874
 875struct uprobe_cpu_buffer {
 876	struct mutex mutex;
 877	void *buf;
 878	int dsize;
 879};
 880static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
 881static int uprobe_buffer_refcnt;
 882#define MAX_UCB_BUFFER_SIZE PAGE_SIZE
 883
 884static int uprobe_buffer_init(void)
 885{
 886	int cpu, err_cpu;
 887
 888	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
 889	if (uprobe_cpu_buffer == NULL)
 890		return -ENOMEM;
 891
 892	for_each_possible_cpu(cpu) {
 893		struct page *p = alloc_pages_node(cpu_to_node(cpu),
 894						  GFP_KERNEL, 0);
 895		if (p == NULL) {
 896			err_cpu = cpu;
 897			goto err;
 898		}
 899		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
 900		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
 901	}
 902
 903	return 0;
 904
 905err:
 906	for_each_possible_cpu(cpu) {
 907		if (cpu == err_cpu)
 908			break;
 909		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
 910	}
 911
 912	free_percpu(uprobe_cpu_buffer);
 913	return -ENOMEM;
 914}
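/*
 * One page (MAX_UCB_BUFFER_SIZE) is allocated on the node of each
 * possible CPU; on failure, every page allocated so far is unwound
 * before the per-cpu structure itself is freed.
 */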
 915
 916static int uprobe_buffer_enable(void)
 917{
 918	int ret = 0;
 919
 920	BUG_ON(!mutex_is_locked(&event_mutex));
 921
 922	if (uprobe_buffer_refcnt++ == 0) {
 923		ret = uprobe_buffer_init();
 924		if (ret < 0)
 925			uprobe_buffer_refcnt--;
 926	}
 927
 928	return ret;
 929}
 930
 931static void uprobe_buffer_disable(void)
 932{
 933	int cpu;
 934
 935	BUG_ON(!mutex_is_locked(&event_mutex));
 936
 937	if (--uprobe_buffer_refcnt == 0) {
 938		for_each_possible_cpu(cpu)
 939			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
 940							     cpu)->buf);
 941
 942		free_percpu(uprobe_cpu_buffer);
 943		uprobe_cpu_buffer = NULL;
 944	}
 945}
 946
 947static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
 948{
 949	struct uprobe_cpu_buffer *ucb;
 950	int cpu;
 951
 952	cpu = raw_smp_processor_id();
 953	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);
 954
 955	/*
 956	 * Use per-cpu buffers for fastest access, but the task might migrate
 957	 * between CPUs, so the mutex ensures sole access to the buffer.
 958	 */
 959	mutex_lock(&ucb->mutex);
 960
 961	return ucb;
 962}
 963
 964static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
 965{
 966	if (!ucb)
 967		return;
 968	mutex_unlock(&ucb->mutex);
 969}
 970
 971static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu,
 972						       struct pt_regs *regs,
 973						       struct uprobe_cpu_buffer **ucbp)
 974{
 975	struct uprobe_cpu_buffer *ucb;
 976	int dsize, esize;
 977
 978	if (*ucbp)
 979		return *ucbp;
 980
 981	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 982	dsize = __get_data_size(&tu->tp, regs, NULL);
 983
 984	ucb = uprobe_buffer_get();
 985	ucb->dsize = tu->tp.size + dsize;
 986
 987	if (WARN_ON_ONCE(ucb->dsize > MAX_UCB_BUFFER_SIZE)) {
 988		ucb->dsize = MAX_UCB_BUFFER_SIZE;
 989		dsize = MAX_UCB_BUFFER_SIZE - tu->tp.size;
 990	}
 991
 992	store_trace_args(ucb->buf, &tu->tp, regs, NULL, esize, dsize);
 993
 994	*ucbp = ucb;
 995	return ucb;
 996}
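/*
 * prepare_uprobe_buffer() is lazy and idempotent: the first consumer in a
 * dispatch grabs a per-cpu buffer and copies the fetched args once; a
 * second consumer (e.g. perf after ftrace) reuses the same buffer via
 * *ucbp instead of re-reading user memory.
 */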
 997
 998static void __uprobe_trace_func(struct trace_uprobe *tu,
 999				unsigned long func, struct pt_regs *regs,
1000				struct uprobe_cpu_buffer *ucb,
1001				struct trace_event_file *trace_file)
1002{
1003	struct uprobe_trace_entry_head *entry;
1004	struct trace_event_buffer fbuffer;
1005	void *data;
1006	int size, esize;
1007	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1008
1009	WARN_ON(call != trace_file->event_call);
1010
1011	if (trace_trigger_soft_disabled(trace_file))
1012		return;
1013
1014	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1015	size = esize + ucb->dsize;
1016	entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
1017	if (!entry)
1018		return;
1019
1020	if (is_ret_probe(tu)) {
1021		entry->vaddr[0] = func;
1022		entry->vaddr[1] = instruction_pointer(regs);
1023		data = DATAOF_TRACE_ENTRY(entry, true);
1024	} else {
1025		entry->vaddr[0] = instruction_pointer(regs);
1026		data = DATAOF_TRACE_ENTRY(entry, false);
1027	}
1028
1029	memcpy(data, ucb->buf, ucb->dsize);
1030
1031	trace_event_buffer_commit(&fbuffer);
1032}
1033
1034/* uprobe handler */
1035static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
1036			     struct uprobe_cpu_buffer **ucbp)
1037{
1038	struct event_file_link *link;
1039	struct uprobe_cpu_buffer *ucb;
1040
1041	if (is_ret_probe(tu))
1042		return 0;
1043
1044	ucb = prepare_uprobe_buffer(tu, regs, ucbp);
1045
1046	rcu_read_lock();
1047	trace_probe_for_each_link_rcu(link, &tu->tp)
1048		__uprobe_trace_func(tu, 0, regs, ucb, link->file);
1049	rcu_read_unlock();
1050
1051	return 0;
1052}
1053
1054static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
1055				 struct pt_regs *regs,
1056				 struct uprobe_cpu_buffer **ucbp)
1057{
1058	struct event_file_link *link;
1059	struct uprobe_cpu_buffer *ucb;
1060
1061	ucb = prepare_uprobe_buffer(tu, regs, ucbp);
1062
1063	rcu_read_lock();
1064	trace_probe_for_each_link_rcu(link, &tu->tp)
1065		__uprobe_trace_func(tu, func, regs, ucb, link->file);
1066	rcu_read_unlock();
1067}
1068
1069/* Event entry printers */
1070static enum print_line_t
1071print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
1072{
1073	struct uprobe_trace_entry_head *entry;
1074	struct trace_seq *s = &iter->seq;
1075	struct trace_uprobe *tu;
1076	u8 *data;
1077
1078	entry = (struct uprobe_trace_entry_head *)iter->ent;
1079	tu = trace_uprobe_primary_from_call(
1080		container_of(event, struct trace_event_call, event));
1081	if (unlikely(!tu))
1082		goto out;
1083
1084	if (is_ret_probe(tu)) {
1085		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
1086				 trace_probe_name(&tu->tp),
1087				 entry->vaddr[1], entry->vaddr[0]);
1088		data = DATAOF_TRACE_ENTRY(entry, true);
1089	} else {
1090		trace_seq_printf(s, "%s: (0x%lx)",
1091				 trace_probe_name(&tu->tp),
1092				 entry->vaddr[0]);
1093		data = DATAOF_TRACE_ENTRY(entry, false);
1094	}
1095
1096	if (trace_probe_print_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
1097		goto out;
1098
1099	trace_seq_putc(s, '\n');
1100
1101 out:
1102	return trace_handle_return(s);
1103}
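/*
 * The rendered line looks like (addresses hypothetical):
 *
 *   myevent: (0x4245c0)                 - entry probe: probed address
 *   myevent: (0x400540 <- 0x4245c0)     - return probe: return addr <- func
 */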
1104
1105typedef bool (*filter_func_t)(struct uprobe_consumer *self, struct mm_struct *mm);
1106
1107static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
1108{
1109	struct inode *inode = d_real_inode(tu->path.dentry);
1110	struct uprobe *uprobe;
1111
1112	tu->consumer.filter = filter;
1113	uprobe = uprobe_register(inode, tu->offset, tu->ref_ctr_offset, &tu->consumer);
1114	if (IS_ERR(uprobe))
1115		return PTR_ERR(uprobe);
1116
1117	tu->uprobe = uprobe;
1118	return 0;
1119}
1120
1121static void __probe_event_disable(struct trace_probe *tp)
1122{
1123	struct trace_uprobe *tu;
1124	bool sync = false;
1125
1126	tu = container_of(tp, struct trace_uprobe, tp);
1127	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1128
1129	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1130		if (!tu->uprobe)
1131			continue;
1132
1133		uprobe_unregister_nosync(tu->uprobe, &tu->consumer);
1134		sync = true;
1135		tu->uprobe = NULL;
1136	}
1137	if (sync)
1138		uprobe_unregister_sync();
1139}
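/*
 * Note that every sibling uprobe is torn down with the _nosync variant
 * first, and a single uprobe_unregister_sync() then waits for all
 * in-flight handlers, paying the synchronization cost once rather than
 * once per probe.
 */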
1140
1141static int probe_event_enable(struct trace_event_call *call,
1142			struct trace_event_file *file, filter_func_t filter)
1143{
1144	struct trace_probe *tp;
1145	struct trace_uprobe *tu;
1146	bool enabled;
1147	int ret;
1148
1149	tp = trace_probe_primary_from_call(call);
1150	if (WARN_ON_ONCE(!tp))
1151		return -ENODEV;
1152	enabled = trace_probe_is_enabled(tp);
1153
1154	/* This may also change "enabled" state */
1155	if (file) {
1156		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
1157			return -EINTR;
1158
1159		ret = trace_probe_add_file(tp, file);
1160		if (ret < 0)
1161			return ret;
1162	} else {
1163		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
1164			return -EINTR;
1165
1166		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
1167	}
1168
1169	tu = container_of(tp, struct trace_uprobe, tp);
1170	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
1171
1172	if (enabled)
1173		return 0;
1174
1175	ret = uprobe_buffer_enable();
1176	if (ret)
1177		goto err_flags;
1178
1179	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1180		ret = trace_uprobe_enable(tu, filter);
1181		if (ret) {
1182			__probe_event_disable(tp);
1183			goto err_buffer;
1184		}
1185	}
1186
1187	return 0;
1188
1189 err_buffer:
1190	uprobe_buffer_disable();
1191
1192 err_flags:
1193	if (file)
1194		trace_probe_remove_file(tp, file);
1195	else
1196		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1197
1198	return ret;
1199}
1200
1201static void probe_event_disable(struct trace_event_call *call,
1202				struct trace_event_file *file)
1203{
1204	struct trace_probe *tp;
1205
1206	tp = trace_probe_primary_from_call(call);
1207	if (WARN_ON_ONCE(!tp))
1208		return;
1209
1210	if (!trace_probe_is_enabled(tp))
1211		return;
1212
1213	if (file) {
1214		if (trace_probe_remove_file(tp, file) < 0)
1215			return;
1216
1217		if (trace_probe_is_enabled(tp))
1218			return;
1219	} else
1220		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
1221
1222	__probe_event_disable(tp);
1223	uprobe_buffer_disable();
1224}
1225
1226static int uprobe_event_define_fields(struct trace_event_call *event_call)
1227{
1228	int ret, size;
1229	struct uprobe_trace_entry_head field;
1230	struct trace_uprobe *tu;
1231
1232	tu = trace_uprobe_primary_from_call(event_call);
1233	if (unlikely(!tu))
1234		return -ENODEV;
1235
1236	if (is_ret_probe(tu)) {
1237		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
1238		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
1239		size = SIZEOF_TRACE_ENTRY(true);
1240	} else {
1241		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
1242		size = SIZEOF_TRACE_ENTRY(false);
1243	}
1244
1245	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
1246}
1247
1248#ifdef CONFIG_PERF_EVENTS
1249static bool
1250__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
1251{
1252	struct perf_event *event;
1253
1254	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
1255		if (event->hw.target->mm == mm)
1256			return true;
1257	}
1258
1259	return false;
1260}
1261
1262static inline bool
1263trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
1264			  struct perf_event *event)
1265{
1266	return __uprobe_perf_filter(filter, event->hw.target->mm);
1267}
1268
1269static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
1270				       struct perf_event *event)
1271{
1272	bool done;
1273
1274	write_lock(&filter->rwlock);
1275	if (event->hw.target) {
1276		list_del(&event->hw.tp_list);
1277		done = filter->nr_systemwide ||
1278			(event->hw.target->flags & PF_EXITING) ||
1279			trace_uprobe_filter_event(filter, event);
1280	} else {
1281		filter->nr_systemwide--;
1282		done = filter->nr_systemwide;
1283	}
1284	write_unlock(&filter->rwlock);
1285
1286	return done;
1287}
1288
1289	/* Returns true if the filter always covers the target mm */
1290static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
1291				    struct perf_event *event)
1292{
1293	bool done;
1294
1295	write_lock(&filter->rwlock);
1296	if (event->hw.target) {
1297		/*
1298		 * event->parent != NULL means copy_process(), we can avoid
1299		 * uprobe_apply(). current->mm must be probed and we can rely
1300		 * on dup_mmap() which preserves the already installed bp's.
1301		 *
1302		 * attr.enable_on_exec means that exec/mmap will install the
1303		 * breakpoints we need.
1304		 */
1305		done = filter->nr_systemwide ||
1306			event->parent || event->attr.enable_on_exec ||
1307			trace_uprobe_filter_event(filter, event);
1308		list_add(&event->hw.tp_list, &filter->perf_events);
1309	} else {
1310		done = filter->nr_systemwide;
1311		filter->nr_systemwide++;
1312	}
1313	write_unlock(&filter->rwlock);
1314
1315	return done;
1316}
1317
1318static int uprobe_perf_close(struct trace_event_call *call,
1319			     struct perf_event *event)
1320{
1321	struct trace_probe *tp;
1322	struct trace_uprobe *tu;
1323	int ret = 0;
1324
1325	tp = trace_probe_primary_from_call(call);
1326	if (WARN_ON_ONCE(!tp))
1327		return -ENODEV;
1328
1329	tu = container_of(tp, struct trace_uprobe, tp);
1330	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
1331		return 0;
1332
1333	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1334		ret = uprobe_apply(tu->uprobe, &tu->consumer, false);
1335		if (ret)
1336			break;
1337	}
1338
1339	return ret;
1340}
1341
1342static int uprobe_perf_open(struct trace_event_call *call,
1343			    struct perf_event *event)
1344{
1345	struct trace_probe *tp;
1346	struct trace_uprobe *tu;
1347	int err = 0;
1348
1349	tp = trace_probe_primary_from_call(call);
1350	if (WARN_ON_ONCE(!tp))
1351		return -ENODEV;
1352
1353	tu = container_of(tp, struct trace_uprobe, tp);
1354	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
1355		return 0;
1356
1357	list_for_each_entry(tu, trace_probe_probe_list(tp), tp.list) {
1358		err = uprobe_apply(tu->uprobe, &tu->consumer, true);
1359		if (err) {
1360			uprobe_perf_close(call, event);
1361			break;
1362		}
1363	}
1364
1365	return err;
1366}
1367
1368static bool uprobe_perf_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
1369{
1370	struct trace_uprobe_filter *filter;
1371	struct trace_uprobe *tu;
1372	int ret;
1373
1374	tu = container_of(uc, struct trace_uprobe, consumer);
1375	filter = tu->tp.event->filter;
1376
1377	/*
1378	 * Speculative short-circuit: avoid taking filter->rwlock below when
1379	 * the uprobe has a system-wide consumer.
1380	 */
1381	if (READ_ONCE(filter->nr_systemwide))
1382		return true;
1383
1384	read_lock(&filter->rwlock);
1385	ret = __uprobe_perf_filter(filter, mm);
1386	read_unlock(&filter->rwlock);
1387
1388	return ret;
1389}
1390
1391static void __uprobe_perf_func(struct trace_uprobe *tu,
1392			       unsigned long func, struct pt_regs *regs,
1393			       struct uprobe_cpu_buffer **ucbp)
1394{
1395	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1396	struct uprobe_trace_entry_head *entry;
1397	struct uprobe_cpu_buffer *ucb;
1398	struct hlist_head *head;
1399	void *data;
1400	int size, esize;
1401	int rctx;
1402
1403#ifdef CONFIG_BPF_EVENTS
1404	if (bpf_prog_array_valid(call)) {
1405		const struct bpf_prog_array *array;
1406		u32 ret;
1407
1408		rcu_read_lock_trace();
1409		array = rcu_dereference_check(call->prog_array, rcu_read_lock_trace_held());
1410		ret = bpf_prog_run_array_uprobe(array, regs, bpf_prog_run);
1411		rcu_read_unlock_trace();
1412		if (!ret)
1413			return;
1414	}
1415#endif /* CONFIG_BPF_EVENTS */
1416
1417	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
1418
1419	ucb = prepare_uprobe_buffer(tu, regs, ucbp);
1420	size = esize + ucb->dsize;
1421	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
1422	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
1423		return;
1424
1425	preempt_disable();
1426	head = this_cpu_ptr(call->perf_events);
1427	if (hlist_empty(head))
1428		goto out;
1429
1430	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1431	if (!entry)
1432		goto out;
1433
1434	if (is_ret_probe(tu)) {
1435		entry->vaddr[0] = func;
1436		entry->vaddr[1] = instruction_pointer(regs);
1437		data = DATAOF_TRACE_ENTRY(entry, true);
1438	} else {
1439		entry->vaddr[0] = instruction_pointer(regs);
1440		data = DATAOF_TRACE_ENTRY(entry, false);
1441	}
1442
1443	memcpy(data, ucb->buf, ucb->dsize);
1444
1445	if (size - esize > ucb->dsize)
1446		memset(data + ucb->dsize, 0, size - esize - ucb->dsize);
 
 
 
1447
1448	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1449			      head, NULL);
1450 out:
1451	preempt_enable();
1452}
1453
1454/* uprobe profile handler */
1455static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
1456			    struct uprobe_cpu_buffer **ucbp)
1457{
1458	if (!uprobe_perf_filter(&tu->consumer, current->mm))
1459		return UPROBE_HANDLER_REMOVE;
1460
1461	if (!is_ret_probe(tu))
1462		__uprobe_perf_func(tu, 0, regs, ucbp);
1463	return 0;
1464}
1465
1466static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
1467				struct pt_regs *regs,
1468				struct uprobe_cpu_buffer **ucbp)
1469{
1470	__uprobe_perf_func(tu, func, regs, ucbp);
1471}
1472
1473int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
1474			const char **filename, u64 *probe_offset,
1475			u64 *probe_addr, bool perf_type_tracepoint)
1476{
1477	const char *pevent = trace_event_name(event->tp_event);
1478	const char *group = event->tp_event->class->system;
1479	struct trace_uprobe *tu;
1480
1481	if (perf_type_tracepoint)
1482		tu = find_probe_event(pevent, group);
1483	else
1484		tu = trace_uprobe_primary_from_call(event->tp_event);
1485	if (!tu)
1486		return -EINVAL;
1487
1488	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
1489				    : BPF_FD_TYPE_UPROBE;
1490	*filename = tu->filename;
1491	*probe_offset = tu->offset;
1492	*probe_addr = 0;
1493	return 0;
1494}
1495#endif	/* CONFIG_PERF_EVENTS */
1496
1497static int
1498trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
1499		      void *data)
1500{
1501	struct trace_event_file *file = data;
1502
1503	switch (type) {
1504	case TRACE_REG_REGISTER:
1505		return probe_event_enable(event, file, NULL);
1506
1507	case TRACE_REG_UNREGISTER:
1508		probe_event_disable(event, file);
1509		return 0;
1510
1511#ifdef CONFIG_PERF_EVENTS
1512	case TRACE_REG_PERF_REGISTER:
1513		return probe_event_enable(event, NULL, uprobe_perf_filter);
1514
1515	case TRACE_REG_PERF_UNREGISTER:
1516		probe_event_disable(event, NULL);
1517		return 0;
1518
1519	case TRACE_REG_PERF_OPEN:
1520		return uprobe_perf_open(event, data);
1521
1522	case TRACE_REG_PERF_CLOSE:
1523		return uprobe_perf_close(event, data);
1524
1525#endif
1526	default:
1527		return 0;
1528	}
1529}
1530
1531static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs,
1532			     __u64 *data)
1533{
1534	struct trace_uprobe *tu;
1535	struct uprobe_dispatch_data udd;
1536	struct uprobe_cpu_buffer *ucb = NULL;
1537	int ret = 0;
1538
1539	tu = container_of(con, struct trace_uprobe, consumer);
1540
1541	this_cpu_inc(*tu->nhits);
1542
1543	udd.tu = tu;
1544	udd.bp_addr = instruction_pointer(regs);
1545
1546	current->utask->vaddr = (unsigned long) &udd;
1547
1548	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1549		return 0;
1550
1551	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1552		ret |= uprobe_trace_func(tu, regs, &ucb);
1553
1554#ifdef CONFIG_PERF_EVENTS
1555	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1556		ret |= uprobe_perf_func(tu, regs, &ucb);
1557#endif
1558	uprobe_buffer_put(ucb);
1559	return ret;
1560}
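/*
 * The trace and perf paths above share one lazily-prepared per-cpu
 * buffer (ucb): it is acquired on first use inside either handler and
 * released exactly once here.
 */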
1561
1562static int uretprobe_dispatcher(struct uprobe_consumer *con,
1563				unsigned long func, struct pt_regs *regs,
1564				__u64 *data)
1565{
1566	struct trace_uprobe *tu;
1567	struct uprobe_dispatch_data udd;
1568	struct uprobe_cpu_buffer *ucb = NULL;
1569
1570	tu = container_of(con, struct trace_uprobe, consumer);
1571
1572	udd.tu = tu;
1573	udd.bp_addr = func;
1574
1575	current->utask->vaddr = (unsigned long) &udd;
1576
1577	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1578		return 0;
1579
1580	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
1581		uretprobe_trace_func(tu, func, regs, &ucb);
1582
1583#ifdef CONFIG_PERF_EVENTS
1584	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
1585		uretprobe_perf_func(tu, func, regs, &ucb);
1586#endif
1587	uprobe_buffer_put(ucb);
1588	return 0;
1589}
1590
1591static struct trace_event_functions uprobe_funcs = {
1592	.trace		= print_uprobe_event
1593};
1594
1595static struct trace_event_fields uprobe_fields_array[] = {
1596	{ .type = TRACE_FUNCTION_TYPE,
1597	  .define_fields = uprobe_event_define_fields },
1598	{}
1599};
1600
1601static inline void init_trace_event_call(struct trace_uprobe *tu)
1602{
1603	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
1604	call->event.funcs = &uprobe_funcs;
1605	call->class->fields_array = uprobe_fields_array;
1606
1607	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
1608	call->class->reg = trace_uprobe_register;
1609}
1610
1611static int register_uprobe_event(struct trace_uprobe *tu)
1612{
1613	init_trace_event_call(tu);
1614
1615	return trace_probe_register_event_call(&tu->tp);
1616}
1617
1618static int unregister_uprobe_event(struct trace_uprobe *tu)
1619{
1620	return trace_probe_unregister_event_call(&tu->tp);
1621}
1622
1623#ifdef CONFIG_PERF_EVENTS
1624struct trace_event_call *
1625create_local_trace_uprobe(char *name, unsigned long offs,
1626			  unsigned long ref_ctr_offset, bool is_return)
1627{
1628	enum probe_print_type ptype;
1629	struct trace_uprobe *tu;
1630	struct path path;
1631	int ret;
1632
1633	ret = kern_path(name, LOOKUP_FOLLOW, &path);
1634	if (ret)
1635		return ERR_PTR(ret);
1636
1637	if (!d_is_reg(path.dentry)) {
1638		path_put(&path);
1639		return ERR_PTR(-EINVAL);
1640	}
1641
1642	/*
1643	 * local trace_uprobes are not added to dyn_event, so they are never
1644	 * searched in find_probe_event(). Therefore, there is no concern of
1645	 * a duplicated name "DUMMY_EVENT" here.
1646	 */
1647	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
1648				is_return);
1649
1650	if (IS_ERR(tu)) {
1651		pr_info("Failed to allocate trace_uprobe.(%d)\n",
1652			(int)PTR_ERR(tu));
1653		path_put(&path);
1654		return ERR_CAST(tu);
1655	}
1656
1657	tu->offset = offs;
1658	tu->path = path;
1659	tu->ref_ctr_offset = ref_ctr_offset;
1660	tu->filename = kstrdup(name, GFP_KERNEL);
1661	if (!tu->filename) {
1662		ret = -ENOMEM;
1663		goto error;
1664	}
1665
1666	init_trace_event_call(tu);
1667
1668	ptype = is_ret_probe(tu) ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1669	if (traceprobe_set_print_fmt(&tu->tp, ptype) < 0) {
1670		ret = -ENOMEM;
1671		goto error;
1672	}
1673
1674	return trace_probe_event_call(&tu->tp);
1675error:
1676	free_trace_uprobe(tu);
1677	return ERR_PTR(ret);
1678}
1679
1680void destroy_local_trace_uprobe(struct trace_event_call *event_call)
1681{
1682	struct trace_uprobe *tu;
1683
1684	tu = trace_uprobe_primary_from_call(event_call);
1685
1686	free_trace_uprobe(tu);
1687}
1688#endif /* CONFIG_PERF_EVENTS */
1689
1690/* Make a trace interface for controlling probe points */
1691static __init int init_uprobe_trace(void)
1692{
1693	int ret;
1694
1695	ret = dyn_event_register(&trace_uprobe_ops);
1696	if (ret)
1697		return ret;
1698
1699	ret = tracing_init_dentry();
1700	if (ret)
1701		return 0;
1702
1703	trace_create_file("uprobe_events", TRACE_MODE_WRITE, NULL,
1704				    NULL, &uprobe_events_ops);
1705	/* Profile interface */
1706	trace_create_file("uprobe_profile", TRACE_MODE_READ, NULL,
1707				    NULL, &uprobe_profile_ops);
1708	return 0;
1709}
1710
1711fs_initcall(init_uprobe_trace);
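/*
 * Once initialized, the files created above are used roughly as follows
 * (assuming tracefs is mounted at /sys/kernel/tracing; the event name,
 * path and offset are hypothetical):
 *
 *   echo 'p:myevent /bin/bash:0x4245c0' >> /sys/kernel/tracing/uprobe_events
 *   echo 1 > /sys/kernel/tracing/events/uprobes/myevent/enable
 *   cat /sys/kernel/tracing/uprobe_profile
 */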