/*
 * uprobes-based tracing events
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>

#include "trace_probe.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
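
/*
 * Layout note (from the definitions above): an entry probe records one
 * address (the probed instruction pointer) in vaddr[0]; a return probe
 * records the function entry address in vaddr[0] and the return address
 * in vaddr[1]. Fetched argument data starts right after the vaddr
 * slots, which is what DATAOF_TRACE_ENTRY() computes.
 */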

struct trace_uprobe_filter {
	rwlock_t		rwlock;
	int			nr_systemwide;
	struct list_head	perf_events;
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct list_head		list;
	struct trace_uprobe_filter	filter;
	struct uprobe_consumer		consumer;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
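
/*
 * The two helpers above back the "$stackN" fetch argument (see
 * Documentation/trace/uprobetracer.txt): they read the n-th word from
 * the probed task's user stack, walking in the direction the
 * architecture's stack grows (CONFIG_STACK_GROWSUP). A faulting access
 * is reported as 0 rather than as an error.
 */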

/*
 * Uprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
					  void *offset, void *dest)	\
{									\
	*(type *)dest = (type)get_user_stack_nth(regs,			\
					      ((unsigned long)offset)); \
}
DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
						void *addr, void *dest) \
{									\
	type retval;							\
	void __user *vaddr = (void __force __user *) addr;		\
									\
	if (copy_from_user(&retval, vaddr, sizeof(type)))		\
		*(type *)dest = 0;					\
	else								\
		*(type *) dest = retval;				\
}
DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
						      void *addr, void *dest)
{
	long ret;
	u32 rloc = *(u32 *)dest;
	int maxlen  = get_rloc_len(rloc);
	u8 *dst = get_rloc_data(dest);
	void __user *src = (void __force __user *) addr;

	if (!maxlen)
		return;

	ret = strncpy_from_user(dst, src, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		((u8 *)get_rloc_data(dest))[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(rloc));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(rloc));
	}
}

static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						      void *addr, void *dest)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	len = strnlen_user(vaddr, MAX_STRING_SIZE);

	if (len == 0 || len > MAX_STRING_SIZE)  /* Failed to check length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}

static unsigned long translate_user_vaddr(void *file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + (unsigned long)file_offset;
}

#define DEFINE_FETCH_file_offset(type)					\
static __kprobes void FETCH_FUNC_NAME(file_offset, type)(struct pt_regs *regs,\
					void *offset, void *dest)	\
{									\
	void *vaddr = (void *)translate_user_vaddr(offset);		\
									\
	FETCH_FUNC_NAME(memory, type)(regs, vaddr, dest);		\
}
DEFINE_BASIC_FETCH_FUNCS(file_offset)
DEFINE_FETCH_file_offset(string)
DEFINE_FETCH_file_offset(string_size)

/* Fetch type information table */
const struct fetch_type uprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),

	ASSIGN_FETCH_TYPE_END
};
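
/*
 * The table above maps the type names accepted in FETCHARGS (u8..u64,
 * s8..s64, string) to their fetch and print implementations, while the
 * DEFINE_FETCH_* helpers supply the per-method accessors (stack slot,
 * user memory, or file offset). See Documentation/trace/uprobetracer.txt
 * for the full argument syntax.
 */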

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;

	if (!event || !is_good_name(event))
		return ERR_PTR(-EINVAL);

	if (!group || !is_good_name(group))
		return ERR_PTR(-EINVAL);

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	tu->tp.call.class = &tu->tp.class;
	tu->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tu->tp.call.name)
		goto error;

	tu->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tu->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tu->list);
	INIT_LIST_HEAD(&tu->tp.files);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(&tu->filter);
	tu->tp.call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER;
	return tu;

error:
	kfree(tu->tp.call.name);
	kfree(tu);

	return ERR_PTR(-ENOMEM);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	int i;

	for (i = 0; i < tu->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tu->tp.args[i]);

	iput(tu->inode);
	kfree(tu->tp.call.class->system);
	kfree(tu->tp.call.name);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct trace_uprobe *tu;

	list_for_each_entry(tu, &uprobe_list, list)
		if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 &&
		    strcmp(tu->tp.call.class->system, group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

	list_del(&tu->list);
	free_trace_uprobe(tu);
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&uprobe_lock);

	/* register as an event */
	old_tu = find_probe_event(ftrace_event_name(&tu->tp.call),
			tu->tp.call.class->system);
	if (old_tu) {
		/* delete old event */
		ret = unregister_trace_uprobe(old_tu);
		if (ret)
			goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	list_add_tail(&tu->list, &uprobe_list);

end:
	mutex_unlock(&uprobe_lock);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 *
 *  - Remove uprobe: -:[GRP/]EVENT
 */
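/*
 * For example, via the uprobe_events control file in tracefs (typically
 * /sys/kernel/debug/tracing/uprobe_events; the event name and offset
 * below are illustrative only):
 *
 *   echo 'p:my_event /bin/bash:0x4245c0' >> uprobe_events
 *   echo '-:my_event' >> uprobe_events
 */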
static int create_trace_uprobe(int argc, char **argv)
{
	struct trace_uprobe *tu;
	struct inode *inode;
	char *arg, *event, *group, *filename;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset;
	bool is_delete, is_return;
	int i, ret;

	inode = NULL;
	ret = 0;
	is_delete = false;
	is_return = false;
	event = NULL;
	group = NULL;

	/* argc must be >= 1 */
	if (argv[0][0] == '-')
		is_delete = true;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] != 'p') {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		arg = strchr(event, '/');

		if (arg) {
			group = event;
			event = arg + 1;
			event[-1] = '\0';

			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = UPROBE_EVENT_SYSTEM;

	if (is_delete) {
		int ret;

		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&uprobe_lock);
		tu = find_probe_event(event, group);

		if (!tu) {
			mutex_unlock(&uprobe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_uprobe(tu);
		mutex_unlock(&uprobe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		pr_info("probe point must have a filename.\n");
		return -EINVAL;
	}
	arg = strchr(argv[1], ':');
	if (!arg) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	*arg++ = '\0';
	filename = argv[1];
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret)
		goto fail_address_parse;

	inode = igrab(path.dentry->d_inode);
	path_put(&path);

	if (!inode || !S_ISREG(inode->i_mode)) {
		ret = -EINVAL;
		goto fail_address_parse;
	}

	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;

	argc -= 2;
	argv += 2;

	/* setup a probe */
	if (!event) {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu));
		ret = PTR_ERR(tu);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->inode = inode;
	tu->filename = kstrdup(filename, GFP_KERNEL);

	if (!tu->filename) {
		pr_info("Failed to allocate filename.\n");
		ret = -ENOMEM;
		goto error;
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		/* Increment count for freeing args in error case */
		tu->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n", i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name, tu->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tu->tp.size, parg,
						 is_return, false);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_uprobe(tu);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_uprobe(tu);
	return ret;

fail_address_parse:
	if (inode)
		iput(inode);

	pr_info("Failed to parse address or file.\n");

	return ret;
}

static int cleanup_all_probes(void)
{
	struct trace_uprobe *tu;
	int ret = 0;

	mutex_lock(&uprobe_lock);
	while (!list_empty(&uprobe_list)) {
		tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
		ret = unregister_trace_uprobe(tu);
		if (ret)
			break;
	}
	mutex_unlock(&uprobe_lock);
	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&uprobe_lock);
	return seq_list_start(&uprobe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &uprobe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&uprobe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
			ftrace_event_name(&tu->tp.call));
	seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_printf(m, "\n");
	return 0;
}
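
/*
 * Each registered probe shows up as one line when reading
 * uprobe_events, for example (values are illustrative only):
 *
 *   p:uprobes/p_bash_0x4245c0 /bin/bash:0x00000000004245c0 arg1=%ax
 *
 * i.e. type:group/event, then file:offset, then the raw FETCHARGS.
 */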

static const struct seq_operations probes_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = cleanup_all_probes();
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos, create_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_uprobe *tu = v;

	seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
			ftrace_event_name(&tu->tp.call), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= probes_seq_start,
	.next	= probes_seq_next,
	.stop	= probes_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}
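
/*
 * Buffer lifecycle, as implemented above: uprobe_buffer_init()
 * allocates one page per possible CPU, refcounted by
 * uprobe_buffer_enable()/_disable() under event_mutex. Handlers grab
 * the current CPU's page via uprobe_buffer_get(); the per-buffer mutex
 * keeps the buffer owned by one handler even if the task migrates
 * before uprobe_buffer_put().
 */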

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct ftrace_event_file *ftrace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct ftrace_event_call *call = &tu->tp.call;

	WARN_ON(call != ftrace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (ftrace_trigger_soft_disabled(ftrace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	list_for_each_entry_rcu(link, &tu->tp.files, list)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;
	int i;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = container_of(event, struct trace_uprobe, tp.call.event);

	if (is_ret_probe(tu)) {
		if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
					ftrace_event_name(&tu->tp.call),
					entry->vaddr[1], entry->vaddr[0]))
			goto partial;
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		if (!trace_seq_printf(s, "%s: (0x%lx)",
					ftrace_event_name(&tu->tp.call),
					entry->vaddr[0]))
			goto partial;
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		if (!parg->type->print(s, parg->name, data + parg->offset, entry))
			goto partial;
	}

	if (trace_seq_puts(s, "\n"))
		return TRACE_TYPE_HANDLED;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int
probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
		   filter_func_t filter)
{
	bool enabled = trace_probe_is_enabled(&tu->tp);
	struct event_file_link *link = NULL;
	int ret;

	if (file) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link)
			return -ENOMEM;

		link->file = file;
		list_add_tail_rcu(&link->list, &tu->tp.files);

		tu->tp.flags |= TP_FLAG_TRACE;
	} else
		tu->tp.flags |= TP_FLAG_PROFILE;

	ret = uprobe_buffer_enable();
	if (ret < 0)
		return ret;

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	if (enabled)
		return 0;

	tu->consumer.filter = filter;
	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
	if (ret) {
		if (file) {
			list_del(&link->list);
			kfree(link);
			tu->tp.flags &= ~TP_FLAG_TRACE;
		} else
			tu->tp.flags &= ~TP_FLAG_PROFILE;
	}

	return ret;
}

static void
probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file)
{
	if (!trace_probe_is_enabled(&tu->tp))
		return;

	if (file) {
		struct event_file_link *link;

		link = find_event_file_link(&tu->tp, file);
		if (!link)
			return;

		list_del_rcu(&link->list);
		/* synchronize with u{,ret}probe_trace_func */
		synchronize_sched();
		kfree(link);

		if (!list_empty(&tu->tp.files))
			return;
	}

	WARN_ON(!uprobe_filter_is_empty(&tu->filter));

	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;

	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu = event_call->data;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}
	/* Set argument names as fields */
	for (i = 0; i < tu->tp.nr_args; i++) {
		struct probe_arg *parg = &tu->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name, size + parg->offset,
					 parg->type->size, parg->type->is_signed,
					 FILTER_OTHER);

		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.tp_target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
{
	return __uprobe_perf_filter(&tu->filter, event->hw.tp_target->mm);
}

static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.tp_target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = tu->filter.nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			uprobe_filter_event(tu, event);
		list_add(&event->hw.tp_list, &tu->filter.perf_events);
	} else {
		done = tu->filter.nr_systemwide;
		tu->filter.nr_systemwide++;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);

	return 0;
}

static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
{
	bool done;

	write_lock(&tu->filter.rwlock);
	if (event->hw.tp_target) {
		list_del(&event->hw.tp_list);
		done = tu->filter.nr_systemwide ||
			(event->hw.tp_target->flags & PF_EXITING) ||
			uprobe_filter_event(tu, event);
	} else {
		tu->filter.nr_systemwide--;
		done = tu->filter.nr_systemwide;
	}
	write_unlock(&tu->filter.rwlock);

	if (!done)
		uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);

	return 0;
}
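
/*
 * In uprobe_perf_open()/uprobe_perf_close() above, "done" means the
 * breakpoints are already in the desired state for this filter change
 * (e.g. another event covers the same mm, or a system-wide event is
 * active), so the relatively expensive uprobe_apply() pass over the
 * mappings can be skipped.
 */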

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	read_lock(&tu->filter.rwlock);
	ret = __uprobe_perf_filter(&tu->filter, mm);
	read_unlock(&tu->filter.rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct ftrace_event_call *call = &tu->tp.call;
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_uprobe *tu = event->data;
	struct ftrace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(tu, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(tu, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(tu, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(tu, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(tu, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(tu, data);

#endif
	default:
		return 0;
	}
	return 0;
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

#ifdef CONFIG_PERF_EVENTS
	if ((tu->tp.flags & TP_FLAG_TRACE) == 0 &&
	    !uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;
#endif

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);

	if (tu->tp.flags & TP_FLAG_TRACE)
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (tu->tp.flags & TP_FLAG_PROFILE)
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}
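
/*
 * Note the asymmetry between the two dispatchers: only the entry
 * dispatcher can return UPROBE_HANDLER_REMOVE, since the perf filter
 * was already consulted at function entry before the corresponding
 * return probe could fire.
 */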

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static int register_uprobe_event(struct trace_uprobe *tu)
{
	struct ftrace_event_call *call = &tu->tp.call;
	int ret;

	/* Initialize ftrace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	if (set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0)
		return -ENOMEM;

	ret = register_ftrace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = 0;
	call->class->reg = trace_uprobe_register;
	call->data = tu;
	ret = trace_add_event_call(call);

	if (ret) {
		pr_info("Failed to register uprobe event: %s\n",
			ftrace_event_name(call));
		kfree(call->print_fmt);
		unregister_ftrace_event(&call->event);
	}

	return ret;
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	int ret;

	/* tu->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tu->tp.call);
	if (ret)
		return ret;
	kfree(tu->tp.call.print_fmt);
	tu->tp.call.print_fmt = NULL;
	return 0;
}
/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);