v4.6
   1/*
   2 * Kprobes-based tracing events
   3 *
   4 * Created by Masami Hiramatsu <mhiramat@redhat.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/uaccess.h>
  22
  23#include "trace_probe.h"
  24
  25#define KPROBE_EVENT_SYSTEM "kprobes"
  26
   27/*
  28 * Kprobe event core functions
  29 */
  30struct trace_kprobe {
  31	struct list_head	list;
  32	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
  33	unsigned long __percpu *nhit;
  34	const char		*symbol;	/* symbol name */
  35	struct trace_probe	tp;
  36};
  37
  38#define SIZEOF_TRACE_KPROBE(n)				\
  39	(offsetof(struct trace_kprobe, tp.args) +	\
  40	(sizeof(struct probe_arg) * (n)))
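/*
 * Note: tp.args is a flexible array at the end of struct trace_probe, so
 * the size of a trace_kprobe carrying n arguments is the offset of args
 * plus room for n probe_arg slots, as computed above.
 */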
  41
  42
  43static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
  44{
  45	return tk->rp.handler != NULL;
  46}
  47
  48static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
  49{
  50	return tk->symbol ? tk->symbol : "unknown";
  51}
  52
  53static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
  54{
  55	return tk->rp.kp.offset;
  56}
  57
  58static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
  59{
  60	return !!(kprobe_gone(&tk->rp.kp));
  61}
  62
  63static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
  64						 struct module *mod)
  65{
  66	int len = strlen(mod->name);
  67	const char *name = trace_kprobe_symbol(tk);
  68	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
  69}
  70
  71static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
  72{
  73	return !!strchr(trace_kprobe_symbol(tk), ':');
  74}
  75
  76static int register_kprobe_event(struct trace_kprobe *tk);
  77static int unregister_kprobe_event(struct trace_kprobe *tk);
  78
  79static DEFINE_MUTEX(probe_lock);
  80static LIST_HEAD(probe_list);
  81
  82static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
  83static int kretprobe_dispatcher(struct kretprobe_instance *ri,
  84				struct pt_regs *regs);
  85
  86/* Memory fetching by symbol */
  87struct symbol_cache {
  88	char		*symbol;
  89	long		offset;
  90	unsigned long	addr;
  91};
  92
  93unsigned long update_symbol_cache(struct symbol_cache *sc)
  94{
  95	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
  96
  97	if (sc->addr)
  98		sc->addr += sc->offset;
  99
 100	return sc->addr;
 101}
 102
 103void free_symbol_cache(struct symbol_cache *sc)
 104{
 105	kfree(sc->symbol);
 106	kfree(sc);
 107}
 108
 109struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
 110{
 111	struct symbol_cache *sc;
 112
 113	if (!sym || strlen(sym) == 0)
 114		return NULL;
 115
 116	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
 117	if (!sc)
 118		return NULL;
 119
 120	sc->symbol = kstrdup(sym, GFP_KERNEL);
 121	if (!sc->symbol) {
 122		kfree(sc);
 123		return NULL;
 124	}
 125	sc->offset = offset;
 126	update_symbol_cache(sc);
 127
 128	return sc;
 129}
 130
 131/*
 132 * Kprobes-specific fetch functions
 133 */
 134#define DEFINE_FETCH_stack(type)					\
 135static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
 136					  void *offset, void *dest)	\
 137{									\
 138	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
 139				(unsigned int)((unsigned long)offset));	\
 140}									\
 141NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));
 142
 143DEFINE_BASIC_FETCH_FUNCS(stack)
 144/* No string on the stack entry */
 145#define fetch_stack_string	NULL
 146#define fetch_stack_string_size	NULL
 147
 148#define DEFINE_FETCH_memory(type)					\
 149static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,		\
 150					  void *addr, void *dest)	\
 151{									\
 152	type retval;							\
 153	if (probe_kernel_address(addr, retval))				\
 154		*(type *)dest = 0;					\
 155	else								\
 156		*(type *)dest = retval;					\
 157}									\
 158NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));
 159
 160DEFINE_BASIC_FETCH_FUNCS(memory)
 161/*
 162 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 163 * length and relative data location.
 164 */
 165static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
 166					    void *addr, void *dest)
 167{
 168	int maxlen = get_rloc_len(*(u32 *)dest);
 169	u8 *dst = get_rloc_data(dest);
 170	long ret;
 171
 172	if (!maxlen)
 173		return;
 174
 175	/*
 176	 * Try to get string again, since the string can be changed while
 177	 * probing.
 178	 */
 179	ret = strncpy_from_unsafe(dst, addr, maxlen);
 180
 181	if (ret < 0) {	/* Failed to fetch string */
 182		dst[0] = '\0';
 183		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
 184	} else {
 185		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
 186	}
 187}
 188NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
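/*
 * Encoding note (see trace_probe.h): make_data_rloc() packs the fetched
 * length into the high 16 bits of the u32 and the offset of the data
 * within the record into the low 16 bits; get_rloc_len() and
 * get_rloc_offs() unpack them again.
 */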
 189
  190/* Return the length of the string -- including the null terminating byte */
 191static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
 192						 void *addr, void *dest)
 193{
 194	mm_segment_t old_fs;
 195	int ret, len = 0;
 196	u8 c;
 197
 198	old_fs = get_fs();
 199	set_fs(KERNEL_DS);
 200	pagefault_disable();
 201
 202	do {
 203		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
 204		len++;
 205	} while (c && ret == 0 && len < MAX_STRING_SIZE);
 206
 207	pagefault_enable();
 208	set_fs(old_fs);
 209
 210	if (ret < 0)	/* Failed to check the length */
 211		*(u32 *)dest = 0;
 212	else
 213		*(u32 *)dest = len;
 214}
 215NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));
 216
 217#define DEFINE_FETCH_symbol(type)					\
 218void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
 219{									\
 220	struct symbol_cache *sc = data;					\
 221	if (sc->addr)							\
 222		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
 223	else								\
 224		*(type *)dest = 0;					\
 225}									\
 226NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));
 227
 228DEFINE_BASIC_FETCH_FUNCS(symbol)
 229DEFINE_FETCH_symbol(string)
 230DEFINE_FETCH_symbol(string_size)
 231
 232/* kprobes don't support file_offset fetch methods */
 233#define fetch_file_offset_u8		NULL
 234#define fetch_file_offset_u16		NULL
 235#define fetch_file_offset_u32		NULL
 236#define fetch_file_offset_u64		NULL
 237#define fetch_file_offset_string	NULL
 238#define fetch_file_offset_string_size	NULL
 239
 240/* Fetch type information table */
 241static const struct fetch_type kprobes_fetch_type_table[] = {
 242	/* Special types */
 243	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
 244					sizeof(u32), 1, "__data_loc char[]"),
 245	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
 246					string_size, sizeof(u32), 0, "u32"),
 247	/* Basic types */
 248	ASSIGN_FETCH_TYPE(u8,  u8,  0),
 249	ASSIGN_FETCH_TYPE(u16, u16, 0),
 250	ASSIGN_FETCH_TYPE(u32, u32, 0),
 251	ASSIGN_FETCH_TYPE(u64, u64, 0),
 252	ASSIGN_FETCH_TYPE(s8,  u8,  1),
 253	ASSIGN_FETCH_TYPE(s16, u16, 1),
 254	ASSIGN_FETCH_TYPE(s32, u32, 1),
 255	ASSIGN_FETCH_TYPE(s64, u64, 1),
 256
 257	ASSIGN_FETCH_TYPE_END
 258};
 259
 260/*
 261 * Allocate new trace_probe and initialize it (including kprobes).
 262 */
 263static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 264					     const char *event,
 265					     void *addr,
 266					     const char *symbol,
 267					     unsigned long offs,
 268					     int nargs, bool is_return)
 269{
 270	struct trace_kprobe *tk;
 271	int ret = -ENOMEM;
 272
 273	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
 274	if (!tk)
 275		return ERR_PTR(ret);
 276
 277	tk->nhit = alloc_percpu(unsigned long);
 278	if (!tk->nhit)
 279		goto error;
 280
 281	if (symbol) {
 282		tk->symbol = kstrdup(symbol, GFP_KERNEL);
 283		if (!tk->symbol)
 284			goto error;
 285		tk->rp.kp.symbol_name = tk->symbol;
 286		tk->rp.kp.offset = offs;
 287	} else
 288		tk->rp.kp.addr = addr;
 289
 290	if (is_return)
 291		tk->rp.handler = kretprobe_dispatcher;
 292	else
 293		tk->rp.kp.pre_handler = kprobe_dispatcher;
 294
 295	if (!event || !is_good_name(event)) {
 296		ret = -EINVAL;
 297		goto error;
 298	}
 299
 300	tk->tp.call.class = &tk->tp.class;
 301	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
 302	if (!tk->tp.call.name)
 303		goto error;
 304
 305	if (!group || !is_good_name(group)) {
 306		ret = -EINVAL;
 307		goto error;
 308	}
 309
 310	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
 311	if (!tk->tp.class.system)
 312		goto error;
 313
 314	INIT_LIST_HEAD(&tk->list);
 315	INIT_LIST_HEAD(&tk->tp.files);
 316	return tk;
 317error:
 318	kfree(tk->tp.call.name);
 319	kfree(tk->symbol);
 320	free_percpu(tk->nhit);
 321	kfree(tk);
 322	return ERR_PTR(ret);
 323}
 324
 325static void free_trace_kprobe(struct trace_kprobe *tk)
 326{
 327	int i;
 328
 329	for (i = 0; i < tk->tp.nr_args; i++)
 330		traceprobe_free_probe_arg(&tk->tp.args[i]);
 331
 332	kfree(tk->tp.call.class->system);
 333	kfree(tk->tp.call.name);
 334	kfree(tk->symbol);
 335	free_percpu(tk->nhit);
 336	kfree(tk);
 337}
 338
 339static struct trace_kprobe *find_trace_kprobe(const char *event,
 340					      const char *group)
 341{
 342	struct trace_kprobe *tk;
 343
 344	list_for_each_entry(tk, &probe_list, list)
 345		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
 346		    strcmp(tk->tp.call.class->system, group) == 0)
 347			return tk;
 348	return NULL;
 349}
 350
 351/*
 352 * Enable trace_probe
  353 * If the file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
 354 */
 355static int
 356enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 357{
 358	int ret = 0;
 359
 360	if (file) {
 361		struct event_file_link *link;
 362
 363		link = kmalloc(sizeof(*link), GFP_KERNEL);
 364		if (!link) {
 365			ret = -ENOMEM;
 366			goto out;
 367		}
 368
 369		link->file = file;
 370		list_add_tail_rcu(&link->list, &tk->tp.files);
 371
 372		tk->tp.flags |= TP_FLAG_TRACE;
 373	} else
 374		tk->tp.flags |= TP_FLAG_PROFILE;
 375
 376	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
 377		if (trace_kprobe_is_return(tk))
 378			ret = enable_kretprobe(&tk->rp);
 379		else
 380			ret = enable_kprobe(&tk->rp.kp);
 381	}
 382 out:
 383	return ret;
 384}
 385
 386/*
 387 * Disable trace_probe
  388 * If the file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
 389 */
 390static int
 391disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 392{
 393	struct event_file_link *link = NULL;
 394	int wait = 0;
 395	int ret = 0;
 396
 397	if (file) {
 398		link = find_event_file_link(&tk->tp, file);
 399		if (!link) {
 400			ret = -EINVAL;
 401			goto out;
 402		}
 403
 404		list_del_rcu(&link->list);
 405		wait = 1;
 406		if (!list_empty(&tk->tp.files))
 407			goto out;
 408
 409		tk->tp.flags &= ~TP_FLAG_TRACE;
 410	} else
 411		tk->tp.flags &= ~TP_FLAG_PROFILE;
 412
 413	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
 414		if (trace_kprobe_is_return(tk))
 415			disable_kretprobe(&tk->rp);
 416		else
 417			disable_kprobe(&tk->rp.kp);
 418		wait = 1;
 419	}
 420 out:
 421	if (wait) {
 422		/*
 423		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
 424		 * to ensure disabled (all running handlers are finished).
 425		 * This is not only for kfree(), but also the caller,
 426		 * trace_remove_event_call() supposes it for releasing
 427		 * event_call related objects, which will be accessed in
 428		 * the kprobe_trace_func/kretprobe_trace_func.
 429		 */
 430		synchronize_sched();
 431		kfree(link);	/* Ignored if link == NULL */
 432	}
 433
 434	return ret;
 435}
 436
 437/* Internal register function - just handle k*probes and flags */
 438static int __register_trace_kprobe(struct trace_kprobe *tk)
 439{
 440	int i, ret;
 441
 442	if (trace_probe_is_registered(&tk->tp))
 443		return -EINVAL;
 444
 445	for (i = 0; i < tk->tp.nr_args; i++)
 446		traceprobe_update_arg(&tk->tp.args[i]);
 447
 448	/* Set/clear disabled flag according to tp->flag */
 449	if (trace_probe_is_enabled(&tk->tp))
 450		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
 451	else
 452		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
 453
 454	if (trace_kprobe_is_return(tk))
 455		ret = register_kretprobe(&tk->rp);
 456	else
 457		ret = register_kprobe(&tk->rp.kp);
 458
 459	if (ret == 0)
 460		tk->tp.flags |= TP_FLAG_REGISTERED;
 461	else {
 462		pr_warn("Could not insert probe at %s+%lu: %d\n",
 463			trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
 464		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
 465			pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
 466			ret = 0;
 467		} else if (ret == -EILSEQ) {
 468			pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
 469				tk->rp.kp.addr);
 470			ret = -EINVAL;
 471		}
 472	}
 473
 474	return ret;
 475}
 476
 477/* Internal unregister function - just handle k*probes and flags */
 478static void __unregister_trace_kprobe(struct trace_kprobe *tk)
 479{
 480	if (trace_probe_is_registered(&tk->tp)) {
 481		if (trace_kprobe_is_return(tk))
 482			unregister_kretprobe(&tk->rp);
 483		else
 484			unregister_kprobe(&tk->rp.kp);
 485		tk->tp.flags &= ~TP_FLAG_REGISTERED;
 486		/* Cleanup kprobe for reuse */
 487		if (tk->rp.kp.symbol_name)
 488			tk->rp.kp.addr = NULL;
 489	}
 490}
 491
  492/* Unregister a trace_probe and probe_event: call with probe_lock held */
 493static int unregister_trace_kprobe(struct trace_kprobe *tk)
 494{
  495	/* An enabled event cannot be unregistered */
 496	if (trace_probe_is_enabled(&tk->tp))
 497		return -EBUSY;
 498
 499	/* Will fail if probe is being used by ftrace or perf */
 500	if (unregister_kprobe_event(tk))
 501		return -EBUSY;
 502
 503	__unregister_trace_kprobe(tk);
 504	list_del(&tk->list);
 505
 506	return 0;
 507}
 508
 509/* Register a trace_probe and probe_event */
 510static int register_trace_kprobe(struct trace_kprobe *tk)
 511{
 512	struct trace_kprobe *old_tk;
 513	int ret;
 514
 515	mutex_lock(&probe_lock);
 516
  517	/* Delete the old event if one with the same name exists */
 518	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
 519			tk->tp.call.class->system);
 520	if (old_tk) {
 521		ret = unregister_trace_kprobe(old_tk);
 522		if (ret < 0)
 523			goto end;
 524		free_trace_kprobe(old_tk);
 525	}
 526
 527	/* Register new event */
 528	ret = register_kprobe_event(tk);
 529	if (ret) {
 530		pr_warn("Failed to register probe event(%d)\n", ret);
 531		goto end;
 532	}
 533
 534	/* Register k*probe */
 535	ret = __register_trace_kprobe(tk);
 536	if (ret < 0)
 537		unregister_kprobe_event(tk);
 538	else
 539		list_add_tail(&tk->list, &probe_list);
 540
 541end:
 542	mutex_unlock(&probe_lock);
 543	return ret;
 544}
 545
  546/* Module notifier callback, checking events on the module */
 547static int trace_kprobe_module_callback(struct notifier_block *nb,
 548				       unsigned long val, void *data)
 549{
 550	struct module *mod = data;
 551	struct trace_kprobe *tk;
 552	int ret;
 553
 554	if (val != MODULE_STATE_COMING)
 555		return NOTIFY_DONE;
 556
 557	/* Update probes on coming module */
 558	mutex_lock(&probe_lock);
 559	list_for_each_entry(tk, &probe_list, list) {
 560		if (trace_kprobe_within_module(tk, mod)) {
 561			/* Don't need to check busy - this should have gone. */
 562			__unregister_trace_kprobe(tk);
 563			ret = __register_trace_kprobe(tk);
 564			if (ret)
 565				pr_warn("Failed to re-register probe %s on %s: %d\n",
 566					trace_event_name(&tk->tp.call),
 567					mod->name, ret);
 568		}
 569	}
 570	mutex_unlock(&probe_lock);
 571
 572	return NOTIFY_DONE;
 573}
 574
 575static struct notifier_block trace_kprobe_module_nb = {
 576	.notifier_call = trace_kprobe_module_callback,
 577	.priority = 1	/* Invoked after kprobe module callback */
 578};
 579
 580static int create_trace_kprobe(int argc, char **argv)
 581{
 582	/*
 583	 * Argument syntax:
 584	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
 585	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
 586	 * Fetch args:
 587	 *  $retval	: fetch return value
 588	 *  $stack	: fetch stack address
 589	 *  $stackN	: fetch Nth of stack (N:0-)
 590	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
 591	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
 592	 *  %REG	: fetch register REG
 593	 * Dereferencing memory fetch:
 594	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
 595	 * Alias name of args:
 596	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
 597	 * Type of args:
 598	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
 599	 */
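	/*
	 * Illustrative example (not part of this file): with tracefs
	 * mounted, a probe on do_sys_open could be created and removed
	 * by writing to the kprobe_events file:
	 *
	 *   echo 'p:myprobe do_sys_open dfd=%ax filename=+0(%si):string' \
	 *       >> /sys/kernel/debug/tracing/kprobe_events
	 *   echo '-:myprobe' >> /sys/kernel/debug/tracing/kprobe_events
	 *
	 * Register names such as %ax and %si are architecture-dependent.
	 */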
 600	struct trace_kprobe *tk;
 601	int i, ret = 0;
 602	bool is_return = false, is_delete = false;
 603	char *symbol = NULL, *event = NULL, *group = NULL;
 604	char *arg;
 605	unsigned long offset = 0;
 606	void *addr = NULL;
 607	char buf[MAX_EVENT_NAME_LEN];
 608
 609	/* argc must be >= 1 */
 610	if (argv[0][0] == 'p')
 611		is_return = false;
 612	else if (argv[0][0] == 'r')
 613		is_return = true;
 614	else if (argv[0][0] == '-')
 615		is_delete = true;
 616	else {
  617		pr_info("Probe definition must start with 'p', 'r' or"
 618			" '-'.\n");
 619		return -EINVAL;
 620	}
 621
 622	if (argv[0][1] == ':') {
 623		event = &argv[0][2];
 624		if (strchr(event, '/')) {
 625			group = event;
 626			event = strchr(group, '/') + 1;
 627			event[-1] = '\0';
 628			if (strlen(group) == 0) {
 629				pr_info("Group name is not specified\n");
 630				return -EINVAL;
 631			}
 632		}
 633		if (strlen(event) == 0) {
 634			pr_info("Event name is not specified\n");
 635			return -EINVAL;
 636		}
 637	}
 638	if (!group)
 639		group = KPROBE_EVENT_SYSTEM;
 640
 641	if (is_delete) {
 642		if (!event) {
 643			pr_info("Delete command needs an event name.\n");
 644			return -EINVAL;
 645		}
 646		mutex_lock(&probe_lock);
 647		tk = find_trace_kprobe(event, group);
 648		if (!tk) {
 649			mutex_unlock(&probe_lock);
 650			pr_info("Event %s/%s doesn't exist.\n", group, event);
 651			return -ENOENT;
 652		}
 653		/* delete an event */
 654		ret = unregister_trace_kprobe(tk);
 655		if (ret == 0)
 656			free_trace_kprobe(tk);
 657		mutex_unlock(&probe_lock);
 658		return ret;
 659	}
 660
 661	if (argc < 2) {
 662		pr_info("Probe point is not specified.\n");
 663		return -EINVAL;
 664	}
 665	if (isdigit(argv[1][0])) {
 666		if (is_return) {
 667			pr_info("Return probe point must be a symbol.\n");
 668			return -EINVAL;
 669		}
 670		/* an address specified */
 671		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
 672		if (ret) {
 673			pr_info("Failed to parse address.\n");
 674			return ret;
 675		}
 676	} else {
 677		/* a symbol specified */
 678		symbol = argv[1];
 679		/* TODO: support .init module functions */
 680		ret = traceprobe_split_symbol_offset(symbol, &offset);
 681		if (ret) {
 682			pr_info("Failed to parse symbol.\n");
 683			return ret;
 684		}
 685		if (offset && is_return) {
 686			pr_info("Return probe must be used without offset.\n");
 687			return -EINVAL;
 688		}
 689	}
 690	argc -= 2; argv += 2;
 691
 692	/* setup a probe */
 693	if (!event) {
 694		/* Make a new event name */
 695		if (symbol)
 696			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
 697				 is_return ? 'r' : 'p', symbol, offset);
 698		else
 699			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
 700				 is_return ? 'r' : 'p', addr);
 701		event = buf;
 702	}
 703	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
 704			       is_return);
 705	if (IS_ERR(tk)) {
 706		pr_info("Failed to allocate trace_probe.(%d)\n",
 707			(int)PTR_ERR(tk));
 708		return PTR_ERR(tk);
 709	}
 710
 711	/* parse arguments */
 712	ret = 0;
 713	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
 714		struct probe_arg *parg = &tk->tp.args[i];
 715
 716		/* Increment count for freeing args in error case */
 717		tk->tp.nr_args++;
 718
 719		/* Parse argument name */
 720		arg = strchr(argv[i], '=');
 721		if (arg) {
 722			*arg++ = '\0';
 723			parg->name = kstrdup(argv[i], GFP_KERNEL);
 724		} else {
 725			arg = argv[i];
 726			/* If argument name is omitted, set "argN" */
 727			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
 728			parg->name = kstrdup(buf, GFP_KERNEL);
 729		}
 730
 731		if (!parg->name) {
 732			pr_info("Failed to allocate argument[%d] name.\n", i);
 733			ret = -ENOMEM;
 734			goto error;
 735		}
 736
 737		if (!is_good_name(parg->name)) {
 738			pr_info("Invalid argument[%d] name: %s\n",
 739				i, parg->name);
 740			ret = -EINVAL;
 741			goto error;
 742		}
 743
 744		if (traceprobe_conflict_field_name(parg->name,
 745							tk->tp.args, i)) {
 746			pr_info("Argument[%d] name '%s' conflicts with "
 747				"another field.\n", i, argv[i]);
 748			ret = -EINVAL;
 749			goto error;
 750		}
 751
 752		/* Parse fetch argument */
 753		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
 754						is_return, true,
 755						kprobes_fetch_type_table);
 756		if (ret) {
 757			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
 758			goto error;
 759		}
 760	}
 761
 762	ret = register_trace_kprobe(tk);
 763	if (ret)
 764		goto error;
 765	return 0;
 766
 767error:
 768	free_trace_kprobe(tk);
 769	return ret;
 770}
 771
 772static int release_all_trace_kprobes(void)
 773{
 774	struct trace_kprobe *tk;
 775	int ret = 0;
 776
 777	mutex_lock(&probe_lock);
 778	/* Ensure no probe is in use. */
 779	list_for_each_entry(tk, &probe_list, list)
 780		if (trace_probe_is_enabled(&tk->tp)) {
 781			ret = -EBUSY;
 782			goto end;
 783		}
 784	/* TODO: Use batch unregistration */
 785	while (!list_empty(&probe_list)) {
 786		tk = list_entry(probe_list.next, struct trace_kprobe, list);
 787		ret = unregister_trace_kprobe(tk);
 788		if (ret)
 789			goto end;
 790		free_trace_kprobe(tk);
 791	}
 792
 793end:
 794	mutex_unlock(&probe_lock);
 795
 796	return ret;
 797}
 798
 799/* Probes listing interfaces */
 800static void *probes_seq_start(struct seq_file *m, loff_t *pos)
 801{
 802	mutex_lock(&probe_lock);
 803	return seq_list_start(&probe_list, *pos);
 804}
 805
 806static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
 807{
 808	return seq_list_next(v, &probe_list, pos);
 809}
 810
 811static void probes_seq_stop(struct seq_file *m, void *v)
 812{
 813	mutex_unlock(&probe_lock);
 814}
 815
 816static int probes_seq_show(struct seq_file *m, void *v)
 817{
 818	struct trace_kprobe *tk = v;
 819	int i;
 820
 821	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
 822	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
 823			trace_event_name(&tk->tp.call));
 824
 825	if (!tk->symbol)
 826		seq_printf(m, " 0x%p", tk->rp.kp.addr);
 827	else if (tk->rp.kp.offset)
 828		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
 829			   tk->rp.kp.offset);
 830	else
 831		seq_printf(m, " %s", trace_kprobe_symbol(tk));
 832
 833	for (i = 0; i < tk->tp.nr_args; i++)
 834		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
 835	seq_putc(m, '\n');
 836
 837	return 0;
 838}
 839
 840static const struct seq_operations probes_seq_op = {
 841	.start  = probes_seq_start,
 842	.next   = probes_seq_next,
 843	.stop   = probes_seq_stop,
 844	.show   = probes_seq_show
 845};
 846
 847static int probes_open(struct inode *inode, struct file *file)
 848{
 849	int ret;
 850
 851	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 852		ret = release_all_trace_kprobes();
 853		if (ret < 0)
 854			return ret;
 855	}
 856
 857	return seq_open(file, &probes_seq_op);
 858}
 859
 860static ssize_t probes_write(struct file *file, const char __user *buffer,
 861			    size_t count, loff_t *ppos)
 862{
 863	return traceprobe_probes_write(file, buffer, count, ppos,
 864			create_trace_kprobe);
 865}
 866
 867static const struct file_operations kprobe_events_ops = {
 868	.owner          = THIS_MODULE,
 869	.open           = probes_open,
 870	.read           = seq_read,
 871	.llseek         = seq_lseek,
 872	.release        = seq_release,
 873	.write		= probes_write,
 874};
 875
 876/* Probes profiling interfaces */
 877static int probes_profile_seq_show(struct seq_file *m, void *v)
 878{
 879	struct trace_kprobe *tk = v;
 880	unsigned long nhit = 0;
 881	int cpu;
 882
 883	for_each_possible_cpu(cpu)
 884		nhit += *per_cpu_ptr(tk->nhit, cpu);
 885
 886	seq_printf(m, "  %-44s %15lu %15lu\n",
 887		   trace_event_name(&tk->tp.call), nhit,
 888		   tk->rp.kp.nmissed);
 889
 890	return 0;
 891}
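/*
 * Each kprobe_profile line thus reads: event name, hit count summed
 * over all possible CPUs, and the kretprobe miss count.
 */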
 892
 893static const struct seq_operations profile_seq_op = {
 894	.start  = probes_seq_start,
 895	.next   = probes_seq_next,
 896	.stop   = probes_seq_stop,
 897	.show   = probes_profile_seq_show
 898};
 899
 900static int profile_open(struct inode *inode, struct file *file)
 901{
 902	return seq_open(file, &profile_seq_op);
 903}
 904
 905static const struct file_operations kprobe_profile_ops = {
 906	.owner          = THIS_MODULE,
 907	.open           = profile_open,
 908	.read           = seq_read,
 909	.llseek         = seq_lseek,
 910	.release        = seq_release,
 911};
 912
 913/* Kprobe handler */
 914static nokprobe_inline void
 915__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
 916		    struct trace_event_file *trace_file)
 917{
 918	struct kprobe_trace_entry_head *entry;
 919	struct ring_buffer_event *event;
 920	struct ring_buffer *buffer;
 921	int size, dsize, pc;
 922	unsigned long irq_flags;
 923	struct trace_event_call *call = &tk->tp.call;
 924
 925	WARN_ON(call != trace_file->event_call);
 926
 927	if (trace_trigger_soft_disabled(trace_file))
 928		return;
 929
 930	local_save_flags(irq_flags);
 931	pc = preempt_count();
 932
 933	dsize = __get_data_size(&tk->tp, regs);
 934	size = sizeof(*entry) + tk->tp.size + dsize;
 935
 936	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 937						call->event.type,
 938						size, irq_flags, pc);
 939	if (!event)
 940		return;
 941
 942	entry = ring_buffer_event_data(event);
 943	entry->ip = (unsigned long)tk->rp.kp.addr;
 944	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
 945
 946	event_trigger_unlock_commit_regs(trace_file, buffer, event,
 947					 entry, irq_flags, pc, regs);
 948}
 949
 950static void
 951kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
 952{
 953	struct event_file_link *link;
 954
 955	list_for_each_entry_rcu(link, &tk->tp.files, list)
 956		__kprobe_trace_func(tk, regs, link->file);
 957}
 958NOKPROBE_SYMBOL(kprobe_trace_func);
 959
 960/* Kretprobe handler */
 961static nokprobe_inline void
 962__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 963		       struct pt_regs *regs,
 964		       struct trace_event_file *trace_file)
 965{
 966	struct kretprobe_trace_entry_head *entry;
 967	struct ring_buffer_event *event;
 968	struct ring_buffer *buffer;
 969	int size, pc, dsize;
 970	unsigned long irq_flags;
 971	struct trace_event_call *call = &tk->tp.call;
 972
 973	WARN_ON(call != trace_file->event_call);
 974
 975	if (trace_trigger_soft_disabled(trace_file))
 976		return;
 977
 978	local_save_flags(irq_flags);
 979	pc = preempt_count();
 980
 981	dsize = __get_data_size(&tk->tp, regs);
 982	size = sizeof(*entry) + tk->tp.size + dsize;
 983
 984	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 985						call->event.type,
 986						size, irq_flags, pc);
 987	if (!event)
 988		return;
 989
 990	entry = ring_buffer_event_data(event);
 991	entry->func = (unsigned long)tk->rp.kp.addr;
 992	entry->ret_ip = (unsigned long)ri->ret_addr;
 993	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
 994
 995	event_trigger_unlock_commit_regs(trace_file, buffer, event,
 996					 entry, irq_flags, pc, regs);
 997}
 998
 999static void
1000kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1001		     struct pt_regs *regs)
1002{
1003	struct event_file_link *link;
1004
1005	list_for_each_entry_rcu(link, &tk->tp.files, list)
1006		__kretprobe_trace_func(tk, ri, regs, link->file);
1007}
1008NOKPROBE_SYMBOL(kretprobe_trace_func);
1009
1010/* Event entry printers */
1011static enum print_line_t
1012print_kprobe_event(struct trace_iterator *iter, int flags,
1013		   struct trace_event *event)
1014{
1015	struct kprobe_trace_entry_head *field;
1016	struct trace_seq *s = &iter->seq;
1017	struct trace_probe *tp;
1018	u8 *data;
1019	int i;
1020
1021	field = (struct kprobe_trace_entry_head *)iter->ent;
1022	tp = container_of(event, struct trace_probe, call.event);
1023
1024	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
1025
1026	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1027		goto out;
1028
1029	trace_seq_putc(s, ')');
1030
1031	data = (u8 *)&field[1];
1032	for (i = 0; i < tp->nr_args; i++)
1033		if (!tp->args[i].type->print(s, tp->args[i].name,
1034					     data + tp->args[i].offset, field))
1035			goto out;
1036
1037	trace_seq_putc(s, '\n');
1038 out:
1039	return trace_handle_return(s);
1040}
1041
1042static enum print_line_t
1043print_kretprobe_event(struct trace_iterator *iter, int flags,
1044		      struct trace_event *event)
1045{
1046	struct kretprobe_trace_entry_head *field;
1047	struct trace_seq *s = &iter->seq;
1048	struct trace_probe *tp;
1049	u8 *data;
1050	int i;
1051
1052	field = (struct kretprobe_trace_entry_head *)iter->ent;
1053	tp = container_of(event, struct trace_probe, call.event);
1054
1055	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
1056
1057	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1058		goto out;
1059
1060	trace_seq_puts(s, " <- ");
1061
1062	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1063		goto out;
1064
1065	trace_seq_putc(s, ')');
1066
1067	data = (u8 *)&field[1];
1068	for (i = 0; i < tp->nr_args; i++)
1069		if (!tp->args[i].type->print(s, tp->args[i].name,
1070					     data + tp->args[i].offset, field))
1071			goto out;
1072
1073	trace_seq_putc(s, '\n');
1074
1075 out:
1076	return trace_handle_return(s);
1077}
1078
1079
1080static int kprobe_event_define_fields(struct trace_event_call *event_call)
1081{
1082	int ret, i;
1083	struct kprobe_trace_entry_head field;
1084	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1085
1086	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1087	/* Set argument names as fields */
1088	for (i = 0; i < tk->tp.nr_args; i++) {
1089		struct probe_arg *parg = &tk->tp.args[i];
1090
1091		ret = trace_define_field(event_call, parg->type->fmttype,
1092					 parg->name,
1093					 sizeof(field) + parg->offset,
1094					 parg->type->size,
1095					 parg->type->is_signed,
1096					 FILTER_OTHER);
1097		if (ret)
1098			return ret;
1099	}
1100	return 0;
1101}
1102
1103static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1104{
1105	int ret, i;
1106	struct kretprobe_trace_entry_head field;
1107	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1108
1109	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1110	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1111	/* Set argument names as fields */
1112	for (i = 0; i < tk->tp.nr_args; i++) {
1113		struct probe_arg *parg = &tk->tp.args[i];
1114
1115		ret = trace_define_field(event_call, parg->type->fmttype,
1116					 parg->name,
1117					 sizeof(field) + parg->offset,
1118					 parg->type->size,
1119					 parg->type->is_signed,
1120					 FILTER_OTHER);
1121		if (ret)
1122			return ret;
1123	}
1124	return 0;
1125}
1126
1127#ifdef CONFIG_PERF_EVENTS
1128
1129/* Kprobe profile handler */
1130static void
1131kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1132{
1133	struct trace_event_call *call = &tk->tp.call;
1134	struct bpf_prog *prog = call->prog;
1135	struct kprobe_trace_entry_head *entry;
1136	struct hlist_head *head;
1137	int size, __size, dsize;
1138	int rctx;
1139
1140	if (prog && !trace_call_bpf(prog, regs))
1141		return;
1142
1143	head = this_cpu_ptr(call->perf_events);
1144	if (hlist_empty(head))
1145		return;
1146
1147	dsize = __get_data_size(&tk->tp, regs);
1148	__size = sizeof(*entry) + tk->tp.size + dsize;
1149	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1150	size -= sizeof(u32);
1151
1152	entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1153	if (!entry)
1154		return;
1155
1156	entry->ip = (unsigned long)tk->rp.kp.addr;
1157	memset(&entry[1], 0, dsize);
1158	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1159	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1160}
1161NOKPROBE_SYMBOL(kprobe_perf_func);
1162
1163/* Kretprobe profile handler */
1164static void
1165kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1166		    struct pt_regs *regs)
1167{
1168	struct trace_event_call *call = &tk->tp.call;
1169	struct bpf_prog *prog = call->prog;
1170	struct kretprobe_trace_entry_head *entry;
1171	struct hlist_head *head;
1172	int size, __size, dsize;
1173	int rctx;
1174
1175	if (prog && !trace_call_bpf(prog, regs))
1176		return;
1177
1178	head = this_cpu_ptr(call->perf_events);
1179	if (hlist_empty(head))
1180		return;
1181
1182	dsize = __get_data_size(&tk->tp, regs);
1183	__size = sizeof(*entry) + tk->tp.size + dsize;
1184	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1185	size -= sizeof(u32);
1186
1187	entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1188	if (!entry)
1189		return;
1190
1191	entry->func = (unsigned long)tk->rp.kp.addr;
1192	entry->ret_ip = (unsigned long)ri->ret_addr;
1193	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1194	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1195}
1196NOKPROBE_SYMBOL(kretprobe_perf_func);
1197#endif	/* CONFIG_PERF_EVENTS */
1198
1199/*
 1200 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 1201 *
 1202 * kprobe_trace_self_tests_init() calls enable_trace_probe()/disable_trace_probe()
 1203 * locklessly, but it cannot race with this __init function.
1204 */
1205static int kprobe_register(struct trace_event_call *event,
1206			   enum trace_reg type, void *data)
1207{
1208	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
1209	struct trace_event_file *file = data;
1210
1211	switch (type) {
1212	case TRACE_REG_REGISTER:
1213		return enable_trace_kprobe(tk, file);
1214	case TRACE_REG_UNREGISTER:
1215		return disable_trace_kprobe(tk, file);
1216
1217#ifdef CONFIG_PERF_EVENTS
1218	case TRACE_REG_PERF_REGISTER:
1219		return enable_trace_kprobe(tk, NULL);
1220	case TRACE_REG_PERF_UNREGISTER:
1221		return disable_trace_kprobe(tk, NULL);
1222	case TRACE_REG_PERF_OPEN:
1223	case TRACE_REG_PERF_CLOSE:
1224	case TRACE_REG_PERF_ADD:
1225	case TRACE_REG_PERF_DEL:
1226		return 0;
1227#endif
1228	}
1229	return 0;
1230}
1231
1232static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1233{
1234	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1235
1236	raw_cpu_inc(*tk->nhit);
1237
1238	if (tk->tp.flags & TP_FLAG_TRACE)
1239		kprobe_trace_func(tk, regs);
1240#ifdef CONFIG_PERF_EVENTS
1241	if (tk->tp.flags & TP_FLAG_PROFILE)
1242		kprobe_perf_func(tk, regs);
1243#endif
1244	return 0;	/* We don't tweek kernel, so just return 0 */
1245}
1246NOKPROBE_SYMBOL(kprobe_dispatcher);
1247
1248static int
1249kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1250{
1251	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1252
1253	raw_cpu_inc(*tk->nhit);
1254
1255	if (tk->tp.flags & TP_FLAG_TRACE)
1256		kretprobe_trace_func(tk, ri, regs);
1257#ifdef CONFIG_PERF_EVENTS
1258	if (tk->tp.flags & TP_FLAG_PROFILE)
1259		kretprobe_perf_func(tk, ri, regs);
1260#endif
1261	return 0;	/* We don't tweek kernel, so just return 0 */
1262}
1263NOKPROBE_SYMBOL(kretprobe_dispatcher);
1264
1265static struct trace_event_functions kretprobe_funcs = {
1266	.trace		= print_kretprobe_event
1267};
1268
1269static struct trace_event_functions kprobe_funcs = {
1270	.trace		= print_kprobe_event
1271};
1272
1273static int register_kprobe_event(struct trace_kprobe *tk)
1274{
1275	struct trace_event_call *call = &tk->tp.call;
1276	int ret;
1277
1278	/* Initialize trace_event_call */
1279	INIT_LIST_HEAD(&call->class->fields);
1280	if (trace_kprobe_is_return(tk)) {
1281		call->event.funcs = &kretprobe_funcs;
1282		call->class->define_fields = kretprobe_event_define_fields;
1283	} else {
1284		call->event.funcs = &kprobe_funcs;
1285		call->class->define_fields = kprobe_event_define_fields;
1286	}
1287	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
1288		return -ENOMEM;
1289	ret = register_trace_event(&call->event);
1290	if (!ret) {
1291		kfree(call->print_fmt);
1292		return -ENODEV;
1293	}
1294	call->flags = TRACE_EVENT_FL_KPROBE;
1295	call->class->reg = kprobe_register;
1296	call->data = tk;
1297	ret = trace_add_event_call(call);
1298	if (ret) {
1299		pr_info("Failed to register kprobe event: %s\n",
1300			trace_event_name(call));
1301		kfree(call->print_fmt);
1302		unregister_trace_event(&call->event);
1303	}
1304	return ret;
1305}
1306
1307static int unregister_kprobe_event(struct trace_kprobe *tk)
1308{
1309	int ret;
1310
1311	/* tp->event is unregistered in trace_remove_event_call() */
1312	ret = trace_remove_event_call(&tk->tp.call);
1313	if (!ret)
1314		kfree(tk->tp.call.print_fmt);
1315	return ret;
1316}
1317
1318/* Make a tracefs interface for controlling probe points */
1319static __init int init_kprobe_trace(void)
1320{
1321	struct dentry *d_tracer;
1322	struct dentry *entry;
1323
1324	if (register_module_notifier(&trace_kprobe_module_nb))
1325		return -EINVAL;
1326
1327	d_tracer = tracing_init_dentry();
1328	if (IS_ERR(d_tracer))
1329		return 0;
1330
1331	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
1332				    NULL, &kprobe_events_ops);
1333
1334	/* Event list interface */
1335	if (!entry)
1336		pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1337
1338	/* Profile interface */
1339	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
1340				    NULL, &kprobe_profile_ops);
1341
1342	if (!entry)
1343		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1344	return 0;
1345}
1346fs_initcall(init_kprobe_trace);
1347
1348
1349#ifdef CONFIG_FTRACE_STARTUP_TEST
1350
1351/*
1352 * The "__used" keeps gcc from removing the function symbol
1353 * from the kallsyms table.
1354 */
1355static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
1356					       int a4, int a5, int a6)
1357{
1358	return a1 + a2 + a3 + a4 + a5 + a6;
1359}
1360
1361static struct trace_event_file *
1362find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1363{
1364	struct trace_event_file *file;
1365
1366	list_for_each_entry(file, &tr->events, list)
1367		if (file->event_call == &tk->tp.call)
1368			return file;
1369
1370	return NULL;
1371}
1372
1373/*
1374 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 1375 * stage, so we can do this locklessly.
1376 */
1377static __init int kprobe_trace_self_tests_init(void)
1378{
1379	int ret, warn = 0;
1380	int (*target)(int, int, int, int, int, int);
1381	struct trace_kprobe *tk;
1382	struct trace_event_file *file;
1383
1384	if (tracing_is_disabled())
1385		return -ENODEV;
1386
1387	target = kprobe_trace_selftest_target;
1388
1389	pr_info("Testing kprobe tracing: ");
1390
1391	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
1392				  "$stack $stack0 +0($stack)",
1393				  create_trace_kprobe);
1394	if (WARN_ON_ONCE(ret)) {
1395		pr_warn("error on probing function entry.\n");
1396		warn++;
1397	} else {
1398		/* Enable trace point */
1399		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1400		if (WARN_ON_ONCE(tk == NULL)) {
1401			pr_warn("error on getting new probe.\n");
1402			warn++;
1403		} else {
1404			file = find_trace_probe_file(tk, top_trace_array());
1405			if (WARN_ON_ONCE(file == NULL)) {
1406				pr_warn("error on getting probe file.\n");
1407				warn++;
1408			} else
1409				enable_trace_kprobe(tk, file);
1410		}
1411	}
1412
1413	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1414				  "$retval", create_trace_kprobe);
1415	if (WARN_ON_ONCE(ret)) {
1416		pr_warn("error on probing function return.\n");
1417		warn++;
1418	} else {
1419		/* Enable trace point */
1420		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1421		if (WARN_ON_ONCE(tk == NULL)) {
1422			pr_warn("error on getting 2nd new probe.\n");
1423			warn++;
1424		} else {
1425			file = find_trace_probe_file(tk, top_trace_array());
1426			if (WARN_ON_ONCE(file == NULL)) {
1427				pr_warn("error on getting probe file.\n");
1428				warn++;
1429			} else
1430				enable_trace_kprobe(tk, file);
1431		}
1432	}
1433
1434	if (warn)
1435		goto end;
1436
1437	ret = target(1, 2, 3, 4, 5, 6);
1438
 1439	/* Disable trace points before removing them */
1440	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1441	if (WARN_ON_ONCE(tk == NULL)) {
1442		pr_warn("error on getting test probe.\n");
1443		warn++;
1444	} else {
1445		file = find_trace_probe_file(tk, top_trace_array());
1446		if (WARN_ON_ONCE(file == NULL)) {
1447			pr_warn("error on getting probe file.\n");
1448			warn++;
1449		} else
1450			disable_trace_kprobe(tk, file);
1451	}
1452
1453	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1454	if (WARN_ON_ONCE(tk == NULL)) {
1455		pr_warn("error on getting 2nd test probe.\n");
1456		warn++;
1457	} else {
1458		file = find_trace_probe_file(tk, top_trace_array());
1459		if (WARN_ON_ONCE(file == NULL)) {
1460			pr_warn("error on getting probe file.\n");
1461			warn++;
1462		} else
1463			disable_trace_kprobe(tk, file);
1464	}
1465
1466	ret = traceprobe_command("-:testprobe", create_trace_kprobe);
1467	if (WARN_ON_ONCE(ret)) {
1468		pr_warn("error on deleting a probe.\n");
1469		warn++;
1470	}
1471
1472	ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
1473	if (WARN_ON_ONCE(ret)) {
1474		pr_warn("error on deleting a probe.\n");
1475		warn++;
1476	}
1477
1478end:
1479	release_all_trace_kprobes();
1480	if (warn)
 1481		pr_cont("NG: Some tests failed. Please check them.\n");
1482	else
1483		pr_cont("OK\n");
1484	return 0;
1485}
1486
1487late_initcall(kprobe_trace_self_tests_init);
1488
1489#endif
v4.17
   1/*
   2 * Kprobes-based tracing events
   3 *
   4 * Created by Masami Hiramatsu <mhiramat@redhat.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  18 */
  19#define pr_fmt(fmt)	"trace_kprobe: " fmt
  20
  21#include <linux/module.h>
  22#include <linux/uaccess.h>
  23#include <linux/rculist.h>
  24#include <linux/error-injection.h>
  25
  26#include "trace_probe.h"
  27
  28#define KPROBE_EVENT_SYSTEM "kprobes"
  29#define KRETPROBE_MAXACTIVE_MAX 4096
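/* Upper bound for a user-supplied kretprobe maxactive value. */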
  30
   31/*
  32 * Kprobe event core functions
  33 */
  34struct trace_kprobe {
  35	struct list_head	list;
  36	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
  37	unsigned long __percpu *nhit;
  38	const char		*symbol;	/* symbol name */
  39	struct trace_probe	tp;
  40};
  41
  42#define SIZEOF_TRACE_KPROBE(n)				\
  43	(offsetof(struct trace_kprobe, tp.args) +	\
  44	(sizeof(struct probe_arg) * (n)))
  45
  46static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
  47{
  48	return tk->rp.handler != NULL;
  49}
  50
  51static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
  52{
  53	return tk->symbol ? tk->symbol : "unknown";
  54}
  55
  56static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
  57{
  58	return tk->rp.kp.offset;
  59}
  60
  61static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
  62{
  63	return !!(kprobe_gone(&tk->rp.kp));
  64}
  65
  66static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
  67						 struct module *mod)
  68{
  69	int len = strlen(mod->name);
  70	const char *name = trace_kprobe_symbol(tk);
  71	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
  72}
  73
  74static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
  75{
  76	return !!strchr(trace_kprobe_symbol(tk), ':');
  77}
  78
  79static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
  80{
  81	unsigned long nhit = 0;
  82	int cpu;
  83
  84	for_each_possible_cpu(cpu)
  85		nhit += *per_cpu_ptr(tk->nhit, cpu);
  86
  87	return nhit;
  88}
  89
  90bool trace_kprobe_on_func_entry(struct trace_event_call *call)
  91{
  92	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
  93
  94	return kprobe_on_func_entry(tk->rp.kp.addr,
  95			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
  96			tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
  97}
  98
  99bool trace_kprobe_error_injectable(struct trace_event_call *call)
 100{
 101	struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
 102	unsigned long addr;
 103
 104	if (tk->symbol) {
 105		addr = (unsigned long)
 106			kallsyms_lookup_name(trace_kprobe_symbol(tk));
 107		addr += tk->rp.kp.offset;
 108	} else {
 109		addr = (unsigned long)tk->rp.kp.addr;
 110	}
 111	return within_error_injection_list(addr);
 112}
 113
 114static int register_kprobe_event(struct trace_kprobe *tk);
 115static int unregister_kprobe_event(struct trace_kprobe *tk);
 116
 117static DEFINE_MUTEX(probe_lock);
 118static LIST_HEAD(probe_list);
 119
 120static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
 121static int kretprobe_dispatcher(struct kretprobe_instance *ri,
 122				struct pt_regs *regs);
 123
 124/* Memory fetching by symbol */
 125struct symbol_cache {
 126	char		*symbol;
 127	long		offset;
 128	unsigned long	addr;
 129};
 130
 131unsigned long update_symbol_cache(struct symbol_cache *sc)
 132{
 133	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
 134
 135	if (sc->addr)
 136		sc->addr += sc->offset;
 137
 138	return sc->addr;
 139}
 140
 141void free_symbol_cache(struct symbol_cache *sc)
 142{
 143	kfree(sc->symbol);
 144	kfree(sc);
 145}
 146
 147struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
 148{
 149	struct symbol_cache *sc;
 150
 151	if (!sym || strlen(sym) == 0)
 152		return NULL;
 153
 154	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
 155	if (!sc)
 156		return NULL;
 157
 158	sc->symbol = kstrdup(sym, GFP_KERNEL);
 159	if (!sc->symbol) {
 160		kfree(sc);
 161		return NULL;
 162	}
 163	sc->offset = offset;
 164	update_symbol_cache(sc);
 165
 166	return sc;
 167}
 168
 169/*
 170 * Kprobes-specific fetch functions
 171 */
 172#define DEFINE_FETCH_stack(type)					\
 173static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
 174					  void *offset, void *dest)	\
 175{									\
 176	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
 177				(unsigned int)((unsigned long)offset));	\
 178}									\
 179NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));
 180
 181DEFINE_BASIC_FETCH_FUNCS(stack)
 182/* No string on the stack entry */
 183#define fetch_stack_string	NULL
 184#define fetch_stack_string_size	NULL
 185
 186#define DEFINE_FETCH_memory(type)					\
 187static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,		\
 188					  void *addr, void *dest)	\
 189{									\
 190	type retval;							\
 191	if (probe_kernel_address(addr, retval))				\
 192		*(type *)dest = 0;					\
 193	else								\
 194		*(type *)dest = retval;					\
 195}									\
 196NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));
 197
 198DEFINE_BASIC_FETCH_FUNCS(memory)
 199/*
 200 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 201 * length and relative data location.
 202 */
 203static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
 204					    void *addr, void *dest)
 205{
 206	int maxlen = get_rloc_len(*(u32 *)dest);
 207	u8 *dst = get_rloc_data(dest);
 208	long ret;
 209
 210	if (!maxlen)
 211		return;
 212
 213	/*
 214	 * Try to get string again, since the string can be changed while
 215	 * probing.
 216	 */
 217	ret = strncpy_from_unsafe(dst, addr, maxlen);
 218
 219	if (ret < 0) {	/* Failed to fetch string */
 220		dst[0] = '\0';
 221		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
 222	} else {
 223		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
 224	}
 225}
 226NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
 227
  228/* Return the length of the string -- including the null terminating byte */
 229static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
 230						 void *addr, void *dest)
 231{
 232	mm_segment_t old_fs;
 233	int ret, len = 0;
 234	u8 c;
 235
 236	old_fs = get_fs();
 237	set_fs(KERNEL_DS);
 238	pagefault_disable();
 239
 240	do {
 241		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
 242		len++;
 243	} while (c && ret == 0 && len < MAX_STRING_SIZE);
 244
 245	pagefault_enable();
 246	set_fs(old_fs);
 247
 248	if (ret < 0)	/* Failed to check the length */
 249		*(u32 *)dest = 0;
 250	else
 251		*(u32 *)dest = len;
 252}
 253NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));
 254
 255#define DEFINE_FETCH_symbol(type)					\
 256void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
 257{									\
 258	struct symbol_cache *sc = data;					\
 259	if (sc->addr)							\
 260		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
 261	else								\
 262		*(type *)dest = 0;					\
 263}									\
 264NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));
 265
 266DEFINE_BASIC_FETCH_FUNCS(symbol)
 267DEFINE_FETCH_symbol(string)
 268DEFINE_FETCH_symbol(string_size)
 269
 270/* kprobes don't support file_offset fetch methods */
 271#define fetch_file_offset_u8		NULL
 272#define fetch_file_offset_u16		NULL
 273#define fetch_file_offset_u32		NULL
 274#define fetch_file_offset_u64		NULL
 275#define fetch_file_offset_string	NULL
 276#define fetch_file_offset_string_size	NULL
 277
 278/* Fetch type information table */
 279static const struct fetch_type kprobes_fetch_type_table[] = {
 280	/* Special types */
 281	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
 282					sizeof(u32), 1, "__data_loc char[]"),
 283	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
 284					string_size, sizeof(u32), 0, "u32"),
 285	/* Basic types */
 286	ASSIGN_FETCH_TYPE(u8,  u8,  0),
 287	ASSIGN_FETCH_TYPE(u16, u16, 0),
 288	ASSIGN_FETCH_TYPE(u32, u32, 0),
 289	ASSIGN_FETCH_TYPE(u64, u64, 0),
 290	ASSIGN_FETCH_TYPE(s8,  u8,  1),
 291	ASSIGN_FETCH_TYPE(s16, u16, 1),
 292	ASSIGN_FETCH_TYPE(s32, u32, 1),
 293	ASSIGN_FETCH_TYPE(s64, u64, 1),
 294	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
 295	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
 296	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
 297	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),
 298
 299	ASSIGN_FETCH_TYPE_END
 300};
 301
 302/*
 303 * Allocate new trace_probe and initialize it (including kprobes).
 304 */
 305static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 306					     const char *event,
 307					     void *addr,
 308					     const char *symbol,
 309					     unsigned long offs,
 310					     int maxactive,
 311					     int nargs, bool is_return)
 312{
 313	struct trace_kprobe *tk;
 314	int ret = -ENOMEM;
 315
 316	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
 317	if (!tk)
 318		return ERR_PTR(ret);
 319
 320	tk->nhit = alloc_percpu(unsigned long);
 321	if (!tk->nhit)
 322		goto error;
 323
 324	if (symbol) {
 325		tk->symbol = kstrdup(symbol, GFP_KERNEL);
 326		if (!tk->symbol)
 327			goto error;
 328		tk->rp.kp.symbol_name = tk->symbol;
 329		tk->rp.kp.offset = offs;
 330	} else
 331		tk->rp.kp.addr = addr;
 332
 333	if (is_return)
 334		tk->rp.handler = kretprobe_dispatcher;
 335	else
 336		tk->rp.kp.pre_handler = kprobe_dispatcher;
 337
 338	tk->rp.maxactive = maxactive;
 339
 340	if (!event || !is_good_name(event)) {
 341		ret = -EINVAL;
 342		goto error;
 343	}
 344
 345	tk->tp.call.class = &tk->tp.class;
 346	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
 347	if (!tk->tp.call.name)
 348		goto error;
 349
 350	if (!group || !is_good_name(group)) {
 351		ret = -EINVAL;
 352		goto error;
 353	}
 354
 355	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
 356	if (!tk->tp.class.system)
 357		goto error;
 358
 359	INIT_LIST_HEAD(&tk->list);
 360	INIT_LIST_HEAD(&tk->tp.files);
 361	return tk;
 362error:
 363	kfree(tk->tp.call.name);
 364	kfree(tk->symbol);
 365	free_percpu(tk->nhit);
 366	kfree(tk);
 367	return ERR_PTR(ret);
 368}
 369
 370static void free_trace_kprobe(struct trace_kprobe *tk)
 371{
 372	int i;
 373
 374	for (i = 0; i < tk->tp.nr_args; i++)
 375		traceprobe_free_probe_arg(&tk->tp.args[i]);
 376
 377	kfree(tk->tp.call.class->system);
 378	kfree(tk->tp.call.name);
 379	kfree(tk->symbol);
 380	free_percpu(tk->nhit);
 381	kfree(tk);
 382}
 383
 384static struct trace_kprobe *find_trace_kprobe(const char *event,
 385					      const char *group)
 386{
 387	struct trace_kprobe *tk;
 388
 389	list_for_each_entry(tk, &probe_list, list)
 390		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
 391		    strcmp(tk->tp.call.class->system, group) == 0)
 392			return tk;
 393	return NULL;
 394}
 395
 396/*
 397 * Enable trace_probe
  398 * If the file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
 399 */
 400static int
 401enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 402{
 403	int ret = 0;
 404
 405	if (file) {
 406		struct event_file_link *link;
 407
 408		link = kmalloc(sizeof(*link), GFP_KERNEL);
 409		if (!link) {
 410			ret = -ENOMEM;
 411			goto out;
 412		}
 413
 414		link->file = file;
 415		list_add_tail_rcu(&link->list, &tk->tp.files);
 416
 417		tk->tp.flags |= TP_FLAG_TRACE;
 418	} else
 419		tk->tp.flags |= TP_FLAG_PROFILE;
 420
 421	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
 422		if (trace_kprobe_is_return(tk))
 423			ret = enable_kretprobe(&tk->rp);
 424		else
 425			ret = enable_kprobe(&tk->rp.kp);
 426	}
 427 out:
 428	return ret;
 429}
 430
 431/*
 432 * Disable trace_probe
  433 * If the file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
 434 */
 435static int
 436disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 437{
 438	struct event_file_link *link = NULL;
 439	int wait = 0;
 440	int ret = 0;
 441
 442	if (file) {
 443		link = find_event_file_link(&tk->tp, file);
 444		if (!link) {
 445			ret = -EINVAL;
 446			goto out;
 447		}
 448
 449		list_del_rcu(&link->list);
 450		wait = 1;
 451		if (!list_empty(&tk->tp.files))
 452			goto out;
 453
 454		tk->tp.flags &= ~TP_FLAG_TRACE;
 455	} else
 456		tk->tp.flags &= ~TP_FLAG_PROFILE;
 457
 458	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
 459		if (trace_kprobe_is_return(tk))
 460			disable_kretprobe(&tk->rp);
 461		else
 462			disable_kprobe(&tk->rp.kp);
 463		wait = 1;
 464	}
 465
 466	/*
  467	 * If tk is not added to any list, it must be a local trace_kprobe
  468	 * created with perf_event_open. We don't need to wait for these
  469	 * trace_kprobes.
 470	 */
 471	if (list_empty(&tk->list))
 472		wait = 0;
 473 out:
 474	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure the probe is really disabled (all running
		 * handlers have finished). This matters not only for the
		 * kfree() below, but also for the caller:
		 * trace_remove_event_call() relies on it when releasing
		 * event_call related objects, which are accessed in
		 * kprobe_trace_func/kretprobe_trace_func.
		 */
 483		synchronize_sched();
 484		kfree(link);	/* Ignored if link == NULL */
 485	}
 486
 487	return ret;
 488}
 489
 490/* Internal register function - just handle k*probes and flags */
 491static int __register_trace_kprobe(struct trace_kprobe *tk)
 492{
 493	int i, ret;
 494
 495	if (trace_probe_is_registered(&tk->tp))
 496		return -EINVAL;
 497
 498	for (i = 0; i < tk->tp.nr_args; i++)
 499		traceprobe_update_arg(&tk->tp.args[i]);
 500
 501	/* Set/clear disabled flag according to tp->flag */
 502	if (trace_probe_is_enabled(&tk->tp))
 503		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
 504	else
 505		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
 506
 507	if (trace_kprobe_is_return(tk))
 508		ret = register_kretprobe(&tk->rp);
 509	else
 510		ret = register_kprobe(&tk->rp.kp);
 511
 512	if (ret == 0)
 513		tk->tp.flags |= TP_FLAG_REGISTERED;
 514	else {
 515		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
			pr_warn("This probe might be able to register after the target module is loaded. Continue.\n");
 517			ret = 0;
 518		} else if (ret == -EILSEQ) {
			pr_warn("Probing address (0x%p) is not on an instruction boundary.\n",
 520				tk->rp.kp.addr);
 521			ret = -EINVAL;
 522		}
 523	}
 524
 525	return ret;
 526}
 527
 528/* Internal unregister function - just handle k*probes and flags */
 529static void __unregister_trace_kprobe(struct trace_kprobe *tk)
 530{
 531	if (trace_probe_is_registered(&tk->tp)) {
 532		if (trace_kprobe_is_return(tk))
 533			unregister_kretprobe(&tk->rp);
 534		else
 535			unregister_kprobe(&tk->rp.kp);
 536		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/*
		 * Cleanup kprobe for reuse: register_kprobe() resolved
		 * symbol_name into addr, so clear addr again to allow the
		 * probe to be re-registered (e.g. when its module reloads).
		 */
 538		if (tk->rp.kp.symbol_name)
 539			tk->rp.kp.addr = NULL;
 540	}
 541}
 542
/* Unregister a trace_probe and probe_event: call with probe_lock held */
 544static int unregister_trace_kprobe(struct trace_kprobe *tk)
 545{
 546	/* Enabled event can not be unregistered */
 547	if (trace_probe_is_enabled(&tk->tp))
 548		return -EBUSY;
 549
 550	/* Will fail if probe is being used by ftrace or perf */
 551	if (unregister_kprobe_event(tk))
 552		return -EBUSY;
 553
 554	__unregister_trace_kprobe(tk);
 555	list_del(&tk->list);
 556
 557	return 0;
 558}
 559
 560/* Register a trace_probe and probe_event */
 561static int register_trace_kprobe(struct trace_kprobe *tk)
 562{
 563	struct trace_kprobe *old_tk;
 564	int ret;
 565
 566	mutex_lock(&probe_lock);
 567
	/* Delete the old event if one with the same name exists */
 569	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
 570			tk->tp.call.class->system);
 571	if (old_tk) {
 572		ret = unregister_trace_kprobe(old_tk);
 573		if (ret < 0)
 574			goto end;
 575		free_trace_kprobe(old_tk);
 576	}
 577
 578	/* Register new event */
 579	ret = register_kprobe_event(tk);
 580	if (ret) {
 581		pr_warn("Failed to register probe event(%d)\n", ret);
 582		goto end;
 583	}
 584
 585	/* Register k*probe */
 586	ret = __register_trace_kprobe(tk);
 587	if (ret < 0)
 588		unregister_kprobe_event(tk);
 589	else
 590		list_add_tail(&tk->list, &probe_list);
 591
 592end:
 593	mutex_unlock(&probe_lock);
 594	return ret;
 595}
 596
/* Module notifier callback: re-register probes on a loading module */
 598static int trace_kprobe_module_callback(struct notifier_block *nb,
 599				       unsigned long val, void *data)
 600{
 601	struct module *mod = data;
 602	struct trace_kprobe *tk;
 603	int ret;
 604
 605	if (val != MODULE_STATE_COMING)
 606		return NOTIFY_DONE;
 607
	/* Update probes on the coming module */
 609	mutex_lock(&probe_lock);
 610	list_for_each_entry(tk, &probe_list, list) {
 611		if (trace_kprobe_within_module(tk, mod)) {
			/* No need to check busy - this probe should already be gone. */
 613			__unregister_trace_kprobe(tk);
 614			ret = __register_trace_kprobe(tk);
 615			if (ret)
 616				pr_warn("Failed to re-register probe %s on %s: %d\n",
 617					trace_event_name(&tk->tp.call),
 618					mod->name, ret);
 619		}
 620	}
 621	mutex_unlock(&probe_lock);
 622
 623	return NOTIFY_DONE;
 624}
 625
 626static struct notifier_block trace_kprobe_module_nb = {
 627	.notifier_call = trace_kprobe_module_callback,
 628	.priority = 1	/* Invoked after kprobe module callback */
 629};
 630
 631/* Convert certain expected symbols into '_' when generating event names */
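/*
 * ':' comes from MOD:SYM notation and '.' from compiler-generated symbol
 * suffixes; e.g. (illustrative) an auto-generated "p_ext4:ext4_sync_file_0"
 * becomes "p_ext4_ext4_sync_file_0".
 */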
 632static inline void sanitize_event_name(char *name)
 633{
 634	while (*name++ != '\0')
 635		if (*name == ':' || *name == '.')
 636			*name = '_';
 637}
 638
 639static int create_trace_kprobe(int argc, char **argv)
 640{
 641	/*
 642	 * Argument syntax:
 643	 *  - Add kprobe:
 644	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
 645	 *  - Add kretprobe:
 646	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
 647	 * Fetch args:
 648	 *  $retval	: fetch return value
 649	 *  $stack	: fetch stack address
 650	 *  $stackN	: fetch Nth of stack (N:0-)
 651	 *  $comm       : fetch current task comm
 652	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
 653	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
 654	 *  %REG	: fetch register REG
 655	 * Dereferencing memory fetch:
 656	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
 657	 * Alias name of args:
 658	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
 659	 * Type of args:
 660	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
 661	 */
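	/*
	 * Illustrative tracefs usage (a sketch; assumes an x86-64 kernel,
	 * where %di carries the first argument, and the usual debugfs
	 * mount point):
	 *   echo 'p:myopen do_sys_open dfd=%di' \
	 *        >> /sys/kernel/debug/tracing/kprobe_events
	 *   echo 'r:myopenret do_sys_open $retval' \
	 *        >> /sys/kernel/debug/tracing/kprobe_events
	 *   echo '-:myopen' >> /sys/kernel/debug/tracing/kprobe_events
	 */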
 662	struct trace_kprobe *tk;
 663	int i, ret = 0;
 664	bool is_return = false, is_delete = false;
 665	char *symbol = NULL, *event = NULL, *group = NULL;
 666	int maxactive = 0;
 667	char *arg;
 668	long offset = 0;
 669	void *addr = NULL;
 670	char buf[MAX_EVENT_NAME_LEN];
 671
 672	/* argc must be >= 1 */
 673	if (argv[0][0] == 'p')
 674		is_return = false;
 675	else if (argv[0][0] == 'r')
 676		is_return = true;
 677	else if (argv[0][0] == '-')
 678		is_delete = true;
 679	else {
		pr_info("Probe definition must start with 'p', 'r' or '-'.\n");
 682		return -EINVAL;
 683	}
 684
 685	event = strchr(&argv[0][1], ':');
 686	if (event) {
 687		event[0] = '\0';
 688		event++;
 689	}
 690	if (is_return && isdigit(argv[0][1])) {
 691		ret = kstrtouint(&argv[0][1], 0, &maxactive);
 692		if (ret) {
 693			pr_info("Failed to parse maxactive.\n");
 694			return ret;
 695		}
		/*
		 * kretprobe instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
 699		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
 700			pr_info("Maxactive is too big (%d > %d).\n",
 701				maxactive, KRETPROBE_MAXACTIVE_MAX);
 702			return -E2BIG;
 703		}
 704	}
 705
 706	if (event) {
 707		if (strchr(event, '/')) {
 708			group = event;
 709			event = strchr(group, '/') + 1;
 710			event[-1] = '\0';
 711			if (strlen(group) == 0) {
 712				pr_info("Group name is not specified\n");
 713				return -EINVAL;
 714			}
 715		}
 716		if (strlen(event) == 0) {
 717			pr_info("Event name is not specified\n");
 718			return -EINVAL;
 719		}
 720	}
 721	if (!group)
 722		group = KPROBE_EVENT_SYSTEM;
 723
 724	if (is_delete) {
 725		if (!event) {
 726			pr_info("Delete command needs an event name.\n");
 727			return -EINVAL;
 728		}
 729		mutex_lock(&probe_lock);
 730		tk = find_trace_kprobe(event, group);
 731		if (!tk) {
 732			mutex_unlock(&probe_lock);
 733			pr_info("Event %s/%s doesn't exist.\n", group, event);
 734			return -ENOENT;
 735		}
 736		/* delete an event */
 737		ret = unregister_trace_kprobe(tk);
 738		if (ret == 0)
 739			free_trace_kprobe(tk);
 740		mutex_unlock(&probe_lock);
 741		return ret;
 742	}
 743
 744	if (argc < 2) {
 745		pr_info("Probe point is not specified.\n");
 746		return -EINVAL;
 747	}
 748
	/*
	 * Try to parse an address. If that fails, try to read the
	 * input as a symbol.
	 */
 751	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
 752		/* a symbol specified */
 753		symbol = argv[1];
 754		/* TODO: support .init module functions */
 755		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret || offset < 0 || offset > UINT_MAX) {
			pr_info("Failed to parse either an address or a symbol.\n");
			/* Don't report success when only the range check failed */
			return ret ? ret : -EINVAL;
		}
 759		}
 760		if (offset && is_return &&
 761		    !kprobe_on_func_entry(NULL, symbol, offset)) {
 762			pr_info("Given offset is not valid for return probe.\n");
 763			return -EINVAL;
 764		}
 765	}
 766	argc -= 2; argv += 2;
 767
 768	/* setup a probe */
 769	if (!event) {
 770		/* Make a new event name */
 771		if (symbol)
 772			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
 773				 is_return ? 'r' : 'p', symbol, offset);
 774		else
 775			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
 776				 is_return ? 'r' : 'p', addr);
 777		sanitize_event_name(buf);
 778		event = buf;
 779	}
 780	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
 781			       argc, is_return);
 782	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe (%d)\n",
 784			(int)PTR_ERR(tk));
 785		return PTR_ERR(tk);
 786	}
 787
 788	/* parse arguments */
 789	ret = 0;
 790	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
 791		struct probe_arg *parg = &tk->tp.args[i];
 792
 793		/* Increment count for freeing args in error case */
 794		tk->tp.nr_args++;
 795
 796		/* Parse argument name */
 797		arg = strchr(argv[i], '=');
 798		if (arg) {
 799			*arg++ = '\0';
 800			parg->name = kstrdup(argv[i], GFP_KERNEL);
 801		} else {
 802			arg = argv[i];
 803			/* If argument name is omitted, set "argN" */
 804			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
 805			parg->name = kstrdup(buf, GFP_KERNEL);
 806		}
 807
 808		if (!parg->name) {
 809			pr_info("Failed to allocate argument[%d] name.\n", i);
 810			ret = -ENOMEM;
 811			goto error;
 812		}
 813
 814		if (!is_good_name(parg->name)) {
 815			pr_info("Invalid argument[%d] name: %s\n",
 816				i, parg->name);
 817			ret = -EINVAL;
 818			goto error;
 819		}
 820
 821		if (traceprobe_conflict_field_name(parg->name,
 822							tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with another field.\n",
				i, argv[i]);
 825			ret = -EINVAL;
 826			goto error;
 827		}
 828
 829		/* Parse fetch argument */
 830		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
 831						is_return, true,
 832						kprobes_fetch_type_table);
 833		if (ret) {
 834			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
 835			goto error;
 836		}
 837	}
 838
 839	ret = register_trace_kprobe(tk);
 840	if (ret)
 841		goto error;
 842	return 0;
 843
 844error:
 845	free_trace_kprobe(tk);
 846	return ret;
 847}
 848
 849static int release_all_trace_kprobes(void)
 850{
 851	struct trace_kprobe *tk;
 852	int ret = 0;
 853
 854	mutex_lock(&probe_lock);
 855	/* Ensure no probe is in use. */
 856	list_for_each_entry(tk, &probe_list, list)
 857		if (trace_probe_is_enabled(&tk->tp)) {
 858			ret = -EBUSY;
 859			goto end;
 860		}
 861	/* TODO: Use batch unregistration */
 862	while (!list_empty(&probe_list)) {
 863		tk = list_entry(probe_list.next, struct trace_kprobe, list);
 864		ret = unregister_trace_kprobe(tk);
 865		if (ret)
 866			goto end;
 867		free_trace_kprobe(tk);
 868	}
 869
 870end:
 871	mutex_unlock(&probe_lock);
 872
 873	return ret;
 874}
 875
 876/* Probes listing interfaces */
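/*
 * Each probe is listed in its definition syntax, e.g. (illustrative):
 *   p:kprobes/myopen do_sys_open dfd=%di
 */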
 877static void *probes_seq_start(struct seq_file *m, loff_t *pos)
 878{
 879	mutex_lock(&probe_lock);
 880	return seq_list_start(&probe_list, *pos);
 881}
 882
 883static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
 884{
 885	return seq_list_next(v, &probe_list, pos);
 886}
 887
 888static void probes_seq_stop(struct seq_file *m, void *v)
 889{
 890	mutex_unlock(&probe_lock);
 891}
 892
 893static int probes_seq_show(struct seq_file *m, void *v)
 894{
 895	struct trace_kprobe *tk = v;
 896	int i;
 897
 898	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
 899	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
 900			trace_event_name(&tk->tp.call));
 901
 902	if (!tk->symbol)
 903		seq_printf(m, " 0x%p", tk->rp.kp.addr);
 904	else if (tk->rp.kp.offset)
 905		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
 906			   tk->rp.kp.offset);
 907	else
 908		seq_printf(m, " %s", trace_kprobe_symbol(tk));
 909
 910	for (i = 0; i < tk->tp.nr_args; i++)
 911		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
 912	seq_putc(m, '\n');
 913
 914	return 0;
 915}
 916
 917static const struct seq_operations probes_seq_op = {
 918	.start  = probes_seq_start,
 919	.next   = probes_seq_next,
 920	.stop   = probes_seq_stop,
 921	.show   = probes_seq_show
 922};
 923
 924static int probes_open(struct inode *inode, struct file *file)
 925{
 926	int ret;
 927
 928	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 929		ret = release_all_trace_kprobes();
 930		if (ret < 0)
 931			return ret;
 932	}
 933
 934	return seq_open(file, &probes_seq_op);
 935}
 936
 937static ssize_t probes_write(struct file *file, const char __user *buffer,
 938			    size_t count, loff_t *ppos)
 939{
 940	return trace_parse_run_command(file, buffer, count, ppos,
 941				       create_trace_kprobe);
 942}
 943
 944static const struct file_operations kprobe_events_ops = {
 945	.owner          = THIS_MODULE,
 946	.open           = probes_open,
 947	.read           = seq_read,
 948	.llseek         = seq_lseek,
 949	.release        = seq_release,
 950	.write		= probes_write,
 951};
 952
 953/* Probes profiling interfaces */
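/* kprobe_profile columns: event name, hit count, missed count */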
 954static int probes_profile_seq_show(struct seq_file *m, void *v)
 955{
 956	struct trace_kprobe *tk = v;
 957
 958	seq_printf(m, "  %-44s %15lu %15lu\n",
 959		   trace_event_name(&tk->tp.call),
 960		   trace_kprobe_nhit(tk),
 961		   tk->rp.kp.nmissed);
 962
 963	return 0;
 964}
 965
 966static const struct seq_operations profile_seq_op = {
 967	.start  = probes_seq_start,
 968	.next   = probes_seq_next,
 969	.stop   = probes_seq_stop,
 970	.show   = probes_profile_seq_show
 971};
 972
 973static int profile_open(struct inode *inode, struct file *file)
 974{
 975	return seq_open(file, &profile_seq_op);
 976}
 977
 978static const struct file_operations kprobe_profile_ops = {
 979	.owner          = THIS_MODULE,
 980	.open           = profile_open,
 981	.read           = seq_read,
 982	.llseek         = seq_lseek,
 983	.release        = seq_release,
 984};
 985
 986/* Kprobe handler */
 987static nokprobe_inline void
 988__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
 989		    struct trace_event_file *trace_file)
 990{
 991	struct kprobe_trace_entry_head *entry;
 992	struct ring_buffer_event *event;
 993	struct ring_buffer *buffer;
 994	int size, dsize, pc;
 995	unsigned long irq_flags;
 996	struct trace_event_call *call = &tk->tp.call;
 997
 998	WARN_ON(call != trace_file->event_call);
 999
1000	if (trace_trigger_soft_disabled(trace_file))
1001		return;
1002
1003	local_save_flags(irq_flags);
1004	pc = preempt_count();
1005
1006	dsize = __get_data_size(&tk->tp, regs);
1007	size = sizeof(*entry) + tk->tp.size + dsize;
1008
1009	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1010						call->event.type,
1011						size, irq_flags, pc);
1012	if (!event)
1013		return;
1014
1015	entry = ring_buffer_event_data(event);
1016	entry->ip = (unsigned long)tk->rp.kp.addr;
1017	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1018
1019	event_trigger_unlock_commit_regs(trace_file, buffer, event,
1020					 entry, irq_flags, pc, regs);
1021}
1022
1023static void
1024kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1025{
1026	struct event_file_link *link;
1027
1028	list_for_each_entry_rcu(link, &tk->tp.files, list)
1029		__kprobe_trace_func(tk, regs, link->file);
1030}
1031NOKPROBE_SYMBOL(kprobe_trace_func);
1032
1033/* Kretprobe handler */
1034static nokprobe_inline void
1035__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1036		       struct pt_regs *regs,
1037		       struct trace_event_file *trace_file)
1038{
1039	struct kretprobe_trace_entry_head *entry;
1040	struct ring_buffer_event *event;
1041	struct ring_buffer *buffer;
1042	int size, pc, dsize;
1043	unsigned long irq_flags;
1044	struct trace_event_call *call = &tk->tp.call;
1045
1046	WARN_ON(call != trace_file->event_call);
1047
1048	if (trace_trigger_soft_disabled(trace_file))
1049		return;
1050
1051	local_save_flags(irq_flags);
1052	pc = preempt_count();
1053
1054	dsize = __get_data_size(&tk->tp, regs);
1055	size = sizeof(*entry) + tk->tp.size + dsize;
1056
1057	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1058						call->event.type,
1059						size, irq_flags, pc);
1060	if (!event)
1061		return;
1062
1063	entry = ring_buffer_event_data(event);
1064	entry->func = (unsigned long)tk->rp.kp.addr;
1065	entry->ret_ip = (unsigned long)ri->ret_addr;
1066	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1067
1068	event_trigger_unlock_commit_regs(trace_file, buffer, event,
1069					 entry, irq_flags, pc, regs);
1070}
1071
1072static void
1073kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1074		     struct pt_regs *regs)
1075{
1076	struct event_file_link *link;
1077
1078	list_for_each_entry_rcu(link, &tk->tp.files, list)
1079		__kretprobe_trace_func(tk, ri, regs, link->file);
1080}
1081NOKPROBE_SYMBOL(kretprobe_trace_func);
1082
1083/* Event entry printers */
1084static enum print_line_t
1085print_kprobe_event(struct trace_iterator *iter, int flags,
1086		   struct trace_event *event)
1087{
1088	struct kprobe_trace_entry_head *field;
1089	struct trace_seq *s = &iter->seq;
1090	struct trace_probe *tp;
1091	u8 *data;
1092	int i;
1093
1094	field = (struct kprobe_trace_entry_head *)iter->ent;
1095	tp = container_of(event, struct trace_probe, call.event);
1096
1097	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
1098
1099	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1100		goto out;
1101
1102	trace_seq_putc(s, ')');
1103
1104	data = (u8 *)&field[1];
1105	for (i = 0; i < tp->nr_args; i++)
1106		if (!tp->args[i].type->print(s, tp->args[i].name,
1107					     data + tp->args[i].offset, field))
1108			goto out;
1109
1110	trace_seq_putc(s, '\n');
1111 out:
1112	return trace_handle_return(s);
1113}
1114
1115static enum print_line_t
1116print_kretprobe_event(struct trace_iterator *iter, int flags,
1117		      struct trace_event *event)
1118{
1119	struct kretprobe_trace_entry_head *field;
1120	struct trace_seq *s = &iter->seq;
1121	struct trace_probe *tp;
1122	u8 *data;
1123	int i;
1124
1125	field = (struct kretprobe_trace_entry_head *)iter->ent;
1126	tp = container_of(event, struct trace_probe, call.event);
1127
1128	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
1129
1130	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1131		goto out;
1132
1133	trace_seq_puts(s, " <- ");
1134
1135	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1136		goto out;
1137
1138	trace_seq_putc(s, ')');
1139
1140	data = (u8 *)&field[1];
1141	for (i = 0; i < tp->nr_args; i++)
1142		if (!tp->args[i].type->print(s, tp->args[i].name,
1143					     data + tp->args[i].offset, field))
1144			goto out;
1145
1146	trace_seq_putc(s, '\n');
1147
1148 out:
1149	return trace_handle_return(s);
1150}
1151
1152
1153static int kprobe_event_define_fields(struct trace_event_call *event_call)
1154{
1155	int ret, i;
1156	struct kprobe_trace_entry_head field;
1157	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1158
1159	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1160	/* Set argument names as fields */
1161	for (i = 0; i < tk->tp.nr_args; i++) {
1162		struct probe_arg *parg = &tk->tp.args[i];
1163
1164		ret = trace_define_field(event_call, parg->type->fmttype,
1165					 parg->name,
1166					 sizeof(field) + parg->offset,
1167					 parg->type->size,
1168					 parg->type->is_signed,
1169					 FILTER_OTHER);
1170		if (ret)
1171			return ret;
1172	}
1173	return 0;
1174}
1175
1176static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1177{
1178	int ret, i;
1179	struct kretprobe_trace_entry_head field;
1180	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1181
1182	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1183	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1184	/* Set argument names as fields */
1185	for (i = 0; i < tk->tp.nr_args; i++) {
1186		struct probe_arg *parg = &tk->tp.args[i];
1187
1188		ret = trace_define_field(event_call, parg->type->fmttype,
1189					 parg->name,
1190					 sizeof(field) + parg->offset,
1191					 parg->type->size,
1192					 parg->type->is_signed,
1193					 FILTER_OTHER);
1194		if (ret)
1195			return ret;
1196	}
1197	return 0;
1198}
1199
1200#ifdef CONFIG_PERF_EVENTS
1201
1202/* Kprobe profile handler */
1203static int
1204kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1205{
1206	struct trace_event_call *call = &tk->tp.call;
1207	struct kprobe_trace_entry_head *entry;
1208	struct hlist_head *head;
1209	int size, __size, dsize;
1210	int rctx;
1211
1212	if (bpf_prog_array_valid(call)) {
1213		unsigned long orig_ip = instruction_pointer(regs);
1214		int ret;
1215
1216		ret = trace_call_bpf(call, regs);
1217
1218		/*
1219		 * We need to check and see if we modified the pc of the
1220		 * pt_regs, and if so clear the kprobe and return 1 so that we
1221		 * don't do the single stepping.
1222		 * The ftrace kprobe handler leaves it up to us to re-enable
1223		 * preemption here before returning if we've modified the ip.
1224		 */
1225		if (orig_ip != instruction_pointer(regs)) {
1226			reset_current_kprobe();
1227			preempt_enable_no_resched();
1228			return 1;
1229		}
1230		if (!ret)
1231			return 0;
1232	}
1233
1234	head = this_cpu_ptr(call->perf_events);
1235	if (hlist_empty(head))
1236		return 0;
1237
1238	dsize = __get_data_size(&tk->tp, regs);
1239	__size = sizeof(*entry) + tk->tp.size + dsize;
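	/*
	 * A perf raw sample is preceded by a u32 size field; pad so that
	 * header + payload stays u64-aligned, then drop the header size
	 * again (the same adjustment other perf trace handlers make).
	 */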
1240	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1241	size -= sizeof(u32);
1242
1243	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1244	if (!entry)
1245		return 0;
1246
1247	entry->ip = (unsigned long)tk->rp.kp.addr;
1248	memset(&entry[1], 0, dsize);
1249	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1250	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1251			      head, NULL);
1252	return 0;
1253}
1254NOKPROBE_SYMBOL(kprobe_perf_func);
1255
1256/* Kretprobe profile handler */
1257static void
1258kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1259		    struct pt_regs *regs)
1260{
1261	struct trace_event_call *call = &tk->tp.call;
1262	struct kretprobe_trace_entry_head *entry;
1263	struct hlist_head *head;
1264	int size, __size, dsize;
1265	int rctx;
1266
1267	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1268		return;
1269
1270	head = this_cpu_ptr(call->perf_events);
1271	if (hlist_empty(head))
1272		return;
1273
1274	dsize = __get_data_size(&tk->tp, regs);
1275	__size = sizeof(*entry) + tk->tp.size + dsize;
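	/* Same u32-header alignment adjustment as in kprobe_perf_func() */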
1276	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1277	size -= sizeof(u32);
1278
1279	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1280	if (!entry)
1281		return;
1282
1283	entry->func = (unsigned long)tk->rp.kp.addr;
1284	entry->ret_ip = (unsigned long)ri->ret_addr;
1285	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1286	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1287			      head, NULL);
1288}
1289NOKPROBE_SYMBOL(kretprobe_perf_func);
1290#endif	/* CONFIG_PERF_EVENTS */
1291
/*
 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() calls enable_trace_kprobe/disable_trace_kprobe
 * locklessly, but we can't race with this __init function.
 */
1298static int kprobe_register(struct trace_event_call *event,
1299			   enum trace_reg type, void *data)
1300{
1301	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
1302	struct trace_event_file *file = data;
1303
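	/* ftrace passes the trace_event_file in data; the perf cases use NULL */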
1304	switch (type) {
1305	case TRACE_REG_REGISTER:
1306		return enable_trace_kprobe(tk, file);
1307	case TRACE_REG_UNREGISTER:
1308		return disable_trace_kprobe(tk, file);
1309
1310#ifdef CONFIG_PERF_EVENTS
1311	case TRACE_REG_PERF_REGISTER:
1312		return enable_trace_kprobe(tk, NULL);
1313	case TRACE_REG_PERF_UNREGISTER:
1314		return disable_trace_kprobe(tk, NULL);
1315	case TRACE_REG_PERF_OPEN:
1316	case TRACE_REG_PERF_CLOSE:
1317	case TRACE_REG_PERF_ADD:
1318	case TRACE_REG_PERF_DEL:
1319		return 0;
1320#endif
1321	}
1322	return 0;
1323}
1324
1325static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1326{
1327	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1328	int ret = 0;
1329
1330	raw_cpu_inc(*tk->nhit);
1331
1332	if (tk->tp.flags & TP_FLAG_TRACE)
1333		kprobe_trace_func(tk, regs);
1334#ifdef CONFIG_PERF_EVENTS
1335	if (tk->tp.flags & TP_FLAG_PROFILE)
1336		ret = kprobe_perf_func(tk, regs);
1337#endif
1338	return ret;
1339}
1340NOKPROBE_SYMBOL(kprobe_dispatcher);
1341
1342static int
1343kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1344{
1345	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1346
1347	raw_cpu_inc(*tk->nhit);
1348
1349	if (tk->tp.flags & TP_FLAG_TRACE)
1350		kretprobe_trace_func(tk, ri, regs);
1351#ifdef CONFIG_PERF_EVENTS
1352	if (tk->tp.flags & TP_FLAG_PROFILE)
1353		kretprobe_perf_func(tk, ri, regs);
1354#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
1356}
1357NOKPROBE_SYMBOL(kretprobe_dispatcher);
1358
1359static struct trace_event_functions kretprobe_funcs = {
1360	.trace		= print_kretprobe_event
1361};
1362
1363static struct trace_event_functions kprobe_funcs = {
1364	.trace		= print_kprobe_event
1365};
1366
1367static inline void init_trace_event_call(struct trace_kprobe *tk,
1368					 struct trace_event_call *call)
1369{
1370	INIT_LIST_HEAD(&call->class->fields);
1371	if (trace_kprobe_is_return(tk)) {
1372		call->event.funcs = &kretprobe_funcs;
1373		call->class->define_fields = kretprobe_event_define_fields;
1374	} else {
1375		call->event.funcs = &kprobe_funcs;
1376		call->class->define_fields = kprobe_event_define_fields;
1377	}
1378
1379	call->flags = TRACE_EVENT_FL_KPROBE;
1380	call->class->reg = kprobe_register;
1381	call->data = tk;
1382}
1383
1384static int register_kprobe_event(struct trace_kprobe *tk)
1385{
1386	struct trace_event_call *call = &tk->tp.call;
1387	int ret = 0;
1388
1389	init_trace_event_call(tk, call);
1390
1391	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
1392		return -ENOMEM;
1393	ret = register_trace_event(&call->event);
1394	if (!ret) {
1395		kfree(call->print_fmt);
1396		return -ENODEV;
1397	}
1398	ret = trace_add_event_call(call);
1399	if (ret) {
1400		pr_info("Failed to register kprobe event: %s\n",
1401			trace_event_name(call));
1402		kfree(call->print_fmt);
1403		unregister_trace_event(&call->event);
1404	}
1405	return ret;
1406}
1407
1408static int unregister_kprobe_event(struct trace_kprobe *tk)
1409{
1410	int ret;
1411
1412	/* tp->event is unregistered in trace_remove_event_call() */
1413	ret = trace_remove_event_call(&tk->tp.call);
1414	if (!ret)
1415		kfree(tk->tp.call.print_fmt);
1416	return ret;
1417}
1418
1419#ifdef CONFIG_PERF_EVENTS
1420/* create a trace_kprobe, but don't add it to global lists */
1421struct trace_event_call *
1422create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1423			  bool is_return)
1424{
1425	struct trace_kprobe *tk;
1426	int ret;
1427	char *event;
1428
1429	/*
1430	 * local trace_kprobes are not added to probe_list, so they are never
1431	 * searched in find_trace_kprobe(). Therefore, there is no concern of
1432	 * duplicated name here.
1433	 */
1434	event = func ? func : "DUMMY_EVENT";
1435
1436	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1437				offs, 0 /* maxactive */, 0 /* nargs */,
1438				is_return);
1439
1440	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe (%d)\n",
1442			(int)PTR_ERR(tk));
1443		return ERR_CAST(tk);
1444	}
1445
1446	init_trace_event_call(tk, &tk->tp.call);
1447
1448	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
1449		ret = -ENOMEM;
1450		goto error;
1451	}
1452
1453	ret = __register_trace_kprobe(tk);
1454	if (ret < 0)
1455		goto error;
1456
1457	return &tk->tp.call;
1458error:
1459	free_trace_kprobe(tk);
1460	return ERR_PTR(ret);
1461}
1462
1463void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1464{
1465	struct trace_kprobe *tk;
1466
1467	tk = container_of(event_call, struct trace_kprobe, tp.call);
1468
1469	if (trace_probe_is_enabled(&tk->tp)) {
1470		WARN_ON(1);
1471		return;
1472	}
1473
1474	__unregister_trace_kprobe(tk);
1475	free_trace_kprobe(tk);
1476}
1477#endif /* CONFIG_PERF_EVENTS */
1478
1479/* Make a tracefs interface for controlling probe points */
1480static __init int init_kprobe_trace(void)
1481{
1482	struct dentry *d_tracer;
1483	struct dentry *entry;
1484
1485	if (register_module_notifier(&trace_kprobe_module_nb))
1486		return -EINVAL;
1487
1488	d_tracer = tracing_init_dentry();
1489	if (IS_ERR(d_tracer))
1490		return 0;
1491
	/* Event list interface */
	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1498
1499	/* Profile interface */
1500	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
1501				    NULL, &kprobe_profile_ops);
1502
1503	if (!entry)
1504		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1505	return 0;
1506}
1507fs_initcall(init_kprobe_trace);
1508
1509
1510#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * The "__used" attribute keeps gcc from removing the function symbol
 * from the kallsyms table. "noinline" makes sure that there
 * isn't an inlined version used by the test method below.
 */
1516static __used __init noinline int
1517kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
1518{
1519	return a1 + a2 + a3 + a4 + a5 + a6;
1520}
1521
1522static __init struct trace_event_file *
1523find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1524{
1525	struct trace_event_file *file;
1526
1527	list_for_each_entry(file, &tr->events, list)
1528		if (file->event_call == &tk->tp.call)
1529			return file;
1530
1531	return NULL;
1532}
1533
/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this locklessly.
 */
1538static __init int kprobe_trace_self_tests_init(void)
1539{
1540	int ret, warn = 0;
1541	int (*target)(int, int, int, int, int, int);
1542	struct trace_kprobe *tk;
1543	struct trace_event_file *file;
1544
1545	if (tracing_is_disabled())
1546		return -ENODEV;
1547
1548	target = kprobe_trace_selftest_target;
1549
1550	pr_info("Testing kprobe tracing: ");
1551
1552	ret = trace_run_command("p:testprobe kprobe_trace_selftest_target "
1553				"$stack $stack0 +0($stack)",
1554				create_trace_kprobe);
1555	if (WARN_ON_ONCE(ret)) {
1556		pr_warn("error on probing function entry.\n");
1557		warn++;
1558	} else {
1559		/* Enable trace point */
1560		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1561		if (WARN_ON_ONCE(tk == NULL)) {
1562			pr_warn("error on getting new probe.\n");
1563			warn++;
1564		} else {
1565			file = find_trace_probe_file(tk, top_trace_array());
1566			if (WARN_ON_ONCE(file == NULL)) {
1567				pr_warn("error on getting probe file.\n");
1568				warn++;
1569			} else
1570				enable_trace_kprobe(tk, file);
1571		}
1572	}
1573
1574	ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target "
1575				"$retval", create_trace_kprobe);
1576	if (WARN_ON_ONCE(ret)) {
1577		pr_warn("error on probing function return.\n");
1578		warn++;
1579	} else {
1580		/* Enable trace point */
1581		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1582		if (WARN_ON_ONCE(tk == NULL)) {
1583			pr_warn("error on getting 2nd new probe.\n");
1584			warn++;
1585		} else {
1586			file = find_trace_probe_file(tk, top_trace_array());
1587			if (WARN_ON_ONCE(file == NULL)) {
1588				pr_warn("error on getting probe file.\n");
1589				warn++;
1590			} else
1591				enable_trace_kprobe(tk, file);
1592		}
1593	}
1594
1595	if (warn)
1596		goto end;
1597
1598	ret = target(1, 2, 3, 4, 5, 6);
1599
1600	/*
1601	 * Not expecting an error here, the check is only to prevent the
1602	 * optimizer from removing the call to target() as otherwise there
1603	 * are no side-effects and the call is never performed.
1604	 */
1605	if (ret != 21)
1606		warn++;
1607
	/* Disable the trace points before removing them */
1609	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1610	if (WARN_ON_ONCE(tk == NULL)) {
1611		pr_warn("error on getting test probe.\n");
1612		warn++;
1613	} else {
1614		if (trace_kprobe_nhit(tk) != 1) {
1615			pr_warn("incorrect number of testprobe hits\n");
1616			warn++;
1617		}
1618
1619		file = find_trace_probe_file(tk, top_trace_array());
1620		if (WARN_ON_ONCE(file == NULL)) {
1621			pr_warn("error on getting probe file.\n");
1622			warn++;
1623		} else
1624			disable_trace_kprobe(tk, file);
1625	}
1626
1627	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1628	if (WARN_ON_ONCE(tk == NULL)) {
1629		pr_warn("error on getting 2nd test probe.\n");
1630		warn++;
1631	} else {
1632		if (trace_kprobe_nhit(tk) != 1) {
1633			pr_warn("incorrect number of testprobe2 hits\n");
1634			warn++;
1635		}
1636
1637		file = find_trace_probe_file(tk, top_trace_array());
1638		if (WARN_ON_ONCE(file == NULL)) {
1639			pr_warn("error on getting probe file.\n");
1640			warn++;
1641		} else
1642			disable_trace_kprobe(tk, file);
1643	}
1644
1645	ret = trace_run_command("-:testprobe", create_trace_kprobe);
1646	if (WARN_ON_ONCE(ret)) {
1647		pr_warn("error on deleting a probe.\n");
1648		warn++;
1649	}
1650
1651	ret = trace_run_command("-:testprobe2", create_trace_kprobe);
1652	if (WARN_ON_ONCE(ret)) {
1653		pr_warn("error on deleting a probe.\n");
1654		warn++;
1655	}
1656
1657end:
1658	release_all_trace_kprobes();
1659	/*
1660	 * Wait for the optimizer work to finish. Otherwise it might fiddle
1661	 * with probes in already freed __init text.
1662	 */
1663	wait_for_kprobe_optimizer();
1664	if (warn)
		pr_cont("NG: Some tests failed. Please check them.\n");
1666	else
1667		pr_cont("OK\n");
1668	return 0;
1669}
1670
1671late_initcall(kprobe_trace_self_tests_init);
1672
1673#endif