v4.10.11
 
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/module.h>
#include <linux/uaccess.h>

#include "trace_probe.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
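
/*
 * Illustrative usage of this interface (from Documentation/trace/
 * kprobetrace.txt; the tracefs mount point may differ per system):
 *
 *   echo 'p:myprobe do_sys_open dfd=%ax filename=%dx' > \
 *		/sys/kernel/debug/tracing/kprobe_events
 *   echo 'r:myretprobe do_sys_open $retval' >> \
 *		/sys/kernel/debug/tracing/kprobe_events
 *   echo '-:myprobe' >> /sys/kernel/debug/tracing/kprobe_events
 */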

/**
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

#define SIZEOF_TRACE_KPROBE(n)				\
	(offsetof(struct trace_kprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))


static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						 struct module *mod)
{
	int len = strlen(mod->name);
	const char *name = trace_kprobe_symbol(tk);
	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
{
	return !!strchr(trace_kprobe_symbol(tk), ':');
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

/* Memory fetching by symbol */
struct symbol_cache {
	char		*symbol;
	long		offset;
	unsigned long	addr;
};

unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);

	if (sc->addr)
		sc->addr += sc->offset;

	return sc->addr;
}

void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;

	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;
	update_symbol_cache(sc);

	return sc;
}

/*
 * Kprobes-specific fetch functions
 */
#define DEFINE_FETCH_stack(type)					\
static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,		\
					  void *offset, void *dest)	\
{									\
	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
				(unsigned int)((unsigned long)offset));	\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));
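
/*
 * For reference (assuming FETCH_FUNC_NAME(m, t) expands to fetch_##m##_##t,
 * as in trace_probe.h): DEFINE_FETCH_stack(u32) generates fetch_stack_u32(),
 * which reads the Nth word of the kernel stack into *dest.
 */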

DEFINE_BASIC_FETCH_FUNCS(stack)
/* No string on the stack entry */
#define fetch_stack_string	NULL
#define fetch_stack_string_size	NULL

#define DEFINE_FETCH_memory(type)					\
static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,		\
					  void *addr, void *dest)	\
{									\
	type retval;							\
	if (probe_kernel_address(addr, retval))				\
		*(type *)dest = 0;					\
	else								\
		*(type *)dest = retval;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));

DEFINE_BASIC_FETCH_FUNCS(memory)
/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
					    void *addr, void *dest)
{
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	long ret;

	if (!maxlen)
		return;

	/*
	 * Try to get the string again, since it can be changed while
	 * we are probing.
	 */
	ret = strncpy_from_unsafe(dst, addr, maxlen);

	if (ret < 0) {	/* Failed to fetch string */
		dst[0] = '\0';
		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
	} else {
		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
	}
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));

/* Return the length of the string -- including the terminating null byte */
static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
						 void *addr, void *dest)
{
	mm_segment_t old_fs;
	int ret, len = 0;
	u8 c;

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();

	do {
		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	pagefault_enable();
	set_fs(old_fs);

	if (ret < 0)	/* Failed to check the length */
		*(u32 *)dest = 0;
	else
		*(u32 *)dest = len;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));

#define DEFINE_FETCH_symbol(type)					\
void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
{									\
	struct symbol_cache *sc = data;					\
	if (sc->addr)							\
		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
	else								\
		*(type *)dest = 0;					\
}									\
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));

DEFINE_BASIC_FETCH_FUNCS(symbol)
DEFINE_FETCH_symbol(string)
DEFINE_FETCH_symbol(string_size)

/* kprobes don't support file_offset fetch methods */
#define fetch_file_offset_u8		NULL
#define fetch_file_offset_u16		NULL
#define fetch_file_offset_u32		NULL
#define fetch_file_offset_u64		NULL
#define fetch_file_offset_string	NULL
#define fetch_file_offset_string_size	NULL

/* Fetch type information table */
static const struct fetch_type kprobes_fetch_type_table[] = {
	/* Special types */
	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
					sizeof(u32), 1, "__data_loc char[]"),
	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
					string_size, sizeof(u32), 0, "u32"),
	/* Basic types */
	ASSIGN_FETCH_TYPE(u8,  u8,  0),
	ASSIGN_FETCH_TYPE(u16, u16, 0),
	ASSIGN_FETCH_TYPE(u32, u32, 0),
	ASSIGN_FETCH_TYPE(u64, u64, 0),
	ASSIGN_FETCH_TYPE(s8,  u8,  1),
	ASSIGN_FETCH_TYPE(s16, u16, 1),
	ASSIGN_FETCH_TYPE(s32, u32, 1),
	ASSIGN_FETCH_TYPE(s64, u64, 1),
	ASSIGN_FETCH_TYPE_ALIAS(x8,  u8,  u8,  0),
	ASSIGN_FETCH_TYPE_ALIAS(x16, u16, u16, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x32, u32, u32, 0),
	ASSIGN_FETCH_TYPE_ALIAS(x64, u64, u64, 0),

	ASSIGN_FETCH_TYPE_END
};
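
/*
 * Illustrative: these entries back the ":TYPE" modifier in probe
 * definitions, e.g. 'p:myprobe do_sys_open flags=%cx:x32' records the
 * fetched value as a 32-bit hex number (register names are arch-specific).
 */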

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event || !is_good_name(event)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.call.class = &tk->tp.class;
	tk->tp.call.name = kstrdup(event, GFP_KERNEL);
	if (!tk->tp.call.name)
		goto error;

	if (!group || !is_good_name(group)) {
		ret = -EINVAL;
		goto error;
	}

	tk->tp.class.system = kstrdup(group, GFP_KERNEL);
	if (!tk->tp.class.system)
		goto error;

	INIT_LIST_HEAD(&tk->list);
	INIT_LIST_HEAD(&tk->tp.files);
	return tk;
error:
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
	return ERR_PTR(ret);
}

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	int i;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_free_probe_arg(&tk->tp.args[i]);

	kfree(tk->tp.call.class->system);
	kfree(tk->tp.call.name);
	kfree(tk->symbol);
	free_percpu(tk->nhit);
	kfree(tk);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, &probe_list, list)
		if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
		    strcmp(tk->tp.call.class->system, group) == 0)
			return tk;
	return NULL;
}

/*
 * Enable trace_probe
 * if the file is NULL, enable the "perf" handler, else enable the "trace"
 * handler.
 */
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	int ret = 0;

	if (file) {
		struct event_file_link *link;

		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			ret = -ENOMEM;
			goto out;
		}

		link->file = file;
		list_add_tail_rcu(&link->list, &tk->tp.files);

		tk->tp.flags |= TP_FLAG_TRACE;
	} else
		tk->tp.flags |= TP_FLAG_PROFILE;

	if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}
 out:
	return ret;
}

/*
 * Disable trace_probe
 * if the file is NULL, disable the "perf" handler, else disable the "trace"
 * handler.
 */
static int
disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
	struct event_file_link *link = NULL;
	int wait = 0;
	int ret = 0;

	if (file) {
		link = find_event_file_link(&tk->tp, file);
		if (!link) {
			ret = -EINVAL;
			goto out;
		}

		list_del_rcu(&link->list);
		wait = 1;
		if (!list_empty(&tk->tp.files))
			goto out;

		tk->tp.flags &= ~TP_FLAG_TRACE;
	} else
		tk->tp.flags &= ~TP_FLAG_PROFILE;

	if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
		wait = 1;
	}
 out:
	if (wait) {
		/*
		 * Synchronize with kprobe_trace_func/kretprobe_trace_func
		 * to ensure disabled (all running handlers are finished).
		 * This is not only for kfree(), but also the caller,
		 * trace_remove_event_call() supposes it for releasing
		 * event_call related objects, which will be accessed in
		 * the kprobe_trace_func/kretprobe_trace_func.
		 */
		synchronize_sched();
		kfree(link);	/* Ignored if link == NULL */
	}

	return ret;
}

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	if (trace_probe_is_registered(&tk->tp))
		return -EINVAL;

	for (i = 0; i < tk->tp.nr_args; i++)
		traceprobe_update_arg(&tk->tp.args[i]);

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	if (ret == 0)
		tk->tp.flags |= TP_FLAG_REGISTERED;
	else {
		pr_warn("Could not insert probe at %s+%lu: %d\n",
			trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
		if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
			pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
			ret = 0;
		} else if (ret == -EILSEQ) {
			pr_warn("Probing address(0x%p) is not an instruction boundary.\n",
				tk->rp.kp.addr);
			ret = -EINVAL;
		}
	}

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_probe_is_registered(&tk->tp)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		tk->tp.flags &= ~TP_FLAG_REGISTERED;
		/* Cleanup kprobe for reuse */
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event: call with probe_lock held */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

	__unregister_trace_kprobe(tk);
	list_del(&tk->list);

	return 0;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&probe_lock);

	/* Delete old (same name) event if it exists */
	old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
			tk->tp.call.class->system);
	if (old_tk) {
		ret = unregister_trace_kprobe(old_tk);
		if (ret < 0)
			goto end;
		free_trace_kprobe(old_tk);
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		list_add_tail(&tk->list, &probe_list);

end:
	mutex_unlock(&probe_lock);
	return ret;
}

/* Module notifier callback, checking events on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct module *mod = data;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&probe_lock);
	list_for_each_entry(tk, &probe_list, list) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* Don't need to check busy - this should have gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_event_name(&tk->tp.call),
					mod->name, ret);
		}
	}
	mutex_unlock(&probe_lock);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

static int create_trace_kprobe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  $retval	: fetch return value
	 *  $stack	: fetch stack address
	 *  $stackN	: fetch Nth of stack (N:0-)
	 *  $comm       : fetch current task comm
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 */
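	/*
	 * Illustrative examples of commands handled here (register names
	 * are arch-specific):
	 *  p:myprobe do_sys_open dfd=%ax filename=%dx
	 *  r:myretprobe do_sys_open $retval
	 *  -:myprobe
	 */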
	struct trace_kprobe *tk;
	int i, ret = 0;
	bool is_return = false, is_delete = false;
	char *symbol = NULL, *event = NULL, *group = NULL;
	char *arg;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	/* argc must be >= 1 */
	if (argv[0][0] == 'p')
		is_return = false;
	else if (argv[0][0] == 'r')
		is_return = true;
	else if (argv[0][0] == '-')
		is_delete = true;
	else {
		pr_info("Probe definition must be started with 'p', 'r' or"
			" '-'.\n");
		return -EINVAL;
	}

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}
	if (!group)
		group = KPROBE_EVENT_SYSTEM;

	if (is_delete) {
		if (!event) {
			pr_info("Delete command needs an event name.\n");
			return -EINVAL;
		}
		mutex_lock(&probe_lock);
		tk = find_trace_kprobe(event, group);
		if (!tk) {
			mutex_unlock(&probe_lock);
			pr_info("Event %s/%s doesn't exist.\n", group, event);
			return -ENOENT;
		}
		/* delete an event */
		ret = unregister_trace_kprobe(tk);
		if (ret == 0)
			free_trace_kprobe(tk);
		mutex_unlock(&probe_lock);
		return ret;
	}

	if (argc < 2) {
		pr_info("Probe point is not specified.\n");
		return -EINVAL;
	}
	if (isdigit(argv[1][0])) {
		if (is_return) {
			pr_info("Return probe point must be a symbol.\n");
			return -EINVAL;
		}
		/* an address specified */
		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
		if (ret) {
			pr_info("Failed to parse address.\n");
			return ret;
		}
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret) {
			pr_info("Failed to parse symbol.\n");
			return ret;
		}
		if (offset && is_return) {
			pr_info("Return probe must be used without offset.\n");
			return -EINVAL;
		}
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tk)) {
		pr_info("Failed to allocate trace_probe.(%d)\n",
			(int)PTR_ERR(tk));
		return PTR_ERR(tk);
	}

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		/* Increment count for freeing args in error case */
		tk->tp.nr_args++;

		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg) {
			*arg++ = '\0';
			parg->name = kstrdup(argv[i], GFP_KERNEL);
		} else {
			arg = argv[i];
			/* If argument name is omitted, set "argN" */
			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
			parg->name = kstrdup(buf, GFP_KERNEL);
		}

		if (!parg->name) {
			pr_info("Failed to allocate argument[%d] name.\n", i);
			ret = -ENOMEM;
			goto error;
		}

		if (!is_good_name(parg->name)) {
			pr_info("Invalid argument[%d] name: %s\n",
				i, parg->name);
			ret = -EINVAL;
			goto error;
		}

		if (traceprobe_conflict_field_name(parg->name,
							tk->tp.args, i)) {
			pr_info("Argument[%d] name '%s' conflicts with "
				"another field.\n", i, argv[i]);
			ret = -EINVAL;
			goto error;
		}

		/* Parse fetch argument */
		ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
						is_return, true,
						kprobes_fetch_type_table);
		if (ret) {
			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
			goto error;
		}
	}

	ret = register_trace_kprobe(tk);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_kprobe(tk);
	return ret;
}

static int release_all_trace_kprobes(void)
{
	struct trace_kprobe *tk;
	int ret = 0;

	mutex_lock(&probe_lock);
	/* Ensure no probe is in use. */
	list_for_each_entry(tk, &probe_list, list)
		if (trace_probe_is_enabled(&tk->tp)) {
			ret = -EBUSY;
			goto end;
		}
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tk = list_entry(probe_list.next, struct trace_kprobe, list);
		ret = unregister_trace_kprobe(tk);
		if (ret)
			goto end;
		free_trace_kprobe(tk);
	}

end:
	mutex_unlock(&probe_lock);

	return ret;
}

/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	seq_printf(m, ":%s/%s", tk->tp.call.class->system,
			trace_event_name(&tk->tp.call));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}
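/*
 * For reference: each line emitted above mirrors the definition syntax,
 * e.g. "p:kprobes/myprobe do_sys_open dfd=%ax" (illustrative output).
 */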

static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = release_all_trace_kprobes();
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return traceprobe_probes_write(file, buffer, count, ppos,
			create_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_kprobe *tk = v;

	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_event_name(&tk->tp.call),
		   trace_kprobe_nhit(tk),
		   tk->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
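/*
 * Illustrative: reading "kprobe_profile" yields one line per event,
 * "<event name>  <hit count>  <miss count>", where misses come from
 * nmissed (e.g. a kretprobe running out of maxactive instances).
 */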

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
		    struct trace_event_file *trace_file)
{
	struct kprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, dsize, pc;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->ip = (unsigned long)tk->rp.kp.addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		       struct pt_regs *regs,
		       struct trace_event_file *trace_file)
{
	struct kretprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, pc, dsize;
	unsigned long irq_flags;
	struct trace_event_call *call = &tk->tp.call;

	WARN_ON(call != trace_file->event_call);

	if (trace_trigger_soft_disabled(trace_file))
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	dsize = __get_data_size(&tk->tp, regs);
	size = sizeof(*entry) + tk->tp.size + dsize;

	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type,
						size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);

	event_trigger_unlock_commit_regs(trace_file, buffer, event,
					 entry, irq_flags, pc, regs);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		     struct pt_regs *regs)
{
	struct event_file_link *link;

	list_for_each_entry_rcu(link, &tk->tp.files, list)
		__kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct kprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');
 out:
	return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
		      struct trace_event *event)
{
	struct kretprobe_trace_entry_head *field;
	struct trace_seq *s = &iter->seq;
	struct trace_probe *tp;
	u8 *data;
	int i;

	field = (struct kretprobe_trace_entry_head *)iter->ent;
	tp = container_of(event, struct trace_probe, call.event);

	trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_puts(s, " <- ");

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto out;

	trace_seq_putc(s, ')');

	data = (u8 *)&field[1];
	for (i = 0; i < tp->nr_args; i++)
		if (!tp->args[i].type->print(s, tp->args[i].name,
					     data + tp->args[i].offset, field))
			goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}


static int kprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

static int kretprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry_head field;
	struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;

	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
	/* Set argument names as fields */
	for (i = 0; i < tk->tp.nr_args; i++) {
		struct probe_arg *parg = &tk->tp.args[i];

		ret = trace_define_field(event_call, parg->type->fmttype,
					 parg->name,
					 sizeof(field) + parg->offset,
					 parg->type->size,
					 parg->type->is_signed,
					 FILTER_OTHER);
		if (ret)
			return ret;
	}
	return 0;
}

#ifdef CONFIG_PERF_EVENTS

/* Kprobe profile handler */
static void
kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = (unsigned long)tk->rp.kp.addr;
	memset(&entry[1], 0, dsize);
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kprobe_perf_func);
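/*
 * Note (editorial, not from the original source): the sizing above
 * rounds the record so that, together with the u32 size word perf
 * prepends to raw samples, the payload stays u64-aligned:
 * size = ALIGN(__size + sizeof(u32), sizeof(u64)) - sizeof(u32).
 */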

/* Kretprobe profile handler */
static void
kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
		    struct pt_regs *regs)
{
	struct trace_event_call *call = &tk->tp.call;
	struct bpf_prog *prog = call->prog;
	struct kretprobe_trace_entry_head *entry;
	struct hlist_head *head;
	int size, __size, dsize;
	int rctx;

	if (prog && !trace_call_bpf(prog, regs))
		return;

	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		return;

	dsize = __get_data_size(&tk->tp, regs);
	__size = sizeof(*entry) + tk->tp.size + dsize;
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		return;

	entry->func = (unsigned long)tk->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
}
NOKPROBE_SYMBOL(kretprobe_perf_func);
#endif	/* CONFIG_PERF_EVENTS */

/*
 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
 * lockless, but we can't race with this __init function.
 */
static int kprobe_register(struct trace_event_call *event,
			   enum trace_reg type, void *data)
{
	struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return enable_trace_kprobe(tk, file);
	case TRACE_REG_UNREGISTER:
		return disable_trace_kprobe(tk, file);

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return enable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_UNREGISTER:
		return disable_trace_kprobe(tk, NULL);
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kprobe_trace_func(tk, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kprobe_perf_func(tk, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kprobe_dispatcher);

static int
kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);

	raw_cpu_inc(*tk->nhit);

	if (tk->tp.flags & TP_FLAG_TRACE)
		kretprobe_trace_func(tk, ri, regs);
#ifdef CONFIG_PERF_EVENTS
	if (tk->tp.flags & TP_FLAG_PROFILE)
		kretprobe_perf_func(tk, ri, regs);
#endif
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
NOKPROBE_SYMBOL(kretprobe_dispatcher);

static struct trace_event_functions kretprobe_funcs = {
	.trace		= print_kretprobe_event
};

static struct trace_event_functions kprobe_funcs = {
	.trace		= print_kprobe_event
};

static int register_kprobe_event(struct trace_kprobe *tk)
{
	struct trace_event_call *call = &tk->tp.call;
	int ret;

	/* Initialize trace_event_call */
	INIT_LIST_HEAD(&call->class->fields);
	if (trace_kprobe_is_return(tk)) {
		call->event.funcs = &kretprobe_funcs;
		call->class->define_fields = kretprobe_event_define_fields;
	} else {
		call->event.funcs = &kprobe_funcs;
		call->class->define_fields = kprobe_event_define_fields;
	}
	if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
		return -ENOMEM;
	ret = register_trace_event(&call->event);
	if (!ret) {
		kfree(call->print_fmt);
		return -ENODEV;
	}
	call->flags = TRACE_EVENT_FL_KPROBE;
	call->class->reg = kprobe_register;
	call->data = tk;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n",
			trace_event_name(call));
		kfree(call->print_fmt);
		unregister_trace_event(&call->event);
	}
	return ret;
}

static int unregister_kprobe_event(struct trace_kprobe *tk)
{
	int ret;

	/* tp->event is unregistered in trace_remove_event_call() */
	ret = trace_remove_event_call(&tk->tp.call);
	if (!ret)
		kfree(tk->tp.call.print_fmt);
	return ret;
}

/* Make a tracefs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	if (register_module_notifier(&trace_kprobe_module_nb))
		return -EINVAL;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_events' entry\n");

	/* Profile interface */
	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);


#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * The "__used" keeps gcc from removing the function symbol
 * from the kallsyms table. 'noinline' makes sure that there
 * isn't an inlined version used by the test method below
 */
static __used __init noinline int
kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init struct trace_event_file *
find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
{
	struct trace_event_file *file;

	list_for_each_entry(file, &tr->events, list)
		if (file->event_call == &tk->tp.call)
			return file;

	return NULL;
}

/*
 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 * stage, so we can do this lockless.
 */
static __init int kprobe_trace_self_tests_init(void)
{
	int ret, warn = 0;
	int (*target)(int, int, int, int, int, int);
	struct trace_kprobe *tk;
	struct trace_event_file *file;

	if (tracing_is_disabled())
		return -ENODEV;

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
				  "$stack $stack0 +0($stack)",
				  create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function entry.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
				  "$retval", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on probing function return.\n");
		warn++;
	} else {
		/* Enable trace point */
		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
		if (WARN_ON_ONCE(tk == NULL)) {
			pr_warn("error on getting 2nd new probe.\n");
			warn++;
		} else {
			file = find_trace_probe_file(tk, top_trace_array());
			if (WARN_ON_ONCE(file == NULL)) {
				pr_warn("error on getting probe file.\n");
				warn++;
			} else
				enable_trace_kprobe(tk, file);
		}
	}

	if (warn)
		goto end;

	ret = target(1, 2, 3, 4, 5, 6);

	/*
	 * Not expecting an error here, the check is only to prevent the
	 * optimizer from removing the call to target() as otherwise there
	 * are no side-effects and the call is never performed.
	 */
	if (ret != 21)
		warn++;

	/* Disable trace points before removing them */
	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
	if (WARN_ON_ONCE(tk == NULL)) {
		pr_warn("error on getting 2nd test probe.\n");
		warn++;
	} else {
		if (trace_kprobe_nhit(tk) != 1) {
			pr_warn("incorrect number of testprobe2 hits\n");
			warn++;
		}

		file = find_trace_probe_file(tk, top_trace_array());
		if (WARN_ON_ONCE(file == NULL)) {
			pr_warn("error on getting probe file.\n");
			warn++;
		} else
			disable_trace_kprobe(tk, file);
	}

	ret = traceprobe_command("-:testprobe", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

	ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
	if (WARN_ON_ONCE(ret)) {
		pr_warn("error on deleting a probe.\n");
		warn++;
	}

end:
	release_all_trace_kprobes();
	if (warn)
		pr_cont("NG: Some tests are failed. Please check them.\n");
	else
		pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif
v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/security.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */

#include "trace_dynevent.h"
#include "trace_kprobe_selftest.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096
#define MAX_KPROBE_CMDLINE_SIZE 1024

/* Kprobe early definition from command line */
static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
static bool kprobe_boot_events_enabled __initdata;

static int __init set_kprobe_boot_events(char *str)
{
	strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
	return 0;
}
__setup("kprobe_event=", set_kprobe_boot_events);
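/*
 * Illustrative boot-time usage (per Documentation/admin-guide/
 * kernel-parameters.txt, probe args are comma-delimited and probes
 * semicolon-delimited on the command line):
 *   kprobe_event=p,vfs_read,$arg1,$arg2
 */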
  37
  38static int trace_kprobe_create(int argc, const char **argv);
  39static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
  40static int trace_kprobe_release(struct dyn_event *ev);
  41static bool trace_kprobe_is_busy(struct dyn_event *ev);
  42static bool trace_kprobe_match(const char *system, const char *event,
  43			int argc, const char **argv, struct dyn_event *ev);
  44
  45static struct dyn_event_operations trace_kprobe_ops = {
  46	.create = trace_kprobe_create,
  47	.show = trace_kprobe_show,
  48	.is_busy = trace_kprobe_is_busy,
  49	.free = trace_kprobe_release,
  50	.match = trace_kprobe_match,
  51};
  52
  53/*
  54 * Kprobe event core functions
  55 */
  56struct trace_kprobe {
  57	struct dyn_event	devent;
  58	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
  59	unsigned long __percpu *nhit;
  60	const char		*symbol;	/* symbol name */
  61	struct trace_probe	tp;
  62};
  63
  64static bool is_trace_kprobe(struct dyn_event *ev)
  65{
  66	return ev->ops == &trace_kprobe_ops;
  67}
  68
  69static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
  70{
  71	return container_of(ev, struct trace_kprobe, devent);
  72}
  73
  74/**
  75 * for_each_trace_kprobe - iterate over the trace_kprobe list
  76 * @pos:	the struct trace_kprobe * for each entry
  77 * @dpos:	the struct dyn_event * to use as a loop cursor
  78 */
  79#define for_each_trace_kprobe(pos, dpos)	\
  80	for_each_dyn_event(dpos)		\
  81		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
  82
  83#define SIZEOF_TRACE_KPROBE(n)				\
  84	(offsetof(struct trace_kprobe, tp.args) +	\
  85	(sizeof(struct probe_arg) * (n)))
  86
 
  87static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
  88{
  89	return tk->rp.handler != NULL;
  90}
  91
  92static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
  93{
  94	return tk->symbol ? tk->symbol : "unknown";
  95}
  96
  97static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
  98{
  99	return tk->rp.kp.offset;
 100}
 101
 102static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
 103{
 104	return !!(kprobe_gone(&tk->rp.kp));
 105}
 106
 107static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
 108						 struct module *mod)
 109{
 110	int len = strlen(mod->name);
 111	const char *name = trace_kprobe_symbol(tk);
 112	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
 113}
 114
 115static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
 116{
 117	char *p;
 118	bool ret;
 119
 120	if (!tk->symbol)
 121		return false;
 122	p = strchr(tk->symbol, ':');
 123	if (!p)
 124		return true;
 125	*p = '\0';
 126	mutex_lock(&module_mutex);
 127	ret = !!find_module(tk->symbol);
 128	mutex_unlock(&module_mutex);
 129	*p = ':';
 130
 131	return ret;
 132}
 133
 134static bool trace_kprobe_is_busy(struct dyn_event *ev)
 135{
 136	struct trace_kprobe *tk = to_trace_kprobe(ev);
 
 
 
 
 137
 138	return trace_probe_is_enabled(&tk->tp);
 139}
 140
 141static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
 142					    int argc, const char **argv)
 143{
 144	char buf[MAX_ARGSTR_LEN + 1];
 145
 146	if (!argc)
 147		return true;
 148
 149	if (!tk->symbol)
 150		snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
 151	else if (tk->rp.kp.offset)
 152		snprintf(buf, sizeof(buf), "%s+%u",
 153			 trace_kprobe_symbol(tk), tk->rp.kp.offset);
 154	else
 155		snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
 156	if (strcmp(buf, argv[0]))
 157		return false;
 158	argc--; argv++;
 159
 160	return trace_probe_match_command_args(&tk->tp, argc, argv);
 161}
 
 
 
 
 162
 163static bool trace_kprobe_match(const char *system, const char *event,
 164			int argc, const char **argv, struct dyn_event *ev)
 165{
 166	struct trace_kprobe *tk = to_trace_kprobe(ev);
 167
 168	return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
 169	    (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
 170	    trace_kprobe_match_command_head(tk, argc, argv);
 171}
 172
 173static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
 174{
 175	unsigned long nhit = 0;
 176	int cpu;
 177
 178	for_each_possible_cpu(cpu)
 179		nhit += *per_cpu_ptr(tk->nhit, cpu);
 180
 181	return nhit;
 182}
 183
 184static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
 185{
 186	return !(list_empty(&tk->rp.kp.list) &&
 187		 hlist_unhashed(&tk->rp.kp.hlist));
 188}
 189
 190/* Return 0 if it fails to find the symbol address */
 191static nokprobe_inline
 192unsigned long trace_kprobe_address(struct trace_kprobe *tk)
 193{
 194	unsigned long addr;
 195
 196	if (tk->symbol) {
 197		addr = (unsigned long)
 198			kallsyms_lookup_name(trace_kprobe_symbol(tk));
 199		if (addr)
 200			addr += tk->rp.kp.offset;
 201	} else {
 202		addr = (unsigned long)tk->rp.kp.addr;
 203	}
 204	return addr;
 205}
 206
 207static nokprobe_inline struct trace_kprobe *
 208trace_kprobe_primary_from_call(struct trace_event_call *call)
 209{
 210	struct trace_probe *tp;
 211
 212	tp = trace_probe_primary_from_call(call);
 213	if (WARN_ON_ONCE(!tp))
 
 214		return NULL;
 
 
 
 215
 216	return container_of(tp, struct trace_kprobe, tp);
 217}
 218
 219bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 220{
 221	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
 
 
 
 
 
 
 
 
 
 
 
 222
 223	return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
 224			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
 225			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
 
 
 
 226}
 
 227
 228bool trace_kprobe_error_injectable(struct trace_event_call *call)
 
 
 229{
 230	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
 
 
 231
 232	return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
 233	       false;
 234}
 235
 236static int register_kprobe_event(struct trace_kprobe *tk);
 237static int unregister_kprobe_event(struct trace_kprobe *tk);
 
 
 238
 239static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
 240static int kretprobe_dispatcher(struct kretprobe_instance *ri,
 241				struct pt_regs *regs);
 242
 243static void free_trace_kprobe(struct trace_kprobe *tk)
 244{
 245	if (tk) {
 246		trace_probe_cleanup(&tk->tp);
 247		kfree(tk->symbol);
 248		free_percpu(tk->nhit);
 249		kfree(tk);
 250	}
 251}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 252
 253/*
 254 * Allocate new trace_probe and initialize it (including kprobes).
 255 */
 256static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 257					     const char *event,
 258					     void *addr,
 259					     const char *symbol,
 260					     unsigned long offs,
 261					     int maxactive,
 262					     int nargs, bool is_return)
 263{
 264	struct trace_kprobe *tk;
 265	int ret = -ENOMEM;
 266
 267	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
 268	if (!tk)
 269		return ERR_PTR(ret);
 270
 271	tk->nhit = alloc_percpu(unsigned long);
 272	if (!tk->nhit)
 273		goto error;
 274
 275	if (symbol) {
 276		tk->symbol = kstrdup(symbol, GFP_KERNEL);
 277		if (!tk->symbol)
 278			goto error;
 279		tk->rp.kp.symbol_name = tk->symbol;
 280		tk->rp.kp.offset = offs;
 281	} else
 282		tk->rp.kp.addr = addr;
 283
 284	if (is_return)
 285		tk->rp.handler = kretprobe_dispatcher;
 286	else
 287		tk->rp.kp.pre_handler = kprobe_dispatcher;
 288
 289	tk->rp.maxactive = maxactive;
 290	INIT_HLIST_NODE(&tk->rp.kp.hlist);
 291	INIT_LIST_HEAD(&tk->rp.kp.list);
 
 
 
 
 
 
 292
 293	ret = trace_probe_init(&tk->tp, event, group);
 294	if (ret < 0)
 
 
 
 
 
 295		goto error;
 296
 297	dyn_event_init(&tk->devent, &trace_kprobe_ops);
 
 298	return tk;
 299error:
 300	free_trace_kprobe(tk);
 
 
 
 301	return ERR_PTR(ret);
 302}
 303
 304static struct trace_kprobe *find_trace_kprobe(const char *event,
 305					      const char *group)
 306{
 307	struct dyn_event *pos;
 308	struct trace_kprobe *tk;
 309
 310	for_each_trace_kprobe(tk, pos)
 311		if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
 312		    strcmp(trace_probe_group_name(&tk->tp), group) == 0)
 313			return tk;
 314	return NULL;
 315}
 316
 317static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
 318{
 319	int ret = 0;
 320
 321	if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
 322		if (trace_kprobe_is_return(tk))
 323			ret = enable_kretprobe(&tk->rp);
 324		else
 325			ret = enable_kprobe(&tk->rp.kp);
 326	}
 327
 328	return ret;
 329}
 330
 331static void __disable_trace_kprobe(struct trace_probe *tp)
 
 332{
 333	struct trace_probe *pos;
 334	struct trace_kprobe *tk;
 335
 336	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
 337		tk = container_of(pos, struct trace_kprobe, tp);
 338		if (!trace_kprobe_is_registered(tk))
 339			continue;
 340		if (trace_kprobe_is_return(tk))
 341			disable_kretprobe(&tk->rp);
 342		else
 343			disable_kprobe(&tk->rp.kp);
 344	}
 345}
 346
 347/*
 348 * Enable trace_probe
 349 * if the file is NULL, enable "perf" handler, or enable "trace" handler.
 350 */
 351static int enable_trace_kprobe(struct trace_event_call *call,
 352				struct trace_event_file *file)
 353{
 354	struct trace_probe *pos, *tp;
 355	struct trace_kprobe *tk;
 356	bool enabled;
 357	int ret = 0;
 358
 359	tp = trace_probe_primary_from_call(call);
 360	if (WARN_ON_ONCE(!tp))
 361		return -ENODEV;
 362	enabled = trace_probe_is_enabled(tp);
 363
 364	/* This also changes "enabled" state */
 365	if (file) {
 366		ret = trace_probe_add_file(tp, file);
 367		if (ret)
 368			return ret;
 369	} else
 370		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
 371
 372	if (enabled)
 373		return 0;
 374
 375	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
 376		tk = container_of(pos, struct trace_kprobe, tp);
 377		if (trace_kprobe_has_gone(tk))
 378			continue;
 379		ret = __enable_trace_kprobe(tk);
 380		if (ret)
 381			break;
 382		enabled = true;
 383	}
 384
 385	if (ret) {
 386		/* Failed to enable one of them. Roll back all */
 387		if (enabled)
 388			__disable_trace_kprobe(tp);
 389		if (file)
 390			trace_probe_remove_file(tp, file);
 391		else
 392			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
 393	}
 394
 395	return ret;
 396}
 397
  398/*
  399 * Disable trace_probe.
  400 * If file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
  401 */
 402static int disable_trace_kprobe(struct trace_event_call *call,
 403				struct trace_event_file *file)
 404{
 405	struct trace_probe *tp;
 406
 407	tp = trace_probe_primary_from_call(call);
 408	if (WARN_ON_ONCE(!tp))
 409		return -ENODEV;
 410
 411	if (file) {
 412		if (!trace_probe_get_file_link(tp, file))
 413			return -ENOENT;
 414		if (!trace_probe_has_single_file(tp))
 415			goto out;
 416		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
 417	} else
 418		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
 419
 420	if (!trace_probe_is_enabled(tp))
 421		__disable_trace_kprobe(tp);
 422
 423 out:
 424	if (file)
  425		/*
  426		 * Synchronization is done in the function below. For the perf
  427		 * event case (file == NULL), perf_trace_event_unreg() calls
  428		 * tracepoint_synchronize_unregister() to synchronize the event,
  429		 * so we do not need to handle it here.
  430		 */
 431		trace_probe_remove_file(tp, file);
 432
 433	return 0;
 434}
 435
 436#if defined(CONFIG_KPROBES_ON_FTRACE) && \
 437	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
 438static bool within_notrace_func(struct trace_kprobe *tk)
 439{
 440	unsigned long offset, size, addr;
 441
 442	addr = trace_kprobe_address(tk);
 443	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
 444		return false;
 445
 446	/* Get the entry address of the target function */
 447	addr -= offset;
 448
 449	/*
 450	 * Since ftrace_location_range() does inclusive range check, we need
 451	 * to subtract 1 byte from the end address.
 452	 */
 453	return !ftrace_location_range(addr, addr + size - 1);
 454}
 455#else
 456#define within_notrace_func(tk)	(false)
 457#endif
 458
 459/* Internal register function - just handle k*probes and flags */
 460static int __register_trace_kprobe(struct trace_kprobe *tk)
 461{
 462	int i, ret;
 463
 464	ret = security_locked_down(LOCKDOWN_KPROBES);
 465	if (ret)
 466		return ret;
 467
 468	if (trace_kprobe_is_registered(tk))
 469		return -EINVAL;
 470
 471	if (within_notrace_func(tk)) {
 472		pr_warn("Could not probe notrace function %s\n",
 473			trace_kprobe_symbol(tk));
 474		return -EINVAL;
 475	}
 476
 477	for (i = 0; i < tk->tp.nr_args; i++) {
 478		ret = traceprobe_update_arg(&tk->tp.args[i]);
 479		if (ret)
 480			return ret;
 481	}
 482
  483	/* Set/clear the disabled flag according to the trace_probe enabled state */
 484	if (trace_probe_is_enabled(&tk->tp))
 485		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
 486	else
 487		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
 488
 489	if (trace_kprobe_is_return(tk))
 490		ret = register_kretprobe(&tk->rp);
 491	else
 492		ret = register_kprobe(&tk->rp.kp);
 493
 494	return ret;
 495}
 496
 497/* Internal unregister function - just handle k*probes and flags */
 498static void __unregister_trace_kprobe(struct trace_kprobe *tk)
 499{
 500	if (trace_kprobe_is_registered(tk)) {
 501		if (trace_kprobe_is_return(tk))
 502			unregister_kretprobe(&tk->rp);
 503		else
 504			unregister_kprobe(&tk->rp.kp);
 505		/* Cleanup kprobe for reuse and mark it unregistered */
 506		INIT_HLIST_NODE(&tk->rp.kp.hlist);
 507		INIT_LIST_HEAD(&tk->rp.kp.list);
 508		if (tk->rp.kp.symbol_name)
 509			tk->rp.kp.addr = NULL;
 510	}
 511}
 512
 513/* Unregister a trace_probe and probe_event */
 514static int unregister_trace_kprobe(struct trace_kprobe *tk)
 515{
  516	/* If other probes are on the event, just unregister the kprobe */
 517	if (trace_probe_has_sibling(&tk->tp))
 518		goto unreg;
 519
  520	/* An enabled event cannot be unregistered */
 521	if (trace_probe_is_enabled(&tk->tp))
 522		return -EBUSY;
 523
 524	/* Will fail if probe is being used by ftrace or perf */
 525	if (unregister_kprobe_event(tk))
 526		return -EBUSY;
 527
 528unreg:
 529	__unregister_trace_kprobe(tk);
 530	dyn_event_remove(&tk->devent);
 531	trace_probe_unlink(&tk->tp);
 532
 533	return 0;
 534}
 535
 536static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
 537					 struct trace_kprobe *comp)
 538{
 539	struct trace_probe_event *tpe = orig->tp.event;
 540	struct trace_probe *pos;
 541	int i;
 542
 543	list_for_each_entry(pos, &tpe->probes, list) {
 544		orig = container_of(pos, struct trace_kprobe, tp);
 545		if (strcmp(trace_kprobe_symbol(orig),
 546			   trace_kprobe_symbol(comp)) ||
 547		    trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
 548			continue;
 549
 550		/*
 551		 * trace_probe_compare_arg_type() ensured that nr_args and
 552		 * each argument name and type are same. Let's compare comm.
 553		 */
 554		for (i = 0; i < orig->tp.nr_args; i++) {
 555			if (strcmp(orig->tp.args[i].comm,
 556				   comp->tp.args[i].comm))
 557				break;
 558		}
 559
 560		if (i == orig->tp.nr_args)
 561			return true;
 562	}
 563
 564	return false;
 565}
 566
 567static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
 568{
 569	int ret;
 570
 571	ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
 572	if (ret) {
  573		/* Note that arguments start at index 2 */
 574		trace_probe_log_set_index(ret + 1);
 575		trace_probe_log_err(0, DIFF_ARG_TYPE);
 576		return -EEXIST;
 577	}
 578	if (trace_kprobe_has_same_kprobe(to, tk)) {
 579		trace_probe_log_set_index(0);
 580		trace_probe_log_err(0, SAME_PROBE);
 581		return -EEXIST;
 582	}
 583
 584	/* Append to existing event */
 585	ret = trace_probe_append(&tk->tp, &to->tp);
 586	if (ret)
 587		return ret;
 588
 589	/* Register k*probe */
 590	ret = __register_trace_kprobe(tk);
 591	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
 592		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
 593		ret = 0;
 594	}
 595
 596	if (ret)
 597		trace_probe_unlink(&tk->tp);
 598	else
 599		dyn_event_add(&tk->devent);
 600
 601	return ret;
 602}
 603
 604/* Register a trace_probe and probe_event */
 605static int register_trace_kprobe(struct trace_kprobe *tk)
 606{
 607	struct trace_kprobe *old_tk;
 608	int ret;
 609
 610	mutex_lock(&event_mutex);
 611
 612	old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
 613				   trace_probe_group_name(&tk->tp));
 614	if (old_tk) {
 615		if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
 616			trace_probe_log_set_index(0);
 617			trace_probe_log_err(0, DIFF_PROBE_TYPE);
 618			ret = -EEXIST;
 619		} else {
 620			ret = append_trace_kprobe(tk, old_tk);
 621		}
 622		goto end;
 623	}
 624
 625	/* Register new event */
 626	ret = register_kprobe_event(tk);
 627	if (ret) {
 628		pr_warn("Failed to register probe event(%d)\n", ret);
 629		goto end;
 630	}
 631
 632	/* Register k*probe */
 633	ret = __register_trace_kprobe(tk);
 634	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
 635		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
 636		ret = 0;
 637	}
 638
 639	if (ret < 0)
 640		unregister_kprobe_event(tk);
 641	else
 642		dyn_event_add(&tk->devent);
 643
 644end:
 645	mutex_unlock(&event_mutex);
 646	return ret;
 647}
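
/*
 * Illustrative sketch of the append path above (event and symbol names
 * are examples only): writing two probes with the same event name and
 * probe type appends the second probe to the existing event instead of
 * failing:
 *
 *	echo 'p:myevent vfs_read'  >> kprobe_events
 *	echo 'p:myevent vfs_write' >> kprobe_events
 */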
 648
 649/* Module notifier call back, checking event on the module */
 650static int trace_kprobe_module_callback(struct notifier_block *nb,
 651				       unsigned long val, void *data)
 652{
 653	struct module *mod = data;
 654	struct dyn_event *pos;
 655	struct trace_kprobe *tk;
 656	int ret;
 657
 658	if (val != MODULE_STATE_COMING)
 659		return NOTIFY_DONE;
 660
 661	/* Update probes on coming module */
 662	mutex_lock(&event_mutex);
 663	for_each_trace_kprobe(tk, pos) {
 664		if (trace_kprobe_within_module(tk, mod)) {
  665			/* No need to check busy - this probe should already be gone. */
 666			__unregister_trace_kprobe(tk);
 667			ret = __register_trace_kprobe(tk);
 668			if (ret)
 669				pr_warn("Failed to re-register probe %s on %s: %d\n",
 670					trace_probe_name(&tk->tp),
 671					mod->name, ret);
 672		}
 673	}
 674	mutex_unlock(&event_mutex);
 675
 676	return NOTIFY_DONE;
 677}
 678
 679static struct notifier_block trace_kprobe_module_nb = {
 680	.notifier_call = trace_kprobe_module_callback,
 681	.priority = 1	/* Invoked after kprobe module callback */
 682};
 683
  684/* Convert ':' and '.', which may appear in symbol names, into '_' for event names */
 685static inline void sanitize_event_name(char *name)
 686{
 687	while (*name++ != '\0')
 688		if (*name == ':' || *name == '.')
 689			*name = '_';
 690}
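
/*
 * For example (illustrative), an auto-generated event name built from
 * the symbol "vfs_read.isra.1", such as "p_vfs_read.isra.1_0", becomes
 * "p_vfs_read_isra_1_0" after sanitizing.
 */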
 691
 692static int trace_kprobe_create(int argc, const char *argv[])
 693{
 694	/*
 695	 * Argument syntax:
 696	 *  - Add kprobe:
 697	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
 698	 *  - Add kretprobe:
 699	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
 700	 * Fetch args:
 701	 *  $retval	: fetch return value
 702	 *  $stack	: fetch stack address
 703	 *  $stackN	: fetch Nth of stack (N:0-)
 704	 *  $comm       : fetch current task comm
 705	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
 706	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
 707	 *  %REG	: fetch register REG
 708	 * Dereferencing memory fetch:
 709	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
 710	 * Alias name of args:
 711	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
 712	 * Type of args:
 713	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
 714	 */
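	/*
	 * Illustrative usage via tracefs (a sketch; the symbol, event and
	 * register names below are examples only, and registers are
	 * arch-specific):
	 *   echo 'p:myprobe do_sys_open dfd=%ax' >> kprobe_events
	 *   echo 'r2:myretprobe do_sys_open ret=$retval' >> kprobe_events
	 */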
 715	struct trace_kprobe *tk = NULL;
 716	int i, len, ret = 0;
 717	bool is_return = false;
 718	char *symbol = NULL, *tmp = NULL;
 719	const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
 720	int maxactive = 0;
 721	long offset = 0;
 722	void *addr = NULL;
 723	char buf[MAX_EVENT_NAME_LEN];
 724	unsigned int flags = TPARG_FL_KERNEL;
 725
 726	switch (argv[0][0]) {
 727	case 'r':
 728		is_return = true;
 729		flags |= TPARG_FL_RETURN;
 730		break;
 731	case 'p':
 732		break;
 733	default:
 734		return -ECANCELED;
 735	}
 736	if (argc < 2)
 737		return -ECANCELED;
 738
 739	trace_probe_log_init("trace_kprobe", argc, argv);
 740
 741	event = strchr(&argv[0][1], ':');
 742	if (event)
 743		event++;
 744
 745	if (isdigit(argv[0][1])) {
 746		if (!is_return) {
 747			trace_probe_log_err(1, MAXACT_NO_KPROBE);
 748			goto parse_error;
 749		}
 750		if (event)
 751			len = event - &argv[0][1] - 1;
 752		else
 753			len = strlen(&argv[0][1]);
 754		if (len > MAX_EVENT_NAME_LEN - 1) {
 755			trace_probe_log_err(1, BAD_MAXACT);
 756			goto parse_error;
 757		}
 758		memcpy(buf, &argv[0][1], len);
 759		buf[len] = '\0';
 760		ret = kstrtouint(buf, 0, &maxactive);
 761		if (ret || !maxactive) {
 762			trace_probe_log_err(1, BAD_MAXACT);
 763			goto parse_error;
 764		}
  765		/* kretprobe instances are iterated over via a list; the
  766		 * maximum should stay reasonable.
  767		 */
 768		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
 769			trace_probe_log_err(1, MAXACT_TOO_BIG);
 770			goto parse_error;
 771		}
 772	}
 773
  774	/* Try to parse an address. If that fails, treat the
  775	 * input as a symbol. */
 776	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
 777		trace_probe_log_set_index(1);
  778		/* Check whether a uprobe event was specified */
 779		if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
 780			ret = -ECANCELED;
 781			goto error;
 782		}
 783		/* a symbol specified */
 784		symbol = kstrdup(argv[1], GFP_KERNEL);
 785		if (!symbol)
 786			return -ENOMEM;
 787		/* TODO: support .init module functions */
 788		ret = traceprobe_split_symbol_offset(symbol, &offset);
 789		if (ret || offset < 0 || offset > UINT_MAX) {
 790			trace_probe_log_err(0, BAD_PROBE_ADDR);
 791			goto parse_error;
 792		}
 793		if (kprobe_on_func_entry(NULL, symbol, offset))
 794			flags |= TPARG_FL_FENTRY;
 795		if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
 796			trace_probe_log_err(0, BAD_RETPROBE);
 797			goto parse_error;
 798		}
 799	}
 800
 801	trace_probe_log_set_index(0);
 802	if (event) {
 803		ret = traceprobe_parse_event_name(&event, &group, buf,
 804						  event - argv[0]);
 805		if (ret)
 806			goto parse_error;
 807	} else {
 808		/* Make a new event name */
 809		if (symbol)
 810			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
 811				 is_return ? 'r' : 'p', symbol, offset);
 812		else
 813			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
 814				 is_return ? 'r' : 'p', addr);
 815		sanitize_event_name(buf);
 816		event = buf;
 817	}
 818
 819	/* setup a probe */
 820	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
 821			       argc - 2, is_return);
 822	if (IS_ERR(tk)) {
 823		ret = PTR_ERR(tk);
 824		/* This must return -ENOMEM, else there is a bug */
 825		WARN_ON_ONCE(ret != -ENOMEM);
 826		goto out;	/* We know tk is not allocated */
 827	}
 828	argc -= 2; argv += 2;
 829
 830	/* parse arguments */
 831	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
 832		tmp = kstrdup(argv[i], GFP_KERNEL);
 833		if (!tmp) {
 834			ret = -ENOMEM;
 835			goto error;
 836		}
 837
 838		trace_probe_log_set_index(i + 2);
 839		ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
 840		kfree(tmp);
 841		if (ret)
 842			goto error;	/* This can be -ENOMEM */
 843	}
 844
 845	ret = traceprobe_set_print_fmt(&tk->tp, is_return);
 846	if (ret < 0)
 847		goto error;
 848
 849	ret = register_trace_kprobe(tk);
 850	if (ret) {
 851		trace_probe_log_set_index(1);
 852		if (ret == -EILSEQ)
 853			trace_probe_log_err(0, BAD_INSN_BNDRY);
 854		else if (ret == -ENOENT)
 855			trace_probe_log_err(0, BAD_PROBE_ADDR);
 856		else if (ret != -ENOMEM && ret != -EEXIST)
 857			trace_probe_log_err(0, FAIL_REG_PROBE);
 858		goto error;
 859	}
 860
 861out:
 862	trace_probe_log_clear();
 863	kfree(symbol);
 864	return ret;
 865
 866parse_error:
 867	ret = -EINVAL;
 868error:
 869	free_trace_kprobe(tk);
 870	goto out;
 871}
 872
 873static int create_or_delete_trace_kprobe(int argc, char **argv)
 874{
 875	int ret;
 876
 877	if (argv[0][0] == '-')
 878		return dyn_event_release(argc, argv, &trace_kprobe_ops);
 879
 880	ret = trace_kprobe_create(argc, (const char **)argv);
 881	return ret == -ECANCELED ? -EINVAL : ret;
 882}
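
/*
 * Deletion goes through dyn_event_release() above, e.g. (illustrative):
 *
 *	echo '-:myevent' >> kprobe_events
 */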
 883
 884static int trace_kprobe_release(struct dyn_event *ev)
 885{
 886	struct trace_kprobe *tk = to_trace_kprobe(ev);
 887	int ret = unregister_trace_kprobe(tk);
 888
 889	if (!ret)
 890		free_trace_kprobe(tk);
 891	return ret;
 892}
 893
 894static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
 895{
 896	struct trace_kprobe *tk = to_trace_kprobe(ev);
 897	int i;
 898
 899	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
 900	seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
 901				trace_probe_name(&tk->tp));
 902
 903	if (!tk->symbol)
 904		seq_printf(m, " 0x%p", tk->rp.kp.addr);
 905	else if (tk->rp.kp.offset)
 906		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
 907			   tk->rp.kp.offset);
 908	else
 909		seq_printf(m, " %s", trace_kprobe_symbol(tk));
 910
 911	for (i = 0; i < tk->tp.nr_args; i++)
 912		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
 913	seq_putc(m, '\n');
 914
 915	return 0;
 916}
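
/*
 * The show callback above emits one probe definition per line, e.g.
 * (illustrative):
 *
 *	p:kprobes/myprobe do_sys_open dfd=%ax
 *	r:kprobes/myretprobe do_sys_open ret=$retval
 */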
 917
 918static int probes_seq_show(struct seq_file *m, void *v)
 919{
 920	struct dyn_event *ev = v;
 921
 922	if (!is_trace_kprobe(ev))
 923		return 0;
 924
 925	return trace_kprobe_show(m, ev);
 926}
 927
 928static const struct seq_operations probes_seq_op = {
 929	.start  = dyn_event_seq_start,
 930	.next   = dyn_event_seq_next,
 931	.stop   = dyn_event_seq_stop,
 932	.show   = probes_seq_show
 933};
 934
 935static int probes_open(struct inode *inode, struct file *file)
 936{
 937	int ret;
 938
 939	ret = security_locked_down(LOCKDOWN_TRACEFS);
 940	if (ret)
 941		return ret;
 942
 943	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 944		ret = dyn_events_release_all(&trace_kprobe_ops);
 945		if (ret < 0)
 946			return ret;
 947	}
 948
 949	return seq_open(file, &probes_seq_op);
 950}
 951
 952static ssize_t probes_write(struct file *file, const char __user *buffer,
 953			    size_t count, loff_t *ppos)
 954{
 955	return trace_parse_run_command(file, buffer, count, ppos,
 956				       create_or_delete_trace_kprobe);
 957}
 958
 959static const struct file_operations kprobe_events_ops = {
 960	.owner          = THIS_MODULE,
 961	.open           = probes_open,
 962	.read           = seq_read,
 963	.llseek         = seq_lseek,
 964	.release        = seq_release,
 965	.write		= probes_write,
 966};
 967
 968/* Probes profiling interfaces */
 969static int probes_profile_seq_show(struct seq_file *m, void *v)
 970{
 971	struct dyn_event *ev = v;
 972	struct trace_kprobe *tk;
 973
 974	if (!is_trace_kprobe(ev))
 975		return 0;
 976
 977	tk = to_trace_kprobe(ev);
 978	seq_printf(m, "  %-44s %15lu %15lu\n",
 979		   trace_probe_name(&tk->tp),
 980		   trace_kprobe_nhit(tk),
 981		   tk->rp.kp.nmissed);
 982
 983	return 0;
 984}
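
/*
 * The profile callback above emits one line per event in the
 * "kprobe_profile" file: the event name, the hit count and the missed
 * count, e.g. (illustrative):
 *
 *	myprobe                                       123               0
 */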
 985
 986static const struct seq_operations profile_seq_op = {
 987	.start  = dyn_event_seq_start,
 988	.next   = dyn_event_seq_next,
 989	.stop   = dyn_event_seq_stop,
 990	.show   = probes_profile_seq_show
 991};
 992
 993static int profile_open(struct inode *inode, struct file *file)
 994{
 995	int ret;
 996
 997	ret = security_locked_down(LOCKDOWN_TRACEFS);
 998	if (ret)
 999		return ret;
1000
1001	return seq_open(file, &profile_seq_op);
1002}
1003
1004static const struct file_operations kprobe_profile_ops = {
1005	.owner          = THIS_MODULE,
1006	.open           = profile_open,
1007	.read           = seq_read,
1008	.llseek         = seq_lseek,
1009	.release        = seq_release,
1010};
1011
1012/* Kprobe specific fetch functions */
1013
 1014/* Return the length of the string -- including the terminating NUL byte */
1015static nokprobe_inline int
1016fetch_store_strlen(unsigned long addr)
1017{
1018	int ret, len = 0;
1019	u8 c;
1020
1021	do {
1022		ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
1023		len++;
1024	} while (c && ret == 0 && len < MAX_STRING_SIZE);
1025
1026	return (ret < 0) ? ret : len;
1027}
1028
 1029/* Return the length of the string -- including the terminating NUL byte */
1030static nokprobe_inline int
1031fetch_store_strlen_user(unsigned long addr)
1032{
1033	const void __user *uaddr =  (__force const void __user *)addr;
1034
1035	return strnlen_unsafe_user(uaddr, MAX_STRING_SIZE);
1036}
1037
 1038/*
 1039 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with the
 1040 * max length and the relative data location.
 1041 */
1042static nokprobe_inline int
1043fetch_store_string(unsigned long addr, void *dest, void *base)
1044{
1045	int maxlen = get_loc_len(*(u32 *)dest);
1046	void *__dest;
1047	long ret;
1048
1049	if (unlikely(!maxlen))
1050		return -ENOMEM;
1051
1052	__dest = get_loc_data(dest, base);
1053
1054	/*
1055	 * Try to get string again, since the string can be changed while
1056	 * probing.
1057	 */
1058	ret = strncpy_from_unsafe(__dest, (void *)addr, maxlen);
1059	if (ret >= 0)
1060		*(u32 *)dest = make_data_loc(ret, __dest - base);
1061
1062	return ret;
1063}
1064
 1065/*
 1066 * Fetch a null-terminated string from user space. Caller MUST set
 1067 * *(u32 *)dest with the max length and the relative data location.
 1068 */
1069static nokprobe_inline int
1070fetch_store_string_user(unsigned long addr, void *dest, void *base)
1071{
1072	const void __user *uaddr =  (__force const void __user *)addr;
1073	int maxlen = get_loc_len(*(u32 *)dest);
1074	void *__dest;
1075	long ret;
1076
1077	if (unlikely(!maxlen))
1078		return -ENOMEM;
1079
1080	__dest = get_loc_data(dest, base);
1081
1082	ret = strncpy_from_unsafe_user(__dest, uaddr, maxlen);
1083	if (ret >= 0)
1084		*(u32 *)dest = make_data_loc(ret, __dest - base);
1085
1086	return ret;
1087}
1088
1089static nokprobe_inline int
1090probe_mem_read(void *dest, void *src, size_t size)
1091{
1092	return probe_kernel_read(dest, src, size);
1093}
1094
1095static nokprobe_inline int
1096probe_mem_read_user(void *dest, void *src, size_t size)
1097{
1098	const void __user *uaddr =  (__force const void __user *)src;
1099
1100	return probe_user_read(dest, uaddr, size);
1101}
1102
 1103/* Note that we don't verify the fetch code, since it does not come from user space */
1104static int
1105process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
1106		   void *base)
1107{
1108	unsigned long val;
1109
1110retry:
1111	/* 1st stage: get value from context */
1112	switch (code->op) {
1113	case FETCH_OP_REG:
1114		val = regs_get_register(regs, code->param);
1115		break;
1116	case FETCH_OP_STACK:
1117		val = regs_get_kernel_stack_nth(regs, code->param);
1118		break;
1119	case FETCH_OP_STACKP:
1120		val = kernel_stack_pointer(regs);
1121		break;
1122	case FETCH_OP_RETVAL:
1123		val = regs_return_value(regs);
1124		break;
1125	case FETCH_OP_IMM:
1126		val = code->immediate;
1127		break;
1128	case FETCH_OP_COMM:
1129		val = (unsigned long)current->comm;
1130		break;
1131	case FETCH_OP_DATA:
1132		val = (unsigned long)code->data;
1133		break;
1134#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1135	case FETCH_OP_ARG:
1136		val = regs_get_kernel_argument(regs, code->param);
1137		break;
1138#endif
1139	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
1140		code++;
1141		goto retry;
1142	default:
1143		return -EILSEQ;
1144	}
1145	code++;
1146
1147	return process_fetch_insn_bottom(code, val, dest, base);
1148}
 1149NOKPROBE_SYMBOL(process_fetch_insn);
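
/*
 * Illustrative sketch of a fetch program (the 2nd-stage op names are
 * assumptions from elsewhere; only the 1st-stage ops appear above): for
 * an argument such as "+8(%ax):u32" the parser would emit roughly
 *
 *	FETCH_OP_REG	(read %ax)	   - 1st stage, handled above
 *	FETCH_OP_DEREF	(load *(val + 8))  - 2nd stage, handled in
 *	FETCH_OP_ST_MEM	(store 4 bytes)	     process_fetch_insn_bottom()
 *	FETCH_OP_END
 */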
1150
1151/* Kprobe handler */
1152static nokprobe_inline void
1153__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1154		    struct trace_event_file *trace_file)
1155{
1156	struct kprobe_trace_entry_head *entry;
1157	struct ring_buffer_event *event;
1158	struct ring_buffer *buffer;
1159	int size, dsize, pc;
1160	unsigned long irq_flags;
1161	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1162
1163	WARN_ON(call != trace_file->event_call);
1164
1165	if (trace_trigger_soft_disabled(trace_file))
1166		return;
1167
1168	local_save_flags(irq_flags);
1169	pc = preempt_count();
1170
1171	dsize = __get_data_size(&tk->tp, regs);
1172	size = sizeof(*entry) + tk->tp.size + dsize;
1173
1174	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1175						call->event.type,
1176						size, irq_flags, pc);
1177	if (!event)
1178		return;
1179
1180	entry = ring_buffer_event_data(event);
1181	entry->ip = (unsigned long)tk->rp.kp.addr;
1182	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1183
1184	event_trigger_unlock_commit_regs(trace_file, buffer, event,
1185					 entry, irq_flags, pc, regs);
1186}
1187
1188static void
1189kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1190{
1191	struct event_file_link *link;
1192
1193	trace_probe_for_each_link_rcu(link, &tk->tp)
1194		__kprobe_trace_func(tk, regs, link->file);
1195}
1196NOKPROBE_SYMBOL(kprobe_trace_func);
1197
1198/* Kretprobe handler */
1199static nokprobe_inline void
1200__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1201		       struct pt_regs *regs,
1202		       struct trace_event_file *trace_file)
1203{
1204	struct kretprobe_trace_entry_head *entry;
1205	struct ring_buffer_event *event;
1206	struct ring_buffer *buffer;
1207	int size, pc, dsize;
1208	unsigned long irq_flags;
1209	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1210
1211	WARN_ON(call != trace_file->event_call);
1212
1213	if (trace_trigger_soft_disabled(trace_file))
1214		return;
1215
1216	local_save_flags(irq_flags);
1217	pc = preempt_count();
1218
1219	dsize = __get_data_size(&tk->tp, regs);
1220	size = sizeof(*entry) + tk->tp.size + dsize;
1221
1222	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
1223						call->event.type,
1224						size, irq_flags, pc);
1225	if (!event)
1226		return;
1227
1228	entry = ring_buffer_event_data(event);
1229	entry->func = (unsigned long)tk->rp.kp.addr;
1230	entry->ret_ip = (unsigned long)ri->ret_addr;
1231	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1232
1233	event_trigger_unlock_commit_regs(trace_file, buffer, event,
1234					 entry, irq_flags, pc, regs);
1235}
1236
1237static void
1238kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1239		     struct pt_regs *regs)
1240{
1241	struct event_file_link *link;
1242
1243	trace_probe_for_each_link_rcu(link, &tk->tp)
1244		__kretprobe_trace_func(tk, ri, regs, link->file);
1245}
1246NOKPROBE_SYMBOL(kretprobe_trace_func);
1247
1248/* Event entry printers */
1249static enum print_line_t
1250print_kprobe_event(struct trace_iterator *iter, int flags,
1251		   struct trace_event *event)
1252{
1253	struct kprobe_trace_entry_head *field;
1254	struct trace_seq *s = &iter->seq;
1255	struct trace_probe *tp;
1256
1257	field = (struct kprobe_trace_entry_head *)iter->ent;
1258	tp = trace_probe_primary_from_call(
1259		container_of(event, struct trace_event_call, event));
1260	if (WARN_ON_ONCE(!tp))
1261		goto out;
1262
1263	trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1264
1265	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1266		goto out;
1267
1268	trace_seq_putc(s, ')');
1269
1270	if (print_probe_args(s, tp->args, tp->nr_args,
1271			     (u8 *)&field[1], field) < 0)
1272		goto out;
1273
1274	trace_seq_putc(s, '\n');
1275 out:
1276	return trace_handle_return(s);
1277}
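
/*
 * Example of the trace line produced above (illustrative):
 *
 *	myprobe: (do_sys_open+0x0/0x40) dfd=0x3
 */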
1278
1279static enum print_line_t
1280print_kretprobe_event(struct trace_iterator *iter, int flags,
1281		      struct trace_event *event)
1282{
1283	struct kretprobe_trace_entry_head *field;
1284	struct trace_seq *s = &iter->seq;
1285	struct trace_probe *tp;
1286
1287	field = (struct kretprobe_trace_entry_head *)iter->ent;
1288	tp = trace_probe_primary_from_call(
1289		container_of(event, struct trace_event_call, event));
1290	if (WARN_ON_ONCE(!tp))
1291		goto out;
1292
1293	trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1294
1295	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1296		goto out;
1297
1298	trace_seq_puts(s, " <- ");
1299
1300	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1301		goto out;
1302
1303	trace_seq_putc(s, ')');
1304
1305	if (print_probe_args(s, tp->args, tp->nr_args,
1306			     (u8 *)&field[1], field) < 0)
1307		goto out;
1308
1309	trace_seq_putc(s, '\n');
1310
1311 out:
1312	return trace_handle_return(s);
1313}
1314
1315
1316static int kprobe_event_define_fields(struct trace_event_call *event_call)
1317{
1318	int ret;
1319	struct kprobe_trace_entry_head field;
1320	struct trace_probe *tp;
1321
1322	tp = trace_probe_primary_from_call(event_call);
1323	if (WARN_ON_ONCE(!tp))
1324		return -ENOENT;
1325
1326	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1327
1328	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1329}
1330
1331static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1332{
1333	int ret;
1334	struct kretprobe_trace_entry_head field;
1335	struct trace_probe *tp;
1336
1337	tp = trace_probe_primary_from_call(event_call);
1338	if (WARN_ON_ONCE(!tp))
1339		return -ENOENT;
1340
1341	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1342	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1343
1344	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1345}
1346
1347#ifdef CONFIG_PERF_EVENTS
1348
1349/* Kprobe profile handler */
1350static int
1351kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1352{
1353	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1354	struct kprobe_trace_entry_head *entry;
1355	struct hlist_head *head;
1356	int size, __size, dsize;
1357	int rctx;
1358
1359	if (bpf_prog_array_valid(call)) {
1360		unsigned long orig_ip = instruction_pointer(regs);
1361		int ret;
1362
1363		ret = trace_call_bpf(call, regs);
1364
 1365		/*
 1366		 * Check whether the BPF program modified the pc of the
 1367		 * pt_regs; if so, return 1 so that we don't do the
 1368		 * single stepping.
 1369		 */
1370		if (orig_ip != instruction_pointer(regs))
1371			return 1;
1372		if (!ret)
1373			return 0;
1374	}
1375
1376	head = this_cpu_ptr(call->perf_events);
1377	if (hlist_empty(head))
1378		return 0;
1379
1380	dsize = __get_data_size(&tk->tp, regs);
1381	__size = sizeof(*entry) + tk->tp.size + dsize;
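	/*
	 * Round up so that the record plus the u32 size header that perf
	 * prepends stays u64-aligned (explanatory note; the buffer itself
	 * comes from perf_trace_buf_alloc() below).
	 */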
1382	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1383	size -= sizeof(u32);
1384
1385	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1386	if (!entry)
1387		return 0;
1388
1389	entry->ip = (unsigned long)tk->rp.kp.addr;
1390	memset(&entry[1], 0, dsize);
1391	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1392	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1393			      head, NULL);
1394	return 0;
1395}
1396NOKPROBE_SYMBOL(kprobe_perf_func);
1397
1398/* Kretprobe profile handler */
1399static void
1400kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1401		    struct pt_regs *regs)
1402{
1403	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1404	struct kretprobe_trace_entry_head *entry;
1405	struct hlist_head *head;
1406	int size, __size, dsize;
1407	int rctx;
1408
1409	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1410		return;
1411
1412	head = this_cpu_ptr(call->perf_events);
1413	if (hlist_empty(head))
1414		return;
1415
1416	dsize = __get_data_size(&tk->tp, regs);
1417	__size = sizeof(*entry) + tk->tp.size + dsize;
1418	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1419	size -= sizeof(u32);
1420
1421	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1422	if (!entry)
1423		return;
1424
1425	entry->func = (unsigned long)tk->rp.kp.addr;
1426	entry->ret_ip = (unsigned long)ri->ret_addr;
1427	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1428	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1429			      head, NULL);
1430}
1431NOKPROBE_SYMBOL(kretprobe_perf_func);
1432
1433int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1434			const char **symbol, u64 *probe_offset,
1435			u64 *probe_addr, bool perf_type_tracepoint)
1436{
1437	const char *pevent = trace_event_name(event->tp_event);
1438	const char *group = event->tp_event->class->system;
1439	struct trace_kprobe *tk;
1440
1441	if (perf_type_tracepoint)
1442		tk = find_trace_kprobe(pevent, group);
1443	else
1444		tk = event->tp_event->data;
1445	if (!tk)
1446		return -EINVAL;
1447
1448	*fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1449					      : BPF_FD_TYPE_KPROBE;
1450	if (tk->symbol) {
1451		*symbol = tk->symbol;
1452		*probe_offset = tk->rp.kp.offset;
1453		*probe_addr = 0;
1454	} else {
1455		*symbol = NULL;
1456		*probe_offset = 0;
1457		*probe_addr = (unsigned long)tk->rp.kp.addr;
1458	}
1459	return 0;
1460}
1461#endif	/* CONFIG_PERF_EVENTS */
1462
 1463/*
 1464 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 1465 *
 1466 * kprobe_trace_self_tests_init() calls enable_trace_kprobe() and
 1467 * disable_trace_kprobe() locklessly, but we can't race with this __init function.
 1468 */
1469static int kprobe_register(struct trace_event_call *event,
1470			   enum trace_reg type, void *data)
1471{
1472	struct trace_event_file *file = data;
1473
1474	switch (type) {
1475	case TRACE_REG_REGISTER:
1476		return enable_trace_kprobe(event, file);
1477	case TRACE_REG_UNREGISTER:
1478		return disable_trace_kprobe(event, file);
1479
1480#ifdef CONFIG_PERF_EVENTS
1481	case TRACE_REG_PERF_REGISTER:
1482		return enable_trace_kprobe(event, NULL);
1483	case TRACE_REG_PERF_UNREGISTER:
1484		return disable_trace_kprobe(event, NULL);
1485	case TRACE_REG_PERF_OPEN:
1486	case TRACE_REG_PERF_CLOSE:
1487	case TRACE_REG_PERF_ADD:
1488	case TRACE_REG_PERF_DEL:
1489		return 0;
1490#endif
1491	}
1492	return 0;
1493}
1494
1495static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1496{
1497	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1498	int ret = 0;
1499
1500	raw_cpu_inc(*tk->nhit);
1501
1502	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1503		kprobe_trace_func(tk, regs);
1504#ifdef CONFIG_PERF_EVENTS
1505	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1506		ret = kprobe_perf_func(tk, regs);
1507#endif
1508	return ret;
1509}
1510NOKPROBE_SYMBOL(kprobe_dispatcher);
1511
1512static int
1513kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1514{
1515	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1516
1517	raw_cpu_inc(*tk->nhit);
1518
1519	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1520		kretprobe_trace_func(tk, ri, regs);
1521#ifdef CONFIG_PERF_EVENTS
1522	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1523		kretprobe_perf_func(tk, ri, regs);
1524#endif
 1525	return 0;	/* We don't tweak the kernel, so just return 0 */
1526}
1527NOKPROBE_SYMBOL(kretprobe_dispatcher);
1528
1529static struct trace_event_functions kretprobe_funcs = {
1530	.trace		= print_kretprobe_event
1531};
1532
1533static struct trace_event_functions kprobe_funcs = {
1534	.trace		= print_kprobe_event
1535};
1536
1537static inline void init_trace_event_call(struct trace_kprobe *tk)
1538{
1539	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1540
1541	if (trace_kprobe_is_return(tk)) {
1542		call->event.funcs = &kretprobe_funcs;
1543		call->class->define_fields = kretprobe_event_define_fields;
1544	} else {
1545		call->event.funcs = &kprobe_funcs;
1546		call->class->define_fields = kprobe_event_define_fields;
1547	}
1548
1549	call->flags = TRACE_EVENT_FL_KPROBE;
1550	call->class->reg = kprobe_register;
1551}
1552
1553static int register_kprobe_event(struct trace_kprobe *tk)
1554{
1555	init_trace_event_call(tk);
1556
1557	return trace_probe_register_event_call(&tk->tp);
1558}
1559
1560static int unregister_kprobe_event(struct trace_kprobe *tk)
1561{
1562	return trace_probe_unregister_event_call(&tk->tp);
1563}
1564
1565#ifdef CONFIG_PERF_EVENTS
1566/* create a trace_kprobe, but don't add it to global lists */
1567struct trace_event_call *
1568create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1569			  bool is_return)
1570{
1571	struct trace_kprobe *tk;
1572	int ret;
1573	char *event;
1574
1575	/*
1576	 * local trace_kprobes are not added to dyn_event, so they are never
1577	 * searched in find_trace_kprobe(). Therefore, there is no concern of
1578	 * duplicated name here.
1579	 */
1580	event = func ? func : "DUMMY_EVENT";
1581
1582	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1583				offs, 0 /* maxactive */, 0 /* nargs */,
1584				is_return);
1585
1586	if (IS_ERR(tk)) {
1587		pr_info("Failed to allocate trace_probe.(%d)\n",
1588			(int)PTR_ERR(tk));
1589		return ERR_CAST(tk);
1590	}
1591
1592	init_trace_event_call(tk);
1593
1594	if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
1595		ret = -ENOMEM;
1596		goto error;
1597	}
1598
1599	ret = __register_trace_kprobe(tk);
1600	if (ret < 0)
1601		goto error;
1602
1603	return trace_probe_event_call(&tk->tp);
1604error:
1605	free_trace_kprobe(tk);
1606	return ERR_PTR(ret);
1607}
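
/*
 * Illustrative call sketch (the calling context is an assumption; the
 * in-tree user is the perf kprobe PMU code, not this file):
 *
 *	struct trace_event_call *call;
 *
 *	call = create_local_trace_kprobe("vfs_read", NULL, 0, false);
 *	if (IS_ERR(call))
 *		return PTR_ERR(call);
 */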
1608
1609void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1610{
1611	struct trace_kprobe *tk;
1612
1613	tk = trace_kprobe_primary_from_call(event_call);
1614	if (unlikely(!tk))
1615		return;
1616
1617	if (trace_probe_is_enabled(&tk->tp)) {
1618		WARN_ON(1);
1619		return;
1620	}
1621
1622	__unregister_trace_kprobe(tk);
1623
1624	free_trace_kprobe(tk);
1625}
1626#endif /* CONFIG_PERF_EVENTS */
1627
1628static __init void enable_boot_kprobe_events(void)
1629{
1630	struct trace_array *tr = top_trace_array();
1631	struct trace_event_file *file;
1632	struct trace_kprobe *tk;
1633	struct dyn_event *pos;
1634
1635	mutex_lock(&event_mutex);
1636	for_each_trace_kprobe(tk, pos) {
1637		list_for_each_entry(file, &tr->events, list)
1638			if (file->event_call == trace_probe_event_call(&tk->tp))
1639				trace_event_enable_disable(file, 1, 0);
1640	}
1641	mutex_unlock(&event_mutex);
1642}
1643
1644static __init void setup_boot_kprobe_events(void)
1645{
1646	char *p, *cmd = kprobe_boot_events_buf;
1647	int ret;
1648
1649	strreplace(kprobe_boot_events_buf, ',', ' ');
1650
1651	while (cmd && *cmd != '\0') {
1652		p = strchr(cmd, ';');
1653		if (p)
1654			*p++ = '\0';
1655
1656		ret = trace_run_command(cmd, create_or_delete_trace_kprobe);
1657		if (ret)
1658			pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1659		else
1660			kprobe_boot_events_enabled = true;
1661
1662		cmd = p;
1663	}
1664
1665	enable_boot_kprobe_events();
1666}
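
/*
 * Illustrative boot usage (assuming the "kprobe_event=" parameter that
 * fills kprobe_boot_events_buf, set up elsewhere in this file): commas
 * become spaces and semicolons separate events, so
 *
 *	kprobe_event=p:myprobe,do_sys_open,dfd=%ax;r:myret,do_sys_open,$retval
 *
 * defines one kprobe and one kretprobe event at boot.
 */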
1667
1668/* Make a tracefs interface for controlling probe points */
1669static __init int init_kprobe_trace(void)
1670{
1671	struct dentry *d_tracer;
1672	struct dentry *entry;
1673	int ret;
1674
1675	ret = dyn_event_register(&trace_kprobe_ops);
1676	if (ret)
1677		return ret;
1678
1679	if (register_module_notifier(&trace_kprobe_module_nb))
1680		return -EINVAL;
1681
1682	d_tracer = tracing_init_dentry();
1683	if (IS_ERR(d_tracer))
1684		return 0;
1685
1686	entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
1687				    NULL, &kprobe_events_ops);
1688
1689	/* Event list interface */
1690	if (!entry)
1691		pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1692
1693	/* Profile interface */
1694	entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
1695				    NULL, &kprobe_profile_ops);
1696
1697	if (!entry)
1698		pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1699
1700	setup_boot_kprobe_events();
1701
1702	return 0;
1703}
1704fs_initcall(init_kprobe_trace);
1705
1706
1707#ifdef CONFIG_FTRACE_STARTUP_TEST
1708static __init struct trace_event_file *
1709find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1710{
1711	struct trace_event_file *file;
1712
1713	list_for_each_entry(file, &tr->events, list)
1714		if (file->event_call == trace_probe_event_call(&tk->tp))
1715			return file;
1716
1717	return NULL;
1718}
1719
 1720/*
 1721 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
 1722 * stage, so we can do this locklessly.
 1723 */
1724static __init int kprobe_trace_self_tests_init(void)
1725{
1726	int ret, warn = 0;
1727	int (*target)(int, int, int, int, int, int);
1728	struct trace_kprobe *tk;
1729	struct trace_event_file *file;
1730
1731	if (tracing_is_disabled())
1732		return -ENODEV;
1733
1734	if (kprobe_boot_events_enabled) {
1735		pr_info("Skipping kprobe tests due to kprobe_event on cmdline\n");
1736		return 0;
1737	}
1738
1739	target = kprobe_trace_selftest_target;
1740
1741	pr_info("Testing kprobe tracing: ");
1742
1743	ret = trace_run_command("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)",
1744				create_or_delete_trace_kprobe);
1745	if (WARN_ON_ONCE(ret)) {
1746		pr_warn("error on probing function entry.\n");
1747		warn++;
1748	} else {
1749		/* Enable trace point */
1750		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1751		if (WARN_ON_ONCE(tk == NULL)) {
1752			pr_warn("error on getting new probe.\n");
1753			warn++;
1754		} else {
1755			file = find_trace_probe_file(tk, top_trace_array());
1756			if (WARN_ON_ONCE(file == NULL)) {
1757				pr_warn("error on getting probe file.\n");
1758				warn++;
1759			} else
1760				enable_trace_kprobe(
1761					trace_probe_event_call(&tk->tp), file);
1762		}
1763	}
1764
1765	ret = trace_run_command("r:testprobe2 kprobe_trace_selftest_target $retval",
1766				create_or_delete_trace_kprobe);
1767	if (WARN_ON_ONCE(ret)) {
1768		pr_warn("error on probing function return.\n");
1769		warn++;
1770	} else {
1771		/* Enable trace point */
1772		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1773		if (WARN_ON_ONCE(tk == NULL)) {
1774			pr_warn("error on getting 2nd new probe.\n");
1775			warn++;
1776		} else {
1777			file = find_trace_probe_file(tk, top_trace_array());
1778			if (WARN_ON_ONCE(file == NULL)) {
1779				pr_warn("error on getting probe file.\n");
1780				warn++;
1781			} else
1782				enable_trace_kprobe(
1783					trace_probe_event_call(&tk->tp), file);
1784		}
1785	}
1786
1787	if (warn)
1788		goto end;
1789
1790	ret = target(1, 2, 3, 4, 5, 6);
1791
 1792	/*
 1793	 * No error is expected here; the check only prevents the
 1794	 * optimizer from removing the call to target(), which otherwise
 1795	 * has no side effects and would never be performed.
 1796	 */
1797	if (ret != 21)
1798		warn++;
1799
1800	/* Disable trace points before removing it */
1801	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1802	if (WARN_ON_ONCE(tk == NULL)) {
1803		pr_warn("error on getting test probe.\n");
1804		warn++;
1805	} else {
1806		if (trace_kprobe_nhit(tk) != 1) {
1807			pr_warn("incorrect number of testprobe hits\n");
1808			warn++;
1809		}
1810
1811		file = find_trace_probe_file(tk, top_trace_array());
1812		if (WARN_ON_ONCE(file == NULL)) {
1813			pr_warn("error on getting probe file.\n");
1814			warn++;
1815		} else
1816			disable_trace_kprobe(
1817				trace_probe_event_call(&tk->tp), file);
1818	}
1819
1820	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1821	if (WARN_ON_ONCE(tk == NULL)) {
1822		pr_warn("error on getting 2nd test probe.\n");
1823		warn++;
1824	} else {
1825		if (trace_kprobe_nhit(tk) != 1) {
1826			pr_warn("incorrect number of testprobe2 hits\n");
1827			warn++;
1828		}
1829
1830		file = find_trace_probe_file(tk, top_trace_array());
1831		if (WARN_ON_ONCE(file == NULL)) {
1832			pr_warn("error on getting probe file.\n");
1833			warn++;
1834		} else
1835			disable_trace_kprobe(
1836				trace_probe_event_call(&tk->tp), file);
1837	}
1838
1839	ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
1840	if (WARN_ON_ONCE(ret)) {
1841		pr_warn("error on deleting a probe.\n");
1842		warn++;
1843	}
1844
1845	ret = trace_run_command("-:testprobe2", create_or_delete_trace_kprobe);
1846	if (WARN_ON_ONCE(ret)) {
1847		pr_warn("error on deleting a probe.\n");
1848		warn++;
1849	}
1850
1851end:
1852	ret = dyn_events_release_all(&trace_kprobe_ops);
1853	if (WARN_ON_ONCE(ret)) {
1854		pr_warn("error on cleaning up probes.\n");
1855		warn++;
1856	}
1857	/*
1858	 * Wait for the optimizer work to finish. Otherwise it might fiddle
1859	 * with probes in already freed __init text.
1860	 */
1861	wait_for_kprobe_optimizer();
1862	if (warn)
 1863		pr_cont("NG: Some tests failed. Please check them.\n");
1864	else
1865		pr_cont("OK\n");
1866	return 0;
1867}
1868
1869late_initcall(kprobe_trace_self_tests_init);
1870
1871#endif