kernel/trace/trace_kprobe.c (Linux v6.2)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Kprobes-based tracing events
   4 *
   5 * Created by Masami Hiramatsu <mhiramat@redhat.com>
   6 *
   7 */
   8#define pr_fmt(fmt)	"trace_kprobe: " fmt
   9
  10#include <linux/bpf-cgroup.h>
  11#include <linux/security.h>
  12#include <linux/module.h>
  13#include <linux/uaccess.h>
  14#include <linux/rculist.h>
  15#include <linux/error-injection.h>
  16
  17#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
  18
  19#include "trace_dynevent.h"
  20#include "trace_kprobe_selftest.h"
  21#include "trace_probe.h"
  22#include "trace_probe_tmpl.h"
  23#include "trace_probe_kernel.h"
  24
  25#define KPROBE_EVENT_SYSTEM "kprobes"
  26#define KRETPROBE_MAXACTIVE_MAX 4096
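/*
 * Events registered in the "kprobes" group appear under
 * events/kprobes/<event>/ in tracefs (typically mounted at
 * /sys/kernel/tracing or /sys/kernel/debug/tracing).
 */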
  27
  28/* Early kprobe event definitions from the kernel command line */
  29static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;
  30
  31static int __init set_kprobe_boot_events(char *str)
  32{
  33	strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
  34	disable_tracing_selftest("running kprobe events");
  35
  36	return 1;
  37}
  38__setup("kprobe_event=", set_kprobe_boot_events);
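/*
 * For example, a probe can be defined at boot time with (a sketch; the
 * exact format is documented in kernel-parameters.txt, using commas in
 * place of spaces and ';' between probes):
 *
 *	kprobe_event=p,vfs_read,$arg1,$arg2
 */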
  39
  40static int trace_kprobe_create(const char *raw_command);
  41static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
  42static int trace_kprobe_release(struct dyn_event *ev);
  43static bool trace_kprobe_is_busy(struct dyn_event *ev);
  44static bool trace_kprobe_match(const char *system, const char *event,
  45			int argc, const char **argv, struct dyn_event *ev);
  46
  47static struct dyn_event_operations trace_kprobe_ops = {
  48	.create = trace_kprobe_create,
  49	.show = trace_kprobe_show,
  50	.is_busy = trace_kprobe_is_busy,
  51	.free = trace_kprobe_release,
  52	.match = trace_kprobe_match,
  53};
  54
  55/*
  56 * Kprobe event core functions
  57 */
  58struct trace_kprobe {
  59	struct dyn_event	devent;
  60	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
  61	unsigned long __percpu *nhit;
  62	const char		*symbol;	/* symbol name */
  63	struct trace_probe	tp;
  64};
  65
  66static bool is_trace_kprobe(struct dyn_event *ev)
  67{
  68	return ev->ops == &trace_kprobe_ops;
  69}
  70
  71static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
  72{
  73	return container_of(ev, struct trace_kprobe, devent);
  74}
  75
  76/**
  77 * for_each_trace_kprobe - iterate over the trace_kprobe list
  78 * @pos:	the struct trace_kprobe * for each entry
  79 * @dpos:	the struct dyn_event * to use as a loop cursor
  80 */
  81#define for_each_trace_kprobe(pos, dpos)	\
  82	for_each_dyn_event(dpos)		\
  83		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))
  84
  85static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
  86{
  87	return tk->rp.handler != NULL;
  88}
  89
  90static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
  91{
  92	return tk->symbol ? tk->symbol : "unknown";
  93}
  94
  95static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
  96{
  97	return tk->rp.kp.offset;
  98}
  99
 100static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
 101{
 102	return kprobe_gone(&tk->rp.kp);
 103}
 104
 105static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
 106						 struct module *mod)
 107{
 108	int len = strlen(module_name(mod));
 109	const char *name = trace_kprobe_symbol(tk);
 110
 111	return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
 112}
 113
 114static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
 115{
 116	char *p;
 117	bool ret;
 118
 119	if (!tk->symbol)
 120		return false;
 121	p = strchr(tk->symbol, ':');
 122	if (!p)
 123		return true;
 124	*p = '\0';
 125	rcu_read_lock_sched();
 126	ret = !!find_module(tk->symbol);
 127	rcu_read_unlock_sched();
 128	*p = ':';
 129
 130	return ret;
 131}
 132
 133static bool trace_kprobe_is_busy(struct dyn_event *ev)
 134{
 135	struct trace_kprobe *tk = to_trace_kprobe(ev);
 136
 137	return trace_probe_is_enabled(&tk->tp);
 138}
 139
 140static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
 141					    int argc, const char **argv)
 142{
 143	char buf[MAX_ARGSTR_LEN + 1];
 144
 145	if (!argc)
 146		return true;
 147
 148	if (!tk->symbol)
 149		snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
 150	else if (tk->rp.kp.offset)
 151		snprintf(buf, sizeof(buf), "%s+%u",
 152			 trace_kprobe_symbol(tk), tk->rp.kp.offset);
 153	else
 154		snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
 155	if (strcmp(buf, argv[0]))
 156		return false;
 157	argc--; argv++;
 158
 159	return trace_probe_match_command_args(&tk->tp, argc, argv);
 160}
 161
 162static bool trace_kprobe_match(const char *system, const char *event,
 163			int argc, const char **argv, struct dyn_event *ev)
 164{
 165	struct trace_kprobe *tk = to_trace_kprobe(ev);
 166
 167	return (event[0] == '\0' ||
 168		strcmp(trace_probe_name(&tk->tp), event) == 0) &&
 169	    (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
 170	    trace_kprobe_match_command_head(tk, argc, argv);
 171}
 172
 173static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
 174{
 175	unsigned long nhit = 0;
 176	int cpu;
 177
 178	for_each_possible_cpu(cpu)
 179		nhit += *per_cpu_ptr(tk->nhit, cpu);
 180
 181	return nhit;
 182}
 183
 184static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
 185{
 186	return !(list_empty(&tk->rp.kp.list) &&
 187		 hlist_unhashed(&tk->rp.kp.hlist));
 188}
 189
 190/* Return 0 if it fails to find the symbol address */
 191static nokprobe_inline
 192unsigned long trace_kprobe_address(struct trace_kprobe *tk)
 193{
 194	unsigned long addr;
 195
 196	if (tk->symbol) {
 197		addr = (unsigned long)
 198			kallsyms_lookup_name(trace_kprobe_symbol(tk));
 199		if (addr)
 200			addr += tk->rp.kp.offset;
 201	} else {
 202		addr = (unsigned long)tk->rp.kp.addr;
 203	}
 204	return addr;
 205}
 206
 207static nokprobe_inline struct trace_kprobe *
 208trace_kprobe_primary_from_call(struct trace_event_call *call)
 209{
 210	struct trace_probe *tp;
 211
 212	tp = trace_probe_primary_from_call(call);
 213	if (WARN_ON_ONCE(!tp))
 214		return NULL;
 215
 216	return container_of(tp, struct trace_kprobe, tp);
 217}
 218
 219bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 220{
 221	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
 222
 223	return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
 224			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
 225			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
 226}
 227
 228bool trace_kprobe_error_injectable(struct trace_event_call *call)
 229{
 230	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
 231
 232	return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
 233	       false;
 234}
 235
 236static int register_kprobe_event(struct trace_kprobe *tk);
 237static int unregister_kprobe_event(struct trace_kprobe *tk);
 238
 239static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
 240static int kretprobe_dispatcher(struct kretprobe_instance *ri,
 241				struct pt_regs *regs);
 242
 243static void free_trace_kprobe(struct trace_kprobe *tk)
 244{
 245	if (tk) {
 246		trace_probe_cleanup(&tk->tp);
 247		kfree(tk->symbol);
 248		free_percpu(tk->nhit);
 249		kfree(tk);
 250	}
 251}
 252
 253/*
 254 * Allocate new trace_probe and initialize it (including kprobes).
 255 */
 256static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 257					     const char *event,
 258					     void *addr,
 259					     const char *symbol,
 260					     unsigned long offs,
 261					     int maxactive,
 262					     int nargs, bool is_return)
 263{
 264	struct trace_kprobe *tk;
 265	int ret = -ENOMEM;
 266
 267	tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
 268	if (!tk)
 269		return ERR_PTR(ret);
 270
 271	tk->nhit = alloc_percpu(unsigned long);
 272	if (!tk->nhit)
 273		goto error;
 274
 275	if (symbol) {
 276		tk->symbol = kstrdup(symbol, GFP_KERNEL);
 277		if (!tk->symbol)
 278			goto error;
 279		tk->rp.kp.symbol_name = tk->symbol;
 280		tk->rp.kp.offset = offs;
 281	} else
 282		tk->rp.kp.addr = addr;
 283
 284	if (is_return)
 285		tk->rp.handler = kretprobe_dispatcher;
 286	else
 287		tk->rp.kp.pre_handler = kprobe_dispatcher;
 288
 289	tk->rp.maxactive = maxactive;
 290	INIT_HLIST_NODE(&tk->rp.kp.hlist);
 291	INIT_LIST_HEAD(&tk->rp.kp.list);
 292
 293	ret = trace_probe_init(&tk->tp, event, group, false);
 294	if (ret < 0)
 295		goto error;
 296
 297	dyn_event_init(&tk->devent, &trace_kprobe_ops);
 298	return tk;
 299error:
 300	free_trace_kprobe(tk);
 301	return ERR_PTR(ret);
 302}
 303
 304static struct trace_kprobe *find_trace_kprobe(const char *event,
 305					      const char *group)
 306{
 307	struct dyn_event *pos;
 308	struct trace_kprobe *tk;
 309
 310	for_each_trace_kprobe(tk, pos)
 311		if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
 312		    strcmp(trace_probe_group_name(&tk->tp), group) == 0)
 313			return tk;
 314	return NULL;
 315}
 316
 317static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
 318{
 319	int ret = 0;
 320
 321	if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
 322		if (trace_kprobe_is_return(tk))
 323			ret = enable_kretprobe(&tk->rp);
 324		else
 325			ret = enable_kprobe(&tk->rp.kp);
 326	}
 327
 328	return ret;
 329}
 330
 331static void __disable_trace_kprobe(struct trace_probe *tp)
 332{
 333	struct trace_kprobe *tk;
 334
 335	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
 336		if (!trace_kprobe_is_registered(tk))
 337			continue;
 338		if (trace_kprobe_is_return(tk))
 339			disable_kretprobe(&tk->rp);
 340		else
 341			disable_kprobe(&tk->rp.kp);
 342	}
 343}
 344
 345/*
 346 * Enable trace_probe.
 347 * If the file is NULL, enable the "perf" handler; otherwise enable the "trace" handler.
 348 */
 349static int enable_trace_kprobe(struct trace_event_call *call,
 350				struct trace_event_file *file)
 351{
 352	struct trace_probe *tp;
 353	struct trace_kprobe *tk;
 354	bool enabled;
 355	int ret = 0;
 356
 357	tp = trace_probe_primary_from_call(call);
 358	if (WARN_ON_ONCE(!tp))
 359		return -ENODEV;
 360	enabled = trace_probe_is_enabled(tp);
 361
 362	/* This also changes "enabled" state */
 363	if (file) {
 364		ret = trace_probe_add_file(tp, file);
 365		if (ret)
 366			return ret;
 367	} else
 368		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
 369
 370	if (enabled)
 371		return 0;
 372
 373	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
 374		if (trace_kprobe_has_gone(tk))
 375			continue;
 376		ret = __enable_trace_kprobe(tk);
 377		if (ret)
 378			break;
 379		enabled = true;
 380	}
 381
 382	if (ret) {
 383		/* Failed to enable one of them. Roll back all */
 384		if (enabled)
 385			__disable_trace_kprobe(tp);
 386		if (file)
 387			trace_probe_remove_file(tp, file);
 388		else
 389			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
 390	}
 391
 392	return ret;
 393}
 394
 395/*
 396 * Disable trace_probe.
 397 * If the file is NULL, disable the "perf" handler; otherwise disable the "trace" handler.
 398 */
 399static int disable_trace_kprobe(struct trace_event_call *call,
 400				struct trace_event_file *file)
 401{
 402	struct trace_probe *tp;
 403
 404	tp = trace_probe_primary_from_call(call);
 405	if (WARN_ON_ONCE(!tp))
 406		return -ENODEV;
 407
 408	if (file) {
 409		if (!trace_probe_get_file_link(tp, file))
 410			return -ENOENT;
 411		if (!trace_probe_has_single_file(tp))
 412			goto out;
 413		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
 414	} else
 415		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
 416
 417	if (!trace_probe_is_enabled(tp))
 418		__disable_trace_kprobe(tp);
 419
 420 out:
 421	if (file)
 422		/*
 423		 * Synchronization is done in the function below. For perf
 424		 * events, file == NULL and perf_trace_event_unreg() calls
 425		 * tracepoint_synchronize_unregister() to synchronize the
 426		 * event, so we don't need to care about it here.
 427		 */
 428		trace_probe_remove_file(tp, file);
 429
 430	return 0;
 431}
 432
 433#if defined(CONFIG_DYNAMIC_FTRACE) && \
 434	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
 435static bool __within_notrace_func(unsigned long addr)
 436{
 437	unsigned long offset, size;
 438
 439	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
 440		return false;
 441
 442	/* Get the entry address of the target function */
 443	addr -= offset;
 444
 445	/*
 446	 * Since ftrace_location_range() does inclusive range check, we need
 447	 * to subtract 1 byte from the end address.
 448	 */
 449	return !ftrace_location_range(addr, addr + size - 1);
 450}
 451
 452static bool within_notrace_func(struct trace_kprobe *tk)
 453{
 454	unsigned long addr = trace_kprobe_address(tk);
 455	char symname[KSYM_NAME_LEN], *p;
 456
 457	if (!__within_notrace_func(addr))
 458		return false;
 459
 460	/* Check if the address is on a suffixed symbol (e.g. ".isra.0") */
 461	if (!lookup_symbol_name(addr, symname)) {
 462		p = strchr(symname, '.');
 463		if (!p)
 464			return true;
 465		*p = '\0';
 466		addr = (unsigned long)kprobe_lookup_name(symname, 0);
 467		if (addr)
 468			return __within_notrace_func(addr);
 469	}
 470
 471	return true;
 472}
 473#else
 474#define within_notrace_func(tk)	(false)
 475#endif
 476
 477/* Internal register function - just handle k*probes and flags */
 478static int __register_trace_kprobe(struct trace_kprobe *tk)
 479{
 480	int i, ret;
 481
 482	ret = security_locked_down(LOCKDOWN_KPROBES);
 483	if (ret)
 484		return ret;
 485
 486	if (trace_kprobe_is_registered(tk))
 487		return -EINVAL;
 488
 489	if (within_notrace_func(tk)) {
 490		pr_warn("Could not probe notrace function %s\n",
 491			trace_kprobe_symbol(tk));
 492		return -EINVAL;
 493	}
 494
 495	for (i = 0; i < tk->tp.nr_args; i++) {
 496		ret = traceprobe_update_arg(&tk->tp.args[i]);
 497		if (ret)
 498			return ret;
 499	}
 500
 501	/* Set/clear disabled flag according to tp->flag */
 502	if (trace_probe_is_enabled(&tk->tp))
 503		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
 504	else
 505		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
 506
 507	if (trace_kprobe_is_return(tk))
 508		ret = register_kretprobe(&tk->rp);
 509	else
 510		ret = register_kprobe(&tk->rp.kp);
 511
 512	return ret;
 513}
 514
 515/* Internal unregister function - just handle k*probes and flags */
 516static void __unregister_trace_kprobe(struct trace_kprobe *tk)
 517{
 518	if (trace_kprobe_is_registered(tk)) {
 519		if (trace_kprobe_is_return(tk))
 520			unregister_kretprobe(&tk->rp);
 521		else
 522			unregister_kprobe(&tk->rp.kp);
 523		/* Cleanup kprobe for reuse and mark it unregistered */
 524		INIT_HLIST_NODE(&tk->rp.kp.hlist);
 525		INIT_LIST_HEAD(&tk->rp.kp.list);
 526		if (tk->rp.kp.symbol_name)
 527			tk->rp.kp.addr = NULL;
 528	}
 529}
 530
 531/* Unregister a trace_probe and probe_event */
 532static int unregister_trace_kprobe(struct trace_kprobe *tk)
 533{
 534	/* If other probes are on the event, just unregister kprobe */
 535	if (trace_probe_has_sibling(&tk->tp))
 536		goto unreg;
 537
 538	/* Enabled event can not be unregistered */
 539	if (trace_probe_is_enabled(&tk->tp))
 540		return -EBUSY;
 541
 542	/* If there's a reference to the dynamic event */
 543	if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
 544		return -EBUSY;
 545
 546	/* Will fail if probe is being used by ftrace or perf */
 547	if (unregister_kprobe_event(tk))
 548		return -EBUSY;
 549
 550unreg:
 551	__unregister_trace_kprobe(tk);
 552	dyn_event_remove(&tk->devent);
 553	trace_probe_unlink(&tk->tp);
 554
 555	return 0;
 556}
 557
 558static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
 559					 struct trace_kprobe *comp)
 560{
 561	struct trace_probe_event *tpe = orig->tp.event;
 562	int i;
 563
 564	list_for_each_entry(orig, &tpe->probes, tp.list) {
 565		if (strcmp(trace_kprobe_symbol(orig),
 566			   trace_kprobe_symbol(comp)) ||
 567		    trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
 568			continue;
 569
 570		/*
 571		 * trace_probe_compare_arg_type() ensured that nr_args and
 572		 * each argument name and type are same. Let's compare comm.
 573		 */
 574		for (i = 0; i < orig->tp.nr_args; i++) {
 575			if (strcmp(orig->tp.args[i].comm,
 576				   comp->tp.args[i].comm))
 577				break;
 578		}
 579
 580		if (i == orig->tp.nr_args)
 581			return true;
 582	}
 583
 584	return false;
 585}
 586
 587static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
 588{
 589	int ret;
 590
 591	ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
 592	if (ret) {
 593		/* Note that argument starts index = 2 */
 594		trace_probe_log_set_index(ret + 1);
 595		trace_probe_log_err(0, DIFF_ARG_TYPE);
 596		return -EEXIST;
 597	}
 598	if (trace_kprobe_has_same_kprobe(to, tk)) {
 599		trace_probe_log_set_index(0);
 600		trace_probe_log_err(0, SAME_PROBE);
 601		return -EEXIST;
 602	}
 603
 604	/* Append to existing event */
 605	ret = trace_probe_append(&tk->tp, &to->tp);
 606	if (ret)
 607		return ret;
 608
 609	/* Register k*probe */
 610	ret = __register_trace_kprobe(tk);
 611	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
 612		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
 613		ret = 0;
 614	}
 615
 616	if (ret)
 617		trace_probe_unlink(&tk->tp);
 618	else
 619		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
 620
 621	return ret;
 622}
 623
 624/* Register a trace_probe and probe_event */
 625static int register_trace_kprobe(struct trace_kprobe *tk)
 626{
 627	struct trace_kprobe *old_tk;
 628	int ret;
 629
 630	mutex_lock(&event_mutex);
 631
 632	old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
 633				   trace_probe_group_name(&tk->tp));
 634	if (old_tk) {
 635		if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
 636			trace_probe_log_set_index(0);
 637			trace_probe_log_err(0, DIFF_PROBE_TYPE);
 638			ret = -EEXIST;
 639		} else {
 640			ret = append_trace_kprobe(tk, old_tk);
 641		}
 642		goto end;
 643	}
 644
 645	/* Register new event */
 646	ret = register_kprobe_event(tk);
 647	if (ret) {
 648		if (ret == -EEXIST) {
 649			trace_probe_log_set_index(0);
 650			trace_probe_log_err(0, EVENT_EXIST);
 651		} else
 652			pr_warn("Failed to register probe event(%d)\n", ret);
 653		goto end;
 654	}
 655
 656	/* Register k*probe */
 657	ret = __register_trace_kprobe(tk);
 658	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
 659		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
 660		ret = 0;
 661	}
 662
 663	if (ret < 0)
 664		unregister_kprobe_event(tk);
 665	else
 666		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
 667
 668end:
 669	mutex_unlock(&event_mutex);
 670	return ret;
 671}
 672
 673/* Module notifier callback, checking events on the coming module */
 674static int trace_kprobe_module_callback(struct notifier_block *nb,
 675				       unsigned long val, void *data)
 676{
 677	struct module *mod = data;
 678	struct dyn_event *pos;
 679	struct trace_kprobe *tk;
 680	int ret;
 681
 682	if (val != MODULE_STATE_COMING)
 683		return NOTIFY_DONE;
 684
 685	/* Update probes on coming module */
 686	mutex_lock(&event_mutex);
 687	for_each_trace_kprobe(tk, pos) {
 688		if (trace_kprobe_within_module(tk, mod)) {
 689			/* Don't need to check busy - this should have gone. */
 690			__unregister_trace_kprobe(tk);
 691			ret = __register_trace_kprobe(tk);
 692			if (ret)
 693				pr_warn("Failed to re-register probe %s on %s: %d\n",
 694					trace_probe_name(&tk->tp),
 695					module_name(mod), ret);
 696		}
 697	}
 698	mutex_unlock(&event_mutex);
 699
 700	return NOTIFY_DONE;
 701}
 702
 703static struct notifier_block trace_kprobe_module_nb = {
 704	.notifier_call = trace_kprobe_module_callback,
 705	.priority = 1	/* Invoked after kprobe module callback */
 706};
 707
 708static int __trace_kprobe_create(int argc, const char *argv[])
 709{
 710	/*
 711	 * Argument syntax:
 712	 *  - Add kprobe:
 713	 *      p[:[GRP/][EVENT]] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
 714	 *  - Add kretprobe:
 715	 *      r[MAXACTIVE][:[GRP/][EVENT]] [MOD:]KSYM[+0] [FETCHARGS]
 716	 *    Or
 717	 *      p[:[GRP/][EVENT]] [MOD:]KSYM[+0]%return [FETCHARGS]
 718	 *
 719	 * Fetch args:
 720	 *  $retval	: fetch return value
 721	 *  $stack	: fetch stack address
 722	 *  $stackN	: fetch Nth of stack (N:0-)
 723	 *  $comm       : fetch current task comm
 724	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
 725	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
 726	 *  %REG	: fetch register REG
 727	 * Dereferencing memory fetch:
 728	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
 729	 * Alias name of args:
 730	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
 731	 * Type of args:
 732	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
 733	 */
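	/*
	 * For example (a sketch only; "myprobe"/"myretprobe" are
	 * illustrative names and the fetch-arg registers are
	 * arch-dependent):
	 *
	 *	echo 'p:myprobe do_sys_open dfd=%ax filename=%dx' \
	 *		>> /sys/kernel/tracing/kprobe_events
	 *	echo 'r:myretprobe do_sys_open $retval' \
	 *		>> /sys/kernel/tracing/kprobe_events
	 */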
 734	struct trace_kprobe *tk = NULL;
 735	int i, len, ret = 0;
 736	bool is_return = false;
 737	char *symbol = NULL, *tmp = NULL;
 738	const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
 739	enum probe_print_type ptype;
 740	int maxactive = 0;
 741	long offset = 0;
 742	void *addr = NULL;
 743	char buf[MAX_EVENT_NAME_LEN];
 744	char gbuf[MAX_EVENT_NAME_LEN];
 745	unsigned int flags = TPARG_FL_KERNEL;
 746
 747	switch (argv[0][0]) {
 748	case 'r':
 749		is_return = true;
 750		break;
 751	case 'p':
 752		break;
 753	default:
 754		return -ECANCELED;
 755	}
 756	if (argc < 2)
 757		return -ECANCELED;
 758
 759	trace_probe_log_init("trace_kprobe", argc, argv);
 760
 761	event = strchr(&argv[0][1], ':');
 762	if (event)
 763		event++;
 764
 765	if (isdigit(argv[0][1])) {
 766		if (!is_return) {
 767			trace_probe_log_err(1, MAXACT_NO_KPROBE);
 768			goto parse_error;
 769		}
 770		if (event)
 771			len = event - &argv[0][1] - 1;
 772		else
 773			len = strlen(&argv[0][1]);
 774		if (len > MAX_EVENT_NAME_LEN - 1) {
 775			trace_probe_log_err(1, BAD_MAXACT);
 776			goto parse_error;
 777		}
 778		memcpy(buf, &argv[0][1], len);
 779		buf[len] = '\0';
 780		ret = kstrtouint(buf, 0, &maxactive);
 781		if (ret || !maxactive) {
 782			trace_probe_log_err(1, BAD_MAXACT);
 783			goto parse_error;
 784		}
 785		/* kretprobe instances are iterated over via a list, so the
 786		 * maximum should stay reasonable.
 787		 */
 788		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
 789			trace_probe_log_err(1, MAXACT_TOO_BIG);
 790			goto parse_error;
 791		}
 792	}
 793
 794	/* Try to parse an address. If that fails, try to read the
 795	 * input as a symbol. */
 796	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
 797		trace_probe_log_set_index(1);
 798		/* Check whether a uprobe event was specified */
 799		if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
 800			ret = -ECANCELED;
 801			goto error;
 802		}
 803		/* a symbol specified */
 804		symbol = kstrdup(argv[1], GFP_KERNEL);
 805		if (!symbol)
 806			return -ENOMEM;
 807
 808		tmp = strchr(symbol, '%');
 809		if (tmp) {
 810			if (!strcmp(tmp, "%return")) {
 811				*tmp = '\0';
 812				is_return = true;
 813			} else {
 814				trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
 815				goto parse_error;
 816			}
 817		}
 818
 819		/* TODO: support .init module functions */
 820		ret = traceprobe_split_symbol_offset(symbol, &offset);
 821		if (ret || offset < 0 || offset > UINT_MAX) {
 822			trace_probe_log_err(0, BAD_PROBE_ADDR);
 823			goto parse_error;
 824		}
 825		if (is_return)
 826			flags |= TPARG_FL_RETURN;
 827		ret = kprobe_on_func_entry(NULL, symbol, offset);
 828		if (ret == 0)
 829			flags |= TPARG_FL_FENTRY;
 830		/* Defer the ENOENT case until register kprobe */
 831		if (ret == -EINVAL && is_return) {
 832			trace_probe_log_err(0, BAD_RETPROBE);
 833			goto parse_error;
 834		}
 835	}
 836
 837	trace_probe_log_set_index(0);
 838	if (event) {
 839		ret = traceprobe_parse_event_name(&event, &group, gbuf,
 840						  event - argv[0]);
 841		if (ret)
 842			goto parse_error;
 843	}
 844
 845	if (!event) {
 846		/* Make a new event name */
 847		if (symbol)
 848			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
 849				 is_return ? 'r' : 'p', symbol, offset);
 850		else
 851			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
 852				 is_return ? 'r' : 'p', addr);
 853		sanitize_event_name(buf);
 854		event = buf;
 855	}
 856
 857	/* setup a probe */
 858	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
 859			       argc - 2, is_return);
 860	if (IS_ERR(tk)) {
 861		ret = PTR_ERR(tk);
 862		/* This must return -ENOMEM, else there is a bug */
 863		WARN_ON_ONCE(ret != -ENOMEM);
 864		goto out;	/* We know tk is not allocated */
 865	}
 866	argc -= 2; argv += 2;
 867
 868	/* parse arguments */
 869	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
 870		trace_probe_log_set_index(i + 2);
 871		ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], flags);
 872		if (ret)
 873			goto error;	/* This can be -ENOMEM */
 874	}
 875
 876	ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
 877	ret = traceprobe_set_print_fmt(&tk->tp, ptype);
 878	if (ret < 0)
 879		goto error;
 880
 881	ret = register_trace_kprobe(tk);
 882	if (ret) {
 883		trace_probe_log_set_index(1);
 884		if (ret == -EILSEQ)
 885			trace_probe_log_err(0, BAD_INSN_BNDRY);
 886		else if (ret == -ENOENT)
 887			trace_probe_log_err(0, BAD_PROBE_ADDR);
 888		else if (ret != -ENOMEM && ret != -EEXIST)
 889			trace_probe_log_err(0, FAIL_REG_PROBE);
 890		goto error;
 891	}
 892
 893out:
 894	trace_probe_log_clear();
 895	kfree(symbol);
 896	return ret;
 897
 898parse_error:
 899	ret = -EINVAL;
 900error:
 901	free_trace_kprobe(tk);
 902	goto out;
 903}
 904
 905static int trace_kprobe_create(const char *raw_command)
 906{
 907	return trace_probe_create(raw_command, __trace_kprobe_create);
 908}
 909
 910static int create_or_delete_trace_kprobe(const char *raw_command)
 911{
 912	int ret;
 913
 914	if (raw_command[0] == '-')
 915		return dyn_event_release(raw_command, &trace_kprobe_ops);
 916
 917	ret = trace_kprobe_create(raw_command);
 918	return ret == -ECANCELED ? -EINVAL : ret;
 919}
 920
 921static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
 922{
 923	return create_or_delete_trace_kprobe(cmd->seq.buffer);
 924}
 925
 926/**
 927 * kprobe_event_cmd_init - Initialize a kprobe event command object
 928 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 929 * @buf: A pointer to the buffer used to build the command
 930 * @maxlen: The length of the buffer passed in @buf
 931 *
 932 * Initialize a kprobe event command object.  Use this before
 933 * calling any of the other kprobe_event functions.
 934 */
 935void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
 936{
 937	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
 938			  trace_kprobe_run_command);
 939}
 940EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
 941
 942/**
 943 * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
 944 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 945 * @name: The name of the kprobe event
 946 * @loc: The location of the kprobe event
 947 * @kretprobe: Is this a return probe?
 948 * @args: Variable number of arg (pairs), one pair for each field
 949 *
 950 * NOTE: Users normally won't want to call this function directly, but
 951 * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
 952 * adds a NULL to the end of the arg list.  If this function is used
 953 * directly, make sure the last arg in the variable arg list is NULL.
 954 *
 955 * Generate a kprobe event command to be executed by
 956 * kprobe_event_gen_cmd_end().  This function can be used to generate the
 957 * complete command or only the first part of it; in the latter case,
 958 * kprobe_event_add_fields() can be used to add more fields following this.
 959 *
 960 * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
 961 * returns -EINVAL if @loc == NULL.
 962 *
 963 * Return: 0 if successful, error otherwise.
 964 */
 965int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
 966				 const char *name, const char *loc, ...)
 967{
 968	char buf[MAX_EVENT_NAME_LEN];
 969	struct dynevent_arg arg;
 970	va_list args;
 971	int ret;
 972
 973	if (cmd->type != DYNEVENT_TYPE_KPROBE)
 974		return -EINVAL;
 975
 976	if (!loc)
 977		return -EINVAL;
 978
 979	if (kretprobe)
 980		snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
 981	else
 982		snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);
 983
 984	ret = dynevent_str_add(cmd, buf);
 985	if (ret)
 986		return ret;
 987
 988	dynevent_arg_init(&arg, 0);
 989	arg.str = loc;
 990	ret = dynevent_arg_add(cmd, &arg, NULL);
 991	if (ret)
 992		return ret;
 993
 994	va_start(args, loc);
 995	for (;;) {
 996		const char *field;
 997
 998		field = va_arg(args, const char *);
 999		if (!field)
1000			break;
1001
1002		if (++cmd->n_fields > MAX_TRACE_ARGS) {
1003			ret = -EINVAL;
1004			break;
1005		}
1006
1007		arg.str = field;
1008		ret = dynevent_arg_add(cmd, &arg, NULL);
1009		if (ret)
1010			break;
1011	}
1012	va_end(args);
1013
1014	return ret;
1015}
1016EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
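/*
 * A minimal in-kernel usage sketch (illustrative names; see also
 * Documentation/trace/events.rst):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *	ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
 *					 "do_sys_open", "dfd=%ax");
 */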
1017
1018/**
1019 * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
1020 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1021 * @args: Variable number of arg (pairs), one pair for each field
1022 *
1023 * NOTE: Users normally won't want to call this function directly, but
1024 * rather use the kprobe_event_add_fields() wrapper, which
1025 * automatically adds a NULL to the end of the arg list.  If this
1026 * function is used directly, make sure the last arg in the variable
1027 * arg list is NULL.
1028 *
1029 * Add probe fields to an existing kprobe command using a variable
1030 * list of args.  Fields are added in the same order they're listed.
1031 *
1032 * Return: 0 if successful, error otherwise.
1033 */
1034int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
1035{
1036	struct dynevent_arg arg;
1037	va_list args;
1038	int ret = 0;
1039
1040	if (cmd->type != DYNEVENT_TYPE_KPROBE)
1041		return -EINVAL;
1042
1043	dynevent_arg_init(&arg, 0);
1044
1045	va_start(args, cmd);
1046	for (;;) {
1047		const char *field;
1048
1049		field = va_arg(args, const char *);
1050		if (!field)
1051			break;
1052
1053		if (++cmd->n_fields > MAX_TRACE_ARGS) {
1054			ret = -EINVAL;
1055			break;
1056		}
1057
1058		arg.str = field;
1059		ret = dynevent_arg_add(cmd, &arg, NULL);
1060		if (ret)
1061			break;
1062	}
1063	va_end(args);
1064
1065	return ret;
1066}
1067EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);
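/*
 * Continuing the sketch above, more fields can be appended before the
 * command is finally executed with kprobe_event_gen_cmd_end():
 *
 *	ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 */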
1068
1069/**
1070 * kprobe_event_delete - Delete a kprobe event
1071 * @name: The name of the kprobe event to delete
1072 *
1073 * Delete a kprobe event with the given @name from kernel code rather
1074 * than directly from the command line.
1075 *
1076 * Return: 0 if successful, error otherwise.
1077 */
1078int kprobe_event_delete(const char *name)
1079{
1080	char buf[MAX_EVENT_NAME_LEN];
1081
1082	snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);
1083
1084	return create_or_delete_trace_kprobe(buf);
1085}
1086EXPORT_SYMBOL_GPL(kprobe_event_delete);
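/*
 * E.g. to remove the event generated in the sketch above:
 *
 *	ret = kprobe_event_delete("gen_kprobe_test");
 */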
1087
1088static int trace_kprobe_release(struct dyn_event *ev)
1089{
1090	struct trace_kprobe *tk = to_trace_kprobe(ev);
1091	int ret = unregister_trace_kprobe(tk);
1092
1093	if (!ret)
1094		free_trace_kprobe(tk);
1095	return ret;
1096}
1097
1098static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
1099{
1100	struct trace_kprobe *tk = to_trace_kprobe(ev);
1101	int i;
1102
1103	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
1104	if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
1105		seq_printf(m, "%d", tk->rp.maxactive);
1106	seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
1107				trace_probe_name(&tk->tp));
1108
1109	if (!tk->symbol)
1110		seq_printf(m, " 0x%p", tk->rp.kp.addr);
1111	else if (tk->rp.kp.offset)
1112		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
1113			   tk->rp.kp.offset);
1114	else
1115		seq_printf(m, " %s", trace_kprobe_symbol(tk));
1116
1117	for (i = 0; i < tk->tp.nr_args; i++)
1118		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
1119	seq_putc(m, '\n');
1120
1121	return 0;
1122}
1123
1124static int probes_seq_show(struct seq_file *m, void *v)
1125{
1126	struct dyn_event *ev = v;
1127
1128	if (!is_trace_kprobe(ev))
1129		return 0;
1130
1131	return trace_kprobe_show(m, ev);
1132}
1133
1134static const struct seq_operations probes_seq_op = {
1135	.start  = dyn_event_seq_start,
1136	.next   = dyn_event_seq_next,
1137	.stop   = dyn_event_seq_stop,
1138	.show   = probes_seq_show
1139};
1140
1141static int probes_open(struct inode *inode, struct file *file)
1142{
1143	int ret;
1144
1145	ret = security_locked_down(LOCKDOWN_TRACEFS);
1146	if (ret)
1147		return ret;
1148
1149	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1150		ret = dyn_events_release_all(&trace_kprobe_ops);
1151		if (ret < 0)
1152			return ret;
1153	}
1154
1155	return seq_open(file, &probes_seq_op);
1156}
1157
1158static ssize_t probes_write(struct file *file, const char __user *buffer,
1159			    size_t count, loff_t *ppos)
1160{
1161	return trace_parse_run_command(file, buffer, count, ppos,
1162				       create_or_delete_trace_kprobe);
1163}
1164
1165static const struct file_operations kprobe_events_ops = {
1166	.owner          = THIS_MODULE,
1167	.open           = probes_open,
1168	.read           = seq_read,
1169	.llseek         = seq_lseek,
1170	.release        = seq_release,
1171	.write		= probes_write,
1172};
1173
1174/* Probes profiling interfaces */
1175static int probes_profile_seq_show(struct seq_file *m, void *v)
1176{
1177	struct dyn_event *ev = v;
1178	struct trace_kprobe *tk;
1179	unsigned long nmissed;
1180
1181	if (!is_trace_kprobe(ev))
1182		return 0;
1183
1184	tk = to_trace_kprobe(ev);
1185	nmissed = trace_kprobe_is_return(tk) ?
1186		tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
1187	seq_printf(m, "  %-44s %15lu %15lu\n",
1188		   trace_probe_name(&tk->tp),
1189		   trace_kprobe_nhit(tk),
1190		   nmissed);
1191
1192	return 0;
1193}
1194
1195static const struct seq_operations profile_seq_op = {
1196	.start  = dyn_event_seq_start,
1197	.next   = dyn_event_seq_next,
1198	.stop   = dyn_event_seq_stop,
1199	.show   = probes_profile_seq_show
1200};
1201
1202static int profile_open(struct inode *inode, struct file *file)
1203{
1204	int ret;
1205
1206	ret = security_locked_down(LOCKDOWN_TRACEFS);
1207	if (ret)
1208		return ret;
1209
1210	return seq_open(file, &profile_seq_op);
1211}
1212
1213static const struct file_operations kprobe_profile_ops = {
1214	.owner          = THIS_MODULE,
1215	.open           = profile_open,
1216	.read           = seq_read,
1217	.llseek         = seq_lseek,
1218	.release        = seq_release,
1219};
1220
1221/* Kprobe specific fetch functions */
1222
1223/* Return the length of the string -- including the terminating null byte */
1224static nokprobe_inline int
1225fetch_store_strlen_user(unsigned long addr)
1226{
1227	return kern_fetch_store_strlen_user(addr);
1228}
1229
1230/* Return the length of the string -- including the terminating null byte */
1231static nokprobe_inline int
1232fetch_store_strlen(unsigned long addr)
1233{
1234	return kern_fetch_store_strlen(addr);
1235}
1236
1237/*
1238 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
1239 * with max length and relative data location.
1240 */
1241static nokprobe_inline int
1242fetch_store_string_user(unsigned long addr, void *dest, void *base)
1243{
1244	return kern_fetch_store_string_user(addr, dest, base);
1245}
1246
1247/*
1248 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
1249 * length and relative data location.
1250 */
1251static nokprobe_inline int
1252fetch_store_string(unsigned long addr, void *dest, void *base)
1253{
1254	return kern_fetch_store_string(addr, dest, base);
1255}
1256
1257static nokprobe_inline int
1258probe_mem_read_user(void *dest, void *src, size_t size)
1259{
1260	const void __user *uaddr =  (__force const void __user *)src;
1261
1262	return copy_from_user_nofault(dest, uaddr, size);
1263}
1264
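/*
 * Read kernel memory. On architectures where kernel and user address
 * spaces cannot overlap, an address below TASK_SIZE must be a user
 * address, so fall back to a user-space read for it.
 */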
1265static nokprobe_inline int
1266probe_mem_read(void *dest, void *src, size_t size)
1267{
1268#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
1269	if ((unsigned long)src < TASK_SIZE)
1270		return probe_mem_read_user(dest, src, size);
1271#endif
1272	return copy_from_kernel_nofault(dest, src, size);
1273}
1274
1275/* Note that we don't verify the fetch code, since it does not come from user space */
1276static int
1277process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
1278		   void *base)
1279{
1280	struct pt_regs *regs = rec;
1281	unsigned long val;
1282
1283retry:
1284	/* 1st stage: get value from context */
1285	switch (code->op) {
1286	case FETCH_OP_REG:
1287		val = regs_get_register(regs, code->param);
1288		break;
1289	case FETCH_OP_STACK:
1290		val = regs_get_kernel_stack_nth(regs, code->param);
1291		break;
1292	case FETCH_OP_STACKP:
1293		val = kernel_stack_pointer(regs);
1294		break;
1295	case FETCH_OP_RETVAL:
1296		val = regs_return_value(regs);
1297		break;
1298	case FETCH_OP_IMM:
1299		val = code->immediate;
1300		break;
1301	case FETCH_OP_COMM:
1302		val = (unsigned long)current->comm;
1303		break;
1304	case FETCH_OP_DATA:
1305		val = (unsigned long)code->data;
1306		break;
1307#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
1308	case FETCH_OP_ARG:
1309		val = regs_get_kernel_argument(regs, code->param);
1310		break;
1311#endif
1312	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
1313		code++;
1314		goto retry;
1315	default:
1316		return -EILSEQ;
1317	}
1318	code++;
1319
1320	return process_fetch_insn_bottom(code, val, dest, base);
1321}
1322NOKPROBE_SYMBOL(process_fetch_insn)
1323
1324/* Kprobe handler */
1325static nokprobe_inline void
1326__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1327		    struct trace_event_file *trace_file)
1328{
1329	struct kprobe_trace_entry_head *entry;
1330	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1331	struct trace_event_buffer fbuffer;
1332	int dsize;
1333
1334	WARN_ON(call != trace_file->event_call);
1335
1336	if (trace_trigger_soft_disabled(trace_file))
1337		return;
1338
1339	dsize = __get_data_size(&tk->tp, regs);
1340
1341	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1342					   sizeof(*entry) + tk->tp.size + dsize);
1343	if (!entry)
1344		return;
1345
1346	fbuffer.regs = regs;
1347	entry->ip = (unsigned long)tk->rp.kp.addr;
1348	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1349
1350	trace_event_buffer_commit(&fbuffer);
1351}
1352
1353static void
1354kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1355{
1356	struct event_file_link *link;
1357
1358	trace_probe_for_each_link_rcu(link, &tk->tp)
1359		__kprobe_trace_func(tk, regs, link->file);
1360}
1361NOKPROBE_SYMBOL(kprobe_trace_func);
1362
1363/* Kretprobe handler */
1364static nokprobe_inline void
1365__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1366		       struct pt_regs *regs,
1367		       struct trace_event_file *trace_file)
1368{
1369	struct kretprobe_trace_entry_head *entry;
1370	struct trace_event_buffer fbuffer;
1371	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1372	int dsize;
1373
1374	WARN_ON(call != trace_file->event_call);
1375
1376	if (trace_trigger_soft_disabled(trace_file))
1377		return;
1378
1379	dsize = __get_data_size(&tk->tp, regs);
1380
1381	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1382					   sizeof(*entry) + tk->tp.size + dsize);
1383	if (!entry)
1384		return;
1385
1386	fbuffer.regs = regs;
1387	entry->func = (unsigned long)tk->rp.kp.addr;
1388	entry->ret_ip = get_kretprobe_retaddr(ri);
1389	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1390
1391	trace_event_buffer_commit(&fbuffer);
1392}
1393
1394static void
1395kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1396		     struct pt_regs *regs)
1397{
1398	struct event_file_link *link;
1399
1400	trace_probe_for_each_link_rcu(link, &tk->tp)
1401		__kretprobe_trace_func(tk, ri, regs, link->file);
1402}
1403NOKPROBE_SYMBOL(kretprobe_trace_func);
1404
1405/* Event entry printers */
1406static enum print_line_t
1407print_kprobe_event(struct trace_iterator *iter, int flags,
1408		   struct trace_event *event)
1409{
1410	struct kprobe_trace_entry_head *field;
1411	struct trace_seq *s = &iter->seq;
1412	struct trace_probe *tp;
1413
1414	field = (struct kprobe_trace_entry_head *)iter->ent;
1415	tp = trace_probe_primary_from_call(
1416		container_of(event, struct trace_event_call, event));
1417	if (WARN_ON_ONCE(!tp))
1418		goto out;
1419
1420	trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1421
1422	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1423		goto out;
1424
1425	trace_seq_putc(s, ')');
1426
1427	if (print_probe_args(s, tp->args, tp->nr_args,
1428			     (u8 *)&field[1], field) < 0)
1429		goto out;
1430
1431	trace_seq_putc(s, '\n');
1432 out:
1433	return trace_handle_return(s);
1434}
1435
1436static enum print_line_t
1437print_kretprobe_event(struct trace_iterator *iter, int flags,
1438		      struct trace_event *event)
1439{
1440	struct kretprobe_trace_entry_head *field;
1441	struct trace_seq *s = &iter->seq;
1442	struct trace_probe *tp;
1443
1444	field = (struct kretprobe_trace_entry_head *)iter->ent;
1445	tp = trace_probe_primary_from_call(
1446		container_of(event, struct trace_event_call, event));
1447	if (WARN_ON_ONCE(!tp))
1448		goto out;
1449
1450	trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1451
1452	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1453		goto out;
1454
1455	trace_seq_puts(s, " <- ");
1456
1457	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1458		goto out;
1459
1460	trace_seq_putc(s, ')');
1461
1462	if (print_probe_args(s, tp->args, tp->nr_args,
1463			     (u8 *)&field[1], field) < 0)
1464		goto out;
1465
1466	trace_seq_putc(s, '\n');
1467
1468 out:
1469	return trace_handle_return(s);
1470}
1471
1472
1473static int kprobe_event_define_fields(struct trace_event_call *event_call)
1474{
1475	int ret;
1476	struct kprobe_trace_entry_head field;
1477	struct trace_probe *tp;
1478
1479	tp = trace_probe_primary_from_call(event_call);
1480	if (WARN_ON_ONCE(!tp))
1481		return -ENOENT;
1482
1483	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1484
1485	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1486}
1487
1488static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1489{
1490	int ret;
1491	struct kretprobe_trace_entry_head field;
1492	struct trace_probe *tp;
1493
1494	tp = trace_probe_primary_from_call(event_call);
1495	if (WARN_ON_ONCE(!tp))
1496		return -ENOENT;
1497
1498	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1499	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1500
1501	return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1502}
1503
1504#ifdef CONFIG_PERF_EVENTS
1505
1506/* Kprobe profile handler */
1507static int
1508kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1509{
1510	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1511	struct kprobe_trace_entry_head *entry;
1512	struct hlist_head *head;
1513	int size, __size, dsize;
1514	int rctx;
1515
1516	if (bpf_prog_array_valid(call)) {
1517		unsigned long orig_ip = instruction_pointer(regs);
1518		int ret;
1519
1520		ret = trace_call_bpf(call, regs);
1521
1522		/*
1523		 * We need to check and see if we modified the pc of the
1524		 * pt_regs, and if so return 1 so that we don't do the
1525		 * single stepping.
1526		 */
1527		if (orig_ip != instruction_pointer(regs))
1528			return 1;
1529		if (!ret)
1530			return 0;
1531	}
1532
1533	head = this_cpu_ptr(call->perf_events);
1534	if (hlist_empty(head))
1535		return 0;
1536
1537	dsize = __get_data_size(&tk->tp, regs);
1538	__size = sizeof(*entry) + tk->tp.size + dsize;
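	/*
	 * perf prepends a u32 size field to each record; round the size
	 * up so that the record stays u64-aligned once that u32 is
	 * included (the same pattern is used in kretprobe_perf_func()
	 * below).
	 */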
1539	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1540	size -= sizeof(u32);
1541
1542	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1543	if (!entry)
1544		return 0;
1545
1546	entry->ip = (unsigned long)tk->rp.kp.addr;
1547	memset(&entry[1], 0, dsize);
1548	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1549	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1550			      head, NULL);
1551	return 0;
1552}
1553NOKPROBE_SYMBOL(kprobe_perf_func);
1554
1555/* Kretprobe profile handler */
1556static void
1557kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1558		    struct pt_regs *regs)
1559{
1560	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1561	struct kretprobe_trace_entry_head *entry;
1562	struct hlist_head *head;
1563	int size, __size, dsize;
1564	int rctx;
1565
1566	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1567		return;
1568
1569	head = this_cpu_ptr(call->perf_events);
1570	if (hlist_empty(head))
1571		return;
1572
1573	dsize = __get_data_size(&tk->tp, regs);
1574	__size = sizeof(*entry) + tk->tp.size + dsize;
1575	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1576	size -= sizeof(u32);
1577
1578	entry = perf_trace_buf_alloc(size, NULL, &rctx);
1579	if (!entry)
1580		return;
1581
1582	entry->func = (unsigned long)tk->rp.kp.addr;
1583	entry->ret_ip = get_kretprobe_retaddr(ri);
1584	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1585	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1586			      head, NULL);
1587}
1588NOKPROBE_SYMBOL(kretprobe_perf_func);
1589
1590int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1591			const char **symbol, u64 *probe_offset,
1592			u64 *probe_addr, bool perf_type_tracepoint)
1593{
1594	const char *pevent = trace_event_name(event->tp_event);
1595	const char *group = event->tp_event->class->system;
1596	struct trace_kprobe *tk;
1597
1598	if (perf_type_tracepoint)
1599		tk = find_trace_kprobe(pevent, group);
1600	else
1601		tk = trace_kprobe_primary_from_call(event->tp_event);
1602	if (!tk)
1603		return -EINVAL;
1604
1605	*fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1606					      : BPF_FD_TYPE_KPROBE;
1607	if (tk->symbol) {
1608		*symbol = tk->symbol;
1609		*probe_offset = tk->rp.kp.offset;
1610		*probe_addr = 0;
1611	} else {
1612		*symbol = NULL;
1613		*probe_offset = 0;
1614		*probe_addr = (unsigned long)tk->rp.kp.addr;
1615	}
1616	return 0;
1617}
1618#endif	/* CONFIG_PERF_EVENTS */
1619
1620/*
1621 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1622 *
1623 * kprobe_trace_self_tests_init() calls enable_trace_kprobe()/disable_trace_kprobe()
1624 * locklessly, but we can't race with this __init function.
1625 */
1626static int kprobe_register(struct trace_event_call *event,
1627			   enum trace_reg type, void *data)
1628{
1629	struct trace_event_file *file = data;
1630
1631	switch (type) {
1632	case TRACE_REG_REGISTER:
1633		return enable_trace_kprobe(event, file);
1634	case TRACE_REG_UNREGISTER:
1635		return disable_trace_kprobe(event, file);
1636
1637#ifdef CONFIG_PERF_EVENTS
1638	case TRACE_REG_PERF_REGISTER:
1639		return enable_trace_kprobe(event, NULL);
1640	case TRACE_REG_PERF_UNREGISTER:
1641		return disable_trace_kprobe(event, NULL);
1642	case TRACE_REG_PERF_OPEN:
1643	case TRACE_REG_PERF_CLOSE:
1644	case TRACE_REG_PERF_ADD:
1645	case TRACE_REG_PERF_DEL:
1646		return 0;
1647#endif
1648	}
1649	return 0;
1650}
1651
1652static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1653{
1654	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1655	int ret = 0;
1656
1657	raw_cpu_inc(*tk->nhit);
1658
1659	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1660		kprobe_trace_func(tk, regs);
1661#ifdef CONFIG_PERF_EVENTS
1662	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1663		ret = kprobe_perf_func(tk, regs);
1664#endif
1665	return ret;
1666}
1667NOKPROBE_SYMBOL(kprobe_dispatcher);
1668
1669static int
1670kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1671{
1672	struct kretprobe *rp = get_kretprobe(ri);
1673	struct trace_kprobe *tk;
1674
1675	/*
1676	 * There is a small chance that get_kretprobe(ri) returns NULL when
1677	 * the kretprobe is unregistered on another CPU between the kretprobe's
1678	 * trampoline_handler and this function.
1679	 */
1680	if (unlikely(!rp))
1681		return 0;
1682
1683	tk = container_of(rp, struct trace_kprobe, rp);
1684	raw_cpu_inc(*tk->nhit);
1685
1686	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1687		kretprobe_trace_func(tk, ri, regs);
1688#ifdef CONFIG_PERF_EVENTS
1689	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1690		kretprobe_perf_func(tk, ri, regs);
1691#endif
1692	return 0;	/* We don't tweak kernel, so just return 0 */
1693}
1694NOKPROBE_SYMBOL(kretprobe_dispatcher);
1695
1696static struct trace_event_functions kretprobe_funcs = {
1697	.trace		= print_kretprobe_event
1698};
1699
1700static struct trace_event_functions kprobe_funcs = {
1701	.trace		= print_kprobe_event
1702};
1703
1704static struct trace_event_fields kretprobe_fields_array[] = {
1705	{ .type = TRACE_FUNCTION_TYPE,
1706	  .define_fields = kretprobe_event_define_fields },
1707	{}
1708};
1709
1710static struct trace_event_fields kprobe_fields_array[] = {
1711	{ .type = TRACE_FUNCTION_TYPE,
1712	  .define_fields = kprobe_event_define_fields },
1713	{}
1714};
1715
1716static inline void init_trace_event_call(struct trace_kprobe *tk)
1717{
1718	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1719
1720	if (trace_kprobe_is_return(tk)) {
1721		call->event.funcs = &kretprobe_funcs;
1722		call->class->fields_array = kretprobe_fields_array;
1723	} else {
1724		call->event.funcs = &kprobe_funcs;
1725		call->class->fields_array = kprobe_fields_array;
1726	}
1727
1728	call->flags = TRACE_EVENT_FL_KPROBE;
1729	call->class->reg = kprobe_register;
1730}
1731
1732static int register_kprobe_event(struct trace_kprobe *tk)
1733{
1734	init_trace_event_call(tk);
1735
1736	return trace_probe_register_event_call(&tk->tp);
1737}
1738
1739static int unregister_kprobe_event(struct trace_kprobe *tk)
1740{
1741	return trace_probe_unregister_event_call(&tk->tp);
1742}
1743
1744#ifdef CONFIG_PERF_EVENTS
1745/* Create a trace_kprobe, but don't add it to global lists */
1746struct trace_event_call *
1747create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1748			  bool is_return)
1749{
1750	enum probe_print_type ptype;
1751	struct trace_kprobe *tk;
1752	int ret;
1753	char *event;
1754
1755	/*
1756	 * local trace_kprobes are not added to dyn_event, so they are never
1757	 * searched in find_trace_kprobe(). Therefore, there is no concern of
1758	 * duplicated name here.
1759	 */
1760	event = func ? func : "DUMMY_EVENT";
1761
1762	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1763				offs, 0 /* maxactive */, 0 /* nargs */,
1764				is_return);
1765
1766	if (IS_ERR(tk)) {
1767		pr_info("Failed to allocate trace_probe.(%d)\n",
1768			(int)PTR_ERR(tk));
1769		return ERR_CAST(tk);
1770	}
1771
1772	init_trace_event_call(tk);
1773
1774	ptype = trace_kprobe_is_return(tk) ?
1775		PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1776	if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0) {
1777		ret = -ENOMEM;
1778		goto error;
1779	}
1780
1781	ret = __register_trace_kprobe(tk);
1782	if (ret < 0)
1783		goto error;
1784
1785	return trace_probe_event_call(&tk->tp);
1786error:
1787	free_trace_kprobe(tk);
1788	return ERR_PTR(ret);
1789}
1790
1791void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1792{
1793	struct trace_kprobe *tk;
1794
1795	tk = trace_kprobe_primary_from_call(event_call);
1796	if (unlikely(!tk))
1797		return;
1798
1799	if (trace_probe_is_enabled(&tk->tp)) {
1800		WARN_ON(1);
1801		return;
1802	}
1803
1804	__unregister_trace_kprobe(tk);
1805
1806	free_trace_kprobe(tk);
1807}
1808#endif /* CONFIG_PERF_EVENTS */
1809
1810static __init void enable_boot_kprobe_events(void)
1811{
1812	struct trace_array *tr = top_trace_array();
1813	struct trace_event_file *file;
1814	struct trace_kprobe *tk;
1815	struct dyn_event *pos;
1816
1817	mutex_lock(&event_mutex);
1818	for_each_trace_kprobe(tk, pos) {
1819		list_for_each_entry(file, &tr->events, list)
1820			if (file->event_call == trace_probe_event_call(&tk->tp))
1821				trace_event_enable_disable(file, 1, 0);
1822	}
1823	mutex_unlock(&event_mutex);
1824}
1825
1826static __init void setup_boot_kprobe_events(void)
1827{
1828	char *p, *cmd = kprobe_boot_events_buf;
1829	int ret;
1830
1831	strreplace(kprobe_boot_events_buf, ',', ' ');
1832
1833	while (cmd && *cmd != '\0') {
1834		p = strchr(cmd, ';');
1835		if (p)
1836			*p++ = '\0';
1837
1838		ret = create_or_delete_trace_kprobe(cmd);
1839		if (ret)
1840			pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1841
1842		cmd = p;
1843	}
1844
1845	enable_boot_kprobe_events();
1846}
1847
1848/*
1849 * Register dynevent at core_initcall. This allows kernel to setup kprobe
1850 * events in postcore_initcall without tracefs.
1851 */
1852static __init int init_kprobe_trace_early(void)
1853{
1854	int ret;
1855
1856	ret = dyn_event_register(&trace_kprobe_ops);
1857	if (ret)
1858		return ret;
1859
1860	if (register_module_notifier(&trace_kprobe_module_nb))
1861		return -EINVAL;
1862
1863	return 0;
1864}
1865core_initcall(init_kprobe_trace_early);
1866
1867/* Make a tracefs interface for controlling probe points */
1868static __init int init_kprobe_trace(void)
1869{
1870	int ret;
1871
1872	ret = tracing_init_dentry();
1873	if (ret)
1874		return 0;
1875
1876	/* Event list interface */
1877	trace_create_file("kprobe_events", TRACE_MODE_WRITE,
1878			  NULL, NULL, &kprobe_events_ops);
1879
1880	/* Profile interface */
1881	trace_create_file("kprobe_profile", TRACE_MODE_READ,
1882			  NULL, NULL, &kprobe_profile_ops);
1883
1884	setup_boot_kprobe_events();
1885
1886	return 0;
1887}
1888fs_initcall(init_kprobe_trace);
1889
1890
1891#ifdef CONFIG_FTRACE_STARTUP_TEST
1892static __init struct trace_event_file *
1893find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1894{
1895	struct trace_event_file *file;
1896
1897	list_for_each_entry(file, &tr->events, list)
1898		if (file->event_call == trace_probe_event_call(&tk->tp))
1899			return file;
1900
1901	return NULL;
1902}
1903
1904/*
1905 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1906 * stage, so we can do this locklessly.
1907 */
1908static __init int kprobe_trace_self_tests_init(void)
1909{
1910	int ret, warn = 0;
1911	int (*target)(int, int, int, int, int, int);
1912	struct trace_kprobe *tk;
1913	struct trace_event_file *file;
1914
1915	if (tracing_is_disabled())
1916		return -ENODEV;
1917
1918	if (tracing_selftest_disabled)
1919		return 0;
1920
1921	target = kprobe_trace_selftest_target;
1922
1923	pr_info("Testing kprobe tracing: ");
1924
1925	ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
1926	if (WARN_ON_ONCE(ret)) {
1927		pr_warn("error on probing function entry.\n");
1928		warn++;
1929	} else {
1930		/* Enable trace point */
1931		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1932		if (WARN_ON_ONCE(tk == NULL)) {
1933			pr_warn("error on getting new probe.\n");
1934			warn++;
1935		} else {
1936			file = find_trace_probe_file(tk, top_trace_array());
1937			if (WARN_ON_ONCE(file == NULL)) {
1938				pr_warn("error on getting probe file.\n");
1939				warn++;
1940			} else
1941				enable_trace_kprobe(
1942					trace_probe_event_call(&tk->tp), file);
1943		}
1944	}
1945
1946	ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
1947	if (WARN_ON_ONCE(ret)) {
1948		pr_warn("error on probing function return.\n");
1949		warn++;
1950	} else {
1951		/* Enable trace point */
1952		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1953		if (WARN_ON_ONCE(tk == NULL)) {
1954			pr_warn("error on getting 2nd new probe.\n");
1955			warn++;
1956		} else {
1957			file = find_trace_probe_file(tk, top_trace_array());
1958			if (WARN_ON_ONCE(file == NULL)) {
1959				pr_warn("error on getting probe file.\n");
1960				warn++;
1961			} else
1962				enable_trace_kprobe(
1963					trace_probe_event_call(&tk->tp), file);
1964		}
1965	}
1966
1967	if (warn)
1968		goto end;
1969
1970	ret = target(1, 2, 3, 4, 5, 6);
1971
1972	/*
1973	 * Not expecting an error here; the check is only to prevent the
1974	 * optimizer from removing the call to target() as otherwise there
1975	 * are no side-effects and the call is never performed.
1976	 */
1977	if (ret != 21)
1978		warn++;
1979
1980	/* Disable trace points before removing them */
1981	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1982	if (WARN_ON_ONCE(tk == NULL)) {
1983		pr_warn("error on getting test probe.\n");
1984		warn++;
1985	} else {
1986		if (trace_kprobe_nhit(tk) != 1) {
1987			pr_warn("incorrect number of testprobe hits\n");
1988			warn++;
1989		}
1990
1991		file = find_trace_probe_file(tk, top_trace_array());
1992		if (WARN_ON_ONCE(file == NULL)) {
1993			pr_warn("error on getting probe file.\n");
1994			warn++;
1995		} else
1996			disable_trace_kprobe(
1997				trace_probe_event_call(&tk->tp), file);
1998	}
1999
2000	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2001	if (WARN_ON_ONCE(tk == NULL)) {
2002		pr_warn("error on getting 2nd test probe.\n");
2003		warn++;
2004	} else {
2005		if (trace_kprobe_nhit(tk) != 1) {
2006			pr_warn("incorrect number of testprobe2 hits\n");
2007			warn++;
2008		}
2009
2010		file = find_trace_probe_file(tk, top_trace_array());
2011		if (WARN_ON_ONCE(file == NULL)) {
2012			pr_warn("error on getting probe file.\n");
2013			warn++;
2014		} else
2015			disable_trace_kprobe(
2016				trace_probe_event_call(&tk->tp), file);
2017	}
2018
2019	ret = create_or_delete_trace_kprobe("-:testprobe");
2020	if (WARN_ON_ONCE(ret)) {
2021		pr_warn("error on deleting a probe.\n");
2022		warn++;
2023	}
2024
2025	ret = create_or_delete_trace_kprobe("-:testprobe2");
2026	if (WARN_ON_ONCE(ret)) {
2027		pr_warn("error on deleting a probe.\n");
2028		warn++;
2029	}
2030
2031end:
2032	ret = dyn_events_release_all(&trace_kprobe_ops);
2033	if (WARN_ON_ONCE(ret)) {
2034		pr_warn("error on cleaning up probes.\n");
2035		warn++;
2036	}
2037	/*
2038	 * Wait for the optimizer work to finish. Otherwise it might fiddle
2039	 * with probes in already freed __init text.
2040	 */
2041	wait_for_kprobe_optimizer();
2042	if (warn)
2043		pr_cont("NG: Some tests failed. Please check them.\n");
2044	else
2045		pr_cont("OK\n");
2046	return 0;
2047}
2048
2049late_initcall(kprobe_trace_self_tests_init);
2050
2051#endif
v3.1
   1/*
   2 * Kprobes-based tracing events
   3 *
   4 * Created by Masami Hiramatsu <mhiramat@redhat.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/uaccess.h>
  22#include <linux/kprobes.h>
  23#include <linux/seq_file.h>
  24#include <linux/slab.h>
  25#include <linux/smp.h>
  26#include <linux/debugfs.h>
  27#include <linux/types.h>
  28#include <linux/string.h>
  29#include <linux/ctype.h>
  30#include <linux/ptrace.h>
  31#include <linux/perf_event.h>
  32#include <linux/stringify.h>
  33#include <linux/limits.h>
  34#include <asm/bitsperlong.h>
  35
  36#include "trace.h"
  37#include "trace_output.h"
  38
  39#define MAX_TRACE_ARGS 128
  40#define MAX_ARGSTR_LEN 63
  41#define MAX_EVENT_NAME_LEN 64
  42#define MAX_STRING_SIZE PATH_MAX
  43#define KPROBE_EVENT_SYSTEM "kprobes"
  44
  45/* Reserved field names */
  46#define FIELD_STRING_IP "__probe_ip"
  47#define FIELD_STRING_RETIP "__probe_ret_ip"
  48#define FIELD_STRING_FUNC "__probe_func"
  49
  50const char *reserved_field_names[] = {
  51	"common_type",
  52	"common_flags",
  53	"common_preempt_count",
  54	"common_pid",
  55	"common_tgid",
  56	FIELD_STRING_IP,
  57	FIELD_STRING_RETIP,
  58	FIELD_STRING_FUNC,
  59};
  60
  61/* Printing function type */
  62typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *,
  63				 void *);
  64#define PRINT_TYPE_FUNC_NAME(type)	print_type_##type
  65#define PRINT_TYPE_FMT_NAME(type)	print_type_format_##type
  66
  67/* Print function template for basic types */
  68#define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast)			\
  69static __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s,	\
  70						const char *name,	\
  71						void *data, void *ent)\
  72{									\
  73	return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\
  74}									\
  75static const char PRINT_TYPE_FMT_NAME(type)[] = fmt;
  76
  77DEFINE_BASIC_PRINT_TYPE_FUNC(u8, "%x", unsigned int)
  78DEFINE_BASIC_PRINT_TYPE_FUNC(u16, "%x", unsigned int)
  79DEFINE_BASIC_PRINT_TYPE_FUNC(u32, "%lx", unsigned long)
  80DEFINE_BASIC_PRINT_TYPE_FUNC(u64, "%llx", unsigned long long)
  81DEFINE_BASIC_PRINT_TYPE_FUNC(s8, "%d", int)
  82DEFINE_BASIC_PRINT_TYPE_FUNC(s16, "%d", int)
  83DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long)
  84DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long)
  85
  86/* data_rloc: data relative location, compatible with u32 */
  87#define make_data_rloc(len, roffs)	\
  88	(((u32)(len) << 16) | ((u32)(roffs) & 0xffff))
  89#define get_rloc_len(dl)	((u32)(dl) >> 16)
  90#define get_rloc_offs(dl)	((u32)(dl) & 0xffff)
  91
  92static inline void *get_rloc_data(u32 *dl)
  93{
  94	return (u8 *)dl + get_rloc_offs(*dl);
  95}
  96
  97/* For data_loc conversion */
  98static inline void *get_loc_data(u32 *dl, void *ent)
  99{
 100	return (u8 *)ent + get_rloc_offs(*dl);
 101}
 102
 103/*
 104 * Convert data_rloc to data_loc:
 105 *  data_rloc stores the offset from data_rloc itself, but data_loc
 106 *  stores the offset from event entry.
 107 */
 108#define convert_rloc_to_loc(dl, offs)	((u32)(dl) + (offs))
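/*
 * Worked example (illustrative): make_data_rloc(5, 12) packs length 5 and
 * offset 12 into 0x0005000c; get_rloc_len() then yields 5 and
 * get_rloc_offs() yields 12. If that rloc word sits 32 bytes into the
 * event entry, convert_rloc_to_loc(0x0005000c, 32) gives 0x0005002c, i.e.
 * the same 5 bytes of data now located 44 bytes from the entry itself.
 */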
 109
 110/* For defining macros, define string/string_size types */
 111typedef u32 string;
 112typedef u32 string_size;
 113
 114/* Print type function for string type */
 115static __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s,
 116						  const char *name,
 117						  void *data, void *ent)
 118{
 119	int len = *(u32 *)data >> 16;
 120
 121	if (!len)
 122		return trace_seq_printf(s, " %s=(fault)", name);
 123	else
 124		return trace_seq_printf(s, " %s=\"%s\"", name,
 125					(const char *)get_loc_data(data, ent));
 126}
 127static const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\"";
 128
 129/* Data fetch function type */
 130typedef	void (*fetch_func_t)(struct pt_regs *, void *, void *);
 131
 132struct fetch_param {
 133	fetch_func_t	fn;
 134	void *data;
 135};
 136
 137static __kprobes void call_fetch(struct fetch_param *fprm,
 138				 struct pt_regs *regs, void *dest)
 139{
 140	return fprm->fn(regs, fprm->data, dest);
 141}
 142
 143#define FETCH_FUNC_NAME(method, type)	fetch_##method##_##type
 144/*
 145 * Define macro for basic types - we don't need to define s* types, because
 146 * we have to care only about bitwidth at recording time.
 147 */
 148#define DEFINE_BASIC_FETCH_FUNCS(method) \
 149DEFINE_FETCH_##method(u8)		\
 150DEFINE_FETCH_##method(u16)		\
 151DEFINE_FETCH_##method(u32)		\
 152DEFINE_FETCH_##method(u64)
 153
 154#define CHECK_FETCH_FUNCS(method, fn)			\
 155	(((FETCH_FUNC_NAME(method, u8) == fn) ||	\
 156	  (FETCH_FUNC_NAME(method, u16) == fn) ||	\
 157	  (FETCH_FUNC_NAME(method, u32) == fn) ||	\
 158	  (FETCH_FUNC_NAME(method, u64) == fn) ||	\
 159	  (FETCH_FUNC_NAME(method, string) == fn) ||	\
 160	  (FETCH_FUNC_NAME(method, string_size) == fn)) \
 161	 && (fn != NULL))
 162
 163/* Data fetch function templates */
 164#define DEFINE_FETCH_reg(type)						\
 165static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs,	\
 166					void *offset, void *dest)	\
 167{									\
 168	*(type *)dest = (type)regs_get_register(regs,			\
 169				(unsigned int)((unsigned long)offset));	\
 170}
 171DEFINE_BASIC_FETCH_FUNCS(reg)
 172/* No string on the register */
 173#define fetch_reg_string NULL
 174#define fetch_reg_string_size NULL
 175
 176#define DEFINE_FETCH_stack(type)					\
 177static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\
 178					  void *offset, void *dest)	\
 179{									\
 180	*(type *)dest = (type)regs_get_kernel_stack_nth(regs,		\
 181				(unsigned int)((unsigned long)offset));	\
 182}
 183DEFINE_BASIC_FETCH_FUNCS(stack)
 184/* No string on the stack entry */
 185#define fetch_stack_string NULL
 186#define fetch_stack_string_size NULL
 187
 188#define DEFINE_FETCH_retval(type)					\
 189static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\
 190					  void *dummy, void *dest)	\
 191{									\
 192	*(type *)dest = (type)regs_return_value(regs);			\
 193}
 194DEFINE_BASIC_FETCH_FUNCS(retval)
 195/* No string on the retval */
 196#define fetch_retval_string NULL
 197#define fetch_retval_string_size NULL
 198
 199#define DEFINE_FETCH_memory(type)					\
 200static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\
 201					  void *addr, void *dest)	\
 202{									\
 203	type retval;							\
 204	if (probe_kernel_address(addr, retval))				\
 205		*(type *)dest = 0;					\
 206	else								\
 207		*(type *)dest = retval;					\
 208}
 209DEFINE_BASIC_FETCH_FUNCS(memory)
 210/*
 211 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 212 * length and relative data location.
 213 */
 214static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
 215						      void *addr, void *dest)
 216{
 217	long ret;
 218	int maxlen = get_rloc_len(*(u32 *)dest);
 219	u8 *dst = get_rloc_data(dest);
 220	u8 *src = addr;
 221	mm_segment_t old_fs = get_fs();
 222	if (!maxlen)
 223		return;
 224	/*
 225	 * Try to get string again, since the string can be changed while
 226	 * probing.
 227	 */
 228	set_fs(KERNEL_DS);
 229	pagefault_disable();
 230	do
 231		ret = __copy_from_user_inatomic(dst++, src++, 1);
 232	while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
 233	dst[-1] = '\0';
 234	pagefault_enable();
 235	set_fs(old_fs);
 236
 237	if (ret < 0) {	/* Failed to fetch string */
 238		((u8 *)get_rloc_data(dest))[0] = '\0';
 239		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
 240	} else
 241		*(u32 *)dest = make_data_rloc(src - (u8 *)addr,
 242					      get_rloc_offs(*(u32 *)dest));
 243}
 244/* Return the length of the string, including the terminating null byte */
 245static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
 246							void *addr, void *dest)
 247{
 248	int ret, len = 0;
 249	u8 c;
 250	mm_segment_t old_fs = get_fs();
 251
 252	set_fs(KERNEL_DS);
 253	pagefault_disable();
 254	do {
 255		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
 256		len++;
 257	} while (c && ret == 0 && len < MAX_STRING_SIZE);
 258	pagefault_enable();
 259	set_fs(old_fs);
 260
 261	if (ret < 0)	/* Failed to check the length */
 262		*(u32 *)dest = 0;
 263	else
 264		*(u32 *)dest = len;
 265}
 266
 267/* Memory fetching by symbol */
 268struct symbol_cache {
 269	char *symbol;
 270	long offset;
 271	unsigned long addr;
 272};
 273
 274static unsigned long update_symbol_cache(struct symbol_cache *sc)
 275{
 276	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
 277	if (sc->addr)
 278		sc->addr += sc->offset;
 279	return sc->addr;
 280}
 281
 282static void free_symbol_cache(struct symbol_cache *sc)
 283{
 284	kfree(sc->symbol);
 285	kfree(sc);
 286}
 287
 288static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
 289{
 290	struct symbol_cache *sc;
 291
 292	if (!sym || strlen(sym) == 0)
 293		return NULL;
 294	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
 295	if (!sc)
 296		return NULL;
 297
 298	sc->symbol = kstrdup(sym, GFP_KERNEL);
 299	if (!sc->symbol) {
 300		kfree(sc);
 301		return NULL;
 302	}
 303	sc->offset = offset;
 304
 305	update_symbol_cache(sc);
 306	return sc;
 307}
 308
 309#define DEFINE_FETCH_symbol(type)					\
 310static __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,\
 311					  void *data, void *dest)	\
 312{									\
 313	struct symbol_cache *sc = data;					\
 314	if (sc->addr)							\
 315		fetch_memory_##type(regs, (void *)sc->addr, dest);	\
 316	else								\
 317		*(type *)dest = 0;					\
 318}
 319DEFINE_BASIC_FETCH_FUNCS(symbol)
 320DEFINE_FETCH_symbol(string)
 321DEFINE_FETCH_symbol(string_size)
 322
 323/* Dereference memory access function */
 324struct deref_fetch_param {
 325	struct fetch_param orig;
 326	long offset;
 327};
 328
 329#define DEFINE_FETCH_deref(type)					\
 330static __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs,\
 331					    void *data, void *dest)	\
 332{									\
 333	struct deref_fetch_param *dprm = data;				\
 334	unsigned long addr;						\
 335	call_fetch(&dprm->orig, regs, &addr);				\
 336	if (addr) {							\
 337		addr += dprm->offset;					\
 338		fetch_memory_##type(regs, (void *)addr, dest);		\
 339	} else								\
 340		*(type *)dest = 0;					\
 341}
 342DEFINE_BASIC_FETCH_FUNCS(deref)
 343DEFINE_FETCH_deref(string)
 344DEFINE_FETCH_deref(string_size)
 345
 346static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data)
 347{
 348	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
 349		update_deref_fetch_param(data->orig.data);
 350	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
 351		update_symbol_cache(data->orig.data);
 352}
 353
 354static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data)
 355{
 356	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
 357		free_deref_fetch_param(data->orig.data);
 358	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
 359		free_symbol_cache(data->orig.data);
 360	kfree(data);
 361}
 362
 363/* Bitfield fetch function */
 364struct bitfield_fetch_param {
 365	struct fetch_param orig;
 366	unsigned char hi_shift;
 367	unsigned char low_shift;
 368};
 369
 370#define DEFINE_FETCH_bitfield(type)					\
 371static __kprobes void FETCH_FUNC_NAME(bitfield, type)(struct pt_regs *regs,\
 372					    void *data, void *dest)	\
 373{									\
 374	struct bitfield_fetch_param *bprm = data;			\
 375	type buf = 0;							\
 376	call_fetch(&bprm->orig, regs, &buf);				\
 377	if (buf) {							\
 378		buf <<= bprm->hi_shift;					\
 379		buf >>= bprm->low_shift;				\
 380	}								\
 381	*(type *)dest = buf;						\
 382}
 383DEFINE_BASIC_FETCH_FUNCS(bitfield)
 384#define fetch_bitfield_string NULL
 385#define fetch_bitfield_string_size NULL
 386
 387static __kprobes void
 388update_bitfield_fetch_param(struct bitfield_fetch_param *data)
 389{
 390	/*
 391	 * Don't check the bitfield itself, because this must be the
 392	 * last fetch function.
 393	 */
 394	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
 395		update_deref_fetch_param(data->orig.data);
 396	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
 397		update_symbol_cache(data->orig.data);
 398}
 399
 400static __kprobes void
 401free_bitfield_fetch_param(struct bitfield_fetch_param *data)
 402{
 403	/*
 404	 * Don't check the bitfield itself, because this must be the
 405	 * last fetch function.
 406	 */
 407	if (CHECK_FETCH_FUNCS(deref, data->orig.fn))
 408		free_deref_fetch_param(data->orig.data);
 409	else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn))
 410		free_symbol_cache(data->orig.data);
 411	kfree(data);
 412}
 413
 414/* Default (unsigned long) fetch type */
 415#define __DEFAULT_FETCH_TYPE(t) u##t
 416#define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)
 417#define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG)
 418#define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE)
 419
 420/* Fetch types */
 421enum {
 422	FETCH_MTD_reg = 0,
 423	FETCH_MTD_stack,
 424	FETCH_MTD_retval,
 425	FETCH_MTD_memory,
 426	FETCH_MTD_symbol,
 427	FETCH_MTD_deref,
 428	FETCH_MTD_bitfield,
 429	FETCH_MTD_END,
 430};
 431
 432#define ASSIGN_FETCH_FUNC(method, type)	\
 433	[FETCH_MTD_##method] = FETCH_FUNC_NAME(method, type)
 434
 435#define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype)	\
 436	{.name = _name,				\
 437	 .size = _size,					\
 438	 .is_signed = sign,				\
 439	 .print = PRINT_TYPE_FUNC_NAME(ptype),		\
 440	 .fmt = PRINT_TYPE_FMT_NAME(ptype),		\
 441	 .fmttype = _fmttype,				\
 442	 .fetch = {					\
 443ASSIGN_FETCH_FUNC(reg, ftype),				\
 444ASSIGN_FETCH_FUNC(stack, ftype),			\
 445ASSIGN_FETCH_FUNC(retval, ftype),			\
 446ASSIGN_FETCH_FUNC(memory, ftype),			\
 447ASSIGN_FETCH_FUNC(symbol, ftype),			\
 448ASSIGN_FETCH_FUNC(deref, ftype),			\
 449ASSIGN_FETCH_FUNC(bitfield, ftype),			\
 450	  }						\
 451	}
 452
 453#define ASSIGN_FETCH_TYPE(ptype, ftype, sign)			\
 454	__ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #ptype)
 455
 456#define FETCH_TYPE_STRING 0
 457#define FETCH_TYPE_STRSIZE 1
 458
 459/* Fetch type information table */
 460static const struct fetch_type {
 461	const char	*name;		/* Name of type */
 462	size_t		size;		/* Byte size of type */
 463	int		is_signed;	/* Signed flag */
 464	print_type_func_t	print;	/* Print functions */
 465	const char	*fmt;		/* Format string */
 466	const char	*fmttype;	/* Name in format file */
 467	/* Fetch functions */
 468	fetch_func_t	fetch[FETCH_MTD_END];
 469} fetch_type_table[] = {
 470	/* Special types */
 471	[FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
 472					sizeof(u32), 1, "__data_loc char[]"),
 473	[FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
 474					string_size, sizeof(u32), 0, "u32"),
 475	/* Basic types */
 476	ASSIGN_FETCH_TYPE(u8,  u8,  0),
 477	ASSIGN_FETCH_TYPE(u16, u16, 0),
 478	ASSIGN_FETCH_TYPE(u32, u32, 0),
 479	ASSIGN_FETCH_TYPE(u64, u64, 0),
 480	ASSIGN_FETCH_TYPE(s8,  u8,  1),
 481	ASSIGN_FETCH_TYPE(s16, u16, 1),
 482	ASSIGN_FETCH_TYPE(s32, u32, 1),
 483	ASSIGN_FETCH_TYPE(s64, u64, 1),
 484};
 485
 486static const struct fetch_type *find_fetch_type(const char *type)
 487{
 488	int i;
 489
 490	if (!type)
 491		type = DEFAULT_FETCH_TYPE_STR;
 492
 493	/* Special case: bitfield */
 494	if (*type == 'b') {
 495		unsigned long bs;
 496		type = strchr(type, '/');
 497		if (!type)
 498			goto fail;
 499		type++;
 500		if (strict_strtoul(type, 0, &bs))
 501			goto fail;
 502		switch (bs) {
 503		case 8:
 504			return find_fetch_type("u8");
 505		case 16:
 506			return find_fetch_type("u16");
 507		case 32:
 508			return find_fetch_type("u32");
 509		case 64:
 510			return find_fetch_type("u64");
 511		default:
 512			goto fail;
 513		}
 514	}
 515
 516	for (i = 0; i < ARRAY_SIZE(fetch_type_table); i++)
 517		if (strcmp(type, fetch_type_table[i].name) == 0)
 518			return &fetch_type_table[i];
 519fail:
 520	return NULL;
 521}
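/*
 * Examples (illustrative): find_fetch_type("s32") returns the s32 entry
 * of the table above; find_fetch_type(NULL) falls back to the
 * register-width default ("u64" on 64-bit, "u32" on 32-bit kernels); a
 * bitfield spec such as "b4@4/32" is resolved through its "/32" suffix
 * to the "u32" fetch functions.
 */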
 522
 523/* Special function: only accepts unsigned long */
 524static __kprobes void fetch_stack_address(struct pt_regs *regs,
 525					  void *dummy, void *dest)
 526{
 527	*(unsigned long *)dest = kernel_stack_pointer(regs);
 528}
 529
 530static fetch_func_t get_fetch_size_function(const struct fetch_type *type,
 531					    fetch_func_t orig_fn)
 532{
 533	int i;
 534
 535	if (type != &fetch_type_table[FETCH_TYPE_STRING])
 536		return NULL;	/* Only string type needs size function */
 537	for (i = 0; i < FETCH_MTD_END; i++)
 538		if (type->fetch[i] == orig_fn)
 539			return fetch_type_table[FETCH_TYPE_STRSIZE].fetch[i];
 540
 541	WARN_ON(1);	/* This should not happen */
 542	return NULL;
 543}
 544
 545/*
 546 * Kprobe event core functions
 547 */
 548
 549struct probe_arg {
 550	struct fetch_param	fetch;
 551	struct fetch_param	fetch_size;
 552	unsigned int		offset;	/* Offset from argument entry */
 553	const char		*name;	/* Name of this argument */
 554	const char		*comm;	/* Command of this argument */
 555	const struct fetch_type	*type;	/* Type of this argument */
 556};
 557
 558/* Flags for trace_probe */
 559#define TP_FLAG_TRACE	1
 560#define TP_FLAG_PROFILE	2
 561#define TP_FLAG_REGISTERED 4
 562
 563struct trace_probe {
 564	struct list_head	list;
 565	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
 566	unsigned long 		nhit;
 567	unsigned int		flags;	/* For TP_FLAG_* */
 568	const char		*symbol;	/* symbol name */
 569	struct ftrace_event_class	class;
 570	struct ftrace_event_call	call;
 571	ssize_t			size;		/* trace entry size */
 572	unsigned int		nr_args;
 573	struct probe_arg	args[];
 574};
 575
 576#define SIZEOF_TRACE_PROBE(n)			\
 577	(offsetof(struct trace_probe, args) +	\
 578	(sizeof(struct probe_arg) * (n)))
 579
 580
 581static __kprobes int trace_probe_is_return(struct trace_probe *tp)
 582{
 583	return tp->rp.handler != NULL;
 584}
 585
 586static __kprobes const char *trace_probe_symbol(struct trace_probe *tp)
 587{
 588	return tp->symbol ? tp->symbol : "unknown";
 589}
 590
 591static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp)
 592{
 593	return tp->rp.kp.offset;
 594}
 595
 596static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp)
 597{
 598	return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE));
 599}
 600
 601static __kprobes bool trace_probe_is_registered(struct trace_probe *tp)
 602{
 603	return !!(tp->flags & TP_FLAG_REGISTERED);
 604}
 605
 606static __kprobes bool trace_probe_has_gone(struct trace_probe *tp)
 607{
 608	return !!(kprobe_gone(&tp->rp.kp));
 609}
 610
 611static __kprobes bool trace_probe_within_module(struct trace_probe *tp,
 612						struct module *mod)
 613{
 614	int len = strlen(mod->name);
 615	const char *name = trace_probe_symbol(tp);
 616	return strncmp(mod->name, name, len) == 0 && name[len] == ':';
 617}
 618
 619static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
 620{
 621	return !!strchr(trace_probe_symbol(tp), ':');
 622}
 623
 624static int register_probe_event(struct trace_probe *tp);
 625static void unregister_probe_event(struct trace_probe *tp);
 626
 627static DEFINE_MUTEX(probe_lock);
 628static LIST_HEAD(probe_list);
 629
 630static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
 631static int kretprobe_dispatcher(struct kretprobe_instance *ri,
 632				struct pt_regs *regs);
 633
 634/* Check the name is good for event/group/fields */
 635static int is_good_name(const char *name)
 636{
 637	if (!isalpha(*name) && *name != '_')
 638		return 0;
 639	while (*++name != '\0') {
 640		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
 641			return 0;
 642	}
 643	return 1;
 644}
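/*
 * For example (illustrative): "myprobe", "_tmp" and "arg1" pass this
 * check, while "1st" (leading digit) and "my-probe" ('-' is not allowed)
 * are rejected.
 */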
 645
 646/*
 647 * Allocate new trace_probe and initialize it (including kprobes).
 648 */
 649static struct trace_probe *alloc_trace_probe(const char *group,
 650					     const char *event,
 651					     void *addr,
 652					     const char *symbol,
 653					     unsigned long offs,
 654					     int nargs, int is_return)
 655{
 656	struct trace_probe *tp;
 657	int ret = -ENOMEM;
 658
 659	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
 660	if (!tp)
 661		return ERR_PTR(ret);
 662
 663	if (symbol) {
 664		tp->symbol = kstrdup(symbol, GFP_KERNEL);
 665		if (!tp->symbol)
 666			goto error;
 667		tp->rp.kp.symbol_name = tp->symbol;
 668		tp->rp.kp.offset = offs;
 669	} else
 670		tp->rp.kp.addr = addr;
 671
 672	if (is_return)
 673		tp->rp.handler = kretprobe_dispatcher;
 674	else
 675		tp->rp.kp.pre_handler = kprobe_dispatcher;
 676
 677	if (!event || !is_good_name(event)) {
 678		ret = -EINVAL;
 679		goto error;
 680	}
 681
 682	tp->call.class = &tp->class;
 683	tp->call.name = kstrdup(event, GFP_KERNEL);
 684	if (!tp->call.name)
 685		goto error;
 686
 687	if (!group || !is_good_name(group)) {
 688		ret = -EINVAL;
 689		goto error;
 690	}
 691
 692	tp->class.system = kstrdup(group, GFP_KERNEL);
 693	if (!tp->class.system)
 694		goto error;
 695
 696	INIT_LIST_HEAD(&tp->list);
 697	return tp;
 698error:
 699	kfree(tp->call.name);
 700	kfree(tp->symbol);
 701	kfree(tp);
 702	return ERR_PTR(ret);
 703}
 704
 705static void update_probe_arg(struct probe_arg *arg)
 706{
 707	if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
 708		update_bitfield_fetch_param(arg->fetch.data);
 709	else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
 710		update_deref_fetch_param(arg->fetch.data);
 711	else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
 712		update_symbol_cache(arg->fetch.data);
 713}
 714
 715static void free_probe_arg(struct probe_arg *arg)
 716{
 717	if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn))
 718		free_bitfield_fetch_param(arg->fetch.data);
 719	else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn))
 720		free_deref_fetch_param(arg->fetch.data);
 721	else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn))
 722		free_symbol_cache(arg->fetch.data);
 723	kfree(arg->name);
 724	kfree(arg->comm);
 725}
 726
 727static void free_trace_probe(struct trace_probe *tp)
 728{
 729	int i;
 730
 731	for (i = 0; i < tp->nr_args; i++)
 732		free_probe_arg(&tp->args[i]);
 733
 734	kfree(tp->call.class->system);
 735	kfree(tp->call.name);
 736	kfree(tp->symbol);
 737	kfree(tp);
 738}
 739
 740static struct trace_probe *find_trace_probe(const char *event,
 741					    const char *group)
 742{
 743	struct trace_probe *tp;
 744
 745	list_for_each_entry(tp, &probe_list, list)
 746		if (strcmp(tp->call.name, event) == 0 &&
 747		    strcmp(tp->call.class->system, group) == 0)
 748			return tp;
 749	return NULL;
 750}
 751
 752/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
 753static int enable_trace_probe(struct trace_probe *tp, int flag)
 754{
 755	int ret = 0;
 756
 757	tp->flags |= flag;
 758	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
 759	    !trace_probe_has_gone(tp)) {
 760		if (trace_probe_is_return(tp))
 761			ret = enable_kretprobe(&tp->rp);
 762		else
 763			ret = enable_kprobe(&tp->rp.kp);
 764	}
 765
 766	return ret;
 767}
 768
 769/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
 770static void disable_trace_probe(struct trace_probe *tp, int flag)
 771{
 772	tp->flags &= ~flag;
 773	if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
 774		if (trace_probe_is_return(tp))
 775			disable_kretprobe(&tp->rp);
 776		else
 777			disable_kprobe(&tp->rp.kp);
 778	}
 779}
 780
 781/* Internal register function - just handle k*probes and flags */
 782static int __register_trace_probe(struct trace_probe *tp)
 783{
 784	int i, ret;
 785
 786	if (trace_probe_is_registered(tp))
 787		return -EINVAL;
 788
 789	for (i = 0; i < tp->nr_args; i++)
 790		update_probe_arg(&tp->args[i]);
 791
 792	/* Set/clear disabled flag according to tp->flag */
 793	if (trace_probe_is_enabled(tp))
 794		tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
 795	else
 796		tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
 797
 798	if (trace_probe_is_return(tp))
 799		ret = register_kretprobe(&tp->rp);
 800	else
 801		ret = register_kprobe(&tp->rp.kp);
 802
 803	if (ret == 0)
 804		tp->flags |= TP_FLAG_REGISTERED;
 805	else {
 806		pr_warning("Could not insert probe at %s+%lu: %d\n",
 807			   trace_probe_symbol(tp), trace_probe_offset(tp), ret);
 808		if (ret == -ENOENT && trace_probe_is_on_module(tp)) {
 809			pr_warning("This probe might be able to register after "
 810				   "the target module is loaded. Continue.\n");
 811			ret = 0;
 812		} else if (ret == -EILSEQ) {
 813			pr_warning("Probing address(0x%p) is not an "
 814				   "instruction boundary.\n",
 815				   tp->rp.kp.addr);
 816			ret = -EINVAL;
 817		}
 818	}
 819
 820	return ret;
 821}
 822
 823/* Internal unregister function - just handle k*probes and flags */
 824static void __unregister_trace_probe(struct trace_probe *tp)
 825{
 826	if (trace_probe_is_registered(tp)) {
 827		if (trace_probe_is_return(tp))
 828			unregister_kretprobe(&tp->rp);
 829		else
 830			unregister_kprobe(&tp->rp.kp);
 831		tp->flags &= ~TP_FLAG_REGISTERED;
 832		/* Cleanup kprobe for reuse */
 833		if (tp->rp.kp.symbol_name)
 834			tp->rp.kp.addr = NULL;
 835	}
 836}
 837
 838/* Unregister a trace_probe and probe_event: call with locking probe_lock */
 839static void unregister_trace_probe(struct trace_probe *tp)
 840{
 841	__unregister_trace_probe(tp);
 842	list_del(&tp->list);
 843	unregister_probe_event(tp);
 844}
 845
 846/* Register a trace_probe and probe_event */
 847static int register_trace_probe(struct trace_probe *tp)
 848{
 849	struct trace_probe *old_tp;
 850	int ret;
 851
 852	mutex_lock(&probe_lock);
 853
 854	/* Delete the old (same-name) event if it exists */
 855	old_tp = find_trace_probe(tp->call.name, tp->call.class->system);
 856	if (old_tp) {
 857		unregister_trace_probe(old_tp);
 858		free_trace_probe(old_tp);
 859	}
 860
 861	/* Register new event */
 862	ret = register_probe_event(tp);
 863	if (ret) {
 864		pr_warning("Failed to register probe event(%d)\n", ret);
 865		goto end;
 866	}
 867
 868	/* Register k*probe */
 869	ret = __register_trace_probe(tp);
 870	if (ret < 0)
 871		unregister_probe_event(tp);
 872	else
 873		list_add_tail(&tp->list, &probe_list);
 874
 875end:
 876	mutex_unlock(&probe_lock);
 877	return ret;
 878}
 879
 880/* Module notifier call back, checking event on the module */
 881static int trace_probe_module_callback(struct notifier_block *nb,
 882				       unsigned long val, void *data)
 883{
 884	struct module *mod = data;
 885	struct trace_probe *tp;
 886	int ret;
 887
 888	if (val != MODULE_STATE_COMING)
 889		return NOTIFY_DONE;
 890
 891	/* Update probes on coming module */
 892	mutex_lock(&probe_lock);
 893	list_for_each_entry(tp, &probe_list, list) {
 894		if (trace_probe_within_module(tp, mod)) {
 895			__unregister_trace_probe(tp);
 896			ret = __register_trace_probe(tp);
 897			if (ret)
 898				pr_warning("Failed to re-register probe %s on "
 899					   "%s: %d\n",
 900					   tp->call.name, mod->name, ret);
 901		}
 902	}
 903	mutex_unlock(&probe_lock);
 904
 905	return NOTIFY_DONE;
 906}
 907
 908static struct notifier_block trace_probe_module_nb = {
 909	.notifier_call = trace_probe_module_callback,
 910	.priority = 1	/* Invoked after kprobe module callback */
 911};
 912
 913/* Split symbol and offset. */
 914static int split_symbol_offset(char *symbol, unsigned long *offset)
 915{
 916	char *tmp;
 917	int ret;
 918
 919	if (!offset)
 920		return -EINVAL;
 921
 922	tmp = strchr(symbol, '+');
 923	if (tmp) {
 924		/* skip sign because strict_strtol doesn't accept '+' */
 925		ret = strict_strtoul(tmp + 1, 0, offset);
 926		if (ret)
 927			return ret;
 928		*tmp = '\0';
 929	} else
 930		*offset = 0;
 931	return 0;
 932}
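/*
 * Example (illustrative): given the writable string "vfs_read+8", this
 * truncates it to "vfs_read" and stores 8 in *offset; a plain "vfs_read"
 * is left as-is with *offset = 0.
 */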
 933
 934#define PARAM_MAX_ARGS 16
 935#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
 936
 937static int parse_probe_vars(char *arg, const struct fetch_type *t,
 938			    struct fetch_param *f, int is_return)
 939{
 940	int ret = 0;
 941	unsigned long param;
 942
 943	if (strcmp(arg, "retval") == 0) {
 944		if (is_return)
 945			f->fn = t->fetch[FETCH_MTD_retval];
 946		else
 947			ret = -EINVAL;
 948	} else if (strncmp(arg, "stack", 5) == 0) {
 949		if (arg[5] == '\0') {
 950			if (strcmp(t->name, DEFAULT_FETCH_TYPE_STR) == 0)
 951				f->fn = fetch_stack_address;
 952			else
 953				ret = -EINVAL;
 954		} else if (isdigit(arg[5])) {
 955			ret = strict_strtoul(arg + 5, 10, &param);
 956			if (ret || param > PARAM_MAX_STACK)
 957				ret = -EINVAL;
 958			else {
 959				f->fn = t->fetch[FETCH_MTD_stack];
 960				f->data = (void *)param;
 961			}
 962		} else
 963			ret = -EINVAL;
 964	} else
 965		ret = -EINVAL;
 966	return ret;
 967}
 968
 969/* Recursive argument parser */
 970static int __parse_probe_arg(char *arg, const struct fetch_type *t,
 971			     struct fetch_param *f, int is_return)
 972{
 973	int ret = 0;
 974	unsigned long param;
 975	long offset;
 976	char *tmp;
 977
 978	switch (arg[0]) {
 979	case '$':
 980		ret = parse_probe_vars(arg + 1, t, f, is_return);
 981		break;
 982	case '%':	/* named register */
 983		ret = regs_query_register_offset(arg + 1);
 984		if (ret >= 0) {
 985			f->fn = t->fetch[FETCH_MTD_reg];
 986			f->data = (void *)(unsigned long)ret;
 987			ret = 0;
 988		}
 989		break;
 990	case '@':	/* memory or symbol */
 991		if (isdigit(arg[1])) {
 992			ret = strict_strtoul(arg + 1, 0, &param);
 993			if (ret)
 994				break;
 995			f->fn = t->fetch[FETCH_MTD_memory];
 996			f->data = (void *)param;
 997		} else {
 998			ret = split_symbol_offset(arg + 1, &offset);
 999			if (ret)
1000				break;
1001			f->data = alloc_symbol_cache(arg + 1, offset);
1002			if (f->data)
1003				f->fn = t->fetch[FETCH_MTD_symbol];
1004		}
1005		break;
1006	case '+':	/* deref memory */
1007		arg++;	/* Skip '+', because strict_strtol() rejects it. */
1008	case '-':
1009		tmp = strchr(arg, '(');
1010		if (!tmp)
1011			break;
1012		*tmp = '\0';
1013		ret = strict_strtol(arg, 0, &offset);
1014		if (ret)
1015			break;
1016		arg = tmp + 1;
1017		tmp = strrchr(arg, ')');
1018		if (tmp) {
1019			struct deref_fetch_param *dprm;
1020			const struct fetch_type *t2 = find_fetch_type(NULL);
1021			*tmp = '\0';
1022			dprm = kzalloc(sizeof(struct deref_fetch_param),
1023				       GFP_KERNEL);
1024			if (!dprm)
1025				return -ENOMEM;
1026			dprm->offset = offset;
1027			ret = __parse_probe_arg(arg, t2, &dprm->orig,
1028						is_return);
1029			if (ret)
1030				kfree(dprm);
1031			else {
1032				f->fn = t->fetch[FETCH_MTD_deref];
1033				f->data = (void *)dprm;
1034			}
1035		}
1036		break;
1037	}
1038	if (!ret && !f->fn) {	/* Parsed, but do not find fetch method */
1039		pr_info("%s type has no corresponding fetch method.\n",
1040			t->name);
1041		ret = -EINVAL;
1042	}
1043	return ret;
1044}
1045
1046#define BYTES_TO_BITS(nb)	((BITS_PER_LONG * (nb)) / sizeof(long))
1047
1048/* Bitfield type needs to be parsed into a fetch function */
1049static int __parse_bitfield_probe_arg(const char *bf,
1050				      const struct fetch_type *t,
1051				      struct fetch_param *f)
1052{
1053	struct bitfield_fetch_param *bprm;
1054	unsigned long bw, bo;
1055	char *tail;
1056
1057	if (*bf != 'b')
1058		return 0;
1059
1060	bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1061	if (!bprm)
1062		return -ENOMEM;
1063	bprm->orig = *f;
1064	f->fn = t->fetch[FETCH_MTD_bitfield];
1065	f->data = (void *)bprm;
1066
1067	bw = simple_strtoul(bf + 1, &tail, 0);	/* Use simple one */
1068	if (bw == 0 || *tail != '@')
1069		return -EINVAL;
1070
1071	bf = tail + 1;
1072	bo = simple_strtoul(bf, &tail, 0);
1073	if (tail == bf || *tail != '/')
1074		return -EINVAL;
1075
1076	bprm->hi_shift = BYTES_TO_BITS(t->size) - (bw + bo);
1077	bprm->low_shift = bprm->hi_shift + bo;
1078	return (BYTES_TO_BITS(t->size) < (bw + bo)) ? -EINVAL : 0;
1079}
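/*
 * Worked example (illustrative): the type "b4@4/32" requests a 4-bit
 * field at bit offset 4 in a 32-bit container, so with t->size == 4:
 * hi_shift = 32 - (4 + 4) = 24 and low_shift = 24 + 4 = 28. The bitfield
 * fetch above then computes (value << 24) >> 28, isolating bits 4..7.
 */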
1080
1081/* String length checking wrapper */
1082static int parse_probe_arg(char *arg, struct trace_probe *tp,
1083			   struct probe_arg *parg, int is_return)
1084{
1085	const char *t;
1086	int ret;
1087
1088	if (strlen(arg) > MAX_ARGSTR_LEN) {
1089		pr_info("Argument is too long: %s\n", arg);
1090		return -ENOSPC;
1091	}
1092	parg->comm = kstrdup(arg, GFP_KERNEL);
1093	if (!parg->comm) {
1094		pr_info("Failed to allocate memory for command '%s'.\n", arg);
1095		return -ENOMEM;
1096	}
1097	t = strchr(parg->comm, ':');
1098	if (t) {
1099		arg[t - parg->comm] = '\0';
1100		t++;
1101	}
1102	parg->type = find_fetch_type(t);
1103	if (!parg->type) {
1104		pr_info("Unsupported type: %s\n", t);
1105		return -EINVAL;
1106	}
1107	parg->offset = tp->size;
1108	tp->size += parg->type->size;
1109	ret = __parse_probe_arg(arg, parg->type, &parg->fetch, is_return);
1110	if (ret >= 0 && t != NULL)
1111		ret = __parse_bitfield_probe_arg(t, parg->type, &parg->fetch);
1112	if (ret >= 0) {
1113		parg->fetch_size.fn = get_fetch_size_function(parg->type,
1114							      parg->fetch.fn);
1115		parg->fetch_size.data = parg->fetch.data;
1116	}
1117	return ret;
1118}
1119
1120/* Return 1 if name is reserved or already used by another argument */
1121static int conflict_field_name(const char *name,
1122			       struct probe_arg *args, int narg)
1123{
1124	int i;
1125	for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
1126		if (strcmp(reserved_field_names[i], name) == 0)
1127			return 1;
1128	for (i = 0; i < narg; i++)
1129		if (strcmp(args[i].name, name) == 0)
1130			return 1;
1131	return 0;
1132}
1133
1134static int create_trace_probe(int argc, char **argv)
1135{
1136	/*
1137	 * Argument syntax:
1138	 *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
1139	 *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
1140	 * Fetch args:
1141	 *  $retval	: fetch return value
1142	 *  $stack	: fetch stack address
1143	 *  $stackN	: fetch Nth of stack (N:0-)
1144	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
1145	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
1146	 *  %REG	: fetch register REG
1147	 * Dereferencing memory fetch:
1148	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
1149	 * Alias name of args:
1150	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
1151	 * Type of args:
1152	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
1153	 */
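	/*
	 * For instance (illustrative), the command
	 *   p:myprobe do_sys_open dfd=%ax filename=+0(%si):string
	 * defines event kprobes/myprobe on do_sys_open, recording register
	 * %ax as "dfd" and the string %si points to as "filename".
	 */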
1154	struct trace_probe *tp;
1155	int i, ret = 0;
1156	int is_return = 0, is_delete = 0;
1157	char *symbol = NULL, *event = NULL, *group = NULL;
1158	char *arg;
1159	unsigned long offset = 0;
1160	void *addr = NULL;
1161	char buf[MAX_EVENT_NAME_LEN];
1162
1163	/* argc must be >= 1 */
1164	if (argv[0][0] == 'p')
1165		is_return = 0;
1166	else if (argv[0][0] == 'r')
1167		is_return = 1;
1168	else if (argv[0][0] == '-')
1169		is_delete = 1;
1170	else {
1171		pr_info("Probe definition must start with 'p', 'r' or"
1172			" '-'.\n");
1173		return -EINVAL;
1174	}
1175
1176	if (argv[0][1] == ':') {
1177		event = &argv[0][2];
1178		if (strchr(event, '/')) {
1179			group = event;
1180			event = strchr(group, '/') + 1;
1181			event[-1] = '\0';
1182			if (strlen(group) == 0) {
1183				pr_info("Group name is not specified\n");
1184				return -EINVAL;
1185			}
1186		}
1187		if (strlen(event) == 0) {
1188			pr_info("Event name is not specified\n");
1189			return -EINVAL;
1190		}
1191	}
1192	if (!group)
1193		group = KPROBE_EVENT_SYSTEM;
1194
1195	if (is_delete) {
1196		if (!event) {
1197			pr_info("Delete command needs an event name.\n");
1198			return -EINVAL;
1199		}
1200		mutex_lock(&probe_lock);
1201		tp = find_trace_probe(event, group);
1202		if (!tp) {
1203			mutex_unlock(&probe_lock);
1204			pr_info("Event %s/%s doesn't exist.\n", group, event);
1205			return -ENOENT;
1206		}
1207		/* delete an event */
1208		unregister_trace_probe(tp);
1209		free_trace_probe(tp);
1210		mutex_unlock(&probe_lock);
1211		return 0;
1212	}
1213
1214	if (argc < 2) {
1215		pr_info("Probe point is not specified.\n");
1216		return -EINVAL;
1217	}
1218	if (isdigit(argv[1][0])) {
1219		if (is_return) {
1220			pr_info("Return probe point must be a symbol.\n");
1221			return -EINVAL;
1222		}
1223		/* an address specified */
1224		ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
1225		if (ret) {
1226			pr_info("Failed to parse address.\n");
1227			return ret;
1228		}
1229	} else {
1230		/* a symbol specified */
1231		symbol = argv[1];
1232		/* TODO: support .init module functions */
1233		ret = split_symbol_offset(symbol, &offset);
1234		if (ret) {
1235			pr_info("Failed to parse symbol.\n");
1236			return ret;
1237		}
1238		if (offset && is_return) {
1239			pr_info("Return probe must be used without offset.\n");
1240			return -EINVAL;
1241		}
1242	}
1243	argc -= 2; argv += 2;
1244
1245	/* setup a probe */
1246	if (!event) {
1247		/* Make a new event name */
1248		if (symbol)
1249			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
1250				 is_return ? 'r' : 'p', symbol, offset);
1251		else
1252			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
1253				 is_return ? 'r' : 'p', addr);
1254		event = buf;
1255	}
1256	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
1257			       is_return);
1258	if (IS_ERR(tp)) {
1259		pr_info("Failed to allocate trace_probe (%d)\n",
1260			(int)PTR_ERR(tp));
1261		return PTR_ERR(tp);
1262	}
1263
1264	/* parse arguments */
1265	ret = 0;
1266	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
1267		/* Increment count for freeing args in error case */
1268		tp->nr_args++;
1269
1270		/* Parse argument name */
1271		arg = strchr(argv[i], '=');
1272		if (arg) {
1273			*arg++ = '\0';
1274			tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
1275		} else {
1276			arg = argv[i];
1277			/* If argument name is omitted, set "argN" */
1278			snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
1279			tp->args[i].name = kstrdup(buf, GFP_KERNEL);
1280		}
1281
1282		if (!tp->args[i].name) {
1283			pr_info("Failed to allocate argument[%d] name.\n", i);
1284			ret = -ENOMEM;
1285			goto error;
1286		}
1287
1288		if (!is_good_name(tp->args[i].name)) {
1289			pr_info("Invalid argument[%d] name: %s\n",
1290				i, tp->args[i].name);
1291			ret = -EINVAL;
1292			goto error;
1293		}
1294
1295		if (conflict_field_name(tp->args[i].name, tp->args, i)) {
1296			pr_info("Argument[%d] name '%s' conflicts with "
1297				"another field.\n", i, argv[i]);
1298			ret = -EINVAL;
1299			goto error;
1300		}
1301
1302		/* Parse fetch argument */
1303		ret = parse_probe_arg(arg, tp, &tp->args[i], is_return);
1304		if (ret) {
1305			pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
1306			goto error;
1307		}
1308	}
1309
1310	ret = register_trace_probe(tp);
1311	if (ret)
1312		goto error;
1313	return 0;
1314
1315error:
1316	free_trace_probe(tp);
1317	return ret;
1318}
1319
1320static void release_all_trace_probes(void)
1321{
1322	struct trace_probe *tp;
1323
1324	mutex_lock(&probe_lock);
1325	/* TODO: Use batch unregistration */
1326	while (!list_empty(&probe_list)) {
1327		tp = list_entry(probe_list.next, struct trace_probe, list);
1328		unregister_trace_probe(tp);
1329		free_trace_probe(tp);
1330	}
1331	mutex_unlock(&probe_lock);
1332}
1333
1334/* Probes listing interfaces */
1335static void *probes_seq_start(struct seq_file *m, loff_t *pos)
1336{
1337	mutex_lock(&probe_lock);
1338	return seq_list_start(&probe_list, *pos);
1339}
1340
1341static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
1342{
1343	return seq_list_next(v, &probe_list, pos);
1344}
1345
1346static void probes_seq_stop(struct seq_file *m, void *v)
1347{
1348	mutex_unlock(&probe_lock);
1349}
1350
1351static int probes_seq_show(struct seq_file *m, void *v)
1352{
1353	struct trace_probe *tp = v;
1354	int i;
1355
1356	seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p');
1357	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
1358
1359	if (!tp->symbol)
1360		seq_printf(m, " 0x%p", tp->rp.kp.addr);
1361	else if (tp->rp.kp.offset)
1362		seq_printf(m, " %s+%u", trace_probe_symbol(tp),
1363			   tp->rp.kp.offset);
1364	else
1365		seq_printf(m, " %s", trace_probe_symbol(tp));
1366
1367	for (i = 0; i < tp->nr_args; i++)
1368		seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm);
1369	seq_printf(m, "\n");
1370
1371	return 0;
1372}
1373
1374static const struct seq_operations probes_seq_op = {
1375	.start  = probes_seq_start,
1376	.next   = probes_seq_next,
1377	.stop   = probes_seq_stop,
1378	.show   = probes_seq_show
1379};
1380
1381static int probes_open(struct inode *inode, struct file *file)
1382{
1383	if ((file->f_mode & FMODE_WRITE) &&
1384	    (file->f_flags & O_TRUNC))
1385		release_all_trace_probes();
1386
1387	return seq_open(file, &probes_seq_op);
1388}
1389
1390static int command_trace_probe(const char *buf)
1391{
1392	char **argv;
1393	int argc = 0, ret = 0;
1394
1395	argv = argv_split(GFP_KERNEL, buf, &argc);
1396	if (!argv)
1397		return -ENOMEM;
1398
1399	if (argc)
1400		ret = create_trace_probe(argc, argv);
1401
1402	argv_free(argv);
1403	return ret;
1404}
1405
1406#define WRITE_BUFSIZE 4096
1407
1408static ssize_t probes_write(struct file *file, const char __user *buffer,
1409			    size_t count, loff_t *ppos)
1410{
1411	char *kbuf, *tmp;
1412	int ret;
1413	size_t done;
1414	size_t size;
1415
1416	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
1417	if (!kbuf)
1418		return -ENOMEM;
1419
1420	ret = done = 0;
1421	while (done < count) {
1422		size = count - done;
1423		if (size >= WRITE_BUFSIZE)
1424			size = WRITE_BUFSIZE - 1;
1425		if (copy_from_user(kbuf, buffer + done, size)) {
1426			ret = -EFAULT;
1427			goto out;
1428		}
1429		kbuf[size] = '\0';
1430		tmp = strchr(kbuf, '\n');
1431		if (tmp) {
1432			*tmp = '\0';
1433			size = tmp - kbuf + 1;
1434		} else if (done + size < count) {
1435			pr_warning("Line is too long: "
1436				   "should be less than %d.", WRITE_BUFSIZE);
1437			ret = -EINVAL;
1438			goto out;
1439		}
1440		done += size;
1441		/* Remove comments */
1442		tmp = strchr(kbuf, '#');
1443		if (tmp)
1444			*tmp = '\0';
1445
1446		ret = command_trace_probe(kbuf);
1447		if (ret)
1448			goto out;
1449	}
1450	ret = done;
1451out:
1452	kfree(kbuf);
1453	return ret;
1454}
1455
1456static const struct file_operations kprobe_events_ops = {
1457	.owner          = THIS_MODULE,
1458	.open           = probes_open,
1459	.read           = seq_read,
1460	.llseek         = seq_lseek,
1461	.release        = seq_release,
1462	.write		= probes_write,
1463};
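/*
 * Typical use of this interface (illustrative; assumes debugfs is mounted
 * at /sys/kernel/debug):
 *
 *   echo 'p:myprobe do_sys_open dfd=%ax' > /sys/kernel/debug/tracing/kprobe_events
 *   cat /sys/kernel/debug/tracing/kprobe_events
 *   echo '-:myprobe' >> /sys/kernel/debug/tracing/kprobe_events
 *
 * Note that probes_open() clears all existing probes when the file is
 * opened for writing with O_TRUNC, i.e. a plain '>' redirection.
 */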
1464
1465/* Probes profiling interfaces */
1466static int probes_profile_seq_show(struct seq_file *m, void *v)
1467{
1468	struct trace_probe *tp = v;
1469
1470	seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
1471		   tp->rp.kp.nmissed);
1472
1473	return 0;
1474}
1475
1476static const struct seq_operations profile_seq_op = {
1477	.start  = probes_seq_start,
1478	.next   = probes_seq_next,
1479	.stop   = probes_seq_stop,
1480	.show   = probes_profile_seq_show
1481};
1482
1483static int profile_open(struct inode *inode, struct file *file)
1484{
1485	return seq_open(file, &profile_seq_op);
1486}
1487
1488static const struct file_operations kprobe_profile_ops = {
1489	.owner          = THIS_MODULE,
1490	.open           = profile_open,
1491	.read           = seq_read,
1492	.llseek         = seq_lseek,
1493	.release        = seq_release,
1494};
1495
1496/* Sum up total data length for dynamic arrays (strings) */
1497static __kprobes int __get_data_size(struct trace_probe *tp,
1498				     struct pt_regs *regs)
1499{
1500	int i, ret = 0;
1501	u32 len;
1502
1503	for (i = 0; i < tp->nr_args; i++)
1504		if (unlikely(tp->args[i].fetch_size.fn)) {
1505			call_fetch(&tp->args[i].fetch_size, regs, &len);
1506			ret += len;
1507		}
1508
1509	return ret;
1510}
1511
1512/* Store the value of each argument */
1513static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp,
1514				       struct pt_regs *regs,
1515				       u8 *data, int maxlen)
1516{
1517	int i;
1518	u32 end = tp->size;
1519	u32 *dl;	/* Data (relative) location */
1520
1521	for (i = 0; i < tp->nr_args; i++) {
1522		if (unlikely(tp->args[i].fetch_size.fn)) {
1523			/*
1524			 * First, we set the relative location and
1525			 * maximum data length to *dl
1526			 */
1527			dl = (u32 *)(data + tp->args[i].offset);
1528			*dl = make_data_rloc(maxlen, end - tp->args[i].offset);
1529			/* Then try to fetch string or dynamic array data */
1530			call_fetch(&tp->args[i].fetch, regs, dl);
1531			/* Reduce maximum length */
1532			end += get_rloc_len(*dl);
1533			maxlen -= get_rloc_len(*dl);
1534			/* Trick here, convert data_rloc to data_loc */
1535			*dl = convert_rloc_to_loc(*dl,
1536				 ent_size + tp->args[i].offset);
1537		} else
1538			/* Just fetching data normally */
1539			call_fetch(&tp->args[i].fetch, regs,
1540				   data + tp->args[i].offset);
1541	}
1542}
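/*
 * Resulting layout (illustrative), for one u64 argument followed by one
 * string argument: the fixed region holds the u64 at args[0].offset and a
 * u32 data_loc word at args[1].offset; the string bytes are appended
 * after tp->size, and after conversion the data_loc word encodes
 * (length << 16) | offset-from-event-entry so printers can find them.
 */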
1543
1544/* Kprobe handler */
1545static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
1546{
1547	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1548	struct kprobe_trace_entry_head *entry;
1549	struct ring_buffer_event *event;
1550	struct ring_buffer *buffer;
1551	int size, dsize, pc;
1552	unsigned long irq_flags;
1553	struct ftrace_event_call *call = &tp->call;
1554
1555	tp->nhit++;
1556
1557	local_save_flags(irq_flags);
1558	pc = preempt_count();
1559
1560	dsize = __get_data_size(tp, regs);
1561	size = sizeof(*entry) + tp->size + dsize;
1562
1563	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
1564						  size, irq_flags, pc);
1565	if (!event)
1566		return;
1567
1568	entry = ring_buffer_event_data(event);
1569	entry->ip = (unsigned long)kp->addr;
1570	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
1571
1572	if (!filter_current_check_discard(buffer, call, entry, event))
1573		trace_nowake_buffer_unlock_commit_regs(buffer, event,
1574						       irq_flags, pc, regs);
1575}
1576
1577/* Kretprobe handler */
1578static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
1579					  struct pt_regs *regs)
1580{
1581	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1582	struct kretprobe_trace_entry_head *entry;
1583	struct ring_buffer_event *event;
1584	struct ring_buffer *buffer;
1585	int size, pc, dsize;
1586	unsigned long irq_flags;
1587	struct ftrace_event_call *call = &tp->call;
1588
1589	local_save_flags(irq_flags);
1590	pc = preempt_count();
1591
1592	dsize = __get_data_size(tp, regs);
1593	size = sizeof(*entry) + tp->size + dsize;
1594
1595	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
1596						  size, irq_flags, pc);
1597	if (!event)
1598		return;
1599
1600	entry = ring_buffer_event_data(event);
1601	entry->func = (unsigned long)tp->rp.kp.addr;
1602	entry->ret_ip = (unsigned long)ri->ret_addr;
1603	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
1604
1605	if (!filter_current_check_discard(buffer, call, entry, event))
1606		trace_nowake_buffer_unlock_commit_regs(buffer, event,
1607						       irq_flags, pc, regs);
1608}
1609
1610/* Event entry printers */
1611enum print_line_t
1612print_kprobe_event(struct trace_iterator *iter, int flags,
1613		   struct trace_event *event)
1614{
1615	struct kprobe_trace_entry_head *field;
1616	struct trace_seq *s = &iter->seq;
1617	struct trace_probe *tp;
1618	u8 *data;
1619	int i;
1620
1621	field = (struct kprobe_trace_entry_head *)iter->ent;
1622	tp = container_of(event, struct trace_probe, call.event);
1623
1624	if (!trace_seq_printf(s, "%s: (", tp->call.name))
1625		goto partial;
1626
1627	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1628		goto partial;
1629
1630	if (!trace_seq_puts(s, ")"))
1631		goto partial;
1632
1633	data = (u8 *)&field[1];
1634	for (i = 0; i < tp->nr_args; i++)
1635		if (!tp->args[i].type->print(s, tp->args[i].name,
1636					     data + tp->args[i].offset, field))
1637			goto partial;
1638
1639	if (!trace_seq_puts(s, "\n"))
1640		goto partial;
1641
1642	return TRACE_TYPE_HANDLED;
1643partial:
1644	return TRACE_TYPE_PARTIAL_LINE;
1645}
1646
1647enum print_line_t
1648print_kretprobe_event(struct trace_iterator *iter, int flags,
1649		      struct trace_event *event)
1650{
1651	struct kretprobe_trace_entry_head *field;
1652	struct trace_seq *s = &iter->seq;
1653	struct trace_probe *tp;
1654	u8 *data;
1655	int i;
1656
1657	field = (struct kretprobe_trace_entry_head *)iter->ent;
1658	tp = container_of(event, struct trace_probe, call.event);
1659
1660	if (!trace_seq_printf(s, "%s: (", tp->call.name))
1661		goto partial;
1662
1663	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1664		goto partial;
1665
1666	if (!trace_seq_puts(s, " <- "))
1667		goto partial;
1668
1669	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1670		goto partial;
1671
1672	if (!trace_seq_puts(s, ")"))
1673		goto partial;
1674
1675	data = (u8 *)&field[1];
1676	for (i = 0; i < tp->nr_args; i++)
1677		if (!tp->args[i].type->print(s, tp->args[i].name,
1678					     data + tp->args[i].offset, field))
1679			goto partial;
1680
1681	if (!trace_seq_puts(s, "\n"))
1682		goto partial;
1683
1684	return TRACE_TYPE_HANDLED;
1685partial:
1686	return TRACE_TYPE_PARTIAL_LINE;
1687}
1688
1689#undef DEFINE_FIELD
1690#define DEFINE_FIELD(type, item, name, is_signed)			\
1691	do {								\
1692		ret = trace_define_field(event_call, #type, name,	\
1693					 offsetof(typeof(field), item),	\
1694					 sizeof(field.item), is_signed, \
1695					 FILTER_OTHER);			\
1696		if (ret)						\
1697			return ret;					\
1698	} while (0)
1699
1700static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
1701{
1702	int ret, i;
1703	struct kprobe_trace_entry_head field;
1704	struct trace_probe *tp = (struct trace_probe *)event_call->data;
1705
1706	DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1707	/* Set argument names as fields */
1708	for (i = 0; i < tp->nr_args; i++) {
1709		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
1710					 tp->args[i].name,
1711					 sizeof(field) + tp->args[i].offset,
1712					 tp->args[i].type->size,
1713					 tp->args[i].type->is_signed,
1714					 FILTER_OTHER);
1715		if (ret)
1716			return ret;
1717	}
1718	return 0;
1719}
1720
1721static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
1722{
1723	int ret, i;
1724	struct kretprobe_trace_entry_head field;
1725	struct trace_probe *tp = (struct trace_probe *)event_call->data;
1726
1727	DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1728	DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1729	/* Set argument names as fields */
1730	for (i = 0; i < tp->nr_args; i++) {
1731		ret = trace_define_field(event_call, tp->args[i].type->fmttype,
1732					 tp->args[i].name,
1733					 sizeof(field) + tp->args[i].offset,
1734					 tp->args[i].type->size,
1735					 tp->args[i].type->is_signed,
1736					 FILTER_OTHER);
1737		if (ret)
1738			return ret;
1739	}
1740	return 0;
1741}
1742
1743static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
1744{
1745	int i;
1746	int pos = 0;
1747
1748	const char *fmt, *arg;
1749
1750	if (!trace_probe_is_return(tp)) {
1751		fmt = "(%lx)";
1752		arg = "REC->" FIELD_STRING_IP;
1753	} else {
1754		fmt = "(%lx <- %lx)";
1755		arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
1756	}
1757
1758	/* When len=0, we just calculate the needed length */
1759#define LEN_OR_ZERO (len ? len - pos : 0)
1760
1761	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
1762
1763	for (i = 0; i < tp->nr_args; i++) {
1764		pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%s",
1765				tp->args[i].name, tp->args[i].type->fmt);
1766	}
1767
1768	pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
1769
1770	for (i = 0; i < tp->nr_args; i++) {
1771		if (strcmp(tp->args[i].type->name, "string") == 0)
1772			pos += snprintf(buf + pos, LEN_OR_ZERO,
1773					", __get_str(%s)",
1774					tp->args[i].name);
1775		else
1776			pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
1777					tp->args[i].name);
1778	}
1779
1780#undef LEN_OR_ZERO
1781
1782	/* return the length of print_fmt */
1783	return pos;
1784}
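/*
 * For illustration: for an entry probe with a single argument "dfd"
 * whose type formats as "%lx", the generated format is roughly
 *
 *	"\"(%lx) dfd=%lx\", REC->__probe_ip, REC->dfd"
 *
 * assuming FIELD_STRING_IP is defined as "__probe_ip" (its usual
 * definition in trace_probe.h); "string" arguments use __get_str(dfd)
 * in place of REC->dfd.
 */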
1785
1786static int set_print_fmt(struct trace_probe *tp)
1787{
1788	int len;
1789	char *print_fmt;
1790
1791	/* First: called with 0 length to calculate the needed length */
1792	len = __set_print_fmt(tp, NULL, 0);
1793	print_fmt = kmalloc(len + 1, GFP_KERNEL);
1794	if (!print_fmt)
1795		return -ENOMEM;
1796
1797	/* Second: actually write the @print_fmt */
1798	__set_print_fmt(tp, print_fmt, len + 1);
1799	tp->call.print_fmt = print_fmt;
1800
1801	return 0;
1802}
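/*
 * This is the usual two-pass snprintf() sizing idiom: because
 * snprintf() returns the length that *would* have been written, the
 * first pass with a zero-sized buffer measures the exact allocation.
 * A minimal user-space sketch of the same pattern:
 *
 *	len = snprintf(NULL, 0, "%s=%d", name, val);
 *	buf = malloc(len + 1);
 *	if (buf)
 *		snprintf(buf, len + 1, "%s=%d", name, val);
 */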
1803
1804#ifdef CONFIG_PERF_EVENTS
1805
1806/* Kprobe profile handler */
1807static __kprobes void kprobe_perf_func(struct kprobe *kp,
1808					 struct pt_regs *regs)
1809{
1810	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1811	struct ftrace_event_call *call = &tp->call;
1812	struct kprobe_trace_entry_head *entry;
1813	struct hlist_head *head;
1814	int size, __size, dsize;
1815	int rctx;
1816
1817	dsize = __get_data_size(tp, regs);
1818	__size = sizeof(*entry) + tp->size + dsize;
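	/*
	 * Worked example of the sizing below: perf prepends a u32 size
	 * field and the record must stay u64-aligned, so for
	 * __size == 21: ALIGN(21 + 4, 8) - 4 == 28, and 28 + 4 == 32
	 * bytes total, a multiple of 8.
	 */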
1819	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1820	size -= sizeof(u32);
1821	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1822		     "profile buffer not large enough"))
1823		return;
1824
1825	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1826	if (!entry)
1827		return;
1828
1829	entry->ip = (unsigned long)kp->addr;
1830	memset(&entry[1], 0, dsize);
1831	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
1832
1833	head = this_cpu_ptr(call->perf_events);
1834	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
1835}
1836
1837/* Kretprobe profile handler */
1838static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
1839					    struct pt_regs *regs)
1840{
1841	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1842	struct ftrace_event_call *call = &tp->call;
1843	struct kretprobe_trace_entry_head *entry;
1844	struct hlist_head *head;
1845	int size, __size, dsize;
1846	int rctx;
1847
1848	dsize = __get_data_size(tp, regs);
1849	__size = sizeof(*entry) + tp->size + dsize;
1850	size = ALIGN(__size + sizeof(u32), sizeof(u64));
1851	size -= sizeof(u32);
1852	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1853		     "profile buffer not large enough"))
1854		return;
1855
1856	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1857	if (!entry)
1858		return;
1859
1860	entry->func = (unsigned long)tp->rp.kp.addr;
1861	entry->ret_ip = (unsigned long)ri->ret_addr;
1862	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
1863
1864	head = this_cpu_ptr(call->perf_events);
1865	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
1866}
1867#endif	/* CONFIG_PERF_EVENTS */
1868
1869static __kprobes
1870int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
1871{
1872	struct trace_probe *tp = (struct trace_probe *)event->data;
1873
1874	switch (type) {
1875	case TRACE_REG_REGISTER:
1876		return enable_trace_probe(tp, TP_FLAG_TRACE);
1877	case TRACE_REG_UNREGISTER:
1878		disable_trace_probe(tp, TP_FLAG_TRACE);
1879		return 0;
1880
1881#ifdef CONFIG_PERF_EVENTS
1882	case TRACE_REG_PERF_REGISTER:
1883		return enable_trace_probe(tp, TP_FLAG_PROFILE);
1884	case TRACE_REG_PERF_UNREGISTER:
1885		disable_trace_probe(tp, TP_FLAG_PROFILE);
1886		return 0;
1887#endif
1888	}
1889	return 0;
1890}
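/*
 * Note: the trace core calls this ->reg() callback whenever a consumer
 * attaches or detaches, e.g. writing '1' to the event's "enable" file
 * results in TRACE_REG_REGISTER, while opening a perf event on it
 * results in TRACE_REG_PERF_REGISTER.
 */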
1891
1892static __kprobes
1893int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1894{
1895	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1896
1897	if (tp->flags & TP_FLAG_TRACE)
1898		kprobe_trace_func(kp, regs);
1899#ifdef CONFIG_PERF_EVENTS
1900	if (tp->flags & TP_FLAG_PROFILE)
1901		kprobe_perf_func(kp, regs);
1902#endif
1903	return 0;	/* We don't tweak the kernel, so just return 0 */
1904}
1905
1906static __kprobes
1907int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1908{
1909	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1910
1911	if (tp->flags & TP_FLAG_TRACE)
1912		kretprobe_trace_func(ri, regs);
1913#ifdef CONFIG_PERF_EVENTS
1914	if (tp->flags & TP_FLAG_PROFILE)
1915		kretprobe_perf_func(ri, regs);
1916#endif
1917	return 0;	/* We don't tweak the kernel, so just return 0 */
1918}
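/*
 * The two dispatchers above let one registered [k]probe serve both
 * consumers at once: the TP_FLAG_TRACE/TP_FLAG_PROFILE bits select,
 * per event, whether a hit goes to the ftrace ring buffer, to perf,
 * or to both.
 */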
1919
1920static struct trace_event_functions kretprobe_funcs = {
1921	.trace		= print_kretprobe_event
1922};
1923
1924static struct trace_event_functions kprobe_funcs = {
1925	.trace		= print_kprobe_event
1926};
1927
1928static int register_probe_event(struct trace_probe *tp)
1929{
1930	struct ftrace_event_call *call = &tp->call;
1931	int ret;
1932
1933	/* Initialize ftrace_event_call */
1934	INIT_LIST_HEAD(&call->class->fields);
1935	if (trace_probe_is_return(tp)) {
1936		call->event.funcs = &kretprobe_funcs;
1937		call->class->define_fields = kretprobe_event_define_fields;
1938	} else {
1939		call->event.funcs = &kprobe_funcs;
1940		call->class->define_fields = kprobe_event_define_fields;
1941	}
1942	if (set_print_fmt(tp) < 0)
1943		return -ENOMEM;
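	/*
	 * register_ftrace_event() returns the assigned event type id on
	 * success and 0 on failure, hence the inverted check below.
	 */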
1944	ret = register_ftrace_event(&call->event);
1945	if (!ret) {
1946		kfree(call->print_fmt);
1947		return -ENODEV;
1948	}
1949	call->flags = 0;
1950	call->class->reg = kprobe_register;
1951	call->data = tp;
1952	ret = trace_add_event_call(call);
1953	if (ret) {
1954		pr_info("Failed to register kprobe event: %s\n", call->name);
1955		kfree(call->print_fmt);
1956		unregister_ftrace_event(&call->event);
1957	}
1958	return ret;
1959}
1960
1961static void unregister_probe_event(struct trace_probe *tp)
1962{
1963	/* tp->event is unregistered in trace_remove_event_call() */
1964	trace_remove_event_call(&tp->call);
1965	kfree(tp->call.print_fmt);
1966}
1967
1968/* Make a debugfs interface for controlling probe points */
1969static __init int init_kprobe_trace(void)
1970{
1971	struct dentry *d_tracer;
1972	struct dentry *entry;
1973
1974	if (register_module_notifier(&trace_probe_module_nb))
1975		return -EINVAL;
1976
1977	d_tracer = tracing_init_dentry();
1978	if (!d_tracer)
1979		return 0;
1980
1981	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
1982				    NULL, &kprobe_events_ops);
1983
1984	/* Event list interface */
1985	if (!entry)
1986		pr_warning("Could not create debugfs 'kprobe_events' entry\n");
1988
1989	/* Profile interface */
1990	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
1991				    NULL, &kprobe_profile_ops);
1992
1993	if (!entry)
1994		pr_warning("Could not create debugfs 'kprobe_profile' entry\n");
1996	return 0;
1997}
1998fs_initcall(init_kprobe_trace);
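/*
 * Sketch of typical usage of the files created above (register names
 * are arch-specific; paths assume debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo 'p:myprobe do_sys_open dfd=%ax' \
 *		> /sys/kernel/debug/tracing/kprobe_events
 *	echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
 *	cat /sys/kernel/debug/tracing/trace
 *	cat /sys/kernel/debug/tracing/kprobe_profile
 */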
1999
2000
2001#ifdef CONFIG_FTRACE_STARTUP_TEST
2002
2003/*
2004 * The "__used" keeps gcc from removing the function symbol
2005 * from the kallsyms table.
2006 */
2007static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
2008					       int a4, int a5, int a6)
2009{
2010	return a1 + a2 + a3 + a4 + a5 + a6;
2011}
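/*
 * The selftests below exercise the fetch-argument syntax: $stack is
 * the stack address, $stackN the Nth entry on the stack, +0($stack) a
 * dereference of that address, and $retval the probed function's
 * return value (valid only for return probes).
 */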
2012
2013static __init int kprobe_trace_self_tests_init(void)
2014{
2015	int ret, warn = 0;
2016	int (*target)(int, int, int, int, int, int);
2017	struct trace_probe *tp;
2018
2019	target = kprobe_trace_selftest_target;
2020
2021	pr_info("Testing kprobe tracing: ");
2022
2023	ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
2024				  "$stack $stack0 +0($stack)");
2025	if (WARN_ON_ONCE(ret)) {
2026		pr_warning("error on probing function entry.\n");
2027		warn++;
2028	} else {
2029		/* Enable trace point */
2030		tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
2031		if (WARN_ON_ONCE(tp == NULL)) {
2032			pr_warning("error on getting new probe.\n");
2033			warn++;
2034		} else
2035			enable_trace_probe(tp, TP_FLAG_TRACE);
2036	}
2037
2038	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
2039				  "$retval");
2040	if (WARN_ON_ONCE(ret)) {
2041		pr_warning("error on probing function return.\n");
2042		warn++;
2043	} else {
2044		/* Enable trace point */
2045		tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
2046		if (WARN_ON_ONCE(tp == NULL)) {
2047			pr_warning("error on getting new probe.\n");
2048			warn++;
2049		} else
2050			enable_trace_probe(tp, TP_FLAG_TRACE);
2051	}
2052
2053	if (warn)
2054		goto end;
2055
2056	ret = target(1, 2, 3, 4, 5, 6);
2057
2058	ret = command_trace_probe("-:testprobe");
2059	if (WARN_ON_ONCE(ret)) {
2060		pr_warning("error on deleting a probe.\n");
2061		warn++;
2062	}
2063
2064	ret = command_trace_probe("-:testprobe2");
2065	if (WARN_ON_ONCE(ret)) {
2066		pr_warning("error on deleting a probe.\n");
2067		warn++;
2068	}
2069
2070end:
2071	release_all_trace_probes();
2072	if (warn)
2073		pr_cont("NG: Some tests failed. Please check them.\n");
2074	else
2075		pr_cont("OK\n");
2076	return 0;
2077}
2078
2079late_initcall(kprobe_trace_self_tests_init);
2080
2081#endif