   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Infrastructure for profiling code inserted by 'gcc -pg'.
   4 *
   5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
   7 *
   8 * Originally ported from the -rt patch by:
   9 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
  10 *
  11 * Based on code in the latency_tracer, that is:
  12 *
  13 *  Copyright (C) 2004-2006 Ingo Molnar
  14 *  Copyright (C) 2004 Nadia Yvette Chambers
  15 */
  16
  17#include <linux/stop_machine.h>
  18#include <linux/clocksource.h>
  19#include <linux/sched/task.h>
  20#include <linux/kallsyms.h>
  21#include <linux/security.h>
  22#include <linux/seq_file.h>
  23#include <linux/tracefs.h>
  24#include <linux/hardirq.h>
  25#include <linux/kthread.h>
  26#include <linux/uaccess.h>
  27#include <linux/bsearch.h>
  28#include <linux/module.h>
  29#include <linux/ftrace.h>
  30#include <linux/sysctl.h>
  31#include <linux/slab.h>
  32#include <linux/ctype.h>
  33#include <linux/sort.h>
  34#include <linux/list.h>
  35#include <linux/hash.h>
  36#include <linux/rcupdate.h>
  37#include <linux/kprobes.h>
  38
  39#include <trace/events/sched.h>
  40
  41#include <asm/sections.h>
  42#include <asm/setup.h>
  43
  44#include "ftrace_internal.h"
  45#include "trace_output.h"
  46#include "trace_stat.h"
  47
  48/* Flags that do not get reset */
  49#define FTRACE_NOCLEAR_FLAGS	(FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \
  50				 FTRACE_FL_MODIFIED)
  51
  52#define FTRACE_INVALID_FUNCTION		"__ftrace_invalid_address__"
  53
  54#define FTRACE_WARN_ON(cond)			\
  55	({					\
  56		int ___r = cond;		\
  57		if (WARN_ON(___r))		\
  58			ftrace_kill();		\
  59		___r;				\
  60	})
  61
  62#define FTRACE_WARN_ON_ONCE(cond)		\
  63	({					\
  64		int ___r = cond;		\
  65		if (WARN_ON_ONCE(___r))		\
  66			ftrace_kill();		\
  67		___r;				\
  68	})
  69
  70/* hash bits for specific function selection */
  71#define FTRACE_HASH_DEFAULT_BITS 10
  72#define FTRACE_HASH_MAX_BITS 12
  73
  74#ifdef CONFIG_DYNAMIC_FTRACE
  75#define INIT_OPS_HASH(opsname)	\
  76	.func_hash		= &opsname.local_hash,			\
  77	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), \
  78	.subop_list		= LIST_HEAD_INIT(opsname.subop_list),
  79#else
  80#define INIT_OPS_HASH(opsname)
  81#endif
  82
  83enum {
  84	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
  85	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
  86};
  87
  88struct ftrace_ops ftrace_list_end __read_mostly = {
  89	.func		= ftrace_stub,
  90	.flags		= FTRACE_OPS_FL_STUB,
  91	INIT_OPS_HASH(ftrace_list_end)
  92};
  93
  94/* ftrace_enabled is a method to turn ftrace on or off */
  95int ftrace_enabled __read_mostly;
  96static int __maybe_unused last_ftrace_enabled;
  97
  98/* Current function tracing op */
  99struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 100/* What to set function_trace_op to */
 101static struct ftrace_ops *set_function_trace_op;
 102
 103bool ftrace_pids_enabled(struct ftrace_ops *ops)
 104{
 105	struct trace_array *tr;
 106
 107	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
 108		return false;
 109
 110	tr = ops->private;
 111
 112	return tr->function_pids != NULL || tr->function_no_pids != NULL;
 113}
 114
 115static void ftrace_update_trampoline(struct ftrace_ops *ops);
 116
 117/*
 118 * ftrace_disabled is set when an anomaly is discovered.
 119 * ftrace_disabled is much stronger than ftrace_enabled.
 120 */
 121static int ftrace_disabled __read_mostly;
 122
 123DEFINE_MUTEX(ftrace_lock);
 124
 125struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = (struct ftrace_ops __rcu *)&ftrace_list_end;
 126ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 127struct ftrace_ops global_ops;
 128
  129/* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */
 130void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 131			  struct ftrace_ops *op, struct ftrace_regs *fregs);
 132
 133#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
 134/*
 135 * Stub used to invoke the list ops without requiring a separate trampoline.
 136 */
 137const struct ftrace_ops ftrace_list_ops = {
 138	.func	= ftrace_ops_list_func,
 139	.flags	= FTRACE_OPS_FL_STUB,
 140};
 141
 142static void ftrace_ops_nop_func(unsigned long ip, unsigned long parent_ip,
 143				struct ftrace_ops *op,
 144				struct ftrace_regs *fregs)
 145{
 146	/* do nothing */
 147}
 148
 149/*
 150 * Stub used when a call site is disabled. May be called transiently by threads
 151 * which have made it into ftrace_caller but haven't yet recovered the ops at
 152 * the point the call site is disabled.
 153 */
 154const struct ftrace_ops ftrace_nop_ops = {
 155	.func	= ftrace_ops_nop_func,
 156	.flags  = FTRACE_OPS_FL_STUB,
 157};
 158#endif
 159
 160static inline void ftrace_ops_init(struct ftrace_ops *ops)
 161{
 162#ifdef CONFIG_DYNAMIC_FTRACE
 163	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
 164		mutex_init(&ops->local_hash.regex_lock);
 165		INIT_LIST_HEAD(&ops->subop_list);
 166		ops->func_hash = &ops->local_hash;
 167		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
 168	}
 169#endif
 170}
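/*
 * A minimal registration sketch (illustrative only, not part of this file;
 * error handling omitted, my_callback/my_ops are hypothetical names).
 * register_ftrace_function() runs the ops through ftrace_ops_init()
 * before it is added to ftrace_ops_list:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// called at the start of every traced function
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */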
 171
 172/* Call this function for when a callback filters on set_ftrace_pid */
 173static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 174			    struct ftrace_ops *op, struct ftrace_regs *fregs)
 175{
 176	struct trace_array *tr = op->private;
 177	int pid;
 178
 179	if (tr) {
 180		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
 181		if (pid == FTRACE_PID_IGNORE)
 182			return;
 183		if (pid != FTRACE_PID_TRACE &&
 184		    pid != current->pid)
 185			return;
 186	}
 187
 188	op->saved_func(ip, parent_ip, op, fregs);
 189}
 190
 191static void ftrace_sync_ipi(void *data)
 192{
 193	/* Probably not needed, but do it anyway */
 194	smp_rmb();
 195}
 196
 197static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 198{
 199	/*
 200	 * If this is a dynamic or RCU ops, or we force list func,
 201	 * then it needs to call the list anyway.
 202	 */
 203	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
 204	    FTRACE_FORCE_LIST_FUNC)
 205		return ftrace_ops_list_func;
 206
 207	return ftrace_ops_get_func(ops);
 208}
 209
 210static void update_ftrace_function(void)
 211{
 212	ftrace_func_t func;
 213
 214	/*
 215	 * Prepare the ftrace_ops that the arch callback will use.
 216	 * If there's only one ftrace_ops registered, the ftrace_ops_list
 217	 * will point to the ops we want.
 218	 */
 219	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
 220						lockdep_is_held(&ftrace_lock));
 221
 222	/* If there's no ftrace_ops registered, just call the stub function */
 223	if (set_function_trace_op == &ftrace_list_end) {
 224		func = ftrace_stub;
 225
 226	/*
 227	 * If we are at the end of the list and this ops is
 228	 * recursion safe and not dynamic and the arch supports passing ops,
 229	 * then have the mcount trampoline call the function directly.
 230	 */
 231	} else if (rcu_dereference_protected(ftrace_ops_list->next,
 232			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
 233		func = ftrace_ops_get_list_func(ftrace_ops_list);
 234
 235	} else {
 236		/* Just use the default ftrace_ops */
 237		set_function_trace_op = &ftrace_list_end;
 238		func = ftrace_ops_list_func;
 239	}
 240
 241	/* If there's no change, then do nothing more here */
 242	if (ftrace_trace_function == func)
 243		return;
 244
 245	/*
 246	 * If we are using the list function, it doesn't care
 247	 * about the function_trace_ops.
 248	 */
 249	if (func == ftrace_ops_list_func) {
 250		ftrace_trace_function = func;
 251		/*
 252		 * Don't even bother setting function_trace_ops,
 253		 * it would be racy to do so anyway.
 254		 */
 255		return;
 256	}
 257
 258#ifndef CONFIG_DYNAMIC_FTRACE
 259	/*
 260	 * For static tracing, we need to be a bit more careful.
  261	 * The function change takes effect immediately. Thus,
 262	 * we need to coordinate the setting of the function_trace_ops
 263	 * with the setting of the ftrace_trace_function.
 264	 *
 265	 * Set the function to the list ops, which will call the
 266	 * function we want, albeit indirectly, but it handles the
 267	 * ftrace_ops and doesn't depend on function_trace_op.
 268	 */
 269	ftrace_trace_function = ftrace_ops_list_func;
 270	/*
 271	 * Make sure all CPUs see this. Yes this is slow, but static
 272	 * tracing is slow and nasty to have enabled.
 273	 */
 274	synchronize_rcu_tasks_rude();
 275	/* Now all cpus are using the list ops. */
 276	function_trace_op = set_function_trace_op;
 277	/* Make sure the function_trace_op is visible on all CPUs */
 278	smp_wmb();
 279	/* Nasty way to force a rmb on all cpus */
 280	smp_call_function(ftrace_sync_ipi, NULL, 1);
 281	/* OK, we are all set to update the ftrace_trace_function now! */
 282#endif /* !CONFIG_DYNAMIC_FTRACE */
 283
 284	ftrace_trace_function = func;
 285}
 286
 287static void add_ftrace_ops(struct ftrace_ops __rcu **list,
 288			   struct ftrace_ops *ops)
 289{
 290	rcu_assign_pointer(ops->next, *list);
 291
 292	/*
 293	 * We are entering ops into the list but another
 294	 * CPU might be walking that list. We need to make sure
 295	 * the ops->next pointer is valid before another CPU sees
 296	 * the ops pointer included into the list.
 297	 */
 298	rcu_assign_pointer(*list, ops);
 299}
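/*
 * List walkers pair with the publication order above: macros such as
 * do_for_each_ftrace_op() load the list head and ->next pointers with
 * RCU semantics, so once *list is updated a concurrent walker observes
 * a fully initialized ops->next.
 */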
 300
 301static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
 302			     struct ftrace_ops *ops)
 303{
 304	struct ftrace_ops **p;
 305
 306	/*
 307	 * If we are removing the last function, then simply point
 308	 * to the ftrace_stub.
 309	 */
 310	if (rcu_dereference_protected(*list,
 311			lockdep_is_held(&ftrace_lock)) == ops &&
 312	    rcu_dereference_protected(ops->next,
 313			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
 314		rcu_assign_pointer(*list, &ftrace_list_end);
 315		return 0;
 316	}
 317
 318	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
 319		if (*p == ops)
 320			break;
 321
 322	if (*p != ops)
 323		return -1;
 324
 325	*p = (*p)->next;
 326	return 0;
 327}
 328
 329static void ftrace_update_trampoline(struct ftrace_ops *ops);
 330
 331int __register_ftrace_function(struct ftrace_ops *ops)
 332{
 333	if (ops->flags & FTRACE_OPS_FL_DELETED)
 334		return -EINVAL;
 335
 336	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 337		return -EBUSY;
 338
 339#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 340	/*
 341	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
 342	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
 343	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
 344	 */
 345	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
 346	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
 347		return -EINVAL;
 348
 349	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
 350		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
 351#endif
 352	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
 353		return -EBUSY;
 354
 355	if (!is_kernel_core_data((unsigned long)ops))
 356		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 357
 358	add_ftrace_ops(&ftrace_ops_list, ops);
 359
 360	/* Always save the function, and reset at unregistering */
 361	ops->saved_func = ops->func;
 362
 363	if (ftrace_pids_enabled(ops))
 364		ops->func = ftrace_pid_func;
 365
 366	ftrace_update_trampoline(ops);
 367
 368	if (ftrace_enabled)
 369		update_ftrace_function();
 370
 371	return 0;
 372}
 373
 374int __unregister_ftrace_function(struct ftrace_ops *ops)
 375{
 376	int ret;
 377
 378	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
 379		return -EBUSY;
 380
 381	ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 382
 383	if (ret < 0)
 384		return ret;
 385
 386	if (ftrace_enabled)
 387		update_ftrace_function();
 388
 389	ops->func = ops->saved_func;
 390
 391	return 0;
 392}
 393
 394static void ftrace_update_pid_func(void)
 395{
 396	struct ftrace_ops *op;
 397
 398	/* Only do something if we are tracing something */
 399	if (ftrace_trace_function == ftrace_stub)
 400		return;
 401
 402	do_for_each_ftrace_op(op, ftrace_ops_list) {
 403		if (op->flags & FTRACE_OPS_FL_PID) {
 404			op->func = ftrace_pids_enabled(op) ?
 405				ftrace_pid_func : op->saved_func;
 406			ftrace_update_trampoline(op);
 407		}
 408	} while_for_each_ftrace_op(op);
 409
 410	fgraph_update_pid_func();
 411
 412	update_ftrace_function();
 413}
 414
 415#ifdef CONFIG_FUNCTION_PROFILER
 416struct ftrace_profile {
 417	struct hlist_node		node;
 418	unsigned long			ip;
 419	unsigned long			counter;
 420#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 421	unsigned long long		time;
 422	unsigned long long		time_squared;
 423#endif
 424};
 425
 426struct ftrace_profile_page {
 427	struct ftrace_profile_page	*next;
 428	unsigned long			index;
 429	struct ftrace_profile		records[];
 430};
 431
 432struct ftrace_profile_stat {
 433	atomic_t			disabled;
 434	struct hlist_head		*hash;
 435	struct ftrace_profile_page	*pages;
 436	struct ftrace_profile_page	*start;
 437	struct tracer_stat		stat;
 438};
 439
 440#define PROFILE_RECORDS_SIZE						\
 441	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
 442
 443#define PROFILES_PER_PAGE					\
 444	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
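/*
 * Sizing sketch (illustrative; exact sizes are arch and config dependent):
 * with 4K pages and a 48-byte struct ftrace_profile (16-byte hlist_node +
 * ip + counter + time + time_squared), PROFILE_RECORDS_SIZE is
 * 4096 - 16 = 4080 and PROFILES_PER_PAGE is 4080 / 48 = 85 records per page.
 */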
 445
 446static int ftrace_profile_enabled __read_mostly;
 447
 448/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
 449static DEFINE_MUTEX(ftrace_profile_lock);
 450
 451static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
 452
 453#define FTRACE_PROFILE_HASH_BITS 10
 454#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
 455
 456static void *
 457function_stat_next(void *v, int idx)
 458{
 459	struct ftrace_profile *rec = v;
 460	struct ftrace_profile_page *pg;
 461
 462	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
 463
 464 again:
 465	if (idx != 0)
 466		rec++;
 467
 468	if ((void *)rec >= (void *)&pg->records[pg->index]) {
 469		pg = pg->next;
 470		if (!pg)
 471			return NULL;
 472		rec = &pg->records[0];
 473		if (!rec->counter)
 474			goto again;
 475	}
 476
 477	return rec;
 478}
 479
 480static void *function_stat_start(struct tracer_stat *trace)
 481{
 482	struct ftrace_profile_stat *stat =
 483		container_of(trace, struct ftrace_profile_stat, stat);
 484
 485	if (!stat || !stat->start)
 486		return NULL;
 487
 488	return function_stat_next(&stat->start->records[0], 0);
 489}
 490
 491#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 492/* function graph compares on total time */
 493static int function_stat_cmp(const void *p1, const void *p2)
 494{
 495	const struct ftrace_profile *a = p1;
 496	const struct ftrace_profile *b = p2;
 497
 498	if (a->time < b->time)
 499		return -1;
 500	if (a->time > b->time)
 501		return 1;
 502	else
 503		return 0;
 504}
 505#else
 506/* not function graph compares against hits */
 507static int function_stat_cmp(const void *p1, const void *p2)
 508{
 509	const struct ftrace_profile *a = p1;
 510	const struct ftrace_profile *b = p2;
 511
 512	if (a->counter < b->counter)
 513		return -1;
 514	if (a->counter > b->counter)
 515		return 1;
 516	else
 517		return 0;
 518}
 519#endif
 520
 521static int function_stat_headers(struct seq_file *m)
 522{
 523#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 524	seq_puts(m, "  Function                               "
 525		 "Hit    Time            Avg             s^2\n"
 526		    "  --------                               "
 527		 "---    ----            ---             ---\n");
 528#else
 529	seq_puts(m, "  Function                               Hit\n"
 530		    "  --------                               ---\n");
 531#endif
 532	return 0;
 533}
 534
 535static int function_stat_show(struct seq_file *m, void *v)
 536{
 537	struct ftrace_profile *rec = v;
 538	char str[KSYM_SYMBOL_LEN];
 539	int ret = 0;
 540#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 541	static struct trace_seq s;
 542	unsigned long long avg;
 543	unsigned long long stddev;
 544	unsigned long long stddev_denom;
 545#endif
 546	mutex_lock(&ftrace_profile_lock);
 547
 548	/* we raced with function_profile_reset() */
 549	if (unlikely(rec->counter == 0)) {
 550		ret = -EBUSY;
 551		goto out;
 552	}
 553
 554#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 555	avg = div64_ul(rec->time, rec->counter);
 556	if (tracing_thresh && (avg < tracing_thresh))
 557		goto out;
 558#endif
 559
 560	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 561	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
 562
 563#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 564	seq_puts(m, "    ");
 565
 566	/*
 567	 * Variance formula:
 568	 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
 569	 * Maybe Welford's method is better here?
 570	 * Divide only by 1000 for ns^2 -> us^2 conversion.
 571	 * trace_print_graph_duration will divide by 1000 again.
 572	 */
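	/*
	 * Worked example (illustrative numbers): two samples of 1000ns
	 * and 3000ns give n = 2, \Sum x_i = 4000, \Sum (x_i)^2 = 10^7:
	 * n * \Sum (x_i)^2 - (\Sum x_i)^2 = 2 * 10^7 - 16 * 10^6 = 4 * 10^6.
	 * Dividing by n * (n-1) * 1000 = 2000 yields 2000; the second
	 * divide by 1000 in trace_print_graph_duration() prints 2 us^2,
	 * matching the sample variance of {1, 3} us.
	 */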
 573	stddev = 0;
 574	stddev_denom = rec->counter * (rec->counter - 1) * 1000;
 575	if (stddev_denom) {
 576		stddev = rec->counter * rec->time_squared -
 577			 rec->time * rec->time;
 578		stddev = div64_ul(stddev, stddev_denom);
 579	}
 580
 581	trace_seq_init(&s);
 582	trace_print_graph_duration(rec->time, &s);
 583	trace_seq_puts(&s, "    ");
 584	trace_print_graph_duration(avg, &s);
 585	trace_seq_puts(&s, "    ");
 586	trace_print_graph_duration(stddev, &s);
 587	trace_print_seq(m, &s);
 588#endif
 589	seq_putc(m, '\n');
 590out:
 591	mutex_unlock(&ftrace_profile_lock);
 592
 593	return ret;
 594}
 595
 596static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
 597{
 598	struct ftrace_profile_page *pg;
 599
 600	pg = stat->pages = stat->start;
 601
 602	while (pg) {
 603		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
 604		pg->index = 0;
 605		pg = pg->next;
 606	}
 607
 608	memset(stat->hash, 0,
 609	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
 610}
 611
 612static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 613{
 614	struct ftrace_profile_page *pg;
 615	int functions;
 616	int pages;
 617	int i;
 618
 619	/* If we already allocated, do nothing */
 620	if (stat->pages)
 621		return 0;
 622
 623	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
 624	if (!stat->pages)
 625		return -ENOMEM;
 626
 627#ifdef CONFIG_DYNAMIC_FTRACE
 628	functions = ftrace_update_tot_cnt;
 629#else
 630	/*
 631	 * We do not know the number of functions that exist because
 632	 * dynamic tracing is what counts them. With past experience
 633	 * we have around 20K functions. That should be more than enough.
 634	 * It is highly unlikely we will execute every function in
 635	 * the kernel.
 636	 */
 637	functions = 20000;
 638#endif
 639
 640	pg = stat->start = stat->pages;
 641
 642	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
 643
 644	for (i = 1; i < pages; i++) {
 645		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
 646		if (!pg->next)
 647			goto out_free;
 648		pg = pg->next;
 649	}
 650
 651	return 0;
 652
 653 out_free:
 654	pg = stat->start;
 655	while (pg) {
 656		unsigned long tmp = (unsigned long)pg;
 657
 658		pg = pg->next;
 659		free_page(tmp);
 660	}
 661
 662	stat->pages = NULL;
 663	stat->start = NULL;
 664
 665	return -ENOMEM;
 666}
 667
 668static int ftrace_profile_init_cpu(int cpu)
 669{
 670	struct ftrace_profile_stat *stat;
 671	int size;
 672
 673	stat = &per_cpu(ftrace_profile_stats, cpu);
 674
 675	if (stat->hash) {
 676		/* If the profile is already created, simply reset it */
 677		ftrace_profile_reset(stat);
 678		return 0;
 679	}
 680
 681	/*
 682	 * We are profiling all functions, but usually only a few thousand
 683	 * functions are hit. We'll make a hash of 1024 items.
 684	 */
 685	size = FTRACE_PROFILE_HASH_SIZE;
 686
 687	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
 688
 689	if (!stat->hash)
 690		return -ENOMEM;
 691
 692	/* Preallocate the function profiling pages */
 693	if (ftrace_profile_pages_init(stat) < 0) {
 694		kfree(stat->hash);
 695		stat->hash = NULL;
 696		return -ENOMEM;
 697	}
 698
 699	return 0;
 700}
 701
 702static int ftrace_profile_init(void)
 703{
 704	int cpu;
 705	int ret = 0;
 706
 707	for_each_possible_cpu(cpu) {
 708		ret = ftrace_profile_init_cpu(cpu);
 709		if (ret)
 710			break;
 711	}
 712
 713	return ret;
 714}
 715
 716/* interrupts must be disabled */
 717static struct ftrace_profile *
 718ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 719{
 720	struct ftrace_profile *rec;
 721	struct hlist_head *hhd;
 722	unsigned long key;
 723
 724	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
 725	hhd = &stat->hash[key];
 726
 727	if (hlist_empty(hhd))
 728		return NULL;
 729
 730	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
 731		if (rec->ip == ip)
 732			return rec;
 733	}
 734
 735	return NULL;
 736}
 737
 738static void ftrace_add_profile(struct ftrace_profile_stat *stat,
 739			       struct ftrace_profile *rec)
 740{
 741	unsigned long key;
 742
 743	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
 744	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
 745}
 746
 747/*
 748 * The memory is already allocated, this simply finds a new record to use.
 749 */
 750static struct ftrace_profile *
 751ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 752{
 753	struct ftrace_profile *rec = NULL;
 754
 755	/* prevent recursion (from NMIs) */
 756	if (atomic_inc_return(&stat->disabled) != 1)
 757		goto out;
 758
 759	/*
 760	 * Try to find the function again since an NMI
 761	 * could have added it
 762	 */
 763	rec = ftrace_find_profiled_func(stat, ip);
 764	if (rec)
 765		goto out;
 766
 767	if (stat->pages->index == PROFILES_PER_PAGE) {
 768		if (!stat->pages->next)
 769			goto out;
 770		stat->pages = stat->pages->next;
 771	}
 772
 773	rec = &stat->pages->records[stat->pages->index++];
 774	rec->ip = ip;
 775	ftrace_add_profile(stat, rec);
 776
 777 out:
 778	atomic_dec(&stat->disabled);
 779
 780	return rec;
 781}
 782
 783static void
 784function_profile_call(unsigned long ip, unsigned long parent_ip,
 785		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
 786{
 787	struct ftrace_profile_stat *stat;
 788	struct ftrace_profile *rec;
 789	unsigned long flags;
 790
 791	if (!ftrace_profile_enabled)
 792		return;
 793
 794	local_irq_save(flags);
 795
 796	stat = this_cpu_ptr(&ftrace_profile_stats);
 797	if (!stat->hash || !ftrace_profile_enabled)
 798		goto out;
 799
 800	rec = ftrace_find_profiled_func(stat, ip);
 801	if (!rec) {
 802		rec = ftrace_profile_alloc(stat, ip);
 803		if (!rec)
 804			goto out;
 805	}
 806
 807	rec->counter++;
 808 out:
 809	local_irq_restore(flags);
 810}
 811
 812#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 813static bool fgraph_graph_time = true;
 814
 815void ftrace_graph_graph_time_control(bool enable)
 816{
 817	fgraph_graph_time = enable;
 818}
 819
 820struct profile_fgraph_data {
 821	unsigned long long		calltime;
 822	unsigned long long		subtime;
 823	unsigned long long		sleeptime;
 824};
 825
 826static int profile_graph_entry(struct ftrace_graph_ent *trace,
 827			       struct fgraph_ops *gops)
 828{
 829	struct profile_fgraph_data *profile_data;
 830
 831	function_profile_call(trace->func, 0, NULL, NULL);
 832
 833	/* If function graph is shutting down, ret_stack can be NULL */
 834	if (!current->ret_stack)
 835		return 0;
 836
 837	profile_data = fgraph_reserve_data(gops->idx, sizeof(*profile_data));
 838	if (!profile_data)
 839		return 0;
 840
 841	profile_data->subtime = 0;
 842	profile_data->sleeptime = current->ftrace_sleeptime;
 843	profile_data->calltime = trace_clock_local();
 844
 845	return 1;
 846}
 847
 848static void profile_graph_return(struct ftrace_graph_ret *trace,
 849				 struct fgraph_ops *gops)
 850{
 851	struct profile_fgraph_data *profile_data;
 852	struct ftrace_profile_stat *stat;
 853	unsigned long long calltime;
 854	unsigned long long rettime = trace_clock_local();
 855	struct ftrace_profile *rec;
 856	unsigned long flags;
 857	int size;
 858
 859	local_irq_save(flags);
 860	stat = this_cpu_ptr(&ftrace_profile_stats);
 861	if (!stat->hash || !ftrace_profile_enabled)
 862		goto out;
 863
 864	profile_data = fgraph_retrieve_data(gops->idx, &size);
 865
 866	/* If the calltime was zero'd ignore it */
 867	if (!profile_data || !profile_data->calltime)
 868		goto out;
 869
 870	calltime = rettime - profile_data->calltime;
 871
 872	if (!fgraph_sleep_time) {
 873		if (current->ftrace_sleeptime)
 874			calltime -= current->ftrace_sleeptime - profile_data->sleeptime;
 875	}
 876
 877	if (!fgraph_graph_time) {
 878		struct profile_fgraph_data *parent_data;
 879
 880		/* Append this call time to the parent time to subtract */
 881		parent_data = fgraph_retrieve_parent_data(gops->idx, &size, 1);
 882		if (parent_data)
 883			parent_data->subtime += calltime;
 884
 885		if (profile_data->subtime && profile_data->subtime < calltime)
 886			calltime -= profile_data->subtime;
 887		else
 888			calltime = 0;
 889	}
 890
 891	rec = ftrace_find_profiled_func(stat, trace->func);
 892	if (rec) {
 893		rec->time += calltime;
 894		rec->time_squared += calltime * calltime;
 895	}
 896
 897 out:
 898	local_irq_restore(flags);
 899}
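/*
 * Timing sketch (illustrative numbers): a function entered at
 * calltime = 100us that returns at rettime = 500us, with the task
 * sleeping 150us in between, is charged 500 - 100 - 150 = 250us when
 * fgraph_sleep_time is false; when true, the full 400us including
 * the sleep is recorded.
 */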
 900
 901static struct fgraph_ops fprofiler_ops = {
 902	.entryfunc = &profile_graph_entry,
 903	.retfunc = &profile_graph_return,
 904};
 905
 906static int register_ftrace_profiler(void)
 907{
 908	ftrace_ops_set_global_filter(&fprofiler_ops.ops);
 909	return register_ftrace_graph(&fprofiler_ops);
 910}
 911
 912static void unregister_ftrace_profiler(void)
 913{
 914	unregister_ftrace_graph(&fprofiler_ops);
 915}
 916#else
 917static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 918	.func		= function_profile_call,
 919};
 920
 921static int register_ftrace_profiler(void)
 922{
 923	ftrace_ops_set_global_filter(&ftrace_profile_ops);
 924	return register_ftrace_function(&ftrace_profile_ops);
 925}
 926
 927static void unregister_ftrace_profiler(void)
 928{
 929	unregister_ftrace_function(&ftrace_profile_ops);
 930}
 931#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 932
 933static ssize_t
 934ftrace_profile_write(struct file *filp, const char __user *ubuf,
 935		     size_t cnt, loff_t *ppos)
 936{
 937	unsigned long val;
 938	int ret;
 939
 940	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 941	if (ret)
 942		return ret;
 943
 944	val = !!val;
 945
 946	mutex_lock(&ftrace_profile_lock);
 947	if (ftrace_profile_enabled ^ val) {
 948		if (val) {
 949			ret = ftrace_profile_init();
 950			if (ret < 0) {
 951				cnt = ret;
 952				goto out;
 953			}
 954
 955			ret = register_ftrace_profiler();
 956			if (ret < 0) {
 957				cnt = ret;
 958				goto out;
 959			}
 960			ftrace_profile_enabled = 1;
 961		} else {
 962			ftrace_profile_enabled = 0;
 963			/*
 964			 * unregister_ftrace_profiler calls stop_machine
  965			 * so this acts like a synchronize_rcu().
 966			 */
 967			unregister_ftrace_profiler();
 968		}
 969	}
 970 out:
 971	mutex_unlock(&ftrace_profile_lock);
 972
 973	*ppos += cnt;
 974
 975	return cnt;
 976}
 977
 978static ssize_t
 979ftrace_profile_read(struct file *filp, char __user *ubuf,
 980		     size_t cnt, loff_t *ppos)
 981{
 982	char buf[64];		/* big enough to hold a number */
 983	int r;
 984
 985	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
 986	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 987}
 988
 989static const struct file_operations ftrace_profile_fops = {
 990	.open		= tracing_open_generic,
 991	.read		= ftrace_profile_read,
 992	.write		= ftrace_profile_write,
 993	.llseek		= default_llseek,
 994};
 995
 996/* used to initialize the real stat files */
 997static struct tracer_stat function_stats __initdata = {
 998	.name		= "functions",
 999	.stat_start	= function_stat_start,
1000	.stat_next	= function_stat_next,
1001	.stat_cmp	= function_stat_cmp,
1002	.stat_headers	= function_stat_headers,
1003	.stat_show	= function_stat_show
1004};
1005
1006static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1007{
1008	struct ftrace_profile_stat *stat;
1009	char *name;
1010	int ret;
1011	int cpu;
1012
1013	for_each_possible_cpu(cpu) {
1014		stat = &per_cpu(ftrace_profile_stats, cpu);
1015
1016		name = kasprintf(GFP_KERNEL, "function%d", cpu);
1017		if (!name) {
1018			/*
 1019			 * The files created are permanent; if something happens,
 1020			 * we still do not free the memory.
1021			 */
1022			WARN(1,
1023			     "Could not allocate stat file for cpu %d\n",
1024			     cpu);
1025			return;
1026		}
1027		stat->stat = function_stats;
1028		stat->stat.name = name;
1029		ret = register_stat_tracer(&stat->stat);
1030		if (ret) {
1031			WARN(1,
1032			     "Could not register function stat for cpu %d\n",
1033			     cpu);
1034			kfree(name);
1035			return;
1036		}
1037	}
1038
1039	trace_create_file("function_profile_enabled",
1040			  TRACE_MODE_WRITE, d_tracer, NULL,
1041			  &ftrace_profile_fops);
1042}
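/*
 * Usage sketch (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	# echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	# cat /sys/kernel/tracing/trace_stat/function0
 *
 * Each per-CPU "function<N>" file is one of the stat files registered
 * above, with its output sorted by function_stat_cmp().
 */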
1043
1044#else /* CONFIG_FUNCTION_PROFILER */
1045static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1046{
1047}
1048#endif /* CONFIG_FUNCTION_PROFILER */
1049
1050#ifdef CONFIG_DYNAMIC_FTRACE
1051
1052static struct ftrace_ops *removed_ops;
1053
1054/*
1055 * Set when doing a global update, like enabling all recs or disabling them.
1056 * It is not set when just updating a single ftrace_ops.
1057 */
1058static bool update_all_ops;
1059
1060#ifndef CONFIG_FTRACE_MCOUNT_RECORD
1061# error Dynamic ftrace depends on MCOUNT_RECORD
1062#endif
1063
1064struct ftrace_func_probe {
1065	struct ftrace_probe_ops	*probe_ops;
1066	struct ftrace_ops	ops;
1067	struct trace_array	*tr;
1068	struct list_head	list;
1069	void			*data;
1070	int			ref;
1071};
1072
1073/*
1074 * We make these constant because no one should touch them,
1075 * but they are used as the default "empty hash", to avoid allocating
1076 * it all the time. These are in a read only section such that if
1077 * anyone does try to modify it, it will cause an exception.
1078 */
1079static const struct hlist_head empty_buckets[1];
1080static const struct ftrace_hash empty_hash = {
1081	.buckets = (struct hlist_head *)empty_buckets,
1082};
1083#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
1084
1085struct ftrace_ops global_ops = {
1086	.func				= ftrace_stub,
1087	.local_hash.notrace_hash	= EMPTY_HASH,
1088	.local_hash.filter_hash		= EMPTY_HASH,
1089	INIT_OPS_HASH(global_ops)
1090	.flags				= FTRACE_OPS_FL_INITIALIZED |
1091					  FTRACE_OPS_FL_PID,
1092};
1093
1094/*
1095 * Used by the stack unwinder to know about dynamic ftrace trampolines.
1096 */
1097struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
1098{
1099	struct ftrace_ops *op = NULL;
1100
1101	/*
1102	 * Some of the ops may be dynamically allocated,
1103	 * they are freed after a synchronize_rcu().
1104	 */
1105	preempt_disable_notrace();
1106
1107	do_for_each_ftrace_op(op, ftrace_ops_list) {
1108		/*
1109		 * This is to check for dynamically allocated trampolines.
1110		 * Trampolines that are in kernel text will have
1111		 * core_kernel_text() return true.
1112		 */
1113		if (op->trampoline && op->trampoline_size)
1114			if (addr >= op->trampoline &&
1115			    addr < op->trampoline + op->trampoline_size) {
1116				preempt_enable_notrace();
1117				return op;
1118			}
1119	} while_for_each_ftrace_op(op);
1120	preempt_enable_notrace();
1121
1122	return NULL;
1123}
1124
1125/*
1126 * This is used by __kernel_text_address() to return true if the
1127 * address is on a dynamically allocated trampoline that would
1128 * not return true for either core_kernel_text() or
1129 * is_module_text_address().
1130 */
1131bool is_ftrace_trampoline(unsigned long addr)
1132{
1133	return ftrace_ops_trampoline(addr) != NULL;
1134}
1135
1136struct ftrace_page {
1137	struct ftrace_page	*next;
1138	struct dyn_ftrace	*records;
1139	int			index;
1140	int			order;
1141};
1142
1143#define ENTRY_SIZE sizeof(struct dyn_ftrace)
1144#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1145
1146static struct ftrace_page	*ftrace_pages_start;
1147static struct ftrace_page	*ftrace_pages;
1148
1149static __always_inline unsigned long
1150ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1151{
1152	if (hash->size_bits > 0)
1153		return hash_long(ip, hash->size_bits);
1154
1155	return 0;
1156}
1157
1158/* Only use this function if ftrace_hash_empty() has already been tested */
1159static __always_inline struct ftrace_func_entry *
1160__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1161{
1162	unsigned long key;
1163	struct ftrace_func_entry *entry;
1164	struct hlist_head *hhd;
1165
1166	key = ftrace_hash_key(hash, ip);
1167	hhd = &hash->buckets[key];
1168
1169	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1170		if (entry->ip == ip)
1171			return entry;
1172	}
1173	return NULL;
1174}
1175
1176/**
1177 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1178 * @hash: The hash to look at
1179 * @ip: The instruction pointer to test
1180 *
1181 * Search a given @hash to see if a given instruction pointer (@ip)
1182 * exists in it.
1183 *
1184 * Returns: the entry that holds the @ip if found. NULL otherwise.
1185 */
1186struct ftrace_func_entry *
1187ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1188{
1189	if (ftrace_hash_empty(hash))
1190		return NULL;
1191
1192	return __ftrace_lookup_ip(hash, ip);
1193}
1194
1195static void __add_hash_entry(struct ftrace_hash *hash,
1196			     struct ftrace_func_entry *entry)
1197{
1198	struct hlist_head *hhd;
1199	unsigned long key;
1200
1201	key = ftrace_hash_key(hash, entry->ip);
1202	hhd = &hash->buckets[key];
1203	hlist_add_head(&entry->hlist, hhd);
1204	hash->count++;
1205}
1206
1207static struct ftrace_func_entry *
1208add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1209{
1210	struct ftrace_func_entry *entry;
1211
1212	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1213	if (!entry)
1214		return NULL;
1215
1216	entry->ip = ip;
1217	__add_hash_entry(hash, entry);
1218
1219	return entry;
1220}
1221
1222static void
1223free_hash_entry(struct ftrace_hash *hash,
1224		  struct ftrace_func_entry *entry)
1225{
1226	hlist_del(&entry->hlist);
1227	kfree(entry);
1228	hash->count--;
1229}
1230
1231static void
1232remove_hash_entry(struct ftrace_hash *hash,
1233		  struct ftrace_func_entry *entry)
1234{
1235	hlist_del_rcu(&entry->hlist);
1236	hash->count--;
1237}
1238
1239static void ftrace_hash_clear(struct ftrace_hash *hash)
1240{
1241	struct hlist_head *hhd;
1242	struct hlist_node *tn;
1243	struct ftrace_func_entry *entry;
1244	int size = 1 << hash->size_bits;
1245	int i;
1246
1247	if (!hash->count)
1248		return;
1249
1250	for (i = 0; i < size; i++) {
1251		hhd = &hash->buckets[i];
1252		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1253			free_hash_entry(hash, entry);
1254	}
1255	FTRACE_WARN_ON(hash->count);
1256}
1257
1258static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
1259{
1260	list_del(&ftrace_mod->list);
1261	kfree(ftrace_mod->module);
1262	kfree(ftrace_mod->func);
1263	kfree(ftrace_mod);
1264}
1265
1266static void clear_ftrace_mod_list(struct list_head *head)
1267{
1268	struct ftrace_mod_load *p, *n;
1269
1270	/* stack tracer isn't supported yet */
1271	if (!head)
1272		return;
1273
1274	mutex_lock(&ftrace_lock);
1275	list_for_each_entry_safe(p, n, head, list)
1276		free_ftrace_mod(p);
1277	mutex_unlock(&ftrace_lock);
1278}
1279
1280static void free_ftrace_hash(struct ftrace_hash *hash)
1281{
1282	if (!hash || hash == EMPTY_HASH)
1283		return;
1284	ftrace_hash_clear(hash);
1285	kfree(hash->buckets);
1286	kfree(hash);
1287}
1288
1289static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1290{
1291	struct ftrace_hash *hash;
1292
1293	hash = container_of(rcu, struct ftrace_hash, rcu);
1294	free_ftrace_hash(hash);
1295}
1296
1297static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1298{
1299	if (!hash || hash == EMPTY_HASH)
1300		return;
1301	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
1302}
1303
1304/**
1305 * ftrace_free_filter - remove all filters for an ftrace_ops
1306 * @ops: the ops to remove the filters from
1307 */
1308void ftrace_free_filter(struct ftrace_ops *ops)
1309{
1310	ftrace_ops_init(ops);
1311	free_ftrace_hash(ops->func_hash->filter_hash);
1312	free_ftrace_hash(ops->func_hash->notrace_hash);
1313}
1314EXPORT_SYMBOL_GPL(ftrace_free_filter);
1315
1316static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1317{
1318	struct ftrace_hash *hash;
1319	int size;
1320
1321	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1322	if (!hash)
1323		return NULL;
1324
1325	size = 1 << size_bits;
1326	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1327
1328	if (!hash->buckets) {
1329		kfree(hash);
1330		return NULL;
1331	}
1332
1333	hash->size_bits = size_bits;
1334
1335	return hash;
1336}
1337
1338/* Used to save filters on functions for modules not loaded yet */
1339static int ftrace_add_mod(struct trace_array *tr,
1340			  const char *func, const char *module,
1341			  int enable)
1342{
1343	struct ftrace_mod_load *ftrace_mod;
1344	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
1345
1346	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
1347	if (!ftrace_mod)
1348		return -ENOMEM;
1349
1350	INIT_LIST_HEAD(&ftrace_mod->list);
1351	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
1352	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
1353	ftrace_mod->enable = enable;
1354
1355	if (!ftrace_mod->func || !ftrace_mod->module)
1356		goto out_free;
1357
1358	list_add(&ftrace_mod->list, mod_head);
1359
1360	return 0;
1361
1362 out_free:
1363	free_ftrace_mod(ftrace_mod);
1364
1365	return -ENOMEM;
1366}
1367
1368static struct ftrace_hash *
1369alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1370{
1371	struct ftrace_func_entry *entry;
1372	struct ftrace_hash *new_hash;
1373	int size;
1374	int i;
1375
1376	new_hash = alloc_ftrace_hash(size_bits);
1377	if (!new_hash)
1378		return NULL;
1379
1380	if (hash)
1381		new_hash->flags = hash->flags;
1382
1383	/* Empty hash? */
1384	if (ftrace_hash_empty(hash))
1385		return new_hash;
1386
1387	size = 1 << hash->size_bits;
1388	for (i = 0; i < size; i++) {
1389		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1390			if (add_hash_entry(new_hash, entry->ip) == NULL)
1391				goto free_hash;
1392		}
1393	}
1394
1395	FTRACE_WARN_ON(new_hash->count != hash->count);
1396
1397	return new_hash;
1398
1399 free_hash:
1400	free_ftrace_hash(new_hash);
1401	return NULL;
1402}
1403
1404static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops);
1405static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops);
1406
1407static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1408				       struct ftrace_hash *new_hash);
1409
1410/*
1411 * Allocate a new hash and remove entries from @src and move them to the new hash.
1412 * On success, the @src hash will be empty and should be freed.
1413 */
1414static struct ftrace_hash *__move_hash(struct ftrace_hash *src, int size)
1415{
1416	struct ftrace_func_entry *entry;
1417	struct ftrace_hash *new_hash;
1418	struct hlist_head *hhd;
1419	struct hlist_node *tn;
1420	int bits = 0;
1421	int i;
1422
1423	/*
1424	 * Use around half the size (max bit of it), but
1425	 * a minimum of 2 is fine (as size of 0 or 1 both give 1 for bits).
1426	 */
1427	bits = fls(size / 2);
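	/* e.g. (illustrative): size = 1000 -> fls(500) = 9 -> 512 buckets */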
1428
1429	/* Don't allocate too much */
1430	if (bits > FTRACE_HASH_MAX_BITS)
1431		bits = FTRACE_HASH_MAX_BITS;
1432
1433	new_hash = alloc_ftrace_hash(bits);
1434	if (!new_hash)
1435		return NULL;
1436
1437	new_hash->flags = src->flags;
1438
1439	size = 1 << src->size_bits;
1440	for (i = 0; i < size; i++) {
1441		hhd = &src->buckets[i];
1442		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1443			remove_hash_entry(src, entry);
1444			__add_hash_entry(new_hash, entry);
1445		}
1446	}
1447	return new_hash;
1448}
1449
1450/* Move the @src entries to a newly allocated hash */
1451static struct ftrace_hash *
1452__ftrace_hash_move(struct ftrace_hash *src)
1453{
1454	int size = src->count;
1455
1456	/*
1457	 * If the new source is empty, just return the empty_hash.
1458	 */
1459	if (ftrace_hash_empty(src))
1460		return EMPTY_HASH;
1461
1462	return __move_hash(src, size);
1463}
1464
1465/**
1466 * ftrace_hash_move - move a new hash to a filter and do updates
1467 * @ops: The ops with the hash that @dst points to
1468 * @enable: True if for the filter hash, false for the notrace hash
1469 * @dst: Points to the @ops hash that should be updated
1470 * @src: The hash to update @dst with
1471 *
 1472 * This is called when an ftrace_ops hash is being updated and the
 1473 * kernel needs to reflect this. Note, this only updates the kernel
1474 * function callbacks if the @ops is enabled (not to be confused with
1475 * @enable above). If the @ops is enabled, its hash determines what
1476 * callbacks get called. This function gets called when the @ops hash
1477 * is updated and it requires new callbacks.
1478 *
 1479 * On success the elements of @src are moved to @dst, and @dst is updated
1480 * properly, as well as the functions determined by the @ops hashes
1481 * are now calling the @ops callback function.
1482 *
1483 * Regardless of return type, @src should be freed with free_ftrace_hash().
1484 */
1485static int
1486ftrace_hash_move(struct ftrace_ops *ops, int enable,
1487		 struct ftrace_hash **dst, struct ftrace_hash *src)
1488{
1489	struct ftrace_hash *new_hash;
1490	int ret;
1491
1492	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
1493	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1494		return -EINVAL;
1495
1496	new_hash = __ftrace_hash_move(src);
1497	if (!new_hash)
1498		return -ENOMEM;
1499
1500	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1501	if (enable) {
1502		/* IPMODIFY should be updated only when filter_hash updating */
1503		ret = ftrace_hash_ipmodify_update(ops, new_hash);
1504		if (ret < 0) {
1505			free_ftrace_hash(new_hash);
1506			return ret;
1507		}
1508	}
1509
1510	/*
1511	 * Remove the current set, update the hash and add
1512	 * them back.
1513	 */
1514	ftrace_hash_rec_disable_modify(ops);
1515
1516	rcu_assign_pointer(*dst, new_hash);
1517
1518	ftrace_hash_rec_enable_modify(ops);
1519
1520	return 0;
1521}
1522
1523static bool hash_contains_ip(unsigned long ip,
1524			     struct ftrace_ops_hash *hash)
1525{
1526	/*
1527	 * The function record is a match if it exists in the filter
1528	 * hash and not in the notrace hash. Note, an empty hash is
1529	 * considered a match for the filter hash, but an empty
1530	 * notrace hash is considered not in the notrace hash.
1531	 */
1532	return (ftrace_hash_empty(hash->filter_hash) ||
1533		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
1534		(ftrace_hash_empty(hash->notrace_hash) ||
1535		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
1536}
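/*
 * Truth table for the rule above (E = empty hash; Y/N = ip found or not
 * found in a non-empty hash):
 *
 *	filter_hash	notrace_hash	hash_contains_ip()
 *	E		E		true
 *	Y		E		true
 *	E		Y		false
 *	N		E		false
 */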
1537
1538/*
1539 * Test the hashes for this ops to see if we want to call
1540 * the ops->func or not.
1541 *
1542 * It's a match if the ip is in the ops->filter_hash or
1543 * the filter_hash does not exist or is empty,
1544 *  AND
1545 * the ip is not in the ops->notrace_hash.
1546 *
1547 * This needs to be called with preemption disabled as
1548 * the hashes are freed with call_rcu().
1549 */
1550int
1551ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1552{
1553	struct ftrace_ops_hash hash;
1554	int ret;
1555
1556#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1557	/*
1558	 * There's a small race when adding ops that the ftrace handler
1559	 * that wants regs, may be called without them. We can not
1560	 * allow that handler to be called if regs is NULL.
1561	 */
1562	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1563		return 0;
1564#endif
1565
1566	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
1567	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
1568
1569	if (hash_contains_ip(ip, &hash))
1570		ret = 1;
1571	else
1572		ret = 0;
1573
1574	return ret;
1575}
1576
1577/*
1578 * This is a double for. Do not use 'break' to break out of the loop,
1579 * you must use a goto.
1580 */
1581#define do_for_each_ftrace_rec(pg, rec)					\
1582	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
1583		int _____i;						\
1584		for (_____i = 0; _____i < pg->index; _____i++) {	\
1585			rec = &pg->records[_____i];
1586
1587#define while_for_each_ftrace_rec()		\
1588		}				\
1589	}
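/*
 * Usage sketch (illustrative; target_ip is a hypothetical variable):
 * exiting the double loop early requires a goto:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	return rec;
 */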
1590
1591
1592static int ftrace_cmp_recs(const void *a, const void *b)
1593{
1594	const struct dyn_ftrace *key = a;
1595	const struct dyn_ftrace *rec = b;
1596
1597	if (key->flags < rec->ip)
1598		return -1;
1599	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1600		return 1;
1601	return 0;
1602}
1603
1604static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
1605{
1606	struct ftrace_page *pg;
1607	struct dyn_ftrace *rec = NULL;
1608	struct dyn_ftrace key;
1609
1610	key.ip = start;
1611	key.flags = end;	/* overload flags, as it is unsigned long */
1612
1613	for (pg = ftrace_pages_start; pg; pg = pg->next) {
1614		if (pg->index == 0 ||
1615		    end < pg->records[0].ip ||
1616		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1617			continue;
1618		rec = bsearch(&key, pg->records, pg->index,
1619			      sizeof(struct dyn_ftrace),
1620			      ftrace_cmp_recs);
1621		if (rec)
1622			break;
1623	}
1624	return rec;
1625}
1626
1627/**
1628 * ftrace_location_range - return the first address of a traced location
1629 *	if it touches the given ip range
1630 * @start: start of range to search.
1631 * @end: end of range to search (inclusive). @end points to the last byte
1632 *	to check.
1633 *
 1634 * Returns: rec->ip if the related ftrace location is at least partly within
1635 * the given address range. That is, the first address of the instruction
1636 * that is either a NOP or call to the function tracer. It checks the ftrace
1637 * internal tables to determine if the address belongs or not.
1638 */
1639unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1640{
1641	struct dyn_ftrace *rec;
1642	unsigned long ip = 0;
1643
1644	rcu_read_lock();
1645	rec = lookup_rec(start, end);
1646	if (rec)
1647		ip = rec->ip;
1648	rcu_read_unlock();
1649
1650	return ip;
1651}
1652
1653/**
1654 * ftrace_location - return the ftrace location
1655 * @ip: the instruction pointer to check
1656 *
1657 * Returns:
1658 * * If @ip matches the ftrace location, return @ip.
1659 * * If @ip matches sym+0, return sym's ftrace location.
1660 * * Otherwise, return 0.
1661 */
1662unsigned long ftrace_location(unsigned long ip)
1663{
1664	unsigned long loc;
1665	unsigned long offset;
1666	unsigned long size;
1667
1668	loc = ftrace_location_range(ip, ip);
1669	if (!loc) {
1670		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
1671			goto out;
1672
1673		/* map sym+0 to __fentry__ */
1674		if (!offset)
1675			loc = ftrace_location_range(ip, ip + size - 1);
1676	}
1677
1678out:
1679	return loc;
1680}
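/*
 * Usage sketch (illustrative, in-kernel only): given a symbol address,
 * e.g. addr = kallsyms_lookup_name("schedule"), ftrace_location(addr)
 * returns the address of the mcount/fentry call site that ftrace
 * patches, or 0 if the function is not traceable.
 */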
1681
1682/**
1683 * ftrace_text_reserved - return true if range contains an ftrace location
1684 * @start: start of range to search
1685 * @end: end of range to search (inclusive). @end points to the last byte to check.
1686 *
 1687 * Returns: 1 if @start and @end contain an ftrace location.
1688 * That is, the instruction that is either a NOP or call to
1689 * the function tracer. It checks the ftrace internal tables to
1690 * determine if the address belongs or not.
1691 */
1692int ftrace_text_reserved(const void *start, const void *end)
1693{
1694	unsigned long ret;
1695
1696	ret = ftrace_location_range((unsigned long)start,
1697				    (unsigned long)end);
1698
1699	return (int)!!ret;
1700}
1701
1702/* Test if ops registered to this rec needs regs */
1703static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1704{
1705	struct ftrace_ops *ops;
1706	bool keep_regs = false;
1707
1708	for (ops = ftrace_ops_list;
1709	     ops != &ftrace_list_end; ops = ops->next) {
1710		/* pass rec in as regs to have non-NULL val */
1711		if (ftrace_ops_test(ops, rec->ip, rec)) {
1712			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1713				keep_regs = true;
1714				break;
1715			}
1716		}
1717	}
1718
1719	return  keep_regs;
1720}
1721
1722static struct ftrace_ops *
1723ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1724static struct ftrace_ops *
1725ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
1726static struct ftrace_ops *
1727ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1728
1729static bool skip_record(struct dyn_ftrace *rec)
1730{
1731	/*
1732	 * At boot up, weak functions are set to disable. Function tracing
1733	 * can be enabled before they are, and they still need to be disabled now.
1734	 * If the record is disabled, still continue if it is marked as already
1735	 * enabled (this is needed to keep the accounting working).
1736	 */
1737	return rec->flags & FTRACE_FL_DISABLED &&
1738		!(rec->flags & FTRACE_FL_ENABLED);
1739}
1740
1741/*
 1742 * This is the main engine for the ftrace updates to the dyn_ftrace records.
1743 *
1744 * It will iterate through all the available ftrace functions
1745 * (the ones that ftrace can have callbacks to) and set the flags
1746 * in the associated dyn_ftrace records.
1747 *
1748 * @inc: If true, the functions associated to @ops are added to
1749 *       the dyn_ftrace records, otherwise they are removed.
1750 */
1751static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1752				     bool inc)
1753{
1754	struct ftrace_hash *hash;
1755	struct ftrace_hash *notrace_hash;
1756	struct ftrace_page *pg;
1757	struct dyn_ftrace *rec;
1758	bool update = false;
1759	int count = 0;
1760	int all = false;
1761
1762	/* Only update if the ops has been registered */
1763	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1764		return false;
1765
1766	/*
1767	 *   If the count is zero, we update all records.
1768	 *   Otherwise we just update the items in the hash.
1769	 */
1770	hash = ops->func_hash->filter_hash;
1771	notrace_hash = ops->func_hash->notrace_hash;
1772	if (ftrace_hash_empty(hash))
1773		all = true;
1774
1775	do_for_each_ftrace_rec(pg, rec) {
1776		int in_notrace_hash = 0;
1777		int in_hash = 0;
1778		int match = 0;
1779
1780		if (skip_record(rec))
1781			continue;
1782
1783		if (all) {
1784			/*
1785			 * Only the filter_hash affects all records.
1786			 * Update if the record is not in the notrace hash.
1787			 */
1788			if (!notrace_hash || !ftrace_lookup_ip(notrace_hash, rec->ip))
1789				match = 1;
1790		} else {
1791			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1792			in_notrace_hash = !!ftrace_lookup_ip(notrace_hash, rec->ip);
1793
1794			/*
1795			 * We want to match all functions that are in the hash but
1796			 * not in the other hash.
1797			 */
1798			if (in_hash && !in_notrace_hash)
1799				match = 1;
1800		}
1801		if (!match)
1802			continue;
1803
1804		if (inc) {
1805			rec->flags++;
1806			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1807				return false;
1808
1809			if (ops->flags & FTRACE_OPS_FL_DIRECT)
1810				rec->flags |= FTRACE_FL_DIRECT;
1811
1812			/*
1813			 * If there's only a single callback registered to a
1814			 * function, and the ops has a trampoline registered
1815			 * for it, then we can call it directly.
1816			 */
1817			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1818				rec->flags |= FTRACE_FL_TRAMP;
1819			else
1820				/*
1821				 * If we are adding another function callback
1822				 * to this function, and the previous had a
1823				 * custom trampoline in use, then we need to go
1824				 * back to the default trampoline.
1825				 */
1826				rec->flags &= ~FTRACE_FL_TRAMP;
1827
1828			/*
1829			 * If any ops wants regs saved for this function
1830			 * then all ops will get saved regs.
1831			 */
1832			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1833				rec->flags |= FTRACE_FL_REGS;
1834		} else {
1835			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1836				return false;
1837			rec->flags--;
1838
1839			/*
1840			 * Only the internal direct_ops should have the
1841			 * DIRECT flag set. Thus, if it is removing a
1842			 * function, then that function should no longer
1843			 * be direct.
1844			 */
1845			if (ops->flags & FTRACE_OPS_FL_DIRECT)
1846				rec->flags &= ~FTRACE_FL_DIRECT;
1847
1848			/*
1849			 * If the rec had REGS enabled and the ops that is
1850			 * being removed had REGS set, then see if there is
1851			 * still any ops for this record that wants regs.
1852			 * If not, we can stop recording them.
1853			 */
1854			if (ftrace_rec_count(rec) > 0 &&
1855			    rec->flags & FTRACE_FL_REGS &&
1856			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1857				if (!test_rec_ops_needs_regs(rec))
1858					rec->flags &= ~FTRACE_FL_REGS;
1859			}
1860
1861			/*
1862			 * The TRAMP needs to be set only if rec count
1863			 * is decremented to one, and the ops that is
1864			 * left has a trampoline. As TRAMP can only be
1865			 * enabled if there is only a single ops attached
1866			 * to it.
1867			 */
1868			if (ftrace_rec_count(rec) == 1 &&
1869			    ftrace_find_tramp_ops_any_other(rec, ops))
1870				rec->flags |= FTRACE_FL_TRAMP;
1871			else
1872				rec->flags &= ~FTRACE_FL_TRAMP;
1873
1874			/*
1875			 * flags will be cleared in ftrace_check_record()
1876			 * if rec count is zero.
1877			 */
1878		}
1879
1880		/*
1881		 * If the rec has a single associated ops, and ops->func can be
1882		 * called directly, allow the call site to call via the ops.
1883		 */
1884		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) &&
1885		    ftrace_rec_count(rec) == 1 &&
1886		    ftrace_ops_get_func(ops) == ops->func)
1887			rec->flags |= FTRACE_FL_CALL_OPS;
1888		else
1889			rec->flags &= ~FTRACE_FL_CALL_OPS;
1890
1891		count++;
1892
1893		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1894		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
1895
1896		/* Shortcut, if we handled all records, we are done. */
1897		if (!all && count == hash->count)
1898			return update;
1899	} while_for_each_ftrace_rec();
1900
1901	return update;
1902}
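/*
 * Note on the rec->flags arithmetic above: the low bits of flags hold a
 * reference count of attached ftrace_ops (bounded by FTRACE_REF_MAX),
 * which is why rec->flags++ / rec->flags-- pair with ftrace_rec_count();
 * the FTRACE_FL_* status bits occupy the bits above the count.
 */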
1903
1904/*
1905 * This is called when an ops is removed from tracing. It will decrement
1906 * the counters of the dyn_ftrace records for all the functions that
 1907 * the @ops is attached to.
1908 */
1909static bool ftrace_hash_rec_disable(struct ftrace_ops *ops)
1910{
1911	return __ftrace_hash_rec_update(ops, false);
1912}
1913
1914/*
1915 * This is called when an ops is added to tracing. It will increment
1916 * the counters of the dyn_ftrace records for all the functions that
 1917 * the @ops is attached to.
1918 */
1919static bool ftrace_hash_rec_enable(struct ftrace_ops *ops)
1920{
1921	return __ftrace_hash_rec_update(ops, true);
1922}
1923
1924/*
1925 * This function will update what functions @ops traces when its filter
1926 * changes.
1927 *
1928 * The @inc states if the @ops callbacks are going to be added or removed.
1929 * When one of the @ops hashes are updated to a "new_hash" the dyn_ftrace
 1930 * records are updated via:
1931 *
1932 * ftrace_hash_rec_disable_modify(ops);
1933 * ops->hash = new_hash
1934 * ftrace_hash_rec_enable_modify(ops);
1935 *
1936 * Where the @ops is removed from all the records it is tracing using
1937 * its old hash. The @ops hash is updated to the new hash, and then
1938 * the @ops is added back to the records so that it is tracing all
1939 * the new functions.
1940 */
1941static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, bool inc)
1942{
1943	struct ftrace_ops *op;
1944
1945	__ftrace_hash_rec_update(ops, inc);
1946
1947	if (ops->func_hash != &global_ops.local_hash)
1948		return;
1949
1950	/*
1951	 * If the ops shares the global_ops hash, then we need to update
1952	 * all ops that are enabled and use this hash.
1953	 */
1954	do_for_each_ftrace_op(op, ftrace_ops_list) {
1955		/* Already done */
1956		if (op == ops)
1957			continue;
1958		if (op->func_hash == &global_ops.local_hash)
1959			__ftrace_hash_rec_update(op, inc);
1960	} while_for_each_ftrace_op(op);
1961}
1962
1963static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops)
1964{
1965	ftrace_hash_rec_update_modify(ops, false);
1966}
1967
1968static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops)
1969{
1970	ftrace_hash_rec_update_modify(ops, true);
1971}
1972
1973/*
1974 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1975 * or no-needed to update, -EBUSY if it detects a conflict of the flag
1976 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1977 * Note that old_hash and new_hash has below meanings
1978 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1979 *  - If the hash is EMPTY_HASH, it hits nothing
1980 *  - Anything else hits the recs which match the hash entries.
1981 *
1982 * DIRECT ops does not have IPMODIFY flag, but we still need to check it
1983 * against functions with FTRACE_FL_IPMODIFY. If there is any overlap, call
1984 * ops_func(SHARE_IPMODIFY_SELF) to make sure current ops can share with
1985 * IPMODIFY. If ops_func(SHARE_IPMODIFY_SELF) returns non-zero, propagate
1986 * the return value to the caller and eventually to the owner of the DIRECT
1987 * ops.
1988 */
1989static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1990					 struct ftrace_hash *old_hash,
1991					 struct ftrace_hash *new_hash)
1992{
1993	struct ftrace_page *pg;
1994	struct dyn_ftrace *rec, *end = NULL;
1995	int in_old, in_new;
1996	bool is_ipmodify, is_direct;
1997
1998	/* Only update if the ops has been registered */
1999	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2000		return 0;
2001
2002	is_ipmodify = ops->flags & FTRACE_OPS_FL_IPMODIFY;
2003	is_direct = ops->flags & FTRACE_OPS_FL_DIRECT;
2004
2005	/* neither IPMODIFY nor DIRECT, skip */
2006	if (!is_ipmodify && !is_direct)
2007		return 0;
2008
2009	if (WARN_ON_ONCE(is_ipmodify && is_direct))
2010		return 0;
2011
2012	/*
2013	 * Since IPMODIFY and DIRECT are very address-sensitive
2014	 * actions, we do not allow an ftrace_ops to set all functions to a
2015	 * new hash.
2016	 */
2017	if (!new_hash || !old_hash)
2018		return -EINVAL;
2019
2020	/* Update rec->flags */
2021	do_for_each_ftrace_rec(pg, rec) {
2022
2023		if (rec->flags & FTRACE_FL_DISABLED)
2024			continue;
2025
2026		/* We need to update only the differences of the filter_hash */
2027		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
2028		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
2029		if (in_old == in_new)
2030			continue;
2031
2032		if (in_new) {
2033			if (rec->flags & FTRACE_FL_IPMODIFY) {
2034				int ret;
2035
2036				/* Cannot have two ipmodify ops on the same rec */
2037				if (is_ipmodify)
2038					goto rollback;
2039
2040				FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);
2041
2042				/*
2043				 * Another ops with IPMODIFY is already
2044				 * attached. We are now attaching a direct
2045				 * ops. Run SHARE_IPMODIFY_SELF, to check
2046				 * whether sharing is supported.
2047				 */
2048				if (!ops->ops_func)
2049					return -EBUSY;
2050				ret = ops->ops_func(ops, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF);
2051				if (ret)
2052					return ret;
2053			} else if (is_ipmodify) {
2054				rec->flags |= FTRACE_FL_IPMODIFY;
2055			}
2056		} else if (is_ipmodify) {
2057			rec->flags &= ~FTRACE_FL_IPMODIFY;
2058		}
2059	} while_for_each_ftrace_rec();
2060
2061	return 0;
2062
2063rollback:
2064	end = rec;
2065
2066	/* Roll back what we did above */
2067	do_for_each_ftrace_rec(pg, rec) {
2068
2069		if (rec->flags & FTRACE_FL_DISABLED)
2070			continue;
2071
2072		if (rec == end)
2073			goto err_out;
2074
2075		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
2076		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
2077		if (in_old == in_new)
2078			continue;
2079
2080		if (in_new)
2081			rec->flags &= ~FTRACE_FL_IPMODIFY;
2082		else
2083			rec->flags |= FTRACE_FL_IPMODIFY;
2084	} while_for_each_ftrace_rec();
2085
2086err_out:
2087	return -EBUSY;
2088}
2089
2090static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
2091{
2092	struct ftrace_hash *hash = ops->func_hash->filter_hash;
2093
2094	if (ftrace_hash_empty(hash))
2095		hash = NULL;
2096
2097	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
2098}
2099
2100/* Disabling always succeeds */
2101static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
2102{
2103	struct ftrace_hash *hash = ops->func_hash->filter_hash;
2104
2105	if (ftrace_hash_empty(hash))
2106		hash = NULL;
2107
2108	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
2109}
2110
2111static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
2112				       struct ftrace_hash *new_hash)
2113{
2114	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
2115
2116	if (ftrace_hash_empty(old_hash))
2117		old_hash = NULL;
2118
2119	if (ftrace_hash_empty(new_hash))
2120		new_hash = NULL;
2121
2122	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
2123}
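
/*
 * A minimal sketch of the NULL vs EMPTY_HASH convention used by the
 * three wrappers above (illustrative only):
 *
 *	enable:  __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, filter)
 *		 nothing was hit before; the filter is hit now
 *	disable: __ftrace_hash_update_ipmodify(ops, filter, EMPTY_HASH)
 *		 the filter was hit before; nothing is hit now
 *	update:  __ftrace_hash_update_ipmodify(ops, old, new)
 *		 only the differences between old and new are touched
 *
 * In all three, an empty filter is first converted to NULL, which means
 * "all recs" and is rejected with -EINVAL for IPMODIFY/DIRECT ops (the
 * return value is ignored on the disable path, which cannot fail).
 */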
2124
2125static void print_ip_ins(const char *fmt, const unsigned char *p)
2126{
2127	char ins[MCOUNT_INSN_SIZE];
2128
2129	if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
2130		printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
2131		return;
2132	}
2133
2134	printk(KERN_CONT "%s", fmt);
2135	pr_cont("%*phC", MCOUNT_INSN_SIZE, ins);
2136}
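
/*
 * For reference, "%*phC" prints a hex dump with colon separators, so on
 * an arch with a 5-byte call instruction the output line would look
 * like (bytes are hypothetical):
 *
 *	 actual:   e8:bc:52:13:01
 */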
2137
2138enum ftrace_bug_type ftrace_bug_type;
2139const void *ftrace_expected;
2140
2141static void print_bug_type(void)
2142{
2143	switch (ftrace_bug_type) {
2144	case FTRACE_BUG_UNKNOWN:
2145		break;
2146	case FTRACE_BUG_INIT:
2147		pr_info("Initializing ftrace call sites\n");
2148		break;
2149	case FTRACE_BUG_NOP:
2150		pr_info("Setting ftrace call site to NOP\n");
2151		break;
2152	case FTRACE_BUG_CALL:
2153		pr_info("Setting ftrace call site to call ftrace function\n");
2154		break;
2155	case FTRACE_BUG_UPDATE:
2156		pr_info("Updating ftrace call site to call a different ftrace function\n");
2157		break;
2158	}
2159}
2160
2161/**
2162 * ftrace_bug - report and shutdown function tracer
2163 * @failed: The failed type (EFAULT, EINVAL, EPERM)
2164 * @rec: The record that failed
2165 *
2166 * The arch code that enables or disables the function tracing
2167 * can call ftrace_bug() when it has detected a problem in
2168 * modifying the code. @failed should be one of:
2169 * EFAULT - if the problem happens on reading the @ip address
2170 * EINVAL - if what is read at @ip is not what was expected
2171 * EPERM - if the problem happens on writing to the @ip address
2172 */
2173void ftrace_bug(int failed, struct dyn_ftrace *rec)
2174{
2175	unsigned long ip = rec ? rec->ip : 0;
2176
2177	pr_info("------------[ ftrace bug ]------------\n");
2178
2179	switch (failed) {
2180	case -EFAULT:
2181		pr_info("ftrace faulted on modifying ");
2182		print_ip_sym(KERN_INFO, ip);
2183		break;
2184	case -EINVAL:
2185		pr_info("ftrace failed to modify ");
2186		print_ip_sym(KERN_INFO, ip);
2187		print_ip_ins(" actual:   ", (unsigned char *)ip);
2188		pr_cont("\n");
2189		if (ftrace_expected) {
2190			print_ip_ins(" expected: ", ftrace_expected);
2191			pr_cont("\n");
2192		}
2193		break;
2194	case -EPERM:
2195		pr_info("ftrace faulted on writing ");
2196		print_ip_sym(KERN_INFO, ip);
2197		break;
2198	default:
2199		pr_info("ftrace faulted on unknown error ");
2200		print_ip_sym(KERN_INFO, ip);
2201	}
2202	print_bug_type();
2203	if (rec) {
2204		struct ftrace_ops *ops = NULL;
2205
2206		pr_info("ftrace record flags: %lx\n", rec->flags);
2207		pr_cont(" (%ld)%s%s", ftrace_rec_count(rec),
2208			rec->flags & FTRACE_FL_REGS ? " R" : "  ",
2209			rec->flags & FTRACE_FL_CALL_OPS ? " O" : "  ");
2210		if (rec->flags & FTRACE_FL_TRAMP_EN) {
2211			ops = ftrace_find_tramp_ops_any(rec);
2212			if (ops) {
2213				do {
2214					pr_cont("\ttramp: %pS (%pS)",
2215						(void *)ops->trampoline,
2216						(void *)ops->func);
2217					ops = ftrace_find_tramp_ops_next(rec, ops);
2218				} while (ops);
2219			} else
2220				pr_cont("\ttramp: ERROR!");
2221
2222		}
2223		ip = ftrace_get_addr_curr(rec);
2224		pr_cont("\n expected tramp: %lx\n", ip);
2225	}
2226
2227	FTRACE_WARN_ON_ONCE(1);
2228}
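
/*
 * Sketch of the expected arch-side error handling (the patching helper
 * below is hypothetical; each arch uses its own text-poking primitive):
 *
 *	ret = arch_patch_call_site(rec->ip, old_insn, new_insn);
 *	if (ret) {
 *		ftrace_bug(ret, rec);	<- prints the report, kills ftrace
 *		return ret;
 *	}
 */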
2229
2230static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
2231{
2232	unsigned long flag = 0UL;
2233
2234	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2235
2236	if (skip_record(rec))
2237		return FTRACE_UPDATE_IGNORE;
2238
2239	/*
2240	 * If we are updating calls:
2241	 *
2242	 *   If the record has a ref count, then we need to enable it
2243	 *   because someone is using it.
2244	 *
2245	 *   Otherwise we make sure it's disabled.
2246	 *
2247	 * If we are disabling calls, then disable all records that
2248	 * are enabled.
2249	 */
2250	if (enable && ftrace_rec_count(rec))
2251		flag = FTRACE_FL_ENABLED;
2252
2253	/*
2254	 * If enabling and the REGS flag does not match the REGS_EN, or
2255	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2256	 * this record. Set flags to fail the compare against ENABLED.
2257	 * Same for direct calls.
2258	 */
2259	if (flag) {
2260		if (!(rec->flags & FTRACE_FL_REGS) !=
2261		    !(rec->flags & FTRACE_FL_REGS_EN))
2262			flag |= FTRACE_FL_REGS;
2263
2264		if (!(rec->flags & FTRACE_FL_TRAMP) !=
2265		    !(rec->flags & FTRACE_FL_TRAMP_EN))
2266			flag |= FTRACE_FL_TRAMP;
2267
2268		/*
2269		 * Direct calls are special, as count matters.
2270		 * We must test the record for direct when the
2271		 * DIRECT and DIRECT_EN flags do not match, but only
2272		 * if the count is 1. That's because, if the
2273		 * count is something other than one, we do not
2274		 * want the direct call enabled (it will be done via the
2275		 * direct helper). But if DIRECT_EN is set, and
2276		 * the count is not one, we need to clear it.
2278		 */
2279		if (ftrace_rec_count(rec) == 1) {
2280			if (!(rec->flags & FTRACE_FL_DIRECT) !=
2281			    !(rec->flags & FTRACE_FL_DIRECT_EN))
2282				flag |= FTRACE_FL_DIRECT;
2283		} else if (rec->flags & FTRACE_FL_DIRECT_EN) {
2284			flag |= FTRACE_FL_DIRECT;
2285		}
2286
2287		/*
2288		 * Ops calls are special, as count matters.
2289		 * As with direct calls, they must only be enabled when count
2290		 * is one, otherwise they'll be handled via the list ops.
2291		 */
2292		if (ftrace_rec_count(rec) == 1) {
2293			if (!(rec->flags & FTRACE_FL_CALL_OPS) !=
2294			    !(rec->flags & FTRACE_FL_CALL_OPS_EN))
2295				flag |= FTRACE_FL_CALL_OPS;
2296		} else if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
2297			flag |= FTRACE_FL_CALL_OPS;
2298		}
2299	}
2300
2301	/* If the state of this record hasn't changed, then do nothing */
2302	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2303		return FTRACE_UPDATE_IGNORE;
2304
2305	if (flag) {
2306		/* Save off if rec is being enabled (for return value) */
2307		flag ^= rec->flags & FTRACE_FL_ENABLED;
2308
2309		if (update) {
2310			rec->flags |= FTRACE_FL_ENABLED | FTRACE_FL_TOUCHED;
2311			if (flag & FTRACE_FL_REGS) {
2312				if (rec->flags & FTRACE_FL_REGS)
2313					rec->flags |= FTRACE_FL_REGS_EN;
2314				else
2315					rec->flags &= ~FTRACE_FL_REGS_EN;
2316			}
2317			if (flag & FTRACE_FL_TRAMP) {
2318				if (rec->flags & FTRACE_FL_TRAMP)
2319					rec->flags |= FTRACE_FL_TRAMP_EN;
2320				else
2321					rec->flags &= ~FTRACE_FL_TRAMP_EN;
2322			}
2323
2324			/* Keep track of anything that modifies the function */
2325			if (rec->flags & (FTRACE_FL_DIRECT | FTRACE_FL_IPMODIFY))
2326				rec->flags |= FTRACE_FL_MODIFIED;
2327
2328			if (flag & FTRACE_FL_DIRECT) {
2329				/*
2330				 * If there's only one user (direct_ops helper)
2331				 * then we can call the direct function
2332				 * directly (no ftrace trampoline).
2333				 */
2334				if (ftrace_rec_count(rec) == 1) {
2335					if (rec->flags & FTRACE_FL_DIRECT)
2336						rec->flags |= FTRACE_FL_DIRECT_EN;
2337					else
2338						rec->flags &= ~FTRACE_FL_DIRECT_EN;
2339				} else {
2340					/*
2341					 * Can only call directly if there's
2342					 * only one callback to the function.
2343					 */
2344					rec->flags &= ~FTRACE_FL_DIRECT_EN;
2345				}
2346			}
2347
2348			if (flag & FTRACE_FL_CALL_OPS) {
2349				if (ftrace_rec_count(rec) == 1) {
2350					if (rec->flags & FTRACE_FL_CALL_OPS)
2351						rec->flags |= FTRACE_FL_CALL_OPS_EN;
2352					else
2353						rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
2354				} else {
2355					/*
2356					 * Can only call directly if there's
2357					 * only one set of associated ops.
2358					 */
2359					rec->flags &= ~FTRACE_FL_CALL_OPS_EN;
2360				}
2361			}
2362		}
2363
2364		/*
2365		 * If this record is being updated from a nop, then
2366		 *   return UPDATE_MAKE_CALL.
2367		 * Otherwise,
2368		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2369		 *   from the save regs to a non-save regs function, or
2370		 *   vice versa, or from a trampoline call.
2371		 */
2372		if (flag & FTRACE_FL_ENABLED) {
2373			ftrace_bug_type = FTRACE_BUG_CALL;
2374			return FTRACE_UPDATE_MAKE_CALL;
2375		}
2376
2377		ftrace_bug_type = FTRACE_BUG_UPDATE;
2378		return FTRACE_UPDATE_MODIFY_CALL;
2379	}
2380
2381	if (update) {
2382		/* If there's no more users, clear all flags */
2383		if (!ftrace_rec_count(rec))
2384			rec->flags &= FTRACE_NOCLEAR_FLAGS;
2385		else
2386			/*
2387			 * Just disable the record, but keep the ops TRAMP
2388			 * and REGS states. The _EN flags must be disabled though.
2389			 */
2390			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2391					FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN |
2392					FTRACE_FL_CALL_OPS_EN);
2393	}
2394
2395	ftrace_bug_type = FTRACE_BUG_NOP;
2396	return FTRACE_UPDATE_MAKE_NOP;
2397}
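
/*
 * Worked example (illustrative): take a rec with a single ops attached
 * that requested pt_regs. Before the update:
 *
 *	count == 1, FTRACE_FL_REGS set, ENABLED and REGS_EN clear
 *
 * With enable=true, flag becomes ENABLED, and since REGS != REGS_EN,
 * REGS is added to flag. That no longer equals the rec's ENABLED state,
 * so the record is not ignored. With update=true the rec ends up with
 * ENABLED | TOUCHED | REGS | REGS_EN set, and because it was previously
 * a nop, FTRACE_UPDATE_MAKE_CALL is returned with ftrace_bug_type set
 * to FTRACE_BUG_CALL.
 */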
2398
2399/**
2400 * ftrace_update_record - set a record that now is tracing or not
2401 * @rec: the record to update
2402 * @enable: set to true if the record is tracing, false to force disable
2403 *
2404 * The records that represent all functions that can be traced need
2405 * to be updated when tracing has been enabled.
2406 */
2407int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
2408{
2409	return ftrace_check_record(rec, enable, true);
2410}
2411
2412/**
2413 * ftrace_test_record - check if the record has been enabled or not
2414 * @rec: the record to test
2415 * @enable: set to true to check if enabled, false if it is disabled
2416 *
2417 * The arch code may need to test if a record is already set to
2418 * tracing to determine how to modify the function code that it
2419 * represents.
2420 */
2421int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
2422{
2423	return ftrace_check_record(rec, enable, false);
2424}
2425
2426static struct ftrace_ops *
2427ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2428{
2429	struct ftrace_ops *op;
2430	unsigned long ip = rec->ip;
2431
2432	do_for_each_ftrace_op(op, ftrace_ops_list) {
2433
2434		if (!op->trampoline)
2435			continue;
2436
2437		if (hash_contains_ip(ip, op->func_hash))
2438			return op;
2439	} while_for_each_ftrace_op(op);
2440
2441	return NULL;
2442}
2443
2444static struct ftrace_ops *
2445ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
2446{
2447	struct ftrace_ops *op;
2448	unsigned long ip = rec->ip;
2449
2450	do_for_each_ftrace_op(op, ftrace_ops_list) {
2451
2452		if (op == op_exclude || !op->trampoline)
2453			continue;
2454
2455		if (hash_contains_ip(ip, op->func_hash))
2456			return op;
2457	} while_for_each_ftrace_op(op);
2458
2459	return NULL;
2460}
2461
2462static struct ftrace_ops *
2463ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2464			   struct ftrace_ops *op)
2465{
2466	unsigned long ip = rec->ip;
2467
2468	while_for_each_ftrace_op(op) {
2469
2470		if (!op->trampoline)
2471			continue;
2472
2473		if (hash_contains_ip(ip, op->func_hash))
2474			return op;
2475	}
2476
2477	return NULL;
2478}
2479
2480static struct ftrace_ops *
2481ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2482{
2483	struct ftrace_ops *op;
2484	unsigned long ip = rec->ip;
2485
2486	/*
2487	 * Need to check removed ops first.
2488	 * If an ops is being removed, and this rec has a tramp,
2489	 * and this rec is in that ops' old hash, then it would be the
2490	 * one with the tramp.
2491	 */
2492	if (removed_ops) {
2493		if (hash_contains_ip(ip, &removed_ops->old_hash))
2494			return removed_ops;
2495	}
2496
2497	/*
2498	 * Need to find the current trampoline for a rec.
2499	 * Now, a trampoline is only attached to a rec if there
2500	 * was a single 'ops' attached to it. But this can be called
2501	 * when we are adding another op to the rec or removing the
2502	 * current one. Thus, if the op is being added, we can
2503	 * ignore it because it hasn't attached itself to the rec
2504	 * yet.
2505	 *
2506	 * If an ops is being modified (hooking to different functions)
2507	 * then we don't care about the new functions that are being
2508	 * added, just the old ones (that are probably being removed).
2509	 *
2510	 * If we are adding an ops to a function that is already using
2511	 * a trampoline, that trampoline needs to be removed (trampolines
2512	 * are only for a single attached ops), so an ops that is not
2513	 * being modified also needs to be checked.
2514	 */
2515	do_for_each_ftrace_op(op, ftrace_ops_list) {
2516
2517		if (!op->trampoline)
2518			continue;
2519
2520		/*
2521		 * If the ops is being added, it hasn't gotten to
2522		 * the point to be removed from this tree yet.
2523		 */
2524		if (op->flags & FTRACE_OPS_FL_ADDING)
2525			continue;
2526
2527
2528		/*
2529		 * If the ops is being modified and is in the old
2530		 * hash, then it is probably being removed from this
2531		 * function.
2532		 */
2533		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2534		    hash_contains_ip(ip, &op->old_hash))
2535			return op;
2536		/*
2537		 * If the ops is not being added or modified, and it's
2538		 * in its normal filter hash, then this must be the one
2539		 * we want!
2540		 */
2541		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2542		    hash_contains_ip(ip, op->func_hash))
2543			return op;
2544
2545	} while_for_each_ftrace_op(op);
2546
2547	return NULL;
2548}
2549
2550static struct ftrace_ops *
2551ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2552{
2553	struct ftrace_ops *op;
2554	unsigned long ip = rec->ip;
2555
2556	do_for_each_ftrace_op(op, ftrace_ops_list) {
2558		if (hash_contains_ip(ip, op->func_hash))
2559			return op;
2560	} while_for_each_ftrace_op(op);
2561
2562	return NULL;
2563}
2564
2565struct ftrace_ops *
2566ftrace_find_unique_ops(struct dyn_ftrace *rec)
2567{
2568	struct ftrace_ops *op, *found = NULL;
2569	unsigned long ip = rec->ip;
2570
2571	do_for_each_ftrace_op(op, ftrace_ops_list) {
2572
2573		if (hash_contains_ip(ip, op->func_hash)) {
2574			if (found)
2575				return NULL;
2576			found = op;
2577		}
2578
2579	} while_for_each_ftrace_op(op);
2580
2581	return found;
2582}
2583
2584#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
2585/* Protected by rcu_tasks for reading, and direct_mutex for writing */
2586static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH;
2587static DEFINE_MUTEX(direct_mutex);
2588
2589/*
2590 * Search the direct_functions hash to see if the given instruction pointer
2591 * has a direct caller attached to it.
2592 */
2593unsigned long ftrace_find_rec_direct(unsigned long ip)
2594{
2595	struct ftrace_func_entry *entry;
2596
2597	entry = __ftrace_lookup_ip(direct_functions, ip);
2598	if (!entry)
2599		return 0;
2600
2601	return entry->direct;
2602}
2603
2604static void call_direct_funcs(unsigned long ip, unsigned long pip,
2605			      struct ftrace_ops *ops, struct ftrace_regs *fregs)
2606{
2607	unsigned long addr = READ_ONCE(ops->direct_call);
2608
2609	if (!addr)
2610		return;
2611
2612	arch_ftrace_set_direct_caller(fregs, addr);
2613}
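
/*
 * Minimal usage sketch (illustrative; mirrors the in-tree direct-call
 * samples, with "my_tramp" being an arch-specific asm trampoline):
 *
 *	static struct ftrace_ops direct;
 *
 *	ftrace_set_filter_ip(&direct, target_ip, 0, 0);
 *	register_ftrace_direct(&direct, (unsigned long)my_tramp);
 *
 * register_ftrace_direct() points the ops at call_direct_funcs() above,
 * which then arranges for my_tramp to be called via
 * arch_ftrace_set_direct_caller().
 */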
2614#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
2615
2616/**
2617 * ftrace_get_addr_new - Get the call address to set to
2618 * @rec:  The ftrace record descriptor
2619 *
2620 * If the record has the FTRACE_FL_REGS set, that means that it
2621 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2622 * is not set, then it wants to convert to the normal callback.
2623 *
2624 * Returns: the address of the trampoline to set to
2625 */
2626unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2627{
2628	struct ftrace_ops *ops;
2629	unsigned long addr;
2630
2631	if ((rec->flags & FTRACE_FL_DIRECT) &&
2632	    (ftrace_rec_count(rec) == 1)) {
2633		addr = ftrace_find_rec_direct(rec->ip);
2634		if (addr)
2635			return addr;
2636		WARN_ON_ONCE(1);
2637	}
2638
2639	/* Trampolines take precedence over regs */
2640	if (rec->flags & FTRACE_FL_TRAMP) {
2641		ops = ftrace_find_tramp_ops_new(rec);
2642		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2643			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2644				(void *)rec->ip, (void *)rec->ip, rec->flags);
2645			/* Ftrace is shutting down, return anything */
2646			return (unsigned long)FTRACE_ADDR;
2647		}
2648		return ops->trampoline;
2649	}
2650
2651	if (rec->flags & FTRACE_FL_REGS)
2652		return (unsigned long)FTRACE_REGS_ADDR;
2653	else
2654		return (unsigned long)FTRACE_ADDR;
2655}
2656
2657/**
2658 * ftrace_get_addr_curr - Get the call address that is already there
2659 * @rec:  The ftrace record descriptor
2660 *
2661 * The FTRACE_FL_REGS_EN is set when the record already points to
2662 * a function that saves all the regs. Basically the '_EN' version
2663 * represents the current state of the function.
2664 *
2665 * Returns: the address of the trampoline that is currently being called
2666 */
2667unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2668{
2669	struct ftrace_ops *ops;
2670	unsigned long addr;
2671
2672	/* Direct calls take precedence over trampolines */
2673	if (rec->flags & FTRACE_FL_DIRECT_EN) {
2674		addr = ftrace_find_rec_direct(rec->ip);
2675		if (addr)
2676			return addr;
2677		WARN_ON_ONCE(1);
2678	}
2679
2680	/* Trampolines take precedence over regs */
2681	if (rec->flags & FTRACE_FL_TRAMP_EN) {
2682		ops = ftrace_find_tramp_ops_curr(rec);
2683		if (FTRACE_WARN_ON(!ops)) {
2684			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2685				(void *)rec->ip, (void *)rec->ip);
2686			/* Ftrace is shutting down, return anything */
2687			return (unsigned long)FTRACE_ADDR;
2688		}
2689		return ops->trampoline;
2690	}
2691
2692	if (rec->flags & FTRACE_FL_REGS_EN)
2693		return (unsigned long)FTRACE_REGS_ADDR;
2694	else
2695		return (unsigned long)FTRACE_ADDR;
2696}
2697
2698static int
2699__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
2700{
2701	unsigned long ftrace_old_addr;
2702	unsigned long ftrace_addr;
2703	int ret;
2704
2705	ftrace_addr = ftrace_get_addr_new(rec);
2706
2707	/* This needs to be done before we call ftrace_update_record */
2708	ftrace_old_addr = ftrace_get_addr_curr(rec);
2709
2710	ret = ftrace_update_record(rec, enable);
2711
2712	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2713
2714	switch (ret) {
2715	case FTRACE_UPDATE_IGNORE:
2716		return 0;
2717
2718	case FTRACE_UPDATE_MAKE_CALL:
2719		ftrace_bug_type = FTRACE_BUG_CALL;
2720		return ftrace_make_call(rec, ftrace_addr);
2721
2722	case FTRACE_UPDATE_MAKE_NOP:
2723		ftrace_bug_type = FTRACE_BUG_NOP;
2724		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2725
2726	case FTRACE_UPDATE_MODIFY_CALL:
2727		ftrace_bug_type = FTRACE_BUG_UPDATE;
2728		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2729	}
2730
2731	return -1; /* unknown ftrace bug */
2732}
2733
2734void __weak ftrace_replace_code(int mod_flags)
2735{
2736	struct dyn_ftrace *rec;
2737	struct ftrace_page *pg;
2738	bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2739	int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2740	int failed;
2741
2742	if (unlikely(ftrace_disabled))
2743		return;
2744
2745	do_for_each_ftrace_rec(pg, rec) {
2746
2747		if (skip_record(rec))
2748			continue;
2749
2750		failed = __ftrace_replace_code(rec, enable);
2751		if (failed) {
2752			ftrace_bug(failed, rec);
2753			/* Stop processing */
2754			return;
2755		}
2756		if (schedulable)
2757			cond_resched();
2758	} while_for_each_ftrace_rec();
2759}
2760
2761struct ftrace_rec_iter {
2762	struct ftrace_page	*pg;
2763	int			index;
2764};
2765
2766/**
2767 * ftrace_rec_iter_start - start up iterating over traced functions
2768 *
2769 * Returns: an iterator handle that is used to iterate over all
2770 * the records that represent address locations where functions
2771 * are traced.
2772 *
2773 * May return NULL if no records are available.
2774 */
2775struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2776{
2777	/*
2778	 * We only use a single iterator.
2779	 * Protected by the ftrace_lock mutex.
2780	 */
2781	static struct ftrace_rec_iter ftrace_rec_iter;
2782	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2783
2784	iter->pg = ftrace_pages_start;
2785	iter->index = 0;
2786
2787	/* Could have empty pages */
2788	while (iter->pg && !iter->pg->index)
2789		iter->pg = iter->pg->next;
2790
2791	if (!iter->pg)
2792		return NULL;
2793
2794	return iter;
2795}
2796
2797/**
2798 * ftrace_rec_iter_next - get the next record to process.
2799 * @iter: The handle to the iterator.
2800 *
2801 * Returns: the next iterator after the given iterator @iter.
2802 */
2803struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2804{
2805	iter->index++;
2806
2807	if (iter->index >= iter->pg->index) {
2808		iter->pg = iter->pg->next;
2809		iter->index = 0;
2810
2811		/* Could have empty pages */
2812		while (iter->pg && !iter->pg->index)
2813			iter->pg = iter->pg->next;
2814	}
2815
2816	if (!iter->pg)
2817		return NULL;
2818
2819	return iter;
2820}
2821
2822/**
2823 * ftrace_rec_iter_record - get the record at the iterator location
2824 * @iter: The current iterator location
2825 *
2826 * Returns: the record that the current @iter is at.
2827 */
2828struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2829{
2830	return &iter->pg->records[iter->index];
2831}
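
/*
 * Typical arch-side use of the iterator API above (sketch; the
 * patch_site() step is hypothetical):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		patch_site(rec->ip);
 *	}
 */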
2832
2833static int
2834ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
2835{
2836	int ret;
2837
2838	if (unlikely(ftrace_disabled))
2839		return 0;
2840
2841	ret = ftrace_init_nop(mod, rec);
2842	if (ret) {
2843		ftrace_bug_type = FTRACE_BUG_INIT;
2844		ftrace_bug(ret, rec);
2845		return 0;
2846	}
2847	return 1;
2848}
2849
2850/*
2851 * archs can override this function if they must do something
2852 * before the code modification is performed.
2853 */
2854void __weak ftrace_arch_code_modify_prepare(void)
2855{
2856}
2857
2858/*
2859 * archs can override this function if they must do something
2860 * after the code modification is performed.
2861 */
2862void __weak ftrace_arch_code_modify_post_process(void)
2863{
2864}
2865
2866static int update_ftrace_func(ftrace_func_t func)
2867{
2868	static ftrace_func_t save_func;
2869
2870	/* Avoid updating if it hasn't changed */
2871	if (func == save_func)
2872		return 0;
2873
2874	save_func = func;
2875
2876	return ftrace_update_ftrace_func(func);
2877}
2878
2879void ftrace_modify_all_code(int command)
2880{
2881	int update = command & FTRACE_UPDATE_TRACE_FUNC;
2882	int mod_flags = 0;
2883	int err = 0;
2884
2885	if (command & FTRACE_MAY_SLEEP)
2886		mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2887
2888	/*
2889	 * If the ftrace_caller calls a ftrace_ops func directly,
2890	 * we need to make sure that it only traces functions it
2891	 * expects to trace. When doing the switch of functions,
2892	 * we need to update to the ftrace_ops_list_func first
2893	 * before the transition between old and new calls are set,
2894	 * as the ftrace_ops_list_func will check the ops hashes
2895	 * to make sure the ops are having the right functions
2896	 * traced.
2897	 */
2898	if (update) {
2899		err = update_ftrace_func(ftrace_ops_list_func);
2900		if (FTRACE_WARN_ON(err))
2901			return;
2902	}
2903
2904	if (command & FTRACE_UPDATE_CALLS)
2905		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2906	else if (command & FTRACE_DISABLE_CALLS)
2907		ftrace_replace_code(mod_flags);
2908
2909	if (update && ftrace_trace_function != ftrace_ops_list_func) {
2910		function_trace_op = set_function_trace_op;
2911		smp_wmb();
2912		/* If irqs are disabled, we are in stop machine */
2913		if (!irqs_disabled())
2914			smp_call_function(ftrace_sync_ipi, NULL, 1);
2915		err = update_ftrace_func(ftrace_trace_function);
2916		if (FTRACE_WARN_ON(err))
2917			return;
2918	}
2919
2920	if (command & FTRACE_START_FUNC_RET)
2921		err = ftrace_enable_ftrace_graph_caller();
2922	else if (command & FTRACE_STOP_FUNC_RET)
2923		err = ftrace_disable_ftrace_graph_caller();
2924	FTRACE_WARN_ON(err);
2925}
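
/*
 * Example command combination (illustrative): enabling function tracing
 * typically reaches this function with
 *
 *	command = FTRACE_UPDATE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
 *
 * which first redirects ftrace_caller to ftrace_ops_list_func, then
 * patches every selected call site, and finally installs the real
 * function once the transition is complete.
 */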
2926
2927static int __ftrace_modify_code(void *data)
2928{
2929	int *command = data;
2930
2931	ftrace_modify_all_code(*command);
2932
2933	return 0;
2934}
2935
2936/**
2937 * ftrace_run_stop_machine - go back to the stop machine method
2938 * @command: The command to tell ftrace what to do
2939 *
2940 * If an arch needs to fall back to the stop machine method, then
2941 * it can call this function.
2942 */
2943void ftrace_run_stop_machine(int command)
2944{
2945	stop_machine(__ftrace_modify_code, &command, NULL);
2946}
2947
2948/**
2949 * arch_ftrace_update_code - modify the code to trace or not trace
2950 * @command: The command that needs to be done
2951 *
2952 * Archs can override this function if they do not need to
2953 * run stop_machine() to modify code.
2954 */
2955void __weak arch_ftrace_update_code(int command)
2956{
2957	ftrace_run_stop_machine(command);
2958}
2959
2960static void ftrace_run_update_code(int command)
2961{
2962	ftrace_arch_code_modify_prepare();
2963
2964	/*
2965	 * By default we use stop_machine() to modify the code.
2966	 * But archs can do whatever they want as long as it
2967	 * is safe. The stop_machine() is the safest, but also
2968	 * produces the most overhead.
2969	 */
2970	arch_ftrace_update_code(command);
2971
2972	ftrace_arch_code_modify_post_process();
2973}
2974
2975static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2976				   struct ftrace_ops_hash *old_hash)
2977{
2978	ops->flags |= FTRACE_OPS_FL_MODIFYING;
2979	ops->old_hash.filter_hash = old_hash->filter_hash;
2980	ops->old_hash.notrace_hash = old_hash->notrace_hash;
2981	ftrace_run_update_code(command);
2982	ops->old_hash.filter_hash = NULL;
2983	ops->old_hash.notrace_hash = NULL;
2984	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2985}
2986
2987static ftrace_func_t saved_ftrace_func;
2988static int ftrace_start_up;
2989
2990void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2991{
2992}
2993
2994/* List of trace_ops that have allocated trampolines */
2995static LIST_HEAD(ftrace_ops_trampoline_list);
2996
2997static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
2998{
2999	lockdep_assert_held(&ftrace_lock);
3000	list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
3001}
3002
3003static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
3004{
3005	lockdep_assert_held(&ftrace_lock);
3006	list_del_rcu(&ops->list);
3007	synchronize_rcu();
3008}
3009
3010/*
3011 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
3012 * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
3013 * not a module.
3014 */
3015#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
3016#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
3017
3018static void ftrace_trampoline_free(struct ftrace_ops *ops)
3019{
3020	if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
3021	    ops->trampoline) {
3022		/*
3023		 * Record the text poke event before the ksymbol unregister
3024		 * event.
3025		 */
3026		perf_event_text_poke((void *)ops->trampoline,
3027				     (void *)ops->trampoline,
3028				     ops->trampoline_size, NULL, 0);
3029		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
3030				   ops->trampoline, ops->trampoline_size,
3031				   true, FTRACE_TRAMPOLINE_SYM);
3032		/* Remove from kallsyms after the perf events */
3033		ftrace_remove_trampoline_from_kallsyms(ops);
3034	}
3035
3036	arch_ftrace_trampoline_free(ops);
3037}
3038
3039static void ftrace_startup_enable(int command)
3040{
3041	if (saved_ftrace_func != ftrace_trace_function) {
3042		saved_ftrace_func = ftrace_trace_function;
3043		command |= FTRACE_UPDATE_TRACE_FUNC;
3044	}
3045
3046	if (!command || !ftrace_enabled)
3047		return;
3048
3049	ftrace_run_update_code(command);
3050}
3051
3052static void ftrace_startup_all(int command)
3053{
3054	update_all_ops = true;
3055	ftrace_startup_enable(command);
3056	update_all_ops = false;
3057}
3058
3059int ftrace_startup(struct ftrace_ops *ops, int command)
3060{
3061	int ret;
3062
3063	if (unlikely(ftrace_disabled))
3064		return -ENODEV;
3065
3066	ret = __register_ftrace_function(ops);
3067	if (ret)
3068		return ret;
3069
3070	ftrace_start_up++;
3071
3072	/*
3073	 * Note that ftrace probes use this to start up
3074	 * and modify functions they will probe. But we still
3075	 * set the ADDING flag for modification, as probes
3076	 * do not have trampolines. If they add them in the
3077	 * future, then the probes will need to distinguish
3078	 * between adding and updating probes.
3079	 */
3080	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
3081
3082	ret = ftrace_hash_ipmodify_enable(ops);
3083	if (ret < 0) {
3084		/* Rollback registration process */
3085		__unregister_ftrace_function(ops);
3086		ftrace_start_up--;
3087		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3088		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
3089			ftrace_trampoline_free(ops);
3090		return ret;
3091	}
3092
3093	if (ftrace_hash_rec_enable(ops))
3094		command |= FTRACE_UPDATE_CALLS;
3095
3096	ftrace_startup_enable(command);
3097
3098	/*
3099	 * If ftrace is in an undefined state, just remove the ops from the list
3100	 * to prevent a NULL pointer dereference, instead of totally rolling it back
3101	 * and freeing the trampoline, because those actions could cause further damage.
3102	 */
3103	if (unlikely(ftrace_disabled)) {
3104		__unregister_ftrace_function(ops);
3105		return -ENODEV;
3106	}
3107
3108	ops->flags &= ~FTRACE_OPS_FL_ADDING;
3109
3110	return 0;
3111}
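
/*
 * Minimal registration sketch (illustrative): ftrace_startup() is what
 * register_ftrace_function() ends up calling, so an in-kernel user
 * looks roughly like:
 *
 *	static void my_func(unsigned long ip, unsigned long parent_ip,
 *			    struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops = { .func = my_func };
 *
 *	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
 *	register_ftrace_function(&my_ops);
 */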
3112
3113int ftrace_shutdown(struct ftrace_ops *ops, int command)
3114{
3115	int ret;
3116
3117	if (unlikely(ftrace_disabled))
3118		return -ENODEV;
3119
3120	ret = __unregister_ftrace_function(ops);
3121	if (ret)
3122		return ret;
3123
3124	ftrace_start_up--;
3125	/*
3126	 * Just warn in case of unbalance; no need to kill ftrace, it's not
3127	 * critical, but the ftrace_call callers may never be nopped again after
3128	 * further ftrace uses.
3129	 */
3130	WARN_ON_ONCE(ftrace_start_up < 0);
3131
3132	/* Disabling ipmodify never fails */
3133	ftrace_hash_ipmodify_disable(ops);
3134
3135	if (ftrace_hash_rec_disable(ops))
3136		command |= FTRACE_UPDATE_CALLS;
3137
3138	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
3139
3140	if (saved_ftrace_func != ftrace_trace_function) {
3141		saved_ftrace_func = ftrace_trace_function;
3142		command |= FTRACE_UPDATE_TRACE_FUNC;
3143	}
3144
3145	if (!command || !ftrace_enabled)
3146		goto out;
3147
3148	/*
3149	 * If the ops uses a trampoline, then it needs to be
3150	 * tested first on update.
3151	 */
3152	ops->flags |= FTRACE_OPS_FL_REMOVING;
3153	removed_ops = ops;
3154
3155	/* The trampoline logic checks the old hashes */
3156	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
3157	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
3158
3159	ftrace_run_update_code(command);
3160
3161	/*
3162	 * If there's no more ops registered with ftrace, run a
3163	 * sanity check to make sure all rec flags are cleared.
3164	 */
3165	if (rcu_dereference_protected(ftrace_ops_list,
3166			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
3167		struct ftrace_page *pg;
3168		struct dyn_ftrace *rec;
3169
3170		do_for_each_ftrace_rec(pg, rec) {
3171			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_NOCLEAR_FLAGS))
3172				pr_warn("  %pS flags:%lx\n",
3173					(void *)rec->ip, rec->flags);
3174		} while_for_each_ftrace_rec();
3175	}
3176
3177	ops->old_hash.filter_hash = NULL;
3178	ops->old_hash.notrace_hash = NULL;
3179
3180	removed_ops = NULL;
3181	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
3182
3183out:
3184	/*
3185	 * Dynamic ops may be freed; we must make sure that all
3186	 * callers are done before leaving this function.
3187	 */
3188	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
3189		/*
3190		 * We need to do a hard force of sched synchronization.
3191		 * This is because we use preempt_disable() to do RCU, but
3192		 * the function tracers can be called where RCU is not watching
3193		 * (like before user_exit()). We can not rely on the RCU
3194		 * infrastructure to do the synchronization, thus we must do it
3195		 * ourselves.
3196		 */
3197		synchronize_rcu_tasks_rude();
3198
3199		/*
3200		 * When the kernel is preemptive, tasks can be preempted
3201		 * while on a ftrace trampoline. Just scheduling a task on
3202		 * a CPU is not good enough to flush them. Calling
3203		 * synchronize_rcu_tasks() will wait for those tasks to
3204		 * execute and either schedule voluntarily or enter user space.
3205		 */
3206		synchronize_rcu_tasks();
3207
3208		ftrace_trampoline_free(ops);
3209	}
3210
3211	return 0;
3212}
3213
3214/* Simply make a copy of @src and return it */
3215static struct ftrace_hash *copy_hash(struct ftrace_hash *src)
3216{
3217	if (ftrace_hash_empty(src))
3218		return EMPTY_HASH;
3219
3220	return alloc_and_copy_ftrace_hash(src->size_bits, src);
3221}
3222
3223/*
3224 * Append @new_hash entries to @hash:
3225 *
3226 *  If @hash is the EMPTY_HASH then it traces all functions and nothing
3227 *  needs to be done.
3228 *
3229 *  If @new_hash is the EMPTY_HASH, then make *hash the EMPTY_HASH so
3230 *  that it traces everything.
3231 *
3232 *  Otherwise, go through all of @new_hash and add anything that @hash
3233 *  doesn't already have, to @hash.
3234 *
3235 *  The filter_hash updates use just the append_hash() function;
3236 *  the notrace_hash updates do not.
3237 */
3238static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash,
3239		       int size_bits)
3240{
3241	struct ftrace_func_entry *entry;
3242	int size;
3243	int i;
3244
3245	if (*hash) {
3246		/* An empty hash does everything */
3247		if (ftrace_hash_empty(*hash))
3248			return 0;
3249	} else {
3250		*hash = alloc_ftrace_hash(size_bits);
3251		if (!*hash)
3252			return -ENOMEM;
3253	}
3254
3255	/* If new_hash has everything make hash have everything */
3256	if (ftrace_hash_empty(new_hash)) {
3257		free_ftrace_hash(*hash);
3258		*hash = EMPTY_HASH;
3259		return 0;
3260	}
3261
3262	size = 1 << new_hash->size_bits;
3263	for (i = 0; i < size; i++) {
3264		hlist_for_each_entry(entry, &new_hash->buckets[i], hlist) {
3265			/* Only add if not already in hash */
3266			if (!__ftrace_lookup_ip(*hash, entry->ip) &&
3267			    add_hash_entry(*hash, entry->ip) == NULL)
3268				return -ENOMEM;
3269		}
3270	}
3271	return 0;
3272}
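
/*
 * Worked example (illustrative): with *hash = {foo, bar} and
 * new_hash = {bar, baz}, append_hash() leaves *hash = {foo, bar, baz}.
 * If new_hash were the EMPTY_HASH ("trace everything") instead, *hash
 * would be replaced by EMPTY_HASH so the union traces everything too.
 */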
3273
3274/*
3275 * Add to @hash only those that are in both @new_hash1 and @new_hash2
3276 *
3277 * The notrace_hash updates use just the intersect_hash() function;
3278 * the filter_hash updates do not.
3279 */
3280static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash1,
3281			  struct ftrace_hash *new_hash2)
3282{
3283	struct ftrace_func_entry *entry;
3284	int size;
3285	int i;
3286
3287	/*
3288	 * If new_hash1 or new_hash2 is the EMPTY_HASH then make the hash
3289	 * empty as well as empty for notrace means none are notraced.
3290	 */
3291	if (ftrace_hash_empty(new_hash1) || ftrace_hash_empty(new_hash2)) {
3292		free_ftrace_hash(*hash);
3293		*hash = EMPTY_HASH;
3294		return 0;
3295	}
3296
3297	size = 1 << new_hash1->size_bits;
3298	for (i = 0; i < size; i++) {
3299		hlist_for_each_entry(entry, &new_hash1->buckets[i], hlist) {
3300			/* Only add if in both @new_hash1 and @new_hash2 */
3301			if (__ftrace_lookup_ip(new_hash2, entry->ip) &&
3302			    add_hash_entry(*hash, entry->ip) == NULL)
3303				return -ENOMEM;
3304		}
3305	}
3306	/* If nothing intersects, make it the empty set */
3307	if (ftrace_hash_empty(*hash)) {
3308		free_ftrace_hash(*hash);
3309		*hash = EMPTY_HASH;
3310	}
3311	return 0;
3312}
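
/*
 * Worked example (illustrative): with new_hash1 = {foo, bar} and
 * new_hash2 = {bar, baz}, intersect_hash() sets *hash = {bar}. An
 * EMPTY_HASH on either side means nothing is notraced, so the result
 * collapses to EMPTY_HASH as well.
 */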
3313
3314/* Return a new hash that has a union of all @ops->filter_hash entries */
3315static struct ftrace_hash *append_hashes(struct ftrace_ops *ops)
3316{
3317	struct ftrace_hash *new_hash = NULL;
3318	struct ftrace_ops *subops;
3319	int size_bits;
3320	int ret;
3321
3322	if (ops->func_hash->filter_hash)
3323		size_bits = ops->func_hash->filter_hash->size_bits;
3324	else
3325		size_bits = FTRACE_HASH_DEFAULT_BITS;
3326
3327	list_for_each_entry(subops, &ops->subop_list, list) {
3328		ret = append_hash(&new_hash, subops->func_hash->filter_hash, size_bits);
3329		if (ret < 0) {
3330			free_ftrace_hash(new_hash);
3331			return NULL;
3332		}
3333		/* Nothing more to do if new_hash is empty */
3334		if (ftrace_hash_empty(new_hash))
3335			break;
3336	}
3337	/* Can't return NULL as that means this failed */
3338	return new_hash ? : EMPTY_HASH;
3339}
3340
3341/* Make @ops trace everything except what all its subops do not trace */
3342static struct ftrace_hash *intersect_hashes(struct ftrace_ops *ops)
3343{
3344	struct ftrace_hash *new_hash = NULL;
3345	struct ftrace_ops *subops;
3346	int size_bits;
3347	int ret;
3348
3349	list_for_each_entry(subops, &ops->subop_list, list) {
3350		struct ftrace_hash *next_hash;
3351
3352		if (!new_hash) {
3353			size_bits = subops->func_hash->notrace_hash->size_bits;
3354			new_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->notrace_hash);
3355			if (!new_hash)
3356				return NULL;
3357			continue;
3358		}
3359		size_bits = new_hash->size_bits;
3360		next_hash = new_hash;
3361		new_hash = alloc_ftrace_hash(size_bits);
3362		ret = intersect_hash(&new_hash, next_hash, subops->func_hash->notrace_hash);
3363		free_ftrace_hash(next_hash);
3364		if (ret < 0) {
3365			free_ftrace_hash(new_hash);
3366			return NULL;
3367		}
3368		/* Nothing more to do if new_hash is empty */
3369		if (ftrace_hash_empty(new_hash))
3370			break;
3371	}
3372	return new_hash;
3373}
3374
3375static bool ops_equal(struct ftrace_hash *A, struct ftrace_hash *B)
3376{
3377	struct ftrace_func_entry *entry;
3378	int size;
3379	int i;
3380
3381	if (ftrace_hash_empty(A))
3382		return ftrace_hash_empty(B);
3383
3384	if (ftrace_hash_empty(B))
3385		return ftrace_hash_empty(A);
3386
3387	if (A->count != B->count)
3388		return false;
3389
3390	size = 1 << A->size_bits;
3391	for (i = 0; i < size; i++) {
3392		hlist_for_each_entry(entry, &A->buckets[i], hlist) {
3393			if (!__ftrace_lookup_ip(B, entry->ip))
3394				return false;
3395		}
3396	}
3397
3398	return true;
3399}
3400
3401static void ftrace_ops_update_code(struct ftrace_ops *ops,
3402				   struct ftrace_ops_hash *old_hash);
3403
3404static int __ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
3405					     struct ftrace_hash **orig_hash,
3406					     struct ftrace_hash *hash,
3407					     int enable)
3408{
3409	struct ftrace_ops_hash old_hash_ops;
3410	struct ftrace_hash *old_hash;
3411	int ret;
3412
3413	old_hash = *orig_hash;
3414	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
3415	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
3416	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3417	if (!ret) {
3418		ftrace_ops_update_code(ops, &old_hash_ops);
3419		free_ftrace_hash_rcu(old_hash);
3420	}
3421	return ret;
3422}
3423
3424static int ftrace_update_ops(struct ftrace_ops *ops, struct ftrace_hash *filter_hash,
3425			     struct ftrace_hash *notrace_hash)
3426{
3427	int ret;
3428
3429	if (!ops_equal(filter_hash, ops->func_hash->filter_hash)) {
3430		ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->filter_hash,
3431							filter_hash, 1);
3432		if (ret < 0)
3433			return ret;
3434	}
3435
3436	if (!ops_equal(notrace_hash, ops->func_hash->notrace_hash)) {
3437		ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->notrace_hash,
3438							notrace_hash, 0);
3439		if (ret < 0)
3440			return ret;
3441	}
3442
3443	return 0;
3444}
3445
3446/**
3447 * ftrace_startup_subops - enable tracing for subops of an ops
3448 * @ops: Manager ops (used to pick all the functions of its subops)
3449 * @subops: A new ops to add to @ops
3450 * @command: Extra commands to use to enable tracing
3451 *
3452 * The @ops is a manager @ops that has the filter that includes all the functions
3453 * that its list of subops are tracing. Adding a new @subops will add the
3454 * functions of @subops to @ops.
3455 */
3456int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
3457{
3458	struct ftrace_hash *filter_hash;
3459	struct ftrace_hash *notrace_hash;
3460	struct ftrace_hash *save_filter_hash;
3461	struct ftrace_hash *save_notrace_hash;
3462	int size_bits;
3463	int ret;
3464
3465	if (unlikely(ftrace_disabled))
3466		return -ENODEV;
3467
3468	ftrace_ops_init(ops);
3469	ftrace_ops_init(subops);
3470
3471	if (WARN_ON_ONCE(subops->flags & FTRACE_OPS_FL_ENABLED))
3472		return -EBUSY;
3473
3474	/* Make everything canonical (Just in case!) */
3475	if (!ops->func_hash->filter_hash)
3476		ops->func_hash->filter_hash = EMPTY_HASH;
3477	if (!ops->func_hash->notrace_hash)
3478		ops->func_hash->notrace_hash = EMPTY_HASH;
3479	if (!subops->func_hash->filter_hash)
3480		subops->func_hash->filter_hash = EMPTY_HASH;
3481	if (!subops->func_hash->notrace_hash)
3482		subops->func_hash->notrace_hash = EMPTY_HASH;
3483
3484	/* For the first subops to ops just enable it normally */
3485	if (list_empty(&ops->subop_list)) {
3486		/* Just use the subops hashes */
3487		filter_hash = copy_hash(subops->func_hash->filter_hash);
3488		notrace_hash = copy_hash(subops->func_hash->notrace_hash);
3489		if (!filter_hash || !notrace_hash) {
3490			free_ftrace_hash(filter_hash);
3491			free_ftrace_hash(notrace_hash);
3492			return -ENOMEM;
3493		}
3494
3495		save_filter_hash = ops->func_hash->filter_hash;
3496		save_notrace_hash = ops->func_hash->notrace_hash;
3497
3498		ops->func_hash->filter_hash = filter_hash;
3499		ops->func_hash->notrace_hash = notrace_hash;
3500		list_add(&subops->list, &ops->subop_list);
3501		ret = ftrace_startup(ops, command);
3502		if (ret < 0) {
3503			list_del(&subops->list);
3504			ops->func_hash->filter_hash = save_filter_hash;
3505			ops->func_hash->notrace_hash = save_notrace_hash;
3506			free_ftrace_hash(filter_hash);
3507			free_ftrace_hash(notrace_hash);
3508		} else {
3509			free_ftrace_hash(save_filter_hash);
3510			free_ftrace_hash(save_notrace_hash);
3511			subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP;
3512			subops->managed = ops;
3513		}
3514		return ret;
3515	}
3516
3517	/*
3518	 * There's already something attached here. The rules (see the example below):
3519	 *   o If either filter_hash is empty then the final stays empty
3520	 *      o Otherwise, the final is a superset of both hashes
3521	 *   o If either notrace_hash is empty then the final stays empty
3522	 *      o Otherwise, the final is an intersection between the hashes
3523	 */
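	/*
	 * Worked example (illustrative): if @ops filters {foo, bar} and the
	 * new @subops filters {bar, baz}, the final filter is {foo, bar, baz}.
	 * If @ops notraces {foo, bar} and @subops notraces {bar, baz}, the
	 * final notrace hash is {bar}: only functions that every subops
	 * avoids stay untraced.
	 */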
3524	if (ftrace_hash_empty(ops->func_hash->filter_hash) ||
3525	    ftrace_hash_empty(subops->func_hash->filter_hash)) {
3526		filter_hash = EMPTY_HASH;
3527	} else {
3528		size_bits = max(ops->func_hash->filter_hash->size_bits,
3529				subops->func_hash->filter_hash->size_bits);
3530		filter_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->filter_hash);
3531		if (!filter_hash)
3532			return -ENOMEM;
3533		ret = append_hash(&filter_hash, subops->func_hash->filter_hash,
3534				  size_bits);
3535		if (ret < 0) {
3536			free_ftrace_hash(filter_hash);
3537			return ret;
3538		}
3539	}
3540
3541	if (ftrace_hash_empty(ops->func_hash->notrace_hash) ||
3542	    ftrace_hash_empty(subops->func_hash->notrace_hash)) {
3543		notrace_hash = EMPTY_HASH;
3544	} else {
3545		size_bits = max(ops->func_hash->notrace_hash->size_bits,
3546				subops->func_hash->notrace_hash->size_bits);
3547		notrace_hash = alloc_ftrace_hash(size_bits);
3548		if (!notrace_hash) {
3549			free_ftrace_hash(filter_hash);
3550			return -ENOMEM;
3551		}
3552
3553		ret = intersect_hash(&notrace_hash, ops->func_hash->notrace_hash,
3554				     subops->func_hash->notrace_hash);
3555		if (ret < 0) {
3556			free_ftrace_hash(filter_hash);
3557			free_ftrace_hash(notrace_hash);
3558			return ret;
3559		}
3560	}
3561
3562	list_add(&subops->list, &ops->subop_list);
3563
3564	ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
3565	free_ftrace_hash(filter_hash);
3566	free_ftrace_hash(notrace_hash);
3567	if (ret < 0) {
3568		list_del(&subops->list);
3569	} else {
3570		subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP;
3571		subops->managed = ops;
3572	}
3573	return ret;
3574}
3575
3576/**
3577 * ftrace_shutdown_subops - Remove a subops from a manager ops
3578 * @ops: A manager ops to remove @subops from
3579 * @subops: The subops to remove from @ops
3580 * @command: Any extra command flags to add to modifying the text
3581 *
3582 * Removes the functions being traced by the @subops from @ops. Note, it
3583 * will not affect functions that are being traced by other subops that
3584 * still exist in @ops.
3585 *
3586 * If the last subops is removed from @ops, then @ops is shutdown normally.
3587 */
3588int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command)
3589{
3590	struct ftrace_hash *filter_hash;
3591	struct ftrace_hash *notrace_hash;
3592	int ret;
3593
3594	if (unlikely(ftrace_disabled))
3595		return -ENODEV;
3596
3597	if (WARN_ON_ONCE(!(subops->flags & FTRACE_OPS_FL_ENABLED)))
3598		return -EINVAL;
3599
3600	list_del(&subops->list);
3601
3602	if (list_empty(&ops->subop_list)) {
3603		/* Last one, just disable the current ops */
3604
3605		ret = ftrace_shutdown(ops, command);
3606		if (ret < 0) {
3607			list_add(&subops->list, &ops->subop_list);
3608			return ret;
3609		}
3610
3611		subops->flags &= ~FTRACE_OPS_FL_ENABLED;
3612
3613		free_ftrace_hash(ops->func_hash->filter_hash);
3614		free_ftrace_hash(ops->func_hash->notrace_hash);
3615		ops->func_hash->filter_hash = EMPTY_HASH;
3616		ops->func_hash->notrace_hash = EMPTY_HASH;
3617		subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP);
3618		subops->managed = NULL;
3619
3620		return 0;
3621	}
3622
3623	/* Rebuild the hashes without subops */
3624	filter_hash = append_hashes(ops);
3625	notrace_hash = intersect_hashes(ops);
3626	if (!filter_hash || !notrace_hash) {
3627		free_ftrace_hash(filter_hash);
3628		free_ftrace_hash(notrace_hash);
3629		list_add(&subops->list, &ops->subop_list);
3630		return -ENOMEM;
3631	}
3632
3633	ret = ftrace_update_ops(ops, filter_hash, notrace_hash);
3634	if (ret < 0) {
3635		list_add(&subops->list, &ops->subop_list);
3636	} else {
3637		subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP);
3638		subops->managed = NULL;
3639	}
3640	free_ftrace_hash(filter_hash);
3641	free_ftrace_hash(notrace_hash);
3642	return ret;
3643}
3644
3645static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops,
3646					      struct ftrace_hash **orig_subhash,
3647					      struct ftrace_hash *hash,
3648					      int enable)
3649{
3650	struct ftrace_ops *ops = subops->managed;
3651	struct ftrace_hash **orig_hash;
3652	struct ftrace_hash *save_hash;
3653	struct ftrace_hash *new_hash;
3654	int ret;
3655
3656	/* Manager ops can not be subops (yet) */
3657	if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP))
3658		return -EINVAL;
3659
3660	/* Move the new hash over to the subops hash */
3661	save_hash = *orig_subhash;
3662	*orig_subhash = __ftrace_hash_move(hash);
3663	if (!*orig_subhash) {
3664		*orig_subhash = save_hash;
3665		return -ENOMEM;
3666	}
3667
3668	/* Create a new_hash to hold the ops new functions */
3669	if (enable) {
3670		orig_hash = &ops->func_hash->filter_hash;
3671		new_hash = append_hashes(ops);
3672	} else {
3673		orig_hash = &ops->func_hash->notrace_hash;
3674		new_hash = intersect_hashes(ops);
3675	}
3676
3677	/* Move the hash over to the new hash */
3678	ret = __ftrace_hash_move_and_update_ops(ops, orig_hash, new_hash, enable);
3679
3680	free_ftrace_hash(new_hash);
3681
3682	if (ret) {
3683		/* Put back the original hash */
3684		free_ftrace_hash_rcu(*orig_subhash);
3685		*orig_subhash = save_hash;
3686	} else {
3687		free_ftrace_hash_rcu(save_hash);
3688	}
3689	return ret;
3690}
3691
3692
3693u64			ftrace_update_time;
3694u64			ftrace_total_mod_time;
3695unsigned long		ftrace_update_tot_cnt;
3696unsigned long		ftrace_number_of_pages;
3697unsigned long		ftrace_number_of_groups;
3698
3699static inline int ops_traces_mod(struct ftrace_ops *ops)
3700{
3701	/*
3702	 * An empty filter_hash will default to tracing the module.
3703	 * But the notrace hash requires a test of individual module functions.
3704	 */
3705	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
3706		ftrace_hash_empty(ops->func_hash->notrace_hash);
3707}
3708
3709static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3710{
3711	bool init_nop = ftrace_need_init_nop();
3712	struct ftrace_page *pg;
3713	struct dyn_ftrace *p;
3714	u64 start, stop, update_time;
3715	unsigned long update_cnt = 0;
3716	unsigned long rec_flags = 0;
3717	int i;
3718
3719	start = ftrace_now(raw_smp_processor_id());
3720
3721	/*
3722	 * When a module is loaded, this function is called to convert
3723	 * the calls to mcount in its text to nops, and also to create
3724	 * an entry in the ftrace data. Now, if ftrace is activated
3725	 * after this call, but before the module sets its text to
3726	 * read-only, the modification of enabling ftrace can fail if
3727	 * the read-only is done while ftrace is converting the calls.
3728	 * To prevent this, the module's records are set as disabled
3729	 * and will be enabled after the call to set the module's text
3730	 * to read-only.
3731	 */
3732	if (mod)
3733		rec_flags |= FTRACE_FL_DISABLED;
3734
3735	for (pg = new_pgs; pg; pg = pg->next) {
3736
3737		for (i = 0; i < pg->index; i++) {
3738
3739			/* If something went wrong, bail without enabling anything */
3740			if (unlikely(ftrace_disabled))
3741				return -1;
3742
3743			p = &pg->records[i];
3744			p->flags = rec_flags;
3745
3746			/*
3747			 * Do the initial record conversion from mcount jump
3748			 * to the NOP instructions.
3749			 */
3750			if (init_nop && !ftrace_nop_initialize(mod, p))
3751				break;
3752
3753			update_cnt++;
3754		}
3755	}
3756
3757	stop = ftrace_now(raw_smp_processor_id());
3758	update_time = stop - start;
3759	if (mod)
3760		ftrace_total_mod_time += update_time;
3761	else
3762		ftrace_update_time = update_time;
3763	ftrace_update_tot_cnt += update_cnt;
3764
3765	return 0;
3766}
3767
3768static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3769{
3770	int order;
3771	int pages;
3772	int cnt;
3773
3774	if (WARN_ON(!count))
3775		return -EINVAL;
3776
3777	/* We want to fill as much as possible, with no empty pages */
3778	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
3779	order = fls(pages) - 1;
3780
3781 again:
3782	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3783
3784	if (!pg->records) {
3785		/* if we can't allocate this size, try something smaller */
3786		if (!order)
3787			return -ENOMEM;
3788		order--;
3789		goto again;
3790	}
3791
3792	ftrace_number_of_pages += 1 << order;
3793	ftrace_number_of_groups++;
3794
3795	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3796	pg->order = order;
3797
3798	if (cnt > count)
3799		cnt = count;
3800
3801	return cnt;
3802}
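
/*
 * Worked example (illustrative; assumes 4096-byte pages and 16-byte
 * dyn_ftrace records, i.e. ENTRIES_PER_PAGE == 256): for count = 1000,
 * pages = DIV_ROUND_UP(1000, 256) = 4 and order = fls(4) - 1 = 2, so a
 * single 4-page (16 KiB) block is allocated, holding up to 1024 records,
 * of which cnt = 1000 are used.
 */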
3803
3804static void ftrace_free_pages(struct ftrace_page *pages)
3805{
3806	struct ftrace_page *pg = pages;
3807
3808	while (pg) {
3809		if (pg->records) {
3810			free_pages((unsigned long)pg->records, pg->order);
3811			ftrace_number_of_pages -= 1 << pg->order;
3812		}
3813		pages = pg->next;
3814		kfree(pg);
3815		pg = pages;
3816		ftrace_number_of_groups--;
3817	}
3818}
3819
3820static struct ftrace_page *
3821ftrace_allocate_pages(unsigned long num_to_init)
3822{
3823	struct ftrace_page *start_pg;
3824	struct ftrace_page *pg;
3825	int cnt;
3826
3827	if (!num_to_init)
3828		return NULL;
3829
3830	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3831	if (!pg)
3832		return NULL;
3833
3834	/*
3835	 * Try to allocate as much as possible in one contiguous
3836	 * location that fills in all of the space. We want to
3837	 * waste as little space as possible.
3838	 */
3839	for (;;) {
3840		cnt = ftrace_allocate_records(pg, num_to_init);
3841		if (cnt < 0)
3842			goto free_pages;
3843
3844		num_to_init -= cnt;
3845		if (!num_to_init)
3846			break;
3847
3848		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3849		if (!pg->next)
3850			goto free_pages;
3851
3852		pg = pg->next;
3853	}
3854
3855	return start_pg;
3856
3857 free_pages:
3858	ftrace_free_pages(start_pg);
3859	pr_info("ftrace: FAILED to allocate memory for functions\n");
3860	return NULL;
3861}
3862
3863#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3864
3865struct ftrace_iterator {
3866	loff_t				pos;
3867	loff_t				func_pos;
3868	loff_t				mod_pos;
3869	struct ftrace_page		*pg;
3870	struct dyn_ftrace		*func;
3871	struct ftrace_func_probe	*probe;
3872	struct ftrace_func_entry	*probe_entry;
3873	struct trace_parser		parser;
3874	struct ftrace_hash		*hash;
3875	struct ftrace_ops		*ops;
3876	struct trace_array		*tr;
3877	struct list_head		*mod_list;
3878	int				pidx;
3879	int				idx;
3880	unsigned			flags;
3881};
3882
3883static void *
3884t_probe_next(struct seq_file *m, loff_t *pos)
3885{
3886	struct ftrace_iterator *iter = m->private;
3887	struct trace_array *tr = iter->ops->private;
3888	struct list_head *func_probes;
3889	struct ftrace_hash *hash;
3890	struct list_head *next;
3891	struct hlist_node *hnd = NULL;
3892	struct hlist_head *hhd;
3893	int size;
3894
3895	(*pos)++;
3896	iter->pos = *pos;
3897
3898	if (!tr)
3899		return NULL;
3900
3901	func_probes = &tr->func_probes;
3902	if (list_empty(func_probes))
3903		return NULL;
3904
3905	if (!iter->probe) {
3906		next = func_probes->next;
3907		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3908	}
3909
3910	if (iter->probe_entry)
3911		hnd = &iter->probe_entry->hlist;
3912
3913	hash = iter->probe->ops.func_hash->filter_hash;
3914
3915	/*
3916	 * A probe being registered may temporarily have an empty hash
3917	 * and it's at the end of the func_probes list.
3918	 */
3919	if (!hash || hash == EMPTY_HASH)
3920		return NULL;
3921
3922	size = 1 << hash->size_bits;
3923
3924 retry:
3925	if (iter->pidx >= size) {
3926		if (iter->probe->list.next == func_probes)
3927			return NULL;
3928		next = iter->probe->list.next;
3929		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3930		hash = iter->probe->ops.func_hash->filter_hash;
3931		size = 1 << hash->size_bits;
3932		iter->pidx = 0;
3933	}
3934
3935	hhd = &hash->buckets[iter->pidx];
3936
3937	if (hlist_empty(hhd)) {
3938		iter->pidx++;
3939		hnd = NULL;
3940		goto retry;
3941	}
3942
3943	if (!hnd)
3944		hnd = hhd->first;
3945	else {
3946		hnd = hnd->next;
3947		if (!hnd) {
3948			iter->pidx++;
3949			goto retry;
3950		}
3951	}
3952
3953	if (WARN_ON_ONCE(!hnd))
3954		return NULL;
3955
3956	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3957
3958	return iter;
3959}
3960
3961static void *t_probe_start(struct seq_file *m, loff_t *pos)
3962{
3963	struct ftrace_iterator *iter = m->private;
3964	void *p = NULL;
3965	loff_t l;
3966
3967	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3968		return NULL;
3969
3970	if (iter->mod_pos > *pos)
3971		return NULL;
3972
3973	iter->probe = NULL;
3974	iter->probe_entry = NULL;
3975	iter->pidx = 0;
3976	for (l = 0; l <= (*pos - iter->mod_pos); ) {
3977		p = t_probe_next(m, &l);
3978		if (!p)
3979			break;
3980	}
3981	if (!p)
3982		return NULL;
3983
3984	/* Only set this if we have an item */
3985	iter->flags |= FTRACE_ITER_PROBE;
3986
3987	return iter;
3988}
3989
3990static int
3991t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3992{
3993	struct ftrace_func_entry *probe_entry;
3994	struct ftrace_probe_ops *probe_ops;
3995	struct ftrace_func_probe *probe;
3996
3997	probe = iter->probe;
3998	probe_entry = iter->probe_entry;
3999
4000	if (WARN_ON_ONCE(!probe || !probe_entry))
4001		return -EIO;
4002
4003	probe_ops = probe->probe_ops;
4004
4005	if (probe_ops->print)
4006		return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
4007
4008	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
4009		   (void *)probe_ops->func);
4010
4011	return 0;
4012}
4013
4014static void *
4015t_mod_next(struct seq_file *m, loff_t *pos)
4016{
4017	struct ftrace_iterator *iter = m->private;
4018	struct trace_array *tr = iter->tr;
4019
4020	(*pos)++;
4021	iter->pos = *pos;
4022
4023	iter->mod_list = iter->mod_list->next;
4024
4025	if (iter->mod_list == &tr->mod_trace ||
4026	    iter->mod_list == &tr->mod_notrace) {
4027		iter->flags &= ~FTRACE_ITER_MOD;
4028		return NULL;
4029	}
4030
4031	iter->mod_pos = *pos;
4032
4033	return iter;
4034}
4035
4036static void *t_mod_start(struct seq_file *m, loff_t *pos)
4037{
4038	struct ftrace_iterator *iter = m->private;
4039	void *p = NULL;
4040	loff_t l;
4041
4042	if (iter->func_pos > *pos)
4043		return NULL;
4044
4045	iter->mod_pos = iter->func_pos;
4046
4047	/* probes are only available if tr is set */
4048	if (!iter->tr)
4049		return NULL;
4050
4051	for (l = 0; l <= (*pos - iter->func_pos); ) {
4052		p = t_mod_next(m, &l);
4053		if (!p)
4054			break;
4055	}
4056	if (!p) {
4057		iter->flags &= ~FTRACE_ITER_MOD;
4058		return t_probe_start(m, pos);
4059	}
4060
4061	/* Only set this if we have an item */
4062	iter->flags |= FTRACE_ITER_MOD;
4063
4064	return iter;
4065}
4066
4067static int
4068t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
4069{
4070	struct ftrace_mod_load *ftrace_mod;
4071	struct trace_array *tr = iter->tr;
4072
4073	if (WARN_ON_ONCE(!iter->mod_list) ||
4074			 iter->mod_list == &tr->mod_trace ||
4075			 iter->mod_list == &tr->mod_notrace)
4076		return -EIO;
4077
4078	ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
4079
4080	if (ftrace_mod->func)
4081		seq_printf(m, "%s", ftrace_mod->func);
4082	else
4083		seq_putc(m, '*');
4084
4085	seq_printf(m, ":mod:%s\n", ftrace_mod->module);
4086
4087	return 0;
4088}
4089
4090static void *
4091t_func_next(struct seq_file *m, loff_t *pos)
4092{
4093	struct ftrace_iterator *iter = m->private;
4094	struct dyn_ftrace *rec = NULL;
4095
4096	(*pos)++;
4097
4098 retry:
4099	if (iter->idx >= iter->pg->index) {
4100		if (iter->pg->next) {
4101			iter->pg = iter->pg->next;
4102			iter->idx = 0;
4103			goto retry;
4104		}
4105	} else {
4106		rec = &iter->pg->records[iter->idx++];
4107		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
4108		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
4109
4110		    ((iter->flags & FTRACE_ITER_ENABLED) &&
4111		     !(rec->flags & FTRACE_FL_ENABLED)) ||
4112
4113		    ((iter->flags & FTRACE_ITER_TOUCHED) &&
4114		     !(rec->flags & FTRACE_FL_TOUCHED))) {
4115
4116			rec = NULL;
4117			goto retry;
4118		}
4119	}
4120
4121	if (!rec)
4122		return NULL;
4123
4124	iter->pos = iter->func_pos = *pos;
4125	iter->func = rec;
4126
4127	return iter;
4128}
4129
4130static void *
4131t_next(struct seq_file *m, void *v, loff_t *pos)
4132{
4133	struct ftrace_iterator *iter = m->private;
4134	loff_t l = *pos; /* t_probe_start() must use original pos */
4135	void *ret;
4136
4137	if (unlikely(ftrace_disabled))
4138		return NULL;
4139
4140	if (iter->flags & FTRACE_ITER_PROBE)
4141		return t_probe_next(m, pos);
4142
4143	if (iter->flags & FTRACE_ITER_MOD)
4144		return t_mod_next(m, pos);
4145
4146	if (iter->flags & FTRACE_ITER_PRINTALL) {
4147		/* next must increment pos, and t_mod_start() does not */
4148		(*pos)++;
4149		return t_mod_start(m, &l);
4150	}
4151
4152	ret = t_func_next(m, pos);
4153
4154	if (!ret)
4155		return t_mod_start(m, &l);
4156
4157	return ret;
4158}
4159
4160static void reset_iter_read(struct ftrace_iterator *iter)
4161{
4162	iter->pos = 0;
4163	iter->func_pos = 0;
4164	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
4165}
4166
4167static void *t_start(struct seq_file *m, loff_t *pos)
4168{
4169	struct ftrace_iterator *iter = m->private;
4170	void *p = NULL;
4171	loff_t l;
4172
4173	mutex_lock(&ftrace_lock);
4174
4175	if (unlikely(ftrace_disabled))
4176		return NULL;
4177
4178	/*
4179	 * If an lseek was done, then reset and start from beginning.
4180	 */
4181	if (*pos < iter->pos)
4182		reset_iter_read(iter);
4183
4184	/*
4185	 * For set_ftrace_filter reading, if we have the filter
4186	 * off, we can short cut and just print out that all
4187	 * functions are enabled.
4188	 */
4189	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
4190	    ftrace_hash_empty(iter->hash)) {
4191		iter->func_pos = 1; /* Account for the message */
4192		if (*pos > 0)
4193			return t_mod_start(m, pos);
4194		iter->flags |= FTRACE_ITER_PRINTALL;
4195		/* reset in case of seek/pread */
4196		iter->flags &= ~FTRACE_ITER_PROBE;
4197		return iter;
4198	}
4199
4200	if (iter->flags & FTRACE_ITER_MOD)
4201		return t_mod_start(m, pos);
4202
4203	/*
4204	 * Unfortunately, we need to restart at ftrace_pages_start
4205	 * every time we let go of the ftrace_lock mutex. This is because
4206	 * those pointers can change without the lock.
4207	 */
4208	iter->pg = ftrace_pages_start;
4209	iter->idx = 0;
4210	for (l = 0; l <= *pos; ) {
4211		p = t_func_next(m, &l);
4212		if (!p)
4213			break;
4214	}
4215
4216	if (!p)
4217		return t_mod_start(m, pos);
4218
4219	return iter;
4220}
4221
4222static void t_stop(struct seq_file *m, void *p)
4223{
4224	mutex_unlock(&ftrace_lock);
4225}
4226
4227void * __weak
4228arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
4229{
4230	return NULL;
4231}
4232
4233static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
4234				struct dyn_ftrace *rec)
4235{
4236	void *ptr;
4237
4238	ptr = arch_ftrace_trampoline_func(ops, rec);
4239	if (ptr)
4240		seq_printf(m, " ->%pS", ptr);
4241}
4242
4243#ifdef FTRACE_MCOUNT_MAX_OFFSET
4244/*
4245 * Weak functions can still have an mcount/fentry that is saved in
4246 * the __mcount_loc section. These can be detected by having a
4247	 * symbol offset greater than FTRACE_MCOUNT_MAX_OFFSET, as the
4248 * symbol found by kallsyms is not the function that the mcount/fentry
4249 * is part of. The offset is much greater in these cases.
4250 *
4251	 * Test the record to make sure that the ip points to a valid kallsyms
4252	 * symbol and, if not, mark it disabled.
4253 */
4254static int test_for_valid_rec(struct dyn_ftrace *rec)
4255{
4256	char str[KSYM_SYMBOL_LEN];
4257	unsigned long offset;
4258	const char *ret;
4259
4260	ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
4261
4262	/* Weak functions can cause invalid addresses */
4263	if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
4264		rec->flags |= FTRACE_FL_DISABLED;
4265		return 0;
4266	}
4267	return 1;
4268}
4269
4270static struct workqueue_struct *ftrace_check_wq __initdata;
4271static struct work_struct ftrace_check_work __initdata;
4272
4273/*
4274 * Scan all the mcount/fentry entries to make sure they are valid.
4275 */
4276static __init void ftrace_check_work_func(struct work_struct *work)
4277{
4278	struct ftrace_page *pg;
4279	struct dyn_ftrace *rec;
4280
4281	mutex_lock(&ftrace_lock);
4282	do_for_each_ftrace_rec(pg, rec) {
4283		test_for_valid_rec(rec);
4284	} while_for_each_ftrace_rec();
4285	mutex_unlock(&ftrace_lock);
4286}
4287
4288static int __init ftrace_check_for_weak_functions(void)
4289{
4290	INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
4291
4292	ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
4293
4294	queue_work(ftrace_check_wq, &ftrace_check_work);
4295	return 0;
4296}
4297
4298static int __init ftrace_check_sync(void)
4299{
4300	/* Make sure the ftrace_check updates are finished */
4301	if (ftrace_check_wq)
4302		destroy_workqueue(ftrace_check_wq);
4303	return 0;
4304}
4305
4306late_initcall_sync(ftrace_check_sync);
4307subsys_initcall(ftrace_check_for_weak_functions);
4308
4309static int print_rec(struct seq_file *m, unsigned long ip)
4310{
4311	unsigned long offset;
4312	char str[KSYM_SYMBOL_LEN];
4313	char *modname;
4314	const char *ret;
4315
4316	ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
4317	/* Weak functions can cause invalid addresses */
4318	if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
4319		snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
4320			 FTRACE_INVALID_FUNCTION, offset);
4321		ret = NULL;
4322	}
4323
4324	seq_puts(m, str);
4325	if (modname)
4326		seq_printf(m, " [%s]", modname);
4327	return ret == NULL ? -1 : 0;
4328}
4329#else
4330static inline int test_for_valid_rec(struct dyn_ftrace *rec)
4331{
4332	return 1;
4333}
4334
4335static inline int print_rec(struct seq_file *m, unsigned long ip)
4336{
4337	seq_printf(m, "%ps", (void *)ip);
4338	return 0;
4339}
4340#endif
4341
4342static int t_show(struct seq_file *m, void *v)
4343{
4344	struct ftrace_iterator *iter = m->private;
4345	struct dyn_ftrace *rec;
4346
4347	if (iter->flags & FTRACE_ITER_PROBE)
4348		return t_probe_show(m, iter);
4349
4350	if (iter->flags & FTRACE_ITER_MOD)
4351		return t_mod_show(m, iter);
4352
4353	if (iter->flags & FTRACE_ITER_PRINTALL) {
4354		if (iter->flags & FTRACE_ITER_NOTRACE)
4355			seq_puts(m, "#### no functions disabled ####\n");
4356		else
4357			seq_puts(m, "#### all functions enabled ####\n");
4358		return 0;
4359	}
4360
4361	rec = iter->func;
4362
4363	if (!rec)
4364		return 0;
4365
4366	if (iter->flags & FTRACE_ITER_ADDRS)
4367		seq_printf(m, "%lx ", rec->ip);
4368
4369	if (print_rec(m, rec->ip)) {
4370		/* This should only happen when a rec is disabled */
4371		WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
4372		seq_putc(m, '\n');
4373		return 0;
4374	}
4375
4376	if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) {
4377		struct ftrace_ops *ops;
4378
4379		seq_printf(m, " (%ld)%s%s%s%s%s",
4380			   ftrace_rec_count(rec),
4381			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
4382			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
4383			   rec->flags & FTRACE_FL_DIRECT ? " D" : "  ",
4384			   rec->flags & FTRACE_FL_CALL_OPS ? " O" : "  ",
4385			   rec->flags & FTRACE_FL_MODIFIED ? " M " : "   ");
4386		if (rec->flags & FTRACE_FL_TRAMP_EN) {
4387			ops = ftrace_find_tramp_ops_any(rec);
4388			if (ops) {
4389				do {
4390					seq_printf(m, "\ttramp: %pS (%pS)",
4391						   (void *)ops->trampoline,
4392						   (void *)ops->func);
4393					add_trampoline_func(m, ops, rec);
4394					ops = ftrace_find_tramp_ops_next(rec, ops);
4395				} while (ops);
4396			} else
4397				seq_puts(m, "\ttramp: ERROR!");
4398		} else {
4399			add_trampoline_func(m, NULL, rec);
4400		}
4401		if (rec->flags & FTRACE_FL_CALL_OPS_EN) {
4402			ops = ftrace_find_unique_ops(rec);
4403			if (ops) {
4404				seq_printf(m, "\tops: %pS (%pS)",
4405					   ops, ops->func);
4406			} else {
4407				seq_puts(m, "\tops: ERROR!");
4408			}
4409		}
4410		if (rec->flags & FTRACE_FL_DIRECT) {
4411			unsigned long direct;
4412
4413			direct = ftrace_find_rec_direct(rec->ip);
4414			if (direct)
4415				seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
4416		}
4417	}
4418
4419	seq_putc(m, '\n');
4420
4421	return 0;
4422}
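
/*
 * Illustrative only: for the enabled_functions file this produces lines
 * shaped roughly like
 *
 *   wake_up_process (1) R  M	tramp: ftrace_regs_caller (some_callback)
 *
 * where the count is the number of ops attached to the record and the
 * letter flags map to REGS, IPMODIFY, DIRECT, CALL_OPS and MODIFIED as
 * printed above.
 */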
4423
4424static const struct seq_operations show_ftrace_seq_ops = {
4425	.start = t_start,
4426	.next = t_next,
4427	.stop = t_stop,
4428	.show = t_show,
4429};
4430
4431static int
4432ftrace_avail_open(struct inode *inode, struct file *file)
4433{
4434	struct ftrace_iterator *iter;
4435	int ret;
4436
4437	ret = security_locked_down(LOCKDOWN_TRACEFS);
4438	if (ret)
4439		return ret;
4440
4441	if (unlikely(ftrace_disabled))
4442		return -ENODEV;
4443
4444	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4445	if (!iter)
4446		return -ENOMEM;
4447
4448	iter->pg = ftrace_pages_start;
4449	iter->ops = &global_ops;
4450
4451	return 0;
4452}
4453
4454static int
4455ftrace_enabled_open(struct inode *inode, struct file *file)
4456{
4457	struct ftrace_iterator *iter;
4458
4459	/*
4460	 * This shows us what functions are currently being
4461	 * traced and by what. Not sure if we want lockdown
4462	 * to hide such critical information from an admin.
4463	 * Although it may show information we don't
4464	 * want people to see, if something is tracing
4465	 * something, we probably want to know about it.
4466	 */
4467
4468	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4469	if (!iter)
4470		return -ENOMEM;
4471
4472	iter->pg = ftrace_pages_start;
4473	iter->flags = FTRACE_ITER_ENABLED;
4474	iter->ops = &global_ops;
4475
4476	return 0;
4477}
4478
4479static int
4480ftrace_touched_open(struct inode *inode, struct file *file)
4481{
4482	struct ftrace_iterator *iter;
4483
4484	/*
4485	 * This shows us what functions have ever been enabled
4486	 * (traced, direct, patched, etc). Not sure if we want lockdown
4487	 * to hide such critical information from an admin.
4488	 * Although it may show information we don't
4489	 * want people to see, if something had traced
4490	 * something, we probably want to know about it.
4491	 */
4492
4493	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4494	if (!iter)
4495		return -ENOMEM;
4496
4497	iter->pg = ftrace_pages_start;
4498	iter->flags = FTRACE_ITER_TOUCHED;
4499	iter->ops = &global_ops;
4500
4501	return 0;
4502}
4503
4504static int
4505ftrace_avail_addrs_open(struct inode *inode, struct file *file)
4506{
4507	struct ftrace_iterator *iter;
4508	int ret;
4509
4510	ret = security_locked_down(LOCKDOWN_TRACEFS);
4511	if (ret)
4512		return ret;
4513
4514	if (unlikely(ftrace_disabled))
4515		return -ENODEV;
4516
4517	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
4518	if (!iter)
4519		return -ENOMEM;
4520
4521	iter->pg = ftrace_pages_start;
4522	iter->flags = FTRACE_ITER_ADDRS;
4523	iter->ops = &global_ops;
4524
4525	return 0;
4526}
4527
4528/**
4529 * ftrace_regex_open - initialize function tracer filter files
4530 * @ops: The ftrace_ops that hold the hash filters
4531 * @flag: The type of filter to process
4532 * @inode: The inode, usually passed in to your open routine
4533 * @file: The file, usually passed in to your open routine
4534 *
4535 * ftrace_regex_open() initializes the filter files for the
4536 * @ops. Depending on @flag it may process the filter hash or
4537 * the notrace hash of @ops. With this called from the open
4538 * routine, you can use ftrace_filter_write() for the write
4539 * routine if @flag has FTRACE_ITER_FILTER set, or
4540 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
4541 * tracing_lseek() should be used as the lseek routine, and
4542 * release must call ftrace_regex_release().
4543 *
4544 * Returns: 0 on success or a negative errno value on failure
4545 */
4546int
4547ftrace_regex_open(struct ftrace_ops *ops, int flag,
4548		  struct inode *inode, struct file *file)
4549{
4550	struct ftrace_iterator *iter;
4551	struct ftrace_hash *hash;
4552	struct list_head *mod_head;
4553	struct trace_array *tr = ops->private;
4554	int ret = -ENOMEM;
4555
4556	ftrace_ops_init(ops);
4557
4558	if (unlikely(ftrace_disabled))
4559		return -ENODEV;
4560
4561	if (tracing_check_open_get_tr(tr))
4562		return -ENODEV;
4563
4564	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4565	if (!iter)
4566		goto out;
4567
4568	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
4569		goto out;
4570
4571	iter->ops = ops;
4572	iter->flags = flag;
4573	iter->tr = tr;
4574
4575	mutex_lock(&ops->func_hash->regex_lock);
4576
4577	if (flag & FTRACE_ITER_NOTRACE) {
4578		hash = ops->func_hash->notrace_hash;
4579		mod_head = tr ? &tr->mod_notrace : NULL;
4580	} else {
4581		hash = ops->func_hash->filter_hash;
4582		mod_head = tr ? &tr->mod_trace : NULL;
4583	}
4584
4585	iter->mod_list = mod_head;
4586
4587	if (file->f_mode & FMODE_WRITE) {
4588		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
4589
4590		if (file->f_flags & O_TRUNC) {
4591			iter->hash = alloc_ftrace_hash(size_bits);
4592			clear_ftrace_mod_list(mod_head);
4593		} else {
4594			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
4595		}
4596
4597		if (!iter->hash) {
4598			trace_parser_put(&iter->parser);
4599			goto out_unlock;
4600		}
4601	} else
4602		iter->hash = hash;
4603
4604	ret = 0;
4605
4606	if (file->f_mode & FMODE_READ) {
4607		iter->pg = ftrace_pages_start;
4608
4609		ret = seq_open(file, &show_ftrace_seq_ops);
4610		if (!ret) {
4611			struct seq_file *m = file->private_data;
4612			m->private = iter;
4613		} else {
4614			/* Failed */
4615			free_ftrace_hash(iter->hash);
4616			trace_parser_put(&iter->parser);
4617		}
4618	} else
4619		file->private_data = iter;
4620
4621 out_unlock:
4622	mutex_unlock(&ops->func_hash->regex_lock);
4623
4624 out:
4625	if (ret) {
4626		kfree(iter);
4627		if (tr)
4628			trace_array_put(tr);
4629	}
4630
4631	return ret;
4632}
4633
4634static int
4635ftrace_filter_open(struct inode *inode, struct file *file)
4636{
4637	struct ftrace_ops *ops = inode->i_private;
4638
4639	/* Checks for tracefs lockdown */
4640	return ftrace_regex_open(ops,
4641			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
4642			inode, file);
4643}
4644
4645static int
4646ftrace_notrace_open(struct inode *inode, struct file *file)
4647{
4648	struct ftrace_ops *ops = inode->i_private;
4649
4650	/* Checks for tracefs lockdown */
4651	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
4652				 inode, file);
4653}
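
/*
 * A minimal sketch (not from the original file) of how the open/write/
 * lseek/release helpers named in the ftrace_regex_open() kernel-doc are
 * meant to be wired together; the kernel's real ftrace_filter_fops
 * follows this same shape:
 */
static const struct file_operations example_filter_fops __maybe_unused = {
	.open		= ftrace_filter_open,
	.read		= seq_read,
	.write		= ftrace_filter_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_regex_release,
};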
4654
4655/* Type for quickly searching ftrace basic regexes (globs) from filter_parse_regex */
4656struct ftrace_glob {
4657	char *search;
4658	unsigned len;
4659	int type;
4660};
4661
4662/*
4663 * If symbols in an architecture don't correspond exactly to the user-visible
4664 * name of what they represent, it is possible to define this function to
4665 * perform the necessary adjustments.
4666 */
4667char * __weak arch_ftrace_match_adjust(char *str, const char *search)
4668{
4669	return str;
4670}
4671
4672static int ftrace_match(char *str, struct ftrace_glob *g)
4673{
4674	int matched = 0;
4675	int slen;
4676
4677	str = arch_ftrace_match_adjust(str, g->search);
4678
4679	switch (g->type) {
4680	case MATCH_FULL:
4681		if (strcmp(str, g->search) == 0)
4682			matched = 1;
4683		break;
4684	case MATCH_FRONT_ONLY:
4685		if (strncmp(str, g->search, g->len) == 0)
4686			matched = 1;
4687		break;
4688	case MATCH_MIDDLE_ONLY:
4689		if (strstr(str, g->search))
4690			matched = 1;
4691		break;
4692	case MATCH_END_ONLY:
4693		slen = strlen(str);
4694		if (slen >= g->len &&
4695		    memcmp(str + slen - g->len, g->search, g->len) == 0)
4696			matched = 1;
4697		break;
4698	case MATCH_GLOB:
4699		if (glob_match(g->search, str))
4700			matched = 1;
4701		break;
4702	}
4703
4704	return matched;
4705}
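
/*
 * For reference, filter_parse_regex() maps user input to the glob types
 * handled above roughly as follows:
 *
 *   "sched_switch"  ->  MATCH_FULL         (exact compare)
 *   "sched_*"       ->  MATCH_FRONT_ONLY   (prefix compare)
 *   "*_switch"      ->  MATCH_END_ONLY     (suffix compare)
 *   "*sched*"       ->  MATCH_MIDDLE_ONLY  (substring search)
 *   "s?hed*"        ->  MATCH_GLOB         (full glob_match())
 */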
4706
4707static int
4708enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
4709{
4710	struct ftrace_func_entry *entry;
4711	int ret = 0;
4712
4713	entry = ftrace_lookup_ip(hash, rec->ip);
4714	if (clear_filter) {
4715		/* Do nothing if it doesn't exist */
4716		if (!entry)
4717			return 0;
4718
4719		free_hash_entry(hash, entry);
4720	} else {
4721		/* Do nothing if it exists */
4722		if (entry)
4723			return 0;
4724		if (add_hash_entry(hash, rec->ip) == NULL)
4725			ret = -ENOMEM;
4726	}
4727	return ret;
4728}
4729
4730static int
4731add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
4732		 int clear_filter)
4733{
4734	long index;
4735	struct ftrace_page *pg;
4736	struct dyn_ftrace *rec;
4737
4738	/* The index starts at 1 */
4739	if (kstrtoul(func_g->search, 0, &index) || --index < 0)
4740		return 0;
4741
4742	do_for_each_ftrace_rec(pg, rec) {
4743		if (pg->index <= index) {
4744			index -= pg->index;
4745			/* this is a double loop, break goes to the next page */
4746			break;
4747		}
4748		rec = &pg->records[index];
4749		enter_record(hash, rec, clear_filter);
4750		return 1;
4751	} while_for_each_ftrace_rec();
4752	return 0;
4753}
4754
4755#ifdef FTRACE_MCOUNT_MAX_OFFSET
4756static int lookup_ip(unsigned long ip, char **modname, char *str)
4757{
4758	unsigned long offset;
4759
4760	kallsyms_lookup(ip, NULL, &offset, modname, str);
4761	if (offset > FTRACE_MCOUNT_MAX_OFFSET)
4762		return -1;
4763	return 0;
4764}
4765#else
4766static int lookup_ip(unsigned long ip, char **modname, char *str)
4767{
4768	kallsyms_lookup(ip, NULL, NULL, modname, str);
4769	return 0;
4770}
4771#endif
4772
4773static int
4774ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
4775		struct ftrace_glob *mod_g, int exclude_mod)
4776{
4777	char str[KSYM_SYMBOL_LEN];
4778	char *modname;
4779
4780	if (lookup_ip(rec->ip, &modname, str)) {
4781		/* This should only happen when a rec is disabled */
4782		WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
4783			     !(rec->flags & FTRACE_FL_DISABLED));
4784		return 0;
4785	}
4786
4787	if (mod_g) {
4788		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
4789
4790		/* blank module name to match all modules */
4791		if (!mod_g->len) {
4792			/* blank module globbing: modname xor exclude_mod */
4793			if (!exclude_mod != !modname)
4794				goto func_match;
4795			return 0;
4796		}
4797
4798		/*
4799		 * exclude_mod is set to trace everything but the given
4800		 * module. If it is set and the module matches, then
4801		 * return 0. If it is not set and the module doesn't match,
4802		 * also return 0. Otherwise, check the function to see if
4803		 * that matches.
4804		 */
4805		if (!mod_matches == !exclude_mod)
4806			return 0;
4807func_match:
4808		/* blank search means to match all funcs in the mod */
4809		if (!func_g->len)
4810			return 1;
4811	}
4812
4813	return ftrace_match(str, func_g);
4814}
4815
4816static int
4817match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
4818{
4819	struct ftrace_page *pg;
4820	struct dyn_ftrace *rec;
4821	struct ftrace_glob func_g = { .type = MATCH_FULL };
4822	struct ftrace_glob mod_g = { .type = MATCH_FULL };
4823	struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
4824	int exclude_mod = 0;
4825	int found = 0;
4826	int ret;
4827	int clear_filter = 0;
4828
4829	if (func) {
4830		func_g.type = filter_parse_regex(func, len, &func_g.search,
4831						 &clear_filter);
4832		func_g.len = strlen(func_g.search);
4833	}
4834
4835	if (mod) {
4836		mod_g.type = filter_parse_regex(mod, strlen(mod),
4837				&mod_g.search, &exclude_mod);
4838		mod_g.len = strlen(mod_g.search);
4839	}
4840
4841	guard(mutex)(&ftrace_lock);
4842
4843	if (unlikely(ftrace_disabled))
4844		return 0;
4845
4846	if (func_g.type == MATCH_INDEX)
4847		return add_rec_by_index(hash, &func_g, clear_filter);
4848
4849	do_for_each_ftrace_rec(pg, rec) {
4850
4851		if (rec->flags & FTRACE_FL_DISABLED)
4852			continue;
4853
4854		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4855			ret = enter_record(hash, rec, clear_filter);
4856			if (ret < 0)
4857				return ret;
4858			found = 1;
4859		}
4860		cond_resched();
4861	} while_for_each_ftrace_rec();
4862
4863	return found;
4864}
4865
4866static int
4867ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
4868{
4869	return match_records(hash, buff, len, NULL);
4870}
4871
4872static void ftrace_ops_update_code(struct ftrace_ops *ops,
4873				   struct ftrace_ops_hash *old_hash)
4874{
4875	struct ftrace_ops *op;
4876
4877	if (!ftrace_enabled)
4878		return;
4879
4880	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4881		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4882		return;
4883	}
4884
4885	/*
4886	 * If this is the shared global_ops filter, then we need to
4887	 * check if another ops that shares it is enabled.
4888	 * If so, we still need to run the modify code.
4889	 */
4890	if (ops->func_hash != &global_ops.local_hash)
4891		return;
4892
4893	do_for_each_ftrace_op(op, ftrace_ops_list) {
4894		if (op->func_hash == &global_ops.local_hash &&
4895		    op->flags & FTRACE_OPS_FL_ENABLED) {
4896			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4897			/* Only need to do this once */
4898			return;
4899		}
4900	} while_for_each_ftrace_op(op);
4901}
4902
4903static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4904					   struct ftrace_hash **orig_hash,
4905					   struct ftrace_hash *hash,
4906					   int enable)
4907{
4908	if (ops->flags & FTRACE_OPS_FL_SUBOP)
4909		return ftrace_hash_move_and_update_subops(ops, orig_hash, hash, enable);
4910
4911	/*
4912	 * If this ops is not enabled, it could be sharing its filters
4913	 * with a subop. If that's the case, update the subop instead of
4914	 * this ops. Shared filters are only allowed to have one ops set
4915	 * at a time, and if we update the ops that is not enabled,
4916	 * it will not affect subops that share it.
4917	 */
4918	if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) {
4919		struct ftrace_ops *op;
4920
4921		/* Check if any other manager's subops map to this hash */
4922		do_for_each_ftrace_op(op, ftrace_ops_list) {
4923			struct ftrace_ops *subops;
4924
4925			list_for_each_entry(subops, &op->subop_list, list) {
4926				if ((subops->flags & FTRACE_OPS_FL_ENABLED) &&
4927				     subops->func_hash == ops->func_hash) {
4928					return ftrace_hash_move_and_update_subops(subops, orig_hash, hash, enable);
4929				}
4930			}
4931		} while_for_each_ftrace_op(op);
4932	}
4933
4934	return __ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
4935}
4936
4937static bool module_exists(const char *module)
4938{
4939	/* All modules have the symbol __this_module */
4940	static const char this_mod[] = "__this_module";
4941	char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
4942	unsigned long val;
4943	int n;
4944
4945	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4946
4947	if (n > sizeof(modname) - 1)
4948		return false;
4949
4950	val = module_kallsyms_lookup_name(modname);
4951	return val != 0;
4952}
4953
4954static int cache_mod(struct trace_array *tr,
4955		     const char *func, char *module, int enable)
4956{
4957	struct ftrace_mod_load *ftrace_mod, *n;
4958	struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4959
4960	guard(mutex)(&ftrace_lock);
4961
4962	/* We do not cache inverse filters */
4963	if (func[0] == '!') {
4964		int ret = -EINVAL;
4965
4966		func++;
4967
4968		/* Look to remove this hash */
4969		list_for_each_entry_safe(ftrace_mod, n, head, list) {
4970			if (strcmp(ftrace_mod->module, module) != 0)
4971				continue;
4972
4973			/* a func of "*" (no func given) matches all */
4974			if (strcmp(func, "*") == 0 ||
4975			    (ftrace_mod->func &&
4976			     strcmp(ftrace_mod->func, func) == 0)) {
4977				ret = 0;
4978				free_ftrace_mod(ftrace_mod);
4979				continue;
4980			}
4981		}
4982		return ret;
4983	}
4984
4985	/* We only care about modules that have not been loaded yet */
4986	if (module_exists(module))
4987		return -EINVAL;
4988
4989	/* Save this string off, and execute it when the module is loaded */
4990	return ftrace_add_mod(tr, func, module, enable);
4991}
4992
4993static int
4994ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4995		 int reset, int enable);
4996
4997#ifdef CONFIG_MODULES
4998static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4999			     char *mod, bool enable)
5000{
5001	struct ftrace_mod_load *ftrace_mod, *n;
5002	struct ftrace_hash **orig_hash, *new_hash;
5003	LIST_HEAD(process_mods);
5004	char *func;
5005
5006	mutex_lock(&ops->func_hash->regex_lock);
5007
5008	if (enable)
5009		orig_hash = &ops->func_hash->filter_hash;
5010	else
5011		orig_hash = &ops->func_hash->notrace_hash;
5012
5013	new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
5014					      *orig_hash);
5015	if (!new_hash)
5016		goto out; /* warn? */
5017
5018	mutex_lock(&ftrace_lock);
5019
5020	list_for_each_entry_safe(ftrace_mod, n, head, list) {
5021
5022		if (strcmp(ftrace_mod->module, mod) != 0)
5023			continue;
5024
5025		if (ftrace_mod->func)
5026			func = kstrdup(ftrace_mod->func, GFP_KERNEL);
5027		else
5028			func = kstrdup("*", GFP_KERNEL);
5029
5030		if (!func) /* warn? */
5031			continue;
5032
5033		list_move(&ftrace_mod->list, &process_mods);
5034
5035		/* Use the newly allocated func, as it may be "*" */
5036		kfree(ftrace_mod->func);
5037		ftrace_mod->func = func;
5038	}
5039
5040	mutex_unlock(&ftrace_lock);
5041
5042	list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
5043
5044		func = ftrace_mod->func;
5045
5046		/* Grabs ftrace_lock, which is why we have this extra step */
5047		match_records(new_hash, func, strlen(func), mod);
5048		free_ftrace_mod(ftrace_mod);
5049	}
5050
5051	if (enable && list_empty(head))
5052		new_hash->flags &= ~FTRACE_HASH_FL_MOD;
5053
5054	mutex_lock(&ftrace_lock);
5055
5056	ftrace_hash_move_and_update_ops(ops, orig_hash,
5057					      new_hash, enable);
5058	mutex_unlock(&ftrace_lock);
5059
5060 out:
5061	mutex_unlock(&ops->func_hash->regex_lock);
5062
5063	free_ftrace_hash(new_hash);
5064}
5065
5066static void process_cached_mods(const char *mod_name)
5067{
5068	struct trace_array *tr;
5069	char *mod;
5070
5071	mod = kstrdup(mod_name, GFP_KERNEL);
5072	if (!mod)
5073		return;
5074
5075	mutex_lock(&trace_types_lock);
5076	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5077		if (!list_empty(&tr->mod_trace))
5078			process_mod_list(&tr->mod_trace, tr->ops, mod, true);
5079		if (!list_empty(&tr->mod_notrace))
5080			process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
5081	}
5082	mutex_unlock(&trace_types_lock);
5083
5084	kfree(mod);
5085}
5086#endif
5087
5088/*
5089 * We register the module command as a template to show others how
5090 * to register a command as well.
5091 */
5092
5093static int
5094ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
5095		    char *func_orig, char *cmd, char *module, int enable)
5096{
5097	char *func;
5098	int ret;
5099
5100	if (!tr)
5101		return -ENODEV;
5102
5103	/* match_records() modifies func, and we need the original */
5104	func = kstrdup(func_orig, GFP_KERNEL);
5105	if (!func)
5106		return -ENOMEM;
5107
5108	/*
5109	 * cmd == 'mod' because we only registered this func
5110	 * for the 'mod' ftrace_func_command.
5111	 * But if you register one func with multiple commands,
5112	 * you can tell which command was used by the cmd
5113	 * parameter.
5114	 */
5115	ret = match_records(hash, func, strlen(func), module);
5116	kfree(func);
5117
5118	if (!ret)
5119		return cache_mod(tr, func_orig, module, enable);
5120	if (ret < 0)
5121		return ret;
5122	return 0;
5123}
5124
5125static struct ftrace_func_command ftrace_mod_cmd = {
5126	.name			= "mod",
5127	.func			= ftrace_mod_callback,
5128};
5129
5130static int __init ftrace_mod_cmd_init(void)
5131{
5132	return register_ftrace_command(&ftrace_mod_cmd);
5133}
5134core_initcall(ftrace_mod_cmd_init);
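
/*
 * With the "mod" command registered, a write such as
 *
 *   echo 'write*:mod:ext4' > set_ftrace_filter
 *
 * limits the filter to functions matching "write*" inside the ext4
 * module, and cache_mod() above remembers the request when the module
 * has not been loaded yet.
 */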
5135
5136static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
5137				      struct ftrace_ops *op, struct ftrace_regs *fregs)
5138{
5139	struct ftrace_probe_ops *probe_ops;
5140	struct ftrace_func_probe *probe;
5141
5142	probe = container_of(op, struct ftrace_func_probe, ops);
5143	probe_ops = probe->probe_ops;
5144
5145	/*
5146	 * Disable preemption for these calls to prevent an RCU grace
5147	 * period. This syncs the hash iteration and freeing of items
5148	 * on the hash. rcu_read_lock is too dangerous here.
5149	 */
5150	preempt_disable_notrace();
5151	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
5152	preempt_enable_notrace();
5153}
5154
5155struct ftrace_func_map {
5156	struct ftrace_func_entry	entry;
5157	void				*data;
5158};
5159
5160struct ftrace_func_mapper {
5161	struct ftrace_hash		hash;
5162};
5163
5164/**
5165 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
5166 *
5167 * Returns: a ftrace_func_mapper descriptor that can be used to map ips to data.
5168 */
5169struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
5170{
5171	struct ftrace_hash *hash;
5172
5173	/*
5174	 * The mapper is simply a ftrace_hash, but since the entries
5175	 * in the hash are not ftrace_func_entry type, we define it
5176	 * as a separate structure.
5177	 */
5178	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5179	return (struct ftrace_func_mapper *)hash;
5180}
5181
5182/**
5183 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
5184 * @mapper: The mapper that has the ip maps
5185 * @ip: the instruction pointer to find the data for
5186 *
5187 * Returns: the data mapped to @ip if found, otherwise NULL. The return
5188 * is actually the address of the mapper data pointer. The address is
5189 * returned for use cases where the data is no bigger than a long, and
5190 * the user can use the data pointer as its data instead of having to
5191 * allocate more memory for the reference.
5192 */
5193void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
5194				  unsigned long ip)
5195{
5196	struct ftrace_func_entry *entry;
5197	struct ftrace_func_map *map;
5198
5199	entry = ftrace_lookup_ip(&mapper->hash, ip);
5200	if (!entry)
5201		return NULL;
5202
5203	map = (struct ftrace_func_map *)entry;
5204	return &map->data;
5205}
5206
5207/**
5208 * ftrace_func_mapper_add_ip - Map some data to an ip
5209 * @mapper: The mapper that has the ip maps
5210 * @ip: The instruction pointer address to map @data to
5211 * @data: The data to map to @ip
5212 *
5213 * Returns: 0 on success otherwise an error.
5214 */
5215int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
5216			      unsigned long ip, void *data)
5217{
5218	struct ftrace_func_entry *entry;
5219	struct ftrace_func_map *map;
5220
5221	entry = ftrace_lookup_ip(&mapper->hash, ip);
5222	if (entry)
5223		return -EBUSY;
5224
5225	map = kmalloc(sizeof(*map), GFP_KERNEL);
5226	if (!map)
5227		return -ENOMEM;
5228
5229	map->entry.ip = ip;
5230	map->data = data;
5231
5232	__add_hash_entry(&mapper->hash, &map->entry);
5233
5234	return 0;
5235}
5236
5237/**
5238 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
5239 * @mapper: The mapper that has the ip maps
5240 * @ip: The instruction pointer address to remove the data from
5241 *
5242 * Returns: the data if it is found, otherwise NULL.
5243 * Note, if the data pointer is used as the data itself (see
5244 * ftrace_func_mapper_find_ip()), then the return value may be meaningless
5245 * if the data pointer was set to zero.
5246 */
5247void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
5248				   unsigned long ip)
5249{
5250	struct ftrace_func_entry *entry;
5251	struct ftrace_func_map *map;
5252	void *data;
5253
5254	entry = ftrace_lookup_ip(&mapper->hash, ip);
5255	if (!entry)
5256		return NULL;
5257
5258	map = (struct ftrace_func_map *)entry;
5259	data = map->data;
5260
5261	remove_hash_entry(&mapper->hash, entry);
5262	kfree(entry);
5263
5264	return data;
5265}
5266
5267/**
5268 * free_ftrace_func_mapper - free a mapping of ips and data
5269 * @mapper: The mapper that has the ip maps
5270 * @free_func: A function to be called on each data item.
5271 *
5272 * This is used to free the function mapper. The @free_func is optional
5273 * and can be used if the data needs to be freed as well.
5274 */
5275void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
5276			     ftrace_mapper_func free_func)
5277{
5278	struct ftrace_func_entry *entry;
5279	struct ftrace_func_map *map;
5280	struct hlist_head *hhd;
5281	int size, i;
5282
5283	if (!mapper)
5284		return;
5285
5286	if (free_func && mapper->hash.count) {
5287		size = 1 << mapper->hash.size_bits;
5288		for (i = 0; i < size; i++) {
5289			hhd = &mapper->hash.buckets[i];
5290			hlist_for_each_entry(entry, hhd, hlist) {
5291				map = (struct ftrace_func_map *)entry;
5292				free_func(map);
5293			}
5294		}
5295	}
5296	free_ftrace_hash(&mapper->hash);
5297}
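
/*
 * A minimal, illustrative sketch of the mapper API above (not part of
 * the original file): it keeps a hit counter directly in the data
 * pointer slot, which the ftrace_func_mapper_find_ip() kernel-doc
 * allows for data no bigger than a long.
 */
static __maybe_unused void example_mapper_usage(unsigned long ip)
{
	struct ftrace_func_mapper *mapper;
	void **data;

	mapper = allocate_ftrace_func_mapper();
	if (!mapper)
		return;

	/* Map @ip to an initial count of 1 */
	if (ftrace_func_mapper_add_ip(mapper, ip, (void *)1UL))
		goto out;

	/* Bump the count in place; no extra allocation is needed */
	data = ftrace_func_mapper_find_ip(mapper, ip);
	if (data)
		(*(unsigned long *)data)++;
out:
	/* No per-entry free function needed, the "data" is not a pointer */
	free_ftrace_func_mapper(mapper, NULL);
}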
5298
5299static void release_probe(struct ftrace_func_probe *probe)
5300{
5301	struct ftrace_probe_ops *probe_ops;
5302
5303	guard(mutex)(&ftrace_lock);
5304
5305	WARN_ON(probe->ref <= 0);
5306
5307	/* Subtract the ref that was used to protect this instance */
5308	probe->ref--;
5309
5310	if (!probe->ref) {
5311		probe_ops = probe->probe_ops;
5312		/*
5313		 * Sending zero as ip tells probe_ops to free
5314		 * the probe->data itself
5315		 */
5316		if (probe_ops->free)
5317			probe_ops->free(probe_ops, probe->tr, 0, probe->data);
5318		list_del(&probe->list);
5319		kfree(probe);
5320	}
5321}
5322
5323static void acquire_probe_locked(struct ftrace_func_probe *probe)
5324{
5325	/*
5326	 * Add one ref to keep it from being freed when releasing the
5327	 * ftrace_lock mutex.
5328	 */
5329	probe->ref++;
5330}
5331
5332int
5333register_ftrace_function_probe(char *glob, struct trace_array *tr,
5334			       struct ftrace_probe_ops *probe_ops,
5335			       void *data)
5336{
5337	struct ftrace_func_probe *probe = NULL, *iter;
5338	struct ftrace_func_entry *entry;
5339	struct ftrace_hash **orig_hash;
5340	struct ftrace_hash *old_hash;
5341	struct ftrace_hash *hash;
5342	int count = 0;
5343	int size;
5344	int ret;
5345	int i;
5346
5347	if (WARN_ON(!tr))
5348		return -EINVAL;
5349
5350	/* We do not support '!' for function probes */
5351	if (WARN_ON(glob[0] == '!'))
5352		return -EINVAL;
5353
5354
5355	mutex_lock(&ftrace_lock);
5356	/* Check if the probe_ops is already registered */
5357	list_for_each_entry(iter, &tr->func_probes, list) {
5358		if (iter->probe_ops == probe_ops) {
5359			probe = iter;
5360			break;
5361		}
5362	}
5363	if (!probe) {
5364		probe = kzalloc(sizeof(*probe), GFP_KERNEL);
5365		if (!probe) {
5366			mutex_unlock(&ftrace_lock);
5367			return -ENOMEM;
5368		}
5369		probe->probe_ops = probe_ops;
5370		probe->ops.func = function_trace_probe_call;
5371		probe->tr = tr;
5372		ftrace_ops_init(&probe->ops);
5373		list_add(&probe->list, &tr->func_probes);
5374	}
5375
5376	acquire_probe_locked(probe);
5377
5378	mutex_unlock(&ftrace_lock);
5379
5380	/*
5381	 * Note, there's a small window here where the func_hash->filter_hash
5382	 * may be NULL or empty. Need to be careful when reading the loop.
5383	 */
5384	mutex_lock(&probe->ops.func_hash->regex_lock);
5385
5386	orig_hash = &probe->ops.func_hash->filter_hash;
5387	old_hash = *orig_hash;
5388	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
5389
5390	if (!hash) {
5391		ret = -ENOMEM;
5392		goto out;
5393	}
5394
5395	ret = ftrace_match_records(hash, glob, strlen(glob));
5396
5397	/* Nothing found? */
5398	if (!ret)
5399		ret = -EINVAL;
5400
5401	if (ret < 0)
5402		goto out;
5403
5404	size = 1 << hash->size_bits;
5405	for (i = 0; i < size; i++) {
5406		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5407			if (ftrace_lookup_ip(old_hash, entry->ip))
5408				continue;
5409			/*
5410			 * The caller might want to do something special
5411			 * for each function we find. We call the callback
5412			 * to give the caller an opportunity to do so.
5413			 */
5414			if (probe_ops->init) {
5415				ret = probe_ops->init(probe_ops, tr,
5416						      entry->ip, data,
5417						      &probe->data);
5418				if (ret < 0) {
5419					if (probe_ops->free && count)
5420						probe_ops->free(probe_ops, tr,
5421								0, probe->data);
5422					probe->data = NULL;
5423					goto out;
5424				}
5425			}
5426			count++;
5427		}
5428	}
5429
5430	mutex_lock(&ftrace_lock);
5431
5432	if (!count) {
5433		/* Nothing was added? */
5434		ret = -EINVAL;
5435		goto out_unlock;
5436	}
5437
5438	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
5439					      hash, 1);
5440	if (ret < 0)
5441		goto err_unlock;
5442
5443	/* One ref for each new function traced */
5444	probe->ref += count;
5445
5446	if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
5447		ret = ftrace_startup(&probe->ops, 0);
5448
5449 out_unlock:
5450	mutex_unlock(&ftrace_lock);
5451
5452	if (!ret)
5453		ret = count;
5454 out:
5455	mutex_unlock(&probe->ops.func_hash->regex_lock);
5456	free_ftrace_hash(hash);
5457
5458	release_probe(probe);
5459
5460	return ret;
5461
5462 err_unlock:
5463	if (!probe_ops->free || !count)
5464		goto out_unlock;
5465
5466	/* Failed to do the move, need to call the free functions */
5467	for (i = 0; i < size; i++) {
5468		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5469			if (ftrace_lookup_ip(old_hash, entry->ip))
5470				continue;
5471			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
5472		}
5473	}
5474	goto out_unlock;
5475}
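
/*
 * Illustrative sketch of a probe_ops user (not from the original file).
 * The callback signature matches what function_trace_probe_call()
 * invokes above; registration would look like
 * register_ftrace_function_probe("sched*", tr, &example_probe_ops, NULL):
 */
static __maybe_unused void
example_probe_func(unsigned long ip, unsigned long parent_ip,
		   struct trace_array *tr, struct ftrace_probe_ops *ops,
		   void *data)
{
	/* Called from the function tracer with preemption disabled */
}

static __maybe_unused struct ftrace_probe_ops example_probe_ops = {
	.func	= example_probe_func,
};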
5476
5477int
5478unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
5479				      struct ftrace_probe_ops *probe_ops)
5480{
5481	struct ftrace_func_probe *probe = NULL, *iter;
5482	struct ftrace_ops_hash old_hash_ops;
5483	struct ftrace_func_entry *entry;
5484	struct ftrace_glob func_g;
5485	struct ftrace_hash **orig_hash;
5486	struct ftrace_hash *old_hash;
5487	struct ftrace_hash *hash = NULL;
5488	struct hlist_node *tmp;
5489	struct hlist_head hhd;
5490	char str[KSYM_SYMBOL_LEN];
5491	int count = 0;
5492	int i, ret = -ENODEV;
5493	int size;
5494
5495	if (!glob || !strlen(glob) || !strcmp(glob, "*"))
5496		func_g.search = NULL;
5497	else {
5498		int not;
5499
5500		func_g.type = filter_parse_regex(glob, strlen(glob),
5501						 &func_g.search, &not);
5502		func_g.len = strlen(func_g.search);
5503
5504		/* we do not support '!' for function probes */
5505		if (WARN_ON(not))
5506			return -EINVAL;
5507	}
5508
5509	mutex_lock(&ftrace_lock);
5510	/* Check if the probe_ops is already registered */
5511	list_for_each_entry(iter, &tr->func_probes, list) {
5512		if (iter->probe_ops == probe_ops) {
5513			probe = iter;
5514			break;
5515		}
5516	}
5517	if (!probe)
5518		goto err_unlock_ftrace;
5519
5520	ret = -EINVAL;
5521	if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
5522		goto err_unlock_ftrace;
5523
5524	acquire_probe_locked(probe);
5525
5526	mutex_unlock(&ftrace_lock);
5527
5528	mutex_lock(&probe->ops.func_hash->regex_lock);
5529
5530	orig_hash = &probe->ops.func_hash->filter_hash;
5531	old_hash = *orig_hash;
5532
5533	if (ftrace_hash_empty(old_hash))
5534		goto out_unlock;
5535
5536	old_hash_ops.filter_hash = old_hash;
5537	/* Probes only have filters */
5538	old_hash_ops.notrace_hash = NULL;
5539
5540	ret = -ENOMEM;
5541	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
5542	if (!hash)
5543		goto out_unlock;
5544
5545	INIT_HLIST_HEAD(&hhd);
5546
5547	size = 1 << hash->size_bits;
5548	for (i = 0; i < size; i++) {
5549		hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
5550
5551			if (func_g.search) {
5552				kallsyms_lookup(entry->ip, NULL, NULL,
5553						NULL, str);
5554				if (!ftrace_match(str, &func_g))
5555					continue;
5556			}
5557			count++;
5558			remove_hash_entry(hash, entry);
5559			hlist_add_head(&entry->hlist, &hhd);
5560		}
5561	}
5562
5563	/* Nothing found? */
5564	if (!count) {
5565		ret = -EINVAL;
5566		goto out_unlock;
5567	}
5568
5569	mutex_lock(&ftrace_lock);
5570
5571	WARN_ON(probe->ref < count);
5572
5573	probe->ref -= count;
5574
5575	if (ftrace_hash_empty(hash))
5576		ftrace_shutdown(&probe->ops, 0);
5577
5578	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
5579					      hash, 1);
5580
5581	/* still need to update the function call sites */
5582	if (ftrace_enabled && !ftrace_hash_empty(hash))
5583		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
5584				       &old_hash_ops);
5585	synchronize_rcu();
5586
5587	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
5588		hlist_del(&entry->hlist);
5589		if (probe_ops->free)
5590			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
5591		kfree(entry);
5592	}
5593	mutex_unlock(&ftrace_lock);
5594
5595 out_unlock:
5596	mutex_unlock(&probe->ops.func_hash->regex_lock);
5597	free_ftrace_hash(hash);
5598
5599	release_probe(probe);
5600
5601	return ret;
5602
5603 err_unlock_ftrace:
5604	mutex_unlock(&ftrace_lock);
5605	return ret;
5606}
5607
5608void clear_ftrace_function_probes(struct trace_array *tr)
5609{
5610	struct ftrace_func_probe *probe, *n;
5611
5612	list_for_each_entry_safe(probe, n, &tr->func_probes, list)
5613		unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
5614}
5615
5616static LIST_HEAD(ftrace_commands);
5617static DEFINE_MUTEX(ftrace_cmd_mutex);
5618
5619/*
5620 * Currently we only register ftrace commands from __init, so mark this
5621 * __init too.
5622 */
5623__init int register_ftrace_command(struct ftrace_func_command *cmd)
5624{
5625	struct ftrace_func_command *p;
5626	int ret = 0;
5627
5628	mutex_lock(&ftrace_cmd_mutex);
5629	list_for_each_entry(p, &ftrace_commands, list) {
5630		if (strcmp(cmd->name, p->name) == 0) {
5631			ret = -EBUSY;
5632			goto out_unlock;
5633		}
5634	}
5635	list_add(&cmd->list, &ftrace_commands);
5636 out_unlock:
5637	mutex_unlock(&ftrace_cmd_mutex);
5638
5639	return ret;
5640}
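
/*
 * Illustrative sketch (not from the original file): a command is just a
 * name plus a callback with the same signature as ftrace_mod_callback()
 * above. Registered from __init, it would make "func:example:param"
 * writes to set_ftrace_filter reach example_cmd_callback():
 */
static __maybe_unused int
example_cmd_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *func, char *cmd, char *param, int enable)
{
	/* Parse @param and update @hash here */
	return 0;
}

static __maybe_unused struct ftrace_func_command example_cmd = {
	.name	= "example",
	.func	= example_cmd_callback,
};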
5641
5642/*
5643 * Currently we only unregister ftrace commands from __init, so mark
5644 * this __init too.
5645 */
5646__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
5647{
5648	struct ftrace_func_command *p, *n;
5649	int ret = -ENODEV;
5650
5651	mutex_lock(&ftrace_cmd_mutex);
5652	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
5653		if (strcmp(cmd->name, p->name) == 0) {
5654			ret = 0;
5655			list_del_init(&p->list);
5656			goto out_unlock;
5657		}
5658	}
5659 out_unlock:
5660	mutex_unlock(&ftrace_cmd_mutex);
5661
5662	return ret;
5663}
5664
5665static int ftrace_process_regex(struct ftrace_iterator *iter,
5666				char *buff, int len, int enable)
5667{
5668	struct ftrace_hash *hash = iter->hash;
5669	struct trace_array *tr = iter->ops->private;
5670	char *func, *command, *next = buff;
5671	struct ftrace_func_command *p;
5672	int ret = -EINVAL;
5673
5674	func = strsep(&next, ":");
5675
5676	if (!next) {
5677		ret = ftrace_match_records(hash, func, len);
5678		if (!ret)
5679			ret = -EINVAL;
5680		if (ret < 0)
5681			return ret;
5682		return 0;
5683	}
5684
5685	/* command found */
5686
5687	command = strsep(&next, ":");
5688
5689	mutex_lock(&ftrace_cmd_mutex);
5690	list_for_each_entry(p, &ftrace_commands, list) {
5691		if (strcmp(p->name, command) == 0) {
5692			ret = p->func(tr, hash, func, command, next, enable);
5693			goto out_unlock;
5694		}
5695	}
5696 out_unlock:
5697	mutex_unlock(&ftrace_cmd_mutex);
5698
5699	return ret;
5700}
5701
5702static ssize_t
5703ftrace_regex_write(struct file *file, const char __user *ubuf,
5704		   size_t cnt, loff_t *ppos, int enable)
5705{
5706	struct ftrace_iterator *iter;
5707	struct trace_parser *parser;
5708	ssize_t ret, read;
5709
5710	if (!cnt)
5711		return 0;
5712
5713	if (file->f_mode & FMODE_READ) {
5714		struct seq_file *m = file->private_data;
5715		iter = m->private;
5716	} else
5717		iter = file->private_data;
5718
5719	if (unlikely(ftrace_disabled))
5720		return -ENODEV;
5721
5722	/* iter->hash is a local copy, so we don't need regex_lock */
5723
5724	parser = &iter->parser;
5725	read = trace_get_user(parser, ubuf, cnt, ppos);
5726
5727	if (read >= 0 && trace_parser_loaded(parser) &&
5728	    !trace_parser_cont(parser)) {
5729		ret = ftrace_process_regex(iter, parser->buffer,
5730					   parser->idx, enable);
5731		trace_parser_clear(parser);
5732		if (ret < 0)
5733			goto out;
5734	}
5735
5736	ret = read;
5737 out:
5738	return ret;
5739}
5740
5741ssize_t
5742ftrace_filter_write(struct file *file, const char __user *ubuf,
5743		    size_t cnt, loff_t *ppos)
5744{
5745	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
5746}
5747
5748ssize_t
5749ftrace_notrace_write(struct file *file, const char __user *ubuf,
5750		     size_t cnt, loff_t *ppos)
5751{
5752	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
5753}
5754
5755static int
5756__ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
5757{
5758	struct ftrace_func_entry *entry;
5759
5760	ip = ftrace_location(ip);
5761	if (!ip)
5762		return -EINVAL;
5763
5764	if (remove) {
5765		entry = ftrace_lookup_ip(hash, ip);
5766		if (!entry)
5767			return -ENOENT;
5768		free_hash_entry(hash, entry);
5769		return 0;
5770	} else if (__ftrace_lookup_ip(hash, ip) != NULL) {
5771		/* Already exists */
5772		return 0;
5773	}
5774
5775	entry = add_hash_entry(hash, ip);
5776	return entry ? 0 :  -ENOMEM;
5777}
5778
5779static int
5780ftrace_match_addr(struct ftrace_hash *hash, unsigned long *ips,
5781		  unsigned int cnt, int remove)
5782{
5783	unsigned int i;
5784	int err;
5785
5786	for (i = 0; i < cnt; i++) {
5787		err = __ftrace_match_addr(hash, ips[i], remove);
5788		if (err) {
5789			/*
5790			 * This expects that @hash is a temporary hash, and if this
5791			 * fails the caller must free @hash.
5792			 */
5793			return err;
5794		}
5795	}
5796	return 0;
5797}
5798
5799static int
5800ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
5801		unsigned long *ips, unsigned int cnt,
5802		int remove, int reset, int enable)
5803{
5804	struct ftrace_hash **orig_hash;
5805	struct ftrace_hash *hash;
5806	int ret;
5807
5808	if (unlikely(ftrace_disabled))
5809		return -ENODEV;
5810
5811	mutex_lock(&ops->func_hash->regex_lock);
5812
5813	if (enable)
5814		orig_hash = &ops->func_hash->filter_hash;
5815	else
5816		orig_hash = &ops->func_hash->notrace_hash;
5817
5818	if (reset)
5819		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5820	else
5821		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
5822
5823	if (!hash) {
5824		ret = -ENOMEM;
5825		goto out_regex_unlock;
5826	}
5827
5828	if (buf && !ftrace_match_records(hash, buf, len)) {
5829		ret = -EINVAL;
5830		goto out_regex_unlock;
5831	}
5832	if (ips) {
5833		ret = ftrace_match_addr(hash, ips, cnt, remove);
5834		if (ret < 0)
5835			goto out_regex_unlock;
5836	}
5837
5838	mutex_lock(&ftrace_lock);
5839	ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
5840	mutex_unlock(&ftrace_lock);
5841
5842 out_regex_unlock:
5843	mutex_unlock(&ops->func_hash->regex_lock);
5844
5845	free_ftrace_hash(hash);
5846	return ret;
5847}
5848
5849static int
5850ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
5851		int remove, int reset, int enable)
5852{
5853	return ftrace_set_hash(ops, NULL, 0, ips, cnt, remove, reset, enable);
5854}
5855
5856#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
5857
5858static int register_ftrace_function_nolock(struct ftrace_ops *ops);
5859
5860/*
5861 * If there are multiple ftrace_ops, use SAVE_REGS by default, so that the
5862 * direct call will be jumped to from ftrace_regs_caller. Only if the
5863 * architecture does not support ftrace_regs_caller but does support direct
5864 * calls, use SAVE_ARGS so that the jump is made from ftrace_caller instead.
5865 */
5866#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
5867#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_ARGS)
5868#else
5869#define MULTI_FLAGS (FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS)
5870#endif
5871
5872static int check_direct_multi(struct ftrace_ops *ops)
5873{
5874	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5875		return -EINVAL;
5876	if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5877		return -EINVAL;
5878	return 0;
5879}
5880
5881static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5882{
5883	struct ftrace_func_entry *entry, *del;
5884	int size, i;
5885
5886	size = 1 << hash->size_bits;
5887	for (i = 0; i < size; i++) {
5888		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5889			del = __ftrace_lookup_ip(direct_functions, entry->ip);
5890			if (del && del->direct == addr) {
5891				remove_hash_entry(direct_functions, del);
5892				kfree(del);
5893			}
5894		}
5895	}
5896}
5897
5898static void register_ftrace_direct_cb(struct rcu_head *rhp)
5899{
5900	struct ftrace_hash *fhp = container_of(rhp, struct ftrace_hash, rcu);
5901
5902	free_ftrace_hash(fhp);
5903}
5904
5905/**
5906 * register_ftrace_direct - Call a custom trampoline directly
5907 * for multiple functions registered in @ops
5908 * @ops: The address of the struct ftrace_ops object
5909 * @addr: The address of the trampoline to call at @ops functions
5910 *
5911 * This is used to connect direct calls to @addr from the nop locations
5912 * of the functions registered in @ops (set up by the ftrace_set_filter_ip()
5913 * function).
5914 *
5915 * The location that it calls (@addr) must be able to handle a direct call,
5916 * save the parameters of the function being traced, and restore them
5917 * (or inject new ones if needed) before returning.
5918 *
5919 * Returns:
5920 *  0 on success
5921 *  -EINVAL  - The @ops object was already registered with this call or
5922 *             when there are no functions in @ops object.
5923 *  -EBUSY   - Another direct function is already attached (there can be only one)
5924 *  -ENODEV  - @ip does not point to a ftrace nop location (or not supported)
5925 *  -ENOMEM  - There was an allocation failure.
5926 */
5927int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
5928{
5929	struct ftrace_hash *hash, *new_hash = NULL, *free_hash = NULL;
5930	struct ftrace_func_entry *entry, *new;
5931	int err = -EBUSY, size, i;
5932
5933	if (ops->func || ops->trampoline)
5934		return -EINVAL;
5935	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5936		return -EINVAL;
5937	if (ops->flags & FTRACE_OPS_FL_ENABLED)
5938		return -EINVAL;
5939
5940	hash = ops->func_hash->filter_hash;
5941	if (ftrace_hash_empty(hash))
5942		return -EINVAL;
5943
5944	mutex_lock(&direct_mutex);
5945
5946	/* Make sure requested entries are not already registered. */
5947	size = 1 << hash->size_bits;
5948	for (i = 0; i < size; i++) {
5949		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5950			if (ftrace_find_rec_direct(entry->ip))
5951				goto out_unlock;
5952		}
5953	}
5954
5955	err = -ENOMEM;
5956
5957	/* Make a copy of the hash to place both the new and the old entries in */
5958	size = hash->count + direct_functions->count;
5959	if (size > 32)
5960		size = 32;
5961	new_hash = alloc_ftrace_hash(fls(size));
5962	if (!new_hash)
5963		goto out_unlock;
5964
5965	/* Now copy over the existing direct entries */
5966	size = 1 << direct_functions->size_bits;
5967	for (i = 0; i < size; i++) {
5968		hlist_for_each_entry(entry, &direct_functions->buckets[i], hlist) {
5969			new = add_hash_entry(new_hash, entry->ip);
5970			if (!new)
5971				goto out_unlock;
5972			new->direct = entry->direct;
5973		}
5974	}
5975
5976	/* ... and add the new entries */
5977	size = 1 << hash->size_bits;
5978	for (i = 0; i < size; i++) {
5979		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5980			new = add_hash_entry(new_hash, entry->ip);
5981			if (!new)
5982				goto out_unlock;
5983			/* Update both the copy and the hash entry */
5984			new->direct = addr;
5985			entry->direct = addr;
5986		}
5987	}
5988
5989	free_hash = direct_functions;
5990	rcu_assign_pointer(direct_functions, new_hash);
5991	new_hash = NULL;
5992
5993	ops->func = call_direct_funcs;
5994	ops->flags = MULTI_FLAGS;
5995	ops->trampoline = FTRACE_REGS_ADDR;
5996	ops->direct_call = addr;
5997
5998	err = register_ftrace_function_nolock(ops);
5999
6000 out_unlock:
6001	mutex_unlock(&direct_mutex);
6002
6003	if (free_hash && free_hash != EMPTY_HASH)
6004		call_rcu_tasks(&free_hash->rcu, register_ftrace_direct_cb);
6005
6006	if (new_hash)
6007		free_ftrace_hash(new_hash);
6008
6009	return err;
6010}
6011EXPORT_SYMBOL_GPL(register_ftrace_direct);
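/*
 * Example usage (an illustrative sketch, not part of this file): attach a
 * direct trampoline to a single function.  The names direct_ops, my_ip and
 * my_tramp are hypothetical, and my_tramp must point to arch-specific
 * trampoline code (see samples/ftrace/ for working trampolines).
 *
 *	static struct ftrace_ops direct_ops;
 *
 *	static int attach_my_tramp(unsigned long my_ip, unsigned long my_tramp)
 *	{
 *		int ret;
 *
 *		// select the function(s) the trampoline attaches to
 *		ret = ftrace_set_filter_ip(&direct_ops, my_ip, 0, 0);
 *		if (ret)
 *			return ret;
 *
 *		return register_ftrace_direct(&direct_ops, my_tramp);
 *	}
 */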
6012
6013/**
6014 * unregister_ftrace_direct - Remove calls to custom trampoline
6015 * previously registered by register_ftrace_direct for @ops object.
6016 * @ops: The address of the struct ftrace_ops object
6017 * @addr: The address of the direct function that is called by the @ops functions
6018 * @free_filters: Set to true to remove all filters for the ftrace_ops, false otherwise
6019 *
6020 * This is used to remove direct calls to @addr from the nop locations
6021 * of the functions registered in @ops (set by the ftrace_set_filter_ip()
6022 * function).
6023 *
6024 * Returns:
6025 *  0 on success
6026 *  -EINVAL - The @ops object was not properly registered.
6027 */
6028int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
6029			     bool free_filters)
6030{
6031	struct ftrace_hash *hash = ops->func_hash->filter_hash;
6032	int err;
6033
6034	if (check_direct_multi(ops))
6035		return -EINVAL;
6036	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6037		return -EINVAL;
6038
6039	mutex_lock(&direct_mutex);
6040	err = unregister_ftrace_function(ops);
6041	remove_direct_functions_hash(hash, addr);
6042	mutex_unlock(&direct_mutex);
6043
6044	/* cleanup for a possible subsequent register call */
6045	ops->func = NULL;
6046	ops->trampoline = 0;
6047
6048	if (free_filters)
6049		ftrace_free_filter(ops);
6050	return err;
6051}
6052EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
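/*
 * Continuing the sketch above (hypothetical names): detach the direct
 * trampoline and free the filters so that direct_ops can be registered
 * again or freed.
 *
 *	static int detach_my_tramp(unsigned long my_tramp)
 *	{
 *		return unregister_ftrace_direct(&direct_ops, my_tramp, true);
 *	}
 */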
6053
6054static int
6055__modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
6056{
6057	struct ftrace_hash *hash;
6058	struct ftrace_func_entry *entry, *iter;
6059	static struct ftrace_ops tmp_ops = {
6060		.func		= ftrace_stub,
6061		.flags		= FTRACE_OPS_FL_STUB,
6062	};
6063	int i, size;
6064	int err;
6065
6066	lockdep_assert_held_once(&direct_mutex);
6067
6068	/* Enable the tmp_ops to have the same functions as the direct ops */
6069	ftrace_ops_init(&tmp_ops);
6070	tmp_ops.func_hash = ops->func_hash;
6071	tmp_ops.direct_call = addr;
6072
6073	err = register_ftrace_function_nolock(&tmp_ops);
6074	if (err)
6075		return err;
6076
6077	/*
6078	 * Now the ftrace_ops_list_func() is called to do the direct callers.
6079	 * We can safely change the direct functions attached to each entry.
6080	 */
6081	mutex_lock(&ftrace_lock);
6082
6083	hash = ops->func_hash->filter_hash;
6084	size = 1 << hash->size_bits;
6085	for (i = 0; i < size; i++) {
6086		hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
6087			entry = __ftrace_lookup_ip(direct_functions, iter->ip);
6088			if (!entry)
6089				continue;
6090			entry->direct = addr;
6091		}
6092	}
6093	/* Prevent store tearing if a trampoline concurrently accesses the value */
6094	WRITE_ONCE(ops->direct_call, addr);
6095
6096	mutex_unlock(&ftrace_lock);
6097
6098	/* Removing the tmp_ops will add the updated direct callers to the functions */
6099	unregister_ftrace_function(&tmp_ops);
6100
6101	return err;
6102}
6103
6104/**
6105 * modify_ftrace_direct_nolock - Modify an existing direct 'multi' call
6106 * to call something else
6107 * @ops: The address of the struct ftrace_ops object
6108 * @addr: The address of the new trampoline to call at @ops functions
6109 *
6110 * This is used to unregister the currently registered direct caller and
6111 * to register a new one (@addr) for the functions registered in @ops.
6112 *
6113 * Note there's a window between the ftrace_shutdown and ftrace_startup
6114 * calls where no callbacks will be called.
6115 *
6116 * The caller should already have direct_mutex locked, so we don't take
6117 * direct_mutex here.
6118 *
6119 * Returns: zero on success. Non zero on error, which includes:
6120 *  -EINVAL - The @ops object was not properly registered.
6121 */
6122int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
6123{
6124	if (check_direct_multi(ops))
6125		return -EINVAL;
6126	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6127		return -EINVAL;
6128
6129	return __modify_ftrace_direct(ops, addr);
6130}
6131EXPORT_SYMBOL_GPL(modify_ftrace_direct_nolock);
6132
6133/**
6134 * modify_ftrace_direct - Modify an existing direct 'multi' call
6135 * to call something else
6136 * @ops: The address of the struct ftrace_ops object
6137 * @addr: The address of the new trampoline to call at @ops functions
6138 *
6139 * This is used to unregister the currently registered direct caller and
6140 * to register a new one (@addr) for the functions registered in @ops.
6141 *
6142 * Note there's a window between the ftrace_shutdown and ftrace_startup
6143 * calls where no callbacks will be called.
6144 *
6145 * Returns: zero on success. Non zero on error, which includes:
6146 *  -EINVAL - The @ops object was not properly registered.
6147 */
6148int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
6149{
6150	int err;
6151
6152	if (check_direct_multi(ops))
6153		return -EINVAL;
6154	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
6155		return -EINVAL;
6156
6157	mutex_lock(&direct_mutex);
6158	err = __modify_ftrace_direct(ops, addr);
6159	mutex_unlock(&direct_mutex);
6160	return err;
6161}
6162EXPORT_SYMBOL_GPL(modify_ftrace_direct);
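/*
 * Continuing the sketch above (hypothetical names): switch the functions
 * of direct_ops over to a second trampoline while they stay registered,
 * avoiding a full unregister/register cycle.
 *
 *	static int switch_my_tramp(unsigned long my_other_tramp)
 *	{
 *		return modify_ftrace_direct(&direct_ops, my_other_tramp);
 *	}
 */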
6163#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
6164
6165/**
6166 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
6167 * @ops: the ops to set the filter with
6168 * @ip: the address to add to or remove from the filter.
6169 * @remove: non zero to remove the ip from the filter
6170 * @reset: non zero to reset all filters before applying this filter.
6171 *
6172 * Filters denote which functions should be enabled when tracing is enabled.
6173 * If @ip is NULL, it fails to update the filter.
6174 *
6175 * This can allocate memory which must be freed before @ops can be freed,
6176 * either by removing each filtered addr or by using
6177 * ftrace_free_filter(@ops).
6178 */
6179int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
6180			 int remove, int reset)
6181{
6182	ftrace_ops_init(ops);
6183	return ftrace_set_addr(ops, &ip, 1, remove, reset, 1);
6184}
6185EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
6186
6187/**
6188 * ftrace_set_filter_ips - set functions to filter on in ftrace by addresses
6189 * @ops: the ops to set the filter with
6190 * @ips: the array of addresses to add to or remove from the filter.
6191 * @cnt: the number of addresses in @ips
6192 * @remove: non zero to remove ips from the filter
6193 * @reset: non zero to reset all filters before applying this filter.
6194 *
6195 * Filters denote which functions should be enabled when tracing is enabled.
6196 * If the @ips array or any ip specified within it is NULL, it fails to update the filter.
6197 *
6198 * This can allocate memory which must be freed before @ops can be freed,
6199 * either by removing each filtered addr or by using
6200 * ftrace_free_filter(@ops).
6201 */
6202int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
6203			  unsigned int cnt, int remove, int reset)
6204{
6205	ftrace_ops_init(ops);
6206	return ftrace_set_addr(ops, ips, cnt, remove, reset, 1);
6207}
6208EXPORT_SYMBOL_GPL(ftrace_set_filter_ips);
6209
6210/**
6211 * ftrace_ops_set_global_filter - setup ops to use global filters
6212 * @ops: the ops which will use the global filters
6213 *
6214 * ftrace users who need global function trace filtering should call this.
6215 * It can set the global filter only if ops were not initialized before.
6216 */
6217void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
6218{
6219	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
6220		return;
6221
6222	ftrace_ops_init(ops);
6223	ops->func_hash = &global_ops.local_hash;
6224}
6225EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
6226
6227static int
6228ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
6229		 int reset, int enable)
6230{
6231	return ftrace_set_hash(ops, buf, len, NULL, 0, 0, reset, enable);
6232}
6233
6234/**
6235 * ftrace_set_filter - set a function to filter on in ftrace
6236 * @ops: the ops to set the filter with
6237 * @buf: the string that holds the function filter text.
6238 * @len: the length of the string.
6239 * @reset: non-zero to reset all filters before applying this filter.
6240 *
6241 * Filters denote which functions should be enabled when tracing is enabled.
6242 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
6243 *
6244 * This can allocate memory which must be freed before @ops can be freed,
6245 * either by removing each filtered addr or by using
6246 * ftrace_free_filter(@ops).
6247 */
6248int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
6249		       int len, int reset)
6250{
6251	ftrace_ops_init(ops);
6252	return ftrace_set_regex(ops, buf, len, reset, 1);
6253}
6254EXPORT_SYMBOL_GPL(ftrace_set_filter);
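/*
 * Example usage (an illustrative sketch; my_callback and my_ops are
 * hypothetical): have a callback invoked for every traced function
 * matching a glob.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		// called on each hit of a filtered function
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	static int start_tracing(void)
 *	{
 *		int ret;
 *
 *		ret = ftrace_set_filter(&my_ops, "vfs_*", strlen("vfs_*"), 1);
 *		if (ret)
 *			return ret;
 *
 *		return register_ftrace_function(&my_ops);
 *	}
 */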
6255
6256/**
6257 * ftrace_set_notrace - set a function to not trace in ftrace
6258 * @ops: the ops to set the notrace filter with
6259 * @buf: the string that holds the function notrace text.
6260 * @len: the length of the string.
6261 * @reset: non-zero to reset all filters before applying this filter.
6262 *
6263 * Notrace Filters denote which functions should not be enabled when tracing
6264 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
6265 * for tracing.
6266 *
6267 * This can allocate memory which must be freed before @ops can be freed,
6268 * either by removing each filtered addr or by using
6269 * ftrace_free_filter(@ops).
6270 */
6271int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
6272			int len, int reset)
6273{
6274	ftrace_ops_init(ops);
6275	return ftrace_set_regex(ops, buf, len, reset, 0);
6276}
6277EXPORT_SYMBOL_GPL(ftrace_set_notrace);
6278/**
6279 * ftrace_set_global_filter - set a function to filter on with global tracers
6280 * @buf: the string that holds the function filter text.
6281 * @len: the length of the string.
6282 * @reset: non-zero to reset all filters before applying this filter.
6283 *
6284 * Filters denote which functions should be enabled when tracing is enabled.
6285 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
6286 */
6287void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
6288{
6289	ftrace_set_regex(&global_ops, buf, len, reset, 1);
6290}
6291EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
6292
6293/**
6294 * ftrace_set_global_notrace - set a function to not trace with global tracers
6295 * @buf: the string that holds the function notrace text.
6296 * @len: the length of the string.
6297 * @reset: non-zero to reset all filters before applying this filter.
6298 *
6299 * Notrace Filters denote which functions should not be enabled when tracing
6300 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
6301 * for tracing.
6302 */
6303void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
6304{
6305	ftrace_set_regex(&global_ops, buf, len, reset, 0);
6306}
6307EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
6308
6309/*
6310 * command line interface to allow users to set filters on boot up.
6311 */
6312#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
6313static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
6314static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
6315
6316/* Used by the function selftest to skip the test when a filter is set */
6317bool ftrace_filter_param __initdata;
6318
6319static int __init set_ftrace_notrace(char *str)
6320{
6321	ftrace_filter_param = true;
6322	strscpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
6323	return 1;
6324}
6325__setup("ftrace_notrace=", set_ftrace_notrace);
6326
6327static int __init set_ftrace_filter(char *str)
6328{
6329	ftrace_filter_param = true;
6330	strscpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
6331	return 1;
6332}
6333__setup("ftrace_filter=", set_ftrace_filter);
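/*
 * Example (illustrative): these filters can be set on the kernel command
 * line before tracefs is available, e.g.
 *
 *	ftrace_filter=kmalloc,kfree ftrace_notrace=*spin_lock*
 */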
6334
6335#ifdef CONFIG_FUNCTION_GRAPH_TRACER
6336static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
6337static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
6338static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
6339
6340static int __init set_graph_function(char *str)
6341{
6342	strscpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
6343	return 1;
6344}
6345__setup("ftrace_graph_filter=", set_graph_function);
6346
6347static int __init set_graph_notrace_function(char *str)
6348{
6349	strscpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
6350	return 1;
6351}
6352__setup("ftrace_graph_notrace=", set_graph_notrace_function);
6353
6354static int __init set_graph_max_depth_function(char *str)
6355{
6356	if (!str || kstrtouint(str, 0, &fgraph_max_depth))
6357		return 0;
6358	return 1;
6359}
6360__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
6361
6362static void __init set_ftrace_early_graph(char *buf, int enable)
6363{
6364	int ret;
6365	char *func;
6366	struct ftrace_hash *hash;
6367
6368	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
6369	if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
6370		return;
6371
6372	while (buf) {
6373		func = strsep(&buf, ",");
6374		/* we allow only one expression at a time */
6375		ret = ftrace_graph_set_hash(hash, func);
6376		if (ret)
6377			printk(KERN_DEBUG "ftrace: function %s not "
6378					  "traceable\n", func);
6379	}
6380
6381	if (enable)
6382		ftrace_graph_hash = hash;
6383	else
6384		ftrace_graph_notrace_hash = hash;
6385}
6386#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6387
6388void __init
6389ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
6390{
6391	char *func;
6392
6393	ftrace_ops_init(ops);
6394
6395	while (buf) {
6396		func = strsep(&buf, ",");
6397		ftrace_set_regex(ops, func, strlen(func), 0, enable);
6398	}
6399}
6400
6401static void __init set_ftrace_early_filters(void)
6402{
6403	if (ftrace_filter_buf[0])
6404		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
6405	if (ftrace_notrace_buf[0])
6406		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
6407#ifdef CONFIG_FUNCTION_GRAPH_TRACER
6408	if (ftrace_graph_buf[0])
6409		set_ftrace_early_graph(ftrace_graph_buf, 1);
6410	if (ftrace_graph_notrace_buf[0])
6411		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
6412#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6413}
6414
6415int ftrace_regex_release(struct inode *inode, struct file *file)
6416{
6417	struct seq_file *m = (struct seq_file *)file->private_data;
6418	struct ftrace_iterator *iter;
6419	struct ftrace_hash **orig_hash;
6420	struct trace_parser *parser;
6421	int filter_hash;
6422
6423	if (file->f_mode & FMODE_READ) {
6424		iter = m->private;
6425		seq_release(inode, file);
6426	} else
6427		iter = file->private_data;
6428
6429	parser = &iter->parser;
6430	if (trace_parser_loaded(parser)) {
6431		int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
6432
6433		ftrace_process_regex(iter, parser->buffer,
6434				     parser->idx, enable);
6435	}
6436
6437	trace_parser_put(parser);
6438
6439	mutex_lock(&iter->ops->func_hash->regex_lock);
6440
6441	if (file->f_mode & FMODE_WRITE) {
6442		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
6443
6444		if (filter_hash) {
6445			orig_hash = &iter->ops->func_hash->filter_hash;
6446			if (iter->tr) {
6447				if (list_empty(&iter->tr->mod_trace))
6448					iter->hash->flags &= ~FTRACE_HASH_FL_MOD;
6449				else
6450					iter->hash->flags |= FTRACE_HASH_FL_MOD;
6451			}
6452		} else
6453			orig_hash = &iter->ops->func_hash->notrace_hash;
6454
6455		mutex_lock(&ftrace_lock);
6456		ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
6457						      iter->hash, filter_hash);
6458		mutex_unlock(&ftrace_lock);
6459	} else {
6460		/* For read only, the hash is the ops hash */
6461		iter->hash = NULL;
6462	}
6463
6464	mutex_unlock(&iter->ops->func_hash->regex_lock);
6465	free_ftrace_hash(iter->hash);
6466	if (iter->tr)
6467		trace_array_put(iter->tr);
6468	kfree(iter);
6469
6470	return 0;
6471}
6472
6473static const struct file_operations ftrace_avail_fops = {
6474	.open = ftrace_avail_open,
6475	.read = seq_read,
6476	.llseek = seq_lseek,
6477	.release = seq_release_private,
6478};
6479
6480static const struct file_operations ftrace_enabled_fops = {
6481	.open = ftrace_enabled_open,
6482	.read = seq_read,
6483	.llseek = seq_lseek,
6484	.release = seq_release_private,
6485};
6486
6487static const struct file_operations ftrace_touched_fops = {
6488	.open = ftrace_touched_open,
6489	.read = seq_read,
6490	.llseek = seq_lseek,
6491	.release = seq_release_private,
6492};
6493
6494static const struct file_operations ftrace_avail_addrs_fops = {
6495	.open = ftrace_avail_addrs_open,
6496	.read = seq_read,
6497	.llseek = seq_lseek,
6498	.release = seq_release_private,
6499};
6500
6501static const struct file_operations ftrace_filter_fops = {
6502	.open = ftrace_filter_open,
6503	.read = seq_read,
6504	.write = ftrace_filter_write,
6505	.llseek = tracing_lseek,
6506	.release = ftrace_regex_release,
6507};
6508
6509static const struct file_operations ftrace_notrace_fops = {
6510	.open = ftrace_notrace_open,
6511	.read = seq_read,
6512	.write = ftrace_notrace_write,
6513	.llseek = tracing_lseek,
6514	.release = ftrace_regex_release,
6515};
6516
6517#ifdef CONFIG_FUNCTION_GRAPH_TRACER
6518
6519static DEFINE_MUTEX(graph_lock);
6520
6521struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
6522struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
6523
6524enum graph_filter_type {
6525	GRAPH_FILTER_NOTRACE	= 0,
6526	GRAPH_FILTER_FUNCTION,
6527};
6528
6529#define FTRACE_GRAPH_EMPTY	((void *)1)
6530
6531struct ftrace_graph_data {
6532	struct ftrace_hash		*hash;
6533	struct ftrace_func_entry	*entry;
6534	int				idx;   /* for hash table iteration */
6535	enum graph_filter_type		type;
6536	struct ftrace_hash		*new_hash;
6537	const struct seq_operations	*seq_ops;
6538	struct trace_parser		parser;
6539};
6540
6541static void *
6542__g_next(struct seq_file *m, loff_t *pos)
6543{
6544	struct ftrace_graph_data *fgd = m->private;
6545	struct ftrace_func_entry *entry = fgd->entry;
6546	struct hlist_head *head;
6547	int i, idx = fgd->idx;
6548
6549	if (*pos >= fgd->hash->count)
6550		return NULL;
6551
6552	if (entry) {
6553		hlist_for_each_entry_continue(entry, hlist) {
6554			fgd->entry = entry;
6555			return entry;
6556		}
6557
6558		idx++;
6559	}
6560
6561	for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
6562		head = &fgd->hash->buckets[i];
6563		hlist_for_each_entry(entry, head, hlist) {
6564			fgd->entry = entry;
6565			fgd->idx = i;
6566			return entry;
6567		}
6568	}
6569	return NULL;
6570}
6571
6572static void *
6573g_next(struct seq_file *m, void *v, loff_t *pos)
6574{
6575	(*pos)++;
6576	return __g_next(m, pos);
6577}
6578
6579static void *g_start(struct seq_file *m, loff_t *pos)
6580{
6581	struct ftrace_graph_data *fgd = m->private;
6582
6583	mutex_lock(&graph_lock);
6584
6585	if (fgd->type == GRAPH_FILTER_FUNCTION)
6586		fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6587					lockdep_is_held(&graph_lock));
6588	else
6589		fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6590					lockdep_is_held(&graph_lock));
6591
6592	/* Nothing to show, tell g_show to print that all functions are enabled */
6593	if (ftrace_hash_empty(fgd->hash) && !*pos)
6594		return FTRACE_GRAPH_EMPTY;
6595
6596	fgd->idx = 0;
6597	fgd->entry = NULL;
6598	return __g_next(m, pos);
6599}
6600
6601static void g_stop(struct seq_file *m, void *p)
6602{
6603	mutex_unlock(&graph_lock);
6604}
6605
6606static int g_show(struct seq_file *m, void *v)
6607{
6608	struct ftrace_func_entry *entry = v;
6609
6610	if (!entry)
6611		return 0;
6612
6613	if (entry == FTRACE_GRAPH_EMPTY) {
6614		struct ftrace_graph_data *fgd = m->private;
6615
6616		if (fgd->type == GRAPH_FILTER_FUNCTION)
6617			seq_puts(m, "#### all functions enabled ####\n");
6618		else
6619			seq_puts(m, "#### no functions disabled ####\n");
6620		return 0;
6621	}
6622
6623	seq_printf(m, "%ps\n", (void *)entry->ip);
6624
6625	return 0;
6626}
6627
6628static const struct seq_operations ftrace_graph_seq_ops = {
6629	.start = g_start,
6630	.next = g_next,
6631	.stop = g_stop,
6632	.show = g_show,
6633};
6634
6635static int
6636__ftrace_graph_open(struct inode *inode, struct file *file,
6637		    struct ftrace_graph_data *fgd)
6638{
6639	int ret;
6640	struct ftrace_hash *new_hash = NULL;
6641
6642	ret = security_locked_down(LOCKDOWN_TRACEFS);
6643	if (ret)
6644		return ret;
6645
6646	if (file->f_mode & FMODE_WRITE) {
6647		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
6648
6649		if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
6650			return -ENOMEM;
6651
6652		if (file->f_flags & O_TRUNC)
6653			new_hash = alloc_ftrace_hash(size_bits);
6654		else
6655			new_hash = alloc_and_copy_ftrace_hash(size_bits,
6656							      fgd->hash);
6657		if (!new_hash) {
6658			ret = -ENOMEM;
6659			goto out;
6660		}
6661	}
6662
6663	if (file->f_mode & FMODE_READ) {
6664		ret = seq_open(file, &ftrace_graph_seq_ops);
6665		if (!ret) {
6666			struct seq_file *m = file->private_data;
6667			m->private = fgd;
6668		} else {
6669			/* Failed */
6670			free_ftrace_hash(new_hash);
6671			new_hash = NULL;
6672		}
6673	} else
6674		file->private_data = fgd;
6675
6676out:
6677	if (ret < 0 && file->f_mode & FMODE_WRITE)
6678		trace_parser_put(&fgd->parser);
6679
6680	fgd->new_hash = new_hash;
6681
6682	/*
6683	 * All uses of fgd->hash must be taken with the graph_lock
6684	 * held. The graph_lock is going to be released, so force
6685	 * fgd->hash to be reinitialized when it is taken again.
6686	 */
6687	fgd->hash = NULL;
6688
6689	return ret;
6690}
6691
6692static int
6693ftrace_graph_open(struct inode *inode, struct file *file)
6694{
6695	struct ftrace_graph_data *fgd;
6696	int ret;
6697
6698	if (unlikely(ftrace_disabled))
6699		return -ENODEV;
6700
6701	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6702	if (fgd == NULL)
6703		return -ENOMEM;
6704
6705	mutex_lock(&graph_lock);
6706
6707	fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6708					lockdep_is_held(&graph_lock));
6709	fgd->type = GRAPH_FILTER_FUNCTION;
6710	fgd->seq_ops = &ftrace_graph_seq_ops;
6711
6712	ret = __ftrace_graph_open(inode, file, fgd);
6713	if (ret < 0)
6714		kfree(fgd);
6715
6716	mutex_unlock(&graph_lock);
6717	return ret;
6718}
6719
6720static int
6721ftrace_graph_notrace_open(struct inode *inode, struct file *file)
6722{
6723	struct ftrace_graph_data *fgd;
6724	int ret;
6725
6726	if (unlikely(ftrace_disabled))
6727		return -ENODEV;
6728
6729	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6730	if (fgd == NULL)
6731		return -ENOMEM;
6732
6733	mutex_lock(&graph_lock);
6734
6735	fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6736					lockdep_is_held(&graph_lock));
6737	fgd->type = GRAPH_FILTER_NOTRACE;
6738	fgd->seq_ops = &ftrace_graph_seq_ops;
6739
6740	ret = __ftrace_graph_open(inode, file, fgd);
6741	if (ret < 0)
6742		kfree(fgd);
6743
6744	mutex_unlock(&graph_lock);
6745	return ret;
6746}
6747
6748static int
6749ftrace_graph_release(struct inode *inode, struct file *file)
6750{
6751	struct ftrace_graph_data *fgd;
6752	struct ftrace_hash *old_hash, *new_hash;
6753	struct trace_parser *parser;
6754	int ret = 0;
6755
6756	if (file->f_mode & FMODE_READ) {
6757		struct seq_file *m = file->private_data;
6758
6759		fgd = m->private;
6760		seq_release(inode, file);
6761	} else {
6762		fgd = file->private_data;
6763	}
6764
6765
6766	if (file->f_mode & FMODE_WRITE) {
6767
6768		parser = &fgd->parser;
6769
6770		if (trace_parser_loaded((parser))) {
6771			ret = ftrace_graph_set_hash(fgd->new_hash,
6772						    parser->buffer);
6773		}
6774
6775		trace_parser_put(parser);
6776
6777		new_hash = __ftrace_hash_move(fgd->new_hash);
6778		if (!new_hash) {
6779			ret = -ENOMEM;
6780			goto out;
6781		}
6782
6783		mutex_lock(&graph_lock);
6784
6785		if (fgd->type == GRAPH_FILTER_FUNCTION) {
6786			old_hash = rcu_dereference_protected(ftrace_graph_hash,
6787					lockdep_is_held(&graph_lock));
6788			rcu_assign_pointer(ftrace_graph_hash, new_hash);
6789		} else {
6790			old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6791					lockdep_is_held(&graph_lock));
6792			rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
6793		}
6794
6795		mutex_unlock(&graph_lock);
6796
6797		/*
6798		 * We need to do a hard force of sched synchronization.
6799		 * This is because we use preempt_disable() to do RCU, but
6800		 * the function tracers can be called where RCU is not watching
6801		 * (like before user_exit()). We can not rely on the RCU
6802		 * infrastructure to do the synchronization, thus we must do it
6803		 * ourselves.
6804		 */
6805		if (old_hash != EMPTY_HASH)
6806			synchronize_rcu_tasks_rude();
6807
6808		free_ftrace_hash(old_hash);
6809	}
6810
6811 out:
6812	free_ftrace_hash(fgd->new_hash);
6813	kfree(fgd);
6814
6815	return ret;
6816}
6817
6818static int
6819ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
6820{
6821	struct ftrace_glob func_g;
6822	struct dyn_ftrace *rec;
6823	struct ftrace_page *pg;
6824	struct ftrace_func_entry *entry;
6825	int fail = 1;
6826	int not;
6827
6828	/* decode regex */
6829	func_g.type = filter_parse_regex(buffer, strlen(buffer),
6830					 &func_g.search, &not);
6831
6832	func_g.len = strlen(func_g.search);
6833
6834	guard(mutex)(&ftrace_lock);
6835
6836	if (unlikely(ftrace_disabled))
6837		return -ENODEV;
6838
6839	do_for_each_ftrace_rec(pg, rec) {
6840
6841		if (rec->flags & FTRACE_FL_DISABLED)
6842			continue;
6843
6844		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
6845			entry = ftrace_lookup_ip(hash, rec->ip);
6846
6847			if (!not) {
6848				fail = 0;
6849
6850				if (entry)
6851					continue;
6852				if (add_hash_entry(hash, rec->ip) == NULL)
6853					return 0;
6854			} else {
6855				if (entry) {
6856					free_hash_entry(hash, entry);
6857					fail = 0;
6858				}
6859			}
6860		}
6861	} while_for_each_ftrace_rec();
6862
6863	return fail ? -EINVAL : 0;
6864}
6865
6866static ssize_t
6867ftrace_graph_write(struct file *file, const char __user *ubuf,
6868		   size_t cnt, loff_t *ppos)
6869{
6870	ssize_t read, ret = 0;
6871	struct ftrace_graph_data *fgd = file->private_data;
6872	struct trace_parser *parser;
6873
6874	if (!cnt)
6875		return 0;
6876
6877	/* Read mode uses seq functions */
6878	if (file->f_mode & FMODE_READ) {
6879		struct seq_file *m = file->private_data;
6880		fgd = m->private;
6881	}
6882
6883	parser = &fgd->parser;
6884
6885	read = trace_get_user(parser, ubuf, cnt, ppos);
6886
6887	if (read >= 0 && trace_parser_loaded(parser) &&
6888	    !trace_parser_cont(parser)) {
6889
6890		ret = ftrace_graph_set_hash(fgd->new_hash,
6891					    parser->buffer);
6892		trace_parser_clear(parser);
6893	}
6894
6895	if (!ret)
6896		ret = read;
6897
6898	return ret;
6899}
6900
6901static const struct file_operations ftrace_graph_fops = {
6902	.open		= ftrace_graph_open,
6903	.read		= seq_read,
6904	.write		= ftrace_graph_write,
6905	.llseek		= tracing_lseek,
6906	.release	= ftrace_graph_release,
6907};
6908
6909static const struct file_operations ftrace_graph_notrace_fops = {
6910	.open		= ftrace_graph_notrace_open,
6911	.read		= seq_read,
6912	.write		= ftrace_graph_write,
6913	.llseek		= tracing_lseek,
6914	.release	= ftrace_graph_release,
6915};
6916#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6917
6918void ftrace_create_filter_files(struct ftrace_ops *ops,
6919				struct dentry *parent)
6920{
6921
6922	trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
6923			  ops, &ftrace_filter_fops);
6924
6925	trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
6926			  ops, &ftrace_notrace_fops);
6927}
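/*
 * From user space the files created above appear under tracefs, e.g.
 * (illustrative):
 *
 *	echo 'vfs_read'   > /sys/kernel/tracing/set_ftrace_filter
 *	echo '!vfs_read' >> /sys/kernel/tracing/set_ftrace_filter
 *	echo 'mutex_*'    > /sys/kernel/tracing/set_ftrace_notrace
 */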
6928
6929/*
6930 * The name "destroy_filter_files" is really a misnomer. Although
6931 * it may actually delete the files in the future, this is
6932 * really intended to make sure the ops passed in are disabled
6933 * and that when this function returns, the caller is free to
6934 * free the ops.
6935 *
6936 * The "destroy" name is only to match the "create" name that this
6937 * should be paired with.
6938 */
6939void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6940{
6941	mutex_lock(&ftrace_lock);
6942	if (ops->flags & FTRACE_OPS_FL_ENABLED)
6943		ftrace_shutdown(ops, 0);
6944	ops->flags |= FTRACE_OPS_FL_DELETED;
6945	ftrace_free_filter(ops);
6946	mutex_unlock(&ftrace_lock);
6947}
6948
6949static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
6950{
6951
6952	trace_create_file("available_filter_functions", TRACE_MODE_READ,
6953			d_tracer, NULL, &ftrace_avail_fops);
6954
6955	trace_create_file("available_filter_functions_addrs", TRACE_MODE_READ,
6956			d_tracer, NULL, &ftrace_avail_addrs_fops);
6957
6958	trace_create_file("enabled_functions", TRACE_MODE_READ,
6959			d_tracer, NULL, &ftrace_enabled_fops);
6960
6961	trace_create_file("touched_functions", TRACE_MODE_READ,
6962			d_tracer, NULL, &ftrace_touched_fops);
6963
6964	ftrace_create_filter_files(&global_ops, d_tracer);
6965
6966#ifdef CONFIG_FUNCTION_GRAPH_TRACER
6967	trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
6968				    NULL,
6969				    &ftrace_graph_fops);
6970	trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
6971				    NULL,
6972				    &ftrace_graph_notrace_fops);
6973#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6974
6975	return 0;
6976}
6977
6978static int ftrace_cmp_ips(const void *a, const void *b)
6979{
6980	const unsigned long *ipa = a;
6981	const unsigned long *ipb = b;
6982
6983	if (*ipa > *ipb)
6984		return 1;
6985	if (*ipa < *ipb)
6986		return -1;
6987	return 0;
6988}
6989
6990#ifdef CONFIG_FTRACE_SORT_STARTUP_TEST
6991static void test_is_sorted(unsigned long *start, unsigned long count)
6992{
6993	int i;
6994
6995	for (i = 1; i < count; i++) {
6996		if (WARN(start[i - 1] > start[i],
6997			 "[%d] %pS at %lx is not sorted with %pS at %lx\n", i,
6998			 (void *)start[i - 1], start[i - 1],
6999			 (void *)start[i], start[i]))
7000			break;
7001	}
7002	if (i == count)
7003		pr_info("ftrace section at %px sorted properly\n", start);
7004}
7005#else
7006static void test_is_sorted(unsigned long *start, unsigned long count)
7007{
7008}
7009#endif
7010
7011static int ftrace_process_locs(struct module *mod,
7012			       unsigned long *start,
7013			       unsigned long *end)
7014{
7015	struct ftrace_page *pg_unuse = NULL;
7016	struct ftrace_page *start_pg;
7017	struct ftrace_page *pg;
7018	struct dyn_ftrace *rec;
7019	unsigned long skipped = 0;
7020	unsigned long count;
7021	unsigned long *p;
7022	unsigned long addr;
7023	unsigned long flags = 0; /* Shut up gcc */
7024	int ret = -ENOMEM;
7025
7026	count = end - start;
7027
7028	if (!count)
7029		return 0;
7030
7031	/*
7032	 * Sorting mcount in vmlinux at build time depends on
7033	 * CONFIG_BUILDTIME_MCOUNT_SORT, while the mcount loc in
7034	 * modules cannot be sorted at build time.
7035	 */
7036	if (!IS_ENABLED(CONFIG_BUILDTIME_MCOUNT_SORT) || mod) {
7037		sort(start, count, sizeof(*start),
7038		     ftrace_cmp_ips, NULL);
7039	} else {
7040		test_is_sorted(start, count);
7041	}
7042
7043	start_pg = ftrace_allocate_pages(count);
7044	if (!start_pg)
7045		return -ENOMEM;
7046
7047	mutex_lock(&ftrace_lock);
7048
7049	/*
7050	 * The core kernel and each module need their own pages, as
7051	 * modules will free them when they are removed.
7052	 * Force a new page to be allocated for modules.
7053	 */
7054	if (!mod) {
7055		WARN_ON(ftrace_pages || ftrace_pages_start);
7056		/* First initialization */
7057		ftrace_pages = ftrace_pages_start = start_pg;
7058	} else {
7059		if (!ftrace_pages)
7060			goto out;
7061
7062		if (WARN_ON(ftrace_pages->next)) {
7063			/* Hmm, we have free pages? */
7064			while (ftrace_pages->next)
7065				ftrace_pages = ftrace_pages->next;
7066		}
7067
7068		ftrace_pages->next = start_pg;
7069	}
7070
7071	p = start;
7072	pg = start_pg;
7073	while (p < end) {
7074		unsigned long end_offset;
7075		addr = ftrace_call_adjust(*p++);
7076		/*
7077		 * Some architecture linkers will pad between
7078		 * the different mcount_loc sections of different
7079		 * object files to satisfy alignments.
7080		 * Skip any NULL pointers.
7081		 */
7082		if (!addr) {
7083			skipped++;
7084			continue;
7085		}
7086
7087		end_offset = (pg->index+1) * sizeof(pg->records[0]);
7088		if (end_offset > PAGE_SIZE << pg->order) {
7089			/* We should have allocated enough */
7090			if (WARN_ON(!pg->next))
7091				break;
7092			pg = pg->next;
7093		}
7094
7095		rec = &pg->records[pg->index++];
7096		rec->ip = addr;
7097	}
7098
7099	if (pg->next) {
7100		pg_unuse = pg->next;
7101		pg->next = NULL;
7102	}
7103
7104	/* Assign the last page to ftrace_pages */
7105	ftrace_pages = pg;
7106
7107	/*
7108	 * We only need to disable interrupts on start up
7109	 * because we are modifying code that an interrupt
7110	 * may execute, and the modification is not atomic.
7111	 * But for modules, nothing runs the code we modify
7112	 * until we are finished with it, and there's no
7113	 * reason to cause large interrupt latencies while we do it.
7114	 */
7115	if (!mod)
7116		local_irq_save(flags);
7117	ftrace_update_code(mod, start_pg);
7118	if (!mod)
7119		local_irq_restore(flags);
7120	ret = 0;
7121 out:
7122	mutex_unlock(&ftrace_lock);
7123
7124	/* We should have used all pages unless we skipped some */
7125	if (pg_unuse) {
7126		WARN_ON(!skipped);
7127		/* Need to synchronize with ftrace_location_range() */
7128		synchronize_rcu();
7129		ftrace_free_pages(pg_unuse);
7130	}
7131	return ret;
7132}
7133
7134struct ftrace_mod_func {
7135	struct list_head	list;
7136	char			*name;
7137	unsigned long		ip;
7138	unsigned int		size;
7139};
7140
7141struct ftrace_mod_map {
7142	struct rcu_head		rcu;
7143	struct list_head	list;
7144	struct module		*mod;
7145	unsigned long		start_addr;
7146	unsigned long		end_addr;
7147	struct list_head	funcs;
7148	unsigned int		num_funcs;
7149};
7150
7151static int ftrace_get_trampoline_kallsym(unsigned int symnum,
7152					 unsigned long *value, char *type,
7153					 char *name, char *module_name,
7154					 int *exported)
7155{
7156	struct ftrace_ops *op;
7157
7158	list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
7159		if (!op->trampoline || symnum--)
7160			continue;
7161		*value = op->trampoline;
7162		*type = 't';
7163		strscpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
7164		strscpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
7165		*exported = 0;
7166		return 0;
7167	}
7168
7169	return -ERANGE;
7170}
7171
7172#if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) || defined(CONFIG_MODULES)
7173/*
7174 * Check if the current ops references the given ip.
7175 *
7176 * If the ops traces all functions, then it was already accounted for.
7177 * If the ops does not trace the current record function, skip it.
7178 * If the ops ignores the function via notrace filter, skip it.
7179 */
7180static bool
7181ops_references_ip(struct ftrace_ops *ops, unsigned long ip)
7182{
7183	/* If ops isn't enabled, ignore it */
7184	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
7185		return false;
7186
7187	/* If ops traces all then it includes this function */
7188	if (ops_traces_mod(ops))
7189		return true;
7190
7191	/* The function must be in the filter */
7192	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
7193	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, ip))
7194		return false;
7195
7196	/* If in notrace hash, we ignore it too */
7197	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, ip))
7198		return false;
7199
7200	return true;
7201}
7202#endif
7203
7204#ifdef CONFIG_MODULES
7205
7206#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
7207
7208static LIST_HEAD(ftrace_mod_maps);
7209
7210static int referenced_filters(struct dyn_ftrace *rec)
7211{
7212	struct ftrace_ops *ops;
7213	int cnt = 0;
7214
7215	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
7216		if (ops_references_ip(ops, rec->ip)) {
7217			if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
7218				continue;
7219			if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
7220				continue;
7221			cnt++;
7222			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
7223				rec->flags |= FTRACE_FL_REGS;
7224			if (cnt == 1 && ops->trampoline)
7225				rec->flags |= FTRACE_FL_TRAMP;
7226			else
7227				rec->flags &= ~FTRACE_FL_TRAMP;
7228		}
7229	}
7230
7231	return cnt;
7232}
7233
7234static void
7235clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
7236{
7237	struct ftrace_func_entry *entry;
7238	struct dyn_ftrace *rec;
7239	int i;
7240
7241	if (ftrace_hash_empty(hash))
7242		return;
7243
7244	for (i = 0; i < pg->index; i++) {
7245		rec = &pg->records[i];
7246		entry = __ftrace_lookup_ip(hash, rec->ip);
7247		/*
7248		 * Do not allow this rec to match again.
7249		 * Yeah, it may waste some memory, but will be removed
7250		 * if/when the hash is modified again.
7251		 */
7252		if (entry)
7253			entry->ip = 0;
7254	}
7255}
7256
7257/* Clear any records from hashes */
7258static void clear_mod_from_hashes(struct ftrace_page *pg)
7259{
7260	struct trace_array *tr;
7261
7262	mutex_lock(&trace_types_lock);
7263	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7264		if (!tr->ops || !tr->ops->func_hash)
7265			continue;
7266		mutex_lock(&tr->ops->func_hash->regex_lock);
7267		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
7268		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
7269		mutex_unlock(&tr->ops->func_hash->regex_lock);
7270	}
7271	mutex_unlock(&trace_types_lock);
7272}
7273
7274static void ftrace_free_mod_map(struct rcu_head *rcu)
7275{
7276	struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
7277	struct ftrace_mod_func *mod_func;
7278	struct ftrace_mod_func *n;
7279
7280	/* All the contents of mod_map are now not visible to readers */
7281	list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
7282		kfree(mod_func->name);
7283		list_del(&mod_func->list);
7284		kfree(mod_func);
7285	}
7286
7287	kfree(mod_map);
7288}
7289
7290void ftrace_release_mod(struct module *mod)
7291{
7292	struct ftrace_mod_map *mod_map;
7293	struct ftrace_mod_map *n;
7294	struct dyn_ftrace *rec;
7295	struct ftrace_page **last_pg;
7296	struct ftrace_page *tmp_page = NULL;
7297	struct ftrace_page *pg;
7298
7299	mutex_lock(&ftrace_lock);
7300
7301	if (ftrace_disabled)
7302		goto out_unlock;
7303
7304	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
7305		if (mod_map->mod == mod) {
7306			list_del_rcu(&mod_map->list);
7307			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
7308			break;
7309		}
7310	}
7311
7312	/*
7313	 * Each module has its own ftrace_pages, remove
7314	 * them from the list.
7315	 */
7316	last_pg = &ftrace_pages_start;
7317	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
7318		rec = &pg->records[0];
7319		if (within_module(rec->ip, mod)) {
7320			/*
7321			 * As core pages are first, the first
7322			 * page should never be a module page.
7323			 */
7324			if (WARN_ON(pg == ftrace_pages_start))
7325				goto out_unlock;
7326
7327			/* Check if we are deleting the last page */
7328			if (pg == ftrace_pages)
7329				ftrace_pages = next_to_ftrace_page(last_pg);
7330
7331			ftrace_update_tot_cnt -= pg->index;
7332			*last_pg = pg->next;
7333
7334			pg->next = tmp_page;
7335			tmp_page = pg;
7336		} else
7337			last_pg = &pg->next;
7338	}
7339 out_unlock:
7340	mutex_unlock(&ftrace_lock);
7341
7342	/* Need to synchronize with ftrace_location_range() */
7343	if (tmp_page)
7344		synchronize_rcu();
7345	for (pg = tmp_page; pg; pg = tmp_page) {
7346
7347		/* Needs to be called outside of ftrace_lock */
7348		clear_mod_from_hashes(pg);
7349
7350		if (pg->records) {
7351			free_pages((unsigned long)pg->records, pg->order);
7352			ftrace_number_of_pages -= 1 << pg->order;
7353		}
7354		tmp_page = pg->next;
7355		kfree(pg);
7356		ftrace_number_of_groups--;
7357	}
7358}
7359
7360void ftrace_module_enable(struct module *mod)
7361{
7362	struct dyn_ftrace *rec;
7363	struct ftrace_page *pg;
7364
7365	mutex_lock(&ftrace_lock);
7366
7367	if (ftrace_disabled)
7368		goto out_unlock;
7369
7370	/*
7371	 * If the tracing is enabled, go ahead and enable the record.
7372	 *
7373	 * The reason not to enable the record immediately is the
7374	 * inherent check of ftrace_make_nop/ftrace_make_call for
7375	 * correct previous instructions.  Doing the NOP conversion
7376	 * first puts the module into the correct state, thus
7377	 * passing the ftrace_make_call check.
7378	 *
7379	 * We also delay this to after the module code already set the
7380	 * text to read-only, as we now need to set it back to read-write
7381	 * so that we can modify the text.
7382	 */
7383	if (ftrace_start_up)
7384		ftrace_arch_code_modify_prepare();
7385
7386	do_for_each_ftrace_rec(pg, rec) {
7387		int cnt;
7388		/*
7389		 * do_for_each_ftrace_rec() is a double loop.
7390		 * Module text shares the pg. If a record is
7391		 * not part of this module, then skip this pg,
7392		 * which the "break" will do.
7393		 */
7394		if (!within_module(rec->ip, mod))
7395			break;
7396
7397		/* Weak functions should still be ignored */
7398		if (!test_for_valid_rec(rec)) {
7399			/* Clear all other flags. Should not be enabled anyway */
7400			rec->flags = FTRACE_FL_DISABLED;
7401			continue;
7402		}
7403
7404		cnt = 0;
7405
7406		/*
7407		 * When adding a module, we need to check if tracers are
7408		 * currently enabled and if they are, and can trace this record,
7409		 * we need to enable the module functions as well as update the
7410		 * reference counts for those function records.
7411		 */
7412		if (ftrace_start_up)
7413			cnt += referenced_filters(rec);
7414
7415		rec->flags &= ~FTRACE_FL_DISABLED;
7416		rec->flags += cnt;
7417
7418		if (ftrace_start_up && cnt) {
7419			int failed = __ftrace_replace_code(rec, 1);
7420			if (failed) {
7421				ftrace_bug(failed, rec);
7422				goto out_loop;
7423			}
7424		}
7425
7426	} while_for_each_ftrace_rec();
7427
7428 out_loop:
7429	if (ftrace_start_up)
7430		ftrace_arch_code_modify_post_process();
7431
7432 out_unlock:
7433	mutex_unlock(&ftrace_lock);
7434
7435	process_cached_mods(mod->name);
7436}
7437
7438void ftrace_module_init(struct module *mod)
7439{
7440	int ret;
7441
7442	if (ftrace_disabled || !mod->num_ftrace_callsites)
7443		return;
7444
7445	ret = ftrace_process_locs(mod, mod->ftrace_callsites,
7446				  mod->ftrace_callsites + mod->num_ftrace_callsites);
7447	if (ret)
7448		pr_warn("ftrace: failed to allocate entries for module '%s' functions\n",
7449			mod->name);
7450}
7451
7452static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7453				struct dyn_ftrace *rec)
7454{
7455	struct ftrace_mod_func *mod_func;
7456	unsigned long symsize;
7457	unsigned long offset;
7458	char str[KSYM_SYMBOL_LEN];
7459	char *modname;
7460	const char *ret;
7461
7462	ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
7463	if (!ret)
7464		return;
7465
7466	mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
7467	if (!mod_func)
7468		return;
7469
7470	mod_func->name = kstrdup(str, GFP_KERNEL);
7471	if (!mod_func->name) {
7472		kfree(mod_func);
7473		return;
7474	}
7475
7476	mod_func->ip = rec->ip - offset;
7477	mod_func->size = symsize;
7478
7479	mod_map->num_funcs++;
7480
7481	list_add_rcu(&mod_func->list, &mod_map->funcs);
7482}
7483
7484static struct ftrace_mod_map *
7485allocate_ftrace_mod_map(struct module *mod,
7486			unsigned long start, unsigned long end)
7487{
7488	struct ftrace_mod_map *mod_map;
7489
7490	mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
7491	if (!mod_map)
7492		return NULL;
7493
7494	mod_map->mod = mod;
7495	mod_map->start_addr = start;
7496	mod_map->end_addr = end;
7497	mod_map->num_funcs = 0;
7498
7499	INIT_LIST_HEAD_RCU(&mod_map->funcs);
7500
7501	list_add_rcu(&mod_map->list, &ftrace_mod_maps);
7502
7503	return mod_map;
7504}
7505
7506static int
7507ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
7508			   unsigned long addr, unsigned long *size,
7509			   unsigned long *off, char *sym)
7510{
7511	struct ftrace_mod_func *found_func =  NULL;
7512	struct ftrace_mod_func *mod_func;
7513
7514	list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7515		if (addr >= mod_func->ip &&
7516		    addr < mod_func->ip + mod_func->size) {
7517			found_func = mod_func;
7518			break;
7519		}
7520	}
7521
7522	if (found_func) {
7523		if (size)
7524			*size = found_func->size;
7525		if (off)
7526			*off = addr - found_func->ip;
7527		return strscpy(sym, found_func->name, KSYM_NAME_LEN);
7528	}
7529
7530	return 0;
7531}
7532
7533int
7534ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
7535		   unsigned long *off, char **modname, char *sym)
7536{
7537	struct ftrace_mod_map *mod_map;
7538	int ret = 0;
7539
7540	/* mod_map is freed via call_rcu() */
7541	preempt_disable();
7542	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7543		ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
7544		if (ret) {
7545			if (modname)
7546				*modname = mod_map->mod->name;
7547			break;
7548		}
7549	}
7550	preempt_enable();
7551
7552	return ret;
7553}
7554
7555int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7556			   char *type, char *name,
7557			   char *module_name, int *exported)
7558{
7559	struct ftrace_mod_map *mod_map;
7560	struct ftrace_mod_func *mod_func;
7561	int ret;
7562
7563	preempt_disable();
7564	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
7565
7566		if (symnum >= mod_map->num_funcs) {
7567			symnum -= mod_map->num_funcs;
7568			continue;
7569		}
7570
7571		list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
7572			if (symnum > 1) {
7573				symnum--;
7574				continue;
7575			}
7576
7577			*value = mod_func->ip;
7578			*type = 'T';
7579			strscpy(name, mod_func->name, KSYM_NAME_LEN);
7580			strscpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
7581			*exported = 1;
7582			preempt_enable();
7583			return 0;
7584		}
7585		WARN_ON(1);
7586		break;
7587	}
7588	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7589					    module_name, exported);
7590	preempt_enable();
7591	return ret;
7592}
7593
7594#else
7595static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
7596				struct dyn_ftrace *rec) { }
7597static inline struct ftrace_mod_map *
7598allocate_ftrace_mod_map(struct module *mod,
7599			unsigned long start, unsigned long end)
7600{
7601	return NULL;
7602}
7603int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
7604			   char *type, char *name, char *module_name,
7605			   int *exported)
7606{
7607	int ret;
7608
7609	preempt_disable();
7610	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
7611					    module_name, exported);
7612	preempt_enable();
7613	return ret;
7614}
7615#endif /* CONFIG_MODULES */
7616
7617struct ftrace_init_func {
7618	struct list_head list;
7619	unsigned long ip;
7620};
7621
7622/* Clear any init ips from hashes */
7623static void
7624clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
7625{
7626	struct ftrace_func_entry *entry;
7627
7628	entry = ftrace_lookup_ip(hash, func->ip);
7629	/*
7630	 * Do not allow this rec to match again.
7631	 * Yeah, it may waste some memory, but will be removed
7632	 * if/when the hash is modified again.
7633	 */
7634	if (entry)
7635		entry->ip = 0;
7636}
7637
7638static void
7639clear_func_from_hashes(struct ftrace_init_func *func)
7640{
7641	struct trace_array *tr;
7642
7643	mutex_lock(&trace_types_lock);
7644	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7645		if (!tr->ops || !tr->ops->func_hash)
7646			continue;
7647		mutex_lock(&tr->ops->func_hash->regex_lock);
7648		clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
7649		clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
7650		mutex_unlock(&tr->ops->func_hash->regex_lock);
7651	}
7652	mutex_unlock(&trace_types_lock);
7653}
7654
7655static void add_to_clear_hash_list(struct list_head *clear_list,
7656				   struct dyn_ftrace *rec)
7657{
7658	struct ftrace_init_func *func;
7659
7660	func = kmalloc(sizeof(*func), GFP_KERNEL);
7661	if (!func) {
7662		MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
7663		return;
7664	}
7665
7666	func->ip = rec->ip;
7667	list_add(&func->list, clear_list);
7668}
7669
7670void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
7671{
7672	unsigned long start = (unsigned long)(start_ptr);
7673	unsigned long end = (unsigned long)(end_ptr);
7674	struct ftrace_page **last_pg = &ftrace_pages_start;
7675	struct ftrace_page *tmp_page = NULL;
7676	struct ftrace_page *pg;
7677	struct dyn_ftrace *rec;
7678	struct dyn_ftrace key;
7679	struct ftrace_mod_map *mod_map = NULL;
7680	struct ftrace_init_func *func, *func_next;
7681	LIST_HEAD(clear_hash);
7682
7683	key.ip = start;
7684	key.flags = end;	/* overload flags, as it is unsigned long */
7685
7686	mutex_lock(&ftrace_lock);
7687
7688	/*
7689	 * If we are freeing module init memory, then check if
7690	 * any tracer is active. If so, we need to save a mapping of
7691	 * the module functions being freed with the address.
7692	 */
7693	if (mod && ftrace_ops_list != &ftrace_list_end)
7694		mod_map = allocate_ftrace_mod_map(mod, start, end);
7695
7696	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
7697		if (end < pg->records[0].ip ||
7698		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
7699			continue;
7700 again:
7701		rec = bsearch(&key, pg->records, pg->index,
7702			      sizeof(struct dyn_ftrace),
7703			      ftrace_cmp_recs);
7704		if (!rec)
7705			continue;
7706
7707		/* rec will be cleared from hashes after ftrace_lock unlock */
7708		add_to_clear_hash_list(&clear_hash, rec);
7709
7710		if (mod_map)
7711			save_ftrace_mod_rec(mod_map, rec);
7712
7713		pg->index--;
7714		ftrace_update_tot_cnt--;
7715		if (!pg->index) {
7716			*last_pg = pg->next;
7717			pg->next = tmp_page;
7718			tmp_page = pg;
7719			pg = container_of(last_pg, struct ftrace_page, next);
7720			if (!(*last_pg))
7721				ftrace_pages = pg;
7722			continue;
7723		}
7724		memmove(rec, rec + 1,
7725			(pg->index - (rec - pg->records)) * sizeof(*rec));
7726		/* More than one function may be in this block */
7727		goto again;
7728	}
7729	mutex_unlock(&ftrace_lock);
7730
7731	list_for_each_entry_safe(func, func_next, &clear_hash, list) {
7732		clear_func_from_hashes(func);
7733		kfree(func);
7734	}
7735	/* Need to synchronize with ftrace_location_range() */
7736	if (tmp_page) {
7737		synchronize_rcu();
7738		ftrace_free_pages(tmp_page);
7739	}
7740}
7741
7742void __init ftrace_free_init_mem(void)
7743{
7744	void *start = (void *)(&__init_begin);
7745	void *end = (void *)(&__init_end);
7746
7747	ftrace_boot_snapshot();
7748
7749	ftrace_free_mem(NULL, start, end);
7750}
7751
7752int __init __weak ftrace_dyn_arch_init(void)
7753{
7754	return 0;
7755}
7756
7757void __init ftrace_init(void)
7758{
7759	extern unsigned long __start_mcount_loc[];
7760	extern unsigned long __stop_mcount_loc[];
7761	unsigned long count, flags;
7762	int ret;
7763
7764	local_irq_save(flags);
7765	ret = ftrace_dyn_arch_init();
7766	local_irq_restore(flags);
7767	if (ret)
7768		goto failed;
7769
7770	count = __stop_mcount_loc - __start_mcount_loc;
7771	if (!count) {
7772		pr_info("ftrace: No functions to be traced?\n");
7773		goto failed;
7774	}
7775
7776	pr_info("ftrace: allocating %ld entries in %ld pages\n",
7777		count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
7778
7779	ret = ftrace_process_locs(NULL,
7780				  __start_mcount_loc,
7781				  __stop_mcount_loc);
7782	if (ret) {
7783		pr_warn("ftrace: failed to allocate entries for functions\n");
7784		goto failed;
7785	}
7786
7787	pr_info("ftrace: allocated %ld pages with %ld groups\n",
7788		ftrace_number_of_pages, ftrace_number_of_groups);
7789
7790	last_ftrace_enabled = ftrace_enabled = 1;
7791
7792	set_ftrace_early_filters();
7793
7794	return;
7795 failed:
7796	ftrace_disabled = 1;
7797}
7798
7799/* Do nothing if arch does not support this */
7800void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
7801{
7802}
7803
7804static void ftrace_update_trampoline(struct ftrace_ops *ops)
7805{
7806	unsigned long trampoline = ops->trampoline;
7807
7808	arch_ftrace_update_trampoline(ops);
7809	if (ops->trampoline && ops->trampoline != trampoline &&
7810	    (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
7811		/* Add to kallsyms before the perf events */
7812		ftrace_add_trampoline_to_kallsyms(ops);
7813		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
7814				   ops->trampoline, ops->trampoline_size, false,
7815				   FTRACE_TRAMPOLINE_SYM);
7816		/*
7817		 * Record the perf text poke event after the ksymbol register
7818		 * event.
7819		 */
7820		perf_event_text_poke((void *)ops->trampoline, NULL, 0,
7821				     (void *)ops->trampoline,
7822				     ops->trampoline_size);
7823	}
7824}
7825
7826void ftrace_init_trace_array(struct trace_array *tr)
7827{
7828	INIT_LIST_HEAD(&tr->func_probes);
7829	INIT_LIST_HEAD(&tr->mod_trace);
7830	INIT_LIST_HEAD(&tr->mod_notrace);
7831}
7832#else
7833
7834struct ftrace_ops global_ops = {
7835	.func			= ftrace_stub,
7836	.flags			= FTRACE_OPS_FL_INITIALIZED |
7837				  FTRACE_OPS_FL_PID,
7838};
7839
7840static int __init ftrace_nodyn_init(void)
7841{
7842	ftrace_enabled = 1;
7843	return 0;
7844}
7845core_initcall(ftrace_nodyn_init);
7846
7847static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
7848static inline void ftrace_startup_all(int command) { }
7849
7850static void ftrace_update_trampoline(struct ftrace_ops *ops)
7851{
7852}
7853
7854#endif /* CONFIG_DYNAMIC_FTRACE */
7855
7856__init void ftrace_init_global_array_ops(struct trace_array *tr)
7857{
7858	tr->ops = &global_ops;
7859	tr->ops->private = tr;
7860	ftrace_init_trace_array(tr);
7861	init_array_fgraph_ops(tr, tr->ops);
7862}
7863
7864void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
7865{
7866	/* If we filter on pids, update to use the pid function */
7867	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
7868		if (WARN_ON(tr->ops->func != ftrace_stub))
7869			printk("ftrace ops had %pS for function\n",
7870			       tr->ops->func);
7871	}
7872	tr->ops->func = func;
7873	tr->ops->private = tr;
7874}
7875
7876void ftrace_reset_array_ops(struct trace_array *tr)
7877{
7878	tr->ops->func = ftrace_stub;
7879}
7880
7881static nokprobe_inline void
7882__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7883		       struct ftrace_ops *ignored, struct ftrace_regs *fregs)
7884{
7885	struct pt_regs *regs = ftrace_get_regs(fregs);
7886	struct ftrace_ops *op;
7887	int bit;
7888
7889	/*
7890	 * The trace_test_and_set_recursion() call will disable preemption,
7891	 * which is required since some of the ops may be dynamically
7892	 * allocated; they must be freed after a synchronize_rcu().
7893	 */
7894	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7895	if (bit < 0)
7896		return;
7897
7898	do_for_each_ftrace_op(op, ftrace_ops_list) {
7899		/* Stub functions don't need to be called or tested */
7900		if (op->flags & FTRACE_OPS_FL_STUB)
7901			continue;
7902		/*
7903		 * Check the following for each ops before calling its func:
7904		 *  if the RCU flag is set, then rcu_is_watching() must be true.
7905		 *  Otherwise test if the ip matches the ops filter.
7906		 *
7907		 * If any of the above fails then the op->func() is not executed.
7908		 */
7909		if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
7910		    ftrace_ops_test(op, ip, regs)) {
7911			if (FTRACE_WARN_ON(!op->func)) {
7912				pr_warn("op=%p %pS\n", op, op);
7913				goto out;
7914			}
7915			op->func(ip, parent_ip, op, fregs);
7916		}
7917	} while_for_each_ftrace_op(op);
7918out:
7919	trace_clear_recursion(bit);
7920}
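/*
 * Illustrative sketch, not part of this file: external callbacks can use
 * the same recursion-guard pattern via the trylock/unlock helpers from
 * <linux/trace_recursion.h>. The callback name my_callback is hypothetical.
 */
#if 0
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	/* Returns a negative value if we are already recursing */
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	/* ... callback work runs with preemption disabled ... */

	ftrace_test_recursion_unlock(bit);
}
#endif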
7921
7922/*
7923 * Some archs only support passing ip and parent_ip. Even though
7924 * the list function ignores the op parameter, we do not want any
7925 * C side effects, where a function is called without the caller
7926 * sending a third parameter.
7927 * Archs must support both the regs and ftrace_ops at the same time.
7928 * If they support ftrace_ops, it is assumed they support regs.
7929 * If callbacks want to use regs, they must either check for regs
7930 * being NULL, or for CONFIG_DYNAMIC_FTRACE_WITH_REGS being set.
7931 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
7932 * An architecture can pass partial regs with ftrace_ops and still
7933 * set the ARCH_SUPPORTS_FTRACE_OPS.
7934 *
7935 * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
7936 * arch_ftrace_ops_list_func.
7937 */
7938#if ARCH_SUPPORTS_FTRACE_OPS
7939void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7940			       struct ftrace_ops *op, struct ftrace_regs *fregs)
7941{
7942	kmsan_unpoison_memory(fregs, ftrace_regs_size());
7943	__ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
7944}
7945#else
7946void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
7947{
7948	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
7949}
7950#endif
7951NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
7952
7953/*
7954 * If there's only one function registered but it does not support
7955 * recursion or needs RCU protection, then this function will be
7956 * called by the mcount trampoline.
7957 */
7958static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
7959				   struct ftrace_ops *op, struct ftrace_regs *fregs)
7960{
7961	int bit;
7962
7963	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7964	if (bit < 0)
7965		return;
7966
7967	if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
7968		op->func(ip, parent_ip, op, fregs);
7969
7970	trace_clear_recursion(bit);
7971}
7972NOKPROBE_SYMBOL(ftrace_ops_assist_func);
7973
7974/**
7975 * ftrace_ops_get_func - get the function a trampoline should call
7976 * @ops: the ops to get the function for
7977 *
7978 * Normally the mcount trampoline will call the ops->func, but there
7979 * are times that it should not. For example, if the ops does not
7980 * have its own recursion protection, then it should call the
7981 * ftrace_ops_assist_func() instead.
7982 *
7983 * Returns: the function that the trampoline should call for @ops.
7984 */
7985ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
7986{
7987	/*
7988	 * If the function does not handle recursion or needs to be RCU safe,
7989	 * then we need to call the assist handler.
7990	 */
7991	if (ops->flags & (FTRACE_OPS_FL_RECURSION |
7992			  FTRACE_OPS_FL_RCU))
7993		return ftrace_ops_assist_func;
7994
7995	return ops->func;
7996}
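/*
 * Illustrative sketch (hypothetical ops and callback): an ops flagged as
 * needing RCU protection is handed ftrace_ops_assist_func, which wraps
 * ops->func with the recursion and rcu_is_watching() checks above.
 */
#if 0
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct ftrace_regs *fregs);

static struct ftrace_ops my_rcu_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_RCU,	/* callback relies on RCU watching */
};

/* ftrace_ops_get_func(&my_rcu_ops) returns ftrace_ops_assist_func */
#endif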
7997
7998static void
7999ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
8000				     struct task_struct *prev,
8001				     struct task_struct *next,
8002				     unsigned int prev_state)
8003{
8004	struct trace_array *tr = data;
8005	struct trace_pid_list *pid_list;
8006	struct trace_pid_list *no_pid_list;
8007
8008	pid_list = rcu_dereference_sched(tr->function_pids);
8009	no_pid_list = rcu_dereference_sched(tr->function_no_pids);
8010
8011	if (trace_ignore_this_task(pid_list, no_pid_list, next))
8012		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8013			       FTRACE_PID_IGNORE);
8014	else
8015		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8016			       next->pid);
8017}
8018
8019static void
8020ftrace_pid_follow_sched_process_fork(void *data,
8021				     struct task_struct *self,
8022				     struct task_struct *task)
8023{
8024	struct trace_pid_list *pid_list;
8025	struct trace_array *tr = data;
8026
8027	pid_list = rcu_dereference_sched(tr->function_pids);
8028	trace_filter_add_remove_task(pid_list, self, task);
8029
8030	pid_list = rcu_dereference_sched(tr->function_no_pids);
8031	trace_filter_add_remove_task(pid_list, self, task);
8032}
8033
8034static void
8035ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
8036{
8037	struct trace_pid_list *pid_list;
8038	struct trace_array *tr = data;
8039
8040	pid_list = rcu_dereference_sched(tr->function_pids);
8041	trace_filter_add_remove_task(pid_list, NULL, task);
8042
8043	pid_list = rcu_dereference_sched(tr->function_no_pids);
8044	trace_filter_add_remove_task(pid_list, NULL, task);
8045}
8046
8047void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
8048{
8049	if (enable) {
8050		register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
8051						  tr);
8052		register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
8053						  tr);
8054	} else {
8055		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
8056						    tr);
8057		unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
8058						    tr);
8059	}
8060}
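/*
 * Illustrative userspace sketch (the tracefs mount point may differ):
 * the enable/disable paths above are driven by the "function-fork"
 * trace option, so children of filtered pids follow their parents.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/options/function-fork", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* also trace children of filtered pids */
	return fclose(f) ? 1 : 0;
}
#endif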
8061
8062static void clear_ftrace_pids(struct trace_array *tr, int type)
8063{
8064	struct trace_pid_list *pid_list;
8065	struct trace_pid_list *no_pid_list;
8066	int cpu;
8067
8068	pid_list = rcu_dereference_protected(tr->function_pids,
8069					     lockdep_is_held(&ftrace_lock));
8070	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
8071						lockdep_is_held(&ftrace_lock));
8072
8073	/* Make sure there's something to do */
8074	if (!pid_type_enabled(type, pid_list, no_pid_list))
8075		return;
8076
8077	/* See if the pids still need to be checked after this */
8078	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
8079		unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
8080		for_each_possible_cpu(cpu)
8081			per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
8082	}
8083
8084	if (type & TRACE_PIDS)
8085		rcu_assign_pointer(tr->function_pids, NULL);
8086
8087	if (type & TRACE_NO_PIDS)
8088		rcu_assign_pointer(tr->function_no_pids, NULL);
8089
8090	/* Wait until all users are no longer using pid filtering */
8091	synchronize_rcu();
8092
8093	if ((type & TRACE_PIDS) && pid_list)
8094		trace_pid_list_free(pid_list);
8095
8096	if ((type & TRACE_NO_PIDS) && no_pid_list)
8097		trace_pid_list_free(no_pid_list);
8098}
8099
8100void ftrace_clear_pids(struct trace_array *tr)
8101{
8102	mutex_lock(&ftrace_lock);
8103
8104	clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
8105
8106	mutex_unlock(&ftrace_lock);
8107}
8108
8109static void ftrace_pid_reset(struct trace_array *tr, int type)
8110{
8111	mutex_lock(&ftrace_lock);
8112	clear_ftrace_pids(tr, type);
8113
8114	ftrace_update_pid_func();
8115	ftrace_startup_all(0);
8116
8117	mutex_unlock(&ftrace_lock);
8118}
8119
8120/* Greater than any max PID */
8121#define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)
8122
8123static void *fpid_start(struct seq_file *m, loff_t *pos)
8124	__acquires(RCU)
8125{
8126	struct trace_pid_list *pid_list;
8127	struct trace_array *tr = m->private;
8128
8129	mutex_lock(&ftrace_lock);
8130	rcu_read_lock_sched();
8131
8132	pid_list = rcu_dereference_sched(tr->function_pids);
8133
8134	if (!pid_list)
8135		return !(*pos) ? FTRACE_NO_PIDS : NULL;
8136
8137	return trace_pid_start(pid_list, pos);
8138}
8139
8140static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
8141{
8142	struct trace_array *tr = m->private;
8143	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
8144
8145	if (v == FTRACE_NO_PIDS) {
8146		(*pos)++;
8147		return NULL;
8148	}
8149	return trace_pid_next(pid_list, v, pos);
8150}
8151
8152static void fpid_stop(struct seq_file *m, void *p)
8153	__releases(RCU)
8154{
8155	rcu_read_unlock_sched();
8156	mutex_unlock(&ftrace_lock);
8157}
8158
8159static int fpid_show(struct seq_file *m, void *v)
8160{
8161	if (v == FTRACE_NO_PIDS) {
8162		seq_puts(m, "no pid\n");
8163		return 0;
8164	}
8165
8166	return trace_pid_show(m, v);
8167}
8168
8169static const struct seq_operations ftrace_pid_sops = {
8170	.start = fpid_start,
8171	.next = fpid_next,
8172	.stop = fpid_stop,
8173	.show = fpid_show,
8174};
8175
8176static void *fnpid_start(struct seq_file *m, loff_t *pos)
8177	__acquires(RCU)
8178{
8179	struct trace_pid_list *pid_list;
8180	struct trace_array *tr = m->private;
8181
8182	mutex_lock(&ftrace_lock);
8183	rcu_read_lock_sched();
8184
8185	pid_list = rcu_dereference_sched(tr->function_no_pids);
8186
8187	if (!pid_list)
8188		return !(*pos) ? FTRACE_NO_PIDS : NULL;
8189
8190	return trace_pid_start(pid_list, pos);
8191}
8192
8193static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
8194{
8195	struct trace_array *tr = m->private;
8196	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
8197
8198	if (v == FTRACE_NO_PIDS) {
8199		(*pos)++;
8200		return NULL;
8201	}
8202	return trace_pid_next(pid_list, v, pos);
8203}
8204
8205static const struct seq_operations ftrace_no_pid_sops = {
8206	.start = fnpid_start,
8207	.next = fnpid_next,
8208	.stop = fpid_stop,
8209	.show = fpid_show,
8210};
8211
8212static int pid_open(struct inode *inode, struct file *file, int type)
8213{
8214	const struct seq_operations *seq_ops;
8215	struct trace_array *tr = inode->i_private;
8216	struct seq_file *m;
8217	int ret = 0;
8218
8219	ret = tracing_check_open_get_tr(tr);
8220	if (ret)
8221		return ret;
8222
8223	if ((file->f_mode & FMODE_WRITE) &&
8224	    (file->f_flags & O_TRUNC))
8225		ftrace_pid_reset(tr, type);
8226
8227	switch (type) {
8228	case TRACE_PIDS:
8229		seq_ops = &ftrace_pid_sops;
8230		break;
8231	case TRACE_NO_PIDS:
8232		seq_ops = &ftrace_no_pid_sops;
8233		break;
8234	default:
8235		trace_array_put(tr);
8236		WARN_ON_ONCE(1);
8237		return -EINVAL;
8238	}
8239
8240	ret = seq_open(file, seq_ops);
8241	if (ret < 0) {
8242		trace_array_put(tr);
8243	} else {
8244		m = file->private_data;
8245		/* copy tr over to seq ops */
8246		m->private = tr;
8247	}
8248
8249	return ret;
8250}
8251
8252static int
8253ftrace_pid_open(struct inode *inode, struct file *file)
8254{
8255	return pid_open(inode, file, TRACE_PIDS);
8256}
8257
8258static int
8259ftrace_no_pid_open(struct inode *inode, struct file *file)
8260{
8261	return pid_open(inode, file, TRACE_NO_PIDS);
8262}
8263
8264static void ignore_task_cpu(void *data)
8265{
8266	struct trace_array *tr = data;
8267	struct trace_pid_list *pid_list;
8268	struct trace_pid_list *no_pid_list;
8269
8270	/*
8271	 * This function is called by on_each_cpu() while
8272	 * ftrace_lock is held.
8273	 */
8274	pid_list = rcu_dereference_protected(tr->function_pids,
8275					     mutex_is_locked(&ftrace_lock));
8276	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
8277						mutex_is_locked(&ftrace_lock));
8278
8279	if (trace_ignore_this_task(pid_list, no_pid_list, current))
8280		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8281			       FTRACE_PID_IGNORE);
8282	else
8283		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
8284			       current->pid);
8285}
8286
8287static ssize_t
8288pid_write(struct file *filp, const char __user *ubuf,
8289	  size_t cnt, loff_t *ppos, int type)
8290{
8291	struct seq_file *m = filp->private_data;
8292	struct trace_array *tr = m->private;
8293	struct trace_pid_list *filtered_pids;
8294	struct trace_pid_list *other_pids;
8295	struct trace_pid_list *pid_list;
8296	ssize_t ret;
8297
8298	if (!cnt)
8299		return 0;
8300
8301	mutex_lock(&ftrace_lock);
8302
8303	switch (type) {
8304	case TRACE_PIDS:
8305		filtered_pids = rcu_dereference_protected(tr->function_pids,
8306					     lockdep_is_held(&ftrace_lock));
8307		other_pids = rcu_dereference_protected(tr->function_no_pids,
8308					     lockdep_is_held(&ftrace_lock));
8309		break;
8310	case TRACE_NO_PIDS:
8311		filtered_pids = rcu_dereference_protected(tr->function_no_pids,
8312					     lockdep_is_held(&ftrace_lock));
8313		other_pids = rcu_dereference_protected(tr->function_pids,
8314					     lockdep_is_held(&ftrace_lock));
8315		break;
8316	default:
8317		ret = -EINVAL;
8318		WARN_ON_ONCE(1);
8319		goto out;
8320	}
8321
8322	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
8323	if (ret < 0)
8324		goto out;
8325
8326	switch (type) {
8327	case TRACE_PIDS:
8328		rcu_assign_pointer(tr->function_pids, pid_list);
8329		break;
8330	case TRACE_NO_PIDS:
8331		rcu_assign_pointer(tr->function_no_pids, pid_list);
8332		break;
8333	}
8334
8335
8336	if (filtered_pids) {
8337		synchronize_rcu();
8338		trace_pid_list_free(filtered_pids);
8339	} else if (pid_list && !other_pids) {
8340		/* Register a probe to set whether to ignore the tracing of a task */
8341		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
8342	}
8343
8344	/*
8345	 * Ignoring of pids is done at task switch. But we have to
8346	 * check for those tasks that are currently running.
8347	 * Always do this in case a pid was appended or removed.
8348	 */
8349	on_each_cpu(ignore_task_cpu, tr, 1);
8350
8351	ftrace_update_pid_func();
8352	ftrace_startup_all(0);
8353 out:
8354	mutex_unlock(&ftrace_lock);
8355
8356	if (ret > 0)
8357		*ppos += ret;
8358
8359	return ret;
8360}
8361
8362static ssize_t
8363ftrace_pid_write(struct file *filp, const char __user *ubuf,
8364		 size_t cnt, loff_t *ppos)
8365{
8366	return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
8367}
8368
8369static ssize_t
8370ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
8371		    size_t cnt, loff_t *ppos)
8372{
8373	return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
8374}
8375
8376static int
8377ftrace_pid_release(struct inode *inode, struct file *file)
8378{
8379	struct trace_array *tr = inode->i_private;
8380
8381	trace_array_put(tr);
8382
8383	return seq_release(inode, file);
8384}
8385
8386static const struct file_operations ftrace_pid_fops = {
8387	.open		= ftrace_pid_open,
8388	.write		= ftrace_pid_write,
8389	.read		= seq_read,
8390	.llseek		= tracing_lseek,
8391	.release	= ftrace_pid_release,
8392};
8393
8394static const struct file_operations ftrace_no_pid_fops = {
8395	.open		= ftrace_no_pid_open,
8396	.write		= ftrace_no_pid_write,
8397	.read		= seq_read,
8398	.llseek		= tracing_lseek,
8399	.release	= ftrace_pid_release,
8400};
8401
8402void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8403{
8404	trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
8405			    tr, &ftrace_pid_fops);
8406	trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
8407			  d_tracer, tr, &ftrace_no_pid_fops);
8408}
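/*
 * Illustrative userspace sketch (hypothetical PID; the tracefs mount
 * point may differ): the files created above take PID lists, e.g.
 * restricting the function tracer to a single task.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/set_ftrace_pid", "w");

	if (!f)
		return 1;
	fprintf(f, "%d\n", 1234);	/* trace only PID 1234 */
	return fclose(f) ? 1 : 0;
}
#endif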
8409
8410void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
8411					 struct dentry *d_tracer)
8412{
8413	/* Only the top level directory has the dyn_tracefs and profile */
8414	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
8415
8416	ftrace_init_dyn_tracefs(d_tracer);
8417	ftrace_profile_tracefs(d_tracer);
8418}
8419
8420/**
8421 * ftrace_kill - kill ftrace
8422 *
8423 * This function should be used by panic code. It stops ftrace
8424 * but in a not so nice way: nothing is cleaned up, which is what
8425 * allows it to be called from atomic context such as the panic path.
8426 */
8427void ftrace_kill(void)
8428{
8429	ftrace_disabled = 1;
8430	ftrace_enabled = 0;
8431	ftrace_trace_function = ftrace_stub;
8432	kprobe_ftrace_kill();
8433}
8434
8435/**
8436 * ftrace_is_dead - Test if ftrace is dead or not.
8437 *
8438 * Returns: 1 if ftrace is "dead", zero otherwise.
8439 */
8440int ftrace_is_dead(void)
8441{
8442	return ftrace_disabled;
8443}
8444
8445#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
8446/*
8447 * When registering ftrace_ops with IPMODIFY, it is necessary to make sure
8448 * it doesn't conflict with any direct ftrace_ops. If there is an existing
8449 * direct ftrace_ops on a kernel function being patched, call
8450 * FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER on it to enable sharing.
8451 *
8452 * @ops:     ftrace_ops being registered.
8453 *
8454 * Returns:
8455 *         0 on success;
8456 *         Negative on failure.
8457 */
8458static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8459{
8460	struct ftrace_func_entry *entry;
8461	struct ftrace_hash *hash;
8462	struct ftrace_ops *op;
8463	int size, i, ret;
8464
8465	lockdep_assert_held_once(&direct_mutex);
8466
8467	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8468		return 0;
8469
8470	hash = ops->func_hash->filter_hash;
8471	size = 1 << hash->size_bits;
8472	for (i = 0; i < size; i++) {
8473		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8474			unsigned long ip = entry->ip;
8475			bool found_op = false;
8476
8477			mutex_lock(&ftrace_lock);
8478			do_for_each_ftrace_op(op, ftrace_ops_list) {
8479				if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8480					continue;
8481				if (ops_references_ip(op, ip)) {
8482					found_op = true;
8483					break;
8484				}
8485			} while_for_each_ftrace_op(op);
8486			mutex_unlock(&ftrace_lock);
8487
8488			if (found_op) {
8489				if (!op->ops_func)
8490					return -EBUSY;
8491
8492				ret = op->ops_func(op, FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER);
8493				if (ret)
8494					return ret;
8495			}
8496		}
8497	}
8498
8499	return 0;
8500}
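/*
 * Illustrative sketch (hypothetical handler): a DIRECT ops opts in to
 * coexisting with an IPMODIFY peer by returning 0 from its ops_func
 * for the command issued above.
 */
#if 0
static int my_ops_func(struct ftrace_ops *ops, enum ftrace_ops_cmd cmd)
{
	switch (cmd) {
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
		/* retarget this direct trampoline so both can coexist */
		return 0;
	case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif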
8501
8502/*
8503 * Similar to prepare_direct_functions_for_ipmodify, but cleans up after
8504 * an ops with IPMODIFY is unregistered. The cleanup is optional for most
8505 * DIRECT ops.
8506 */
8507static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8508{
8509	struct ftrace_func_entry *entry;
8510	struct ftrace_hash *hash;
8511	struct ftrace_ops *op;
8512	int size, i;
8513
8514	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
8515		return;
8516
8517	mutex_lock(&direct_mutex);
8518
8519	hash = ops->func_hash->filter_hash;
8520	size = 1 << hash->size_bits;
8521	for (i = 0; i < size; i++) {
8522		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
8523			unsigned long ip = entry->ip;
8524			bool found_op = false;
8525
8526			mutex_lock(&ftrace_lock);
8527			do_for_each_ftrace_op(op, ftrace_ops_list) {
8528				if (!(op->flags & FTRACE_OPS_FL_DIRECT))
8529					continue;
8530				if (ops_references_ip(op, ip)) {
8531					found_op = true;
8532					break;
8533				}
8534			} while_for_each_ftrace_op(op);
8535			mutex_unlock(&ftrace_lock);
8536
8537			/* The cleanup is optional, ignore any errors */
8538			if (found_op && op->ops_func)
8539				op->ops_func(op, FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER);
8540		}
8541	}
8542	mutex_unlock(&direct_mutex);
8543}
8544
8545#define lock_direct_mutex()	mutex_lock(&direct_mutex)
8546#define unlock_direct_mutex()	mutex_unlock(&direct_mutex)
8547
8548#else  /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8549
8550static int prepare_direct_functions_for_ipmodify(struct ftrace_ops *ops)
8551{
8552	return 0;
8553}
8554
8555static void cleanup_direct_functions_after_ipmodify(struct ftrace_ops *ops)
8556{
8557}
8558
8559#define lock_direct_mutex()	do { } while (0)
8560#define unlock_direct_mutex()	do { } while (0)
8561
8562#endif  /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
8563
8564/*
8565 * Similar to register_ftrace_function, except we don't lock direct_mutex.
8566 */
8567static int register_ftrace_function_nolock(struct ftrace_ops *ops)
8568{
8569	int ret;
8570
8571	ftrace_ops_init(ops);
8572
8573	mutex_lock(&ftrace_lock);
8574
8575	ret = ftrace_startup(ops, 0);
8576
8577	mutex_unlock(&ftrace_lock);
8578
8579	return ret;
8580}
8581
8582/**
8583 * register_ftrace_function - register a function for profiling
8584 * @ops:	ops structure that holds the function for profiling.
8585 *
8586 * Register a function to be called by all functions in the
8587 * kernel.
8588 *
8589 * Note: @ops->func and all the functions it calls must be labeled
8590 *       with "notrace", otherwise it will go into a
8591 *       recursive loop.
8592 */
8593int register_ftrace_function(struct ftrace_ops *ops)
8594{
8595	int ret;
8596
8597	lock_direct_mutex();
8598	ret = prepare_direct_functions_for_ipmodify(ops);
8599	if (ret < 0)
8600		goto out_unlock;
8601
8602	ret = register_ftrace_function_nolock(ops);
8603
8604out_unlock:
8605	unlock_direct_mutex();
8606	return ret;
8607}
8608EXPORT_SYMBOL_GPL(register_ftrace_function);
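/*
 * Illustrative sketch (hypothetical module, not part of this file): a
 * minimal user of the API above. With no filter hash set, the callback
 * runs for every traced function, and it must be notrace as the
 * kerneldoc warns.
 */
#if 0
#include <linux/ftrace.h>
#include <linux/module.h>

static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* called on every traced function entry */
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
#endif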
8609
8610/**
8611 * unregister_ftrace_function - unregister a function for profiling.
8612 * @ops:	ops structure that holds the function to unregister
8613 *
8614 * Unregister a function that was added to be called by ftrace profiling.
8615 */
8616int unregister_ftrace_function(struct ftrace_ops *ops)
8617{
8618	int ret;
8619
8620	mutex_lock(&ftrace_lock);
8621	ret = ftrace_shutdown(ops, 0);
8622	mutex_unlock(&ftrace_lock);
8623
8624	cleanup_direct_functions_after_ipmodify(ops);
8625	return ret;
8626}
8627EXPORT_SYMBOL_GPL(unregister_ftrace_function);
8628
8629static int symbols_cmp(const void *a, const void *b)
8630{
8631	const char **str_a = (const char **) a;
8632	const char **str_b = (const char **) b;
8633
8634	return strcmp(*str_a, *str_b);
8635}
8636
8637struct kallsyms_data {
8638	unsigned long *addrs;
8639	const char **syms;
8640	size_t cnt;
8641	size_t found;
8642};
8643
8644/*
8645 * This function gets called for all kernel and module symbols and
8646 * returns 1 if we resolved all the requested symbols, 0 otherwise.
8647 */
8648static int kallsyms_callback(void *data, const char *name, unsigned long addr)
8649{
8650	struct kallsyms_data *args = data;
8651	const char **sym;
8652	int idx;
8653
8654	sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
8655	if (!sym)
8656		return 0;
8657
8658	idx = sym - args->syms;
8659	if (args->addrs[idx])
8660		return 0;
8661
8662	if (!ftrace_location(addr))
8663		return 0;
8664
8665	args->addrs[idx] = addr;
8666	args->found++;
8667	return args->found == args->cnt ? 1 : 0;
8668}
8669
8670/**
8671 * ftrace_lookup_symbols - Lookup addresses for array of symbols
8672 *
8673 * @sorted_syms: array of symbol name pointers to resolve,
8674 * must be alphabetically sorted
8675 * @cnt: number of symbols/addresses in the @sorted_syms/@addrs arrays
8676 * @addrs: array for storing the resulting addresses
8677 *
8678 * This function looks up addresses for the symbols provided in the
8679 * @sorted_syms array (must be alphabetically sorted) and stores them in
8680 * the @addrs array, which needs to be big enough to store at least
8681 * @cnt addresses.
8682 *
8683 * Returns: 0 if all provided symbols are found, -ESRCH otherwise.
8684 */
8685int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
8686{
8687	struct kallsyms_data args;
8688	int found_all;
8689
8690	memset(addrs, 0, sizeof(*addrs) * cnt);
8691	args.addrs = addrs;
8692	args.syms = sorted_syms;
8693	args.cnt = cnt;
8694	args.found = 0;
8695
8696	found_all = kallsyms_on_each_symbol(kallsyms_callback, &args);
8697	if (found_all)
8698		return 0;
8699	found_all = module_kallsyms_on_each_symbol(NULL, kallsyms_callback, &args);
8700	return found_all ? 0 : -ESRCH;
8701}
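/*
 * Illustrative sketch (hypothetical symbol set): callers must pass an
 * array already sorted the way symbols_cmp() expects, e.g.:
 */
#if 0
static int __init my_resolve_init(void)
{
	static const char *syms[] = { "schedule", "vfs_read" };	/* pre-sorted */
	unsigned long addrs[ARRAY_SIZE(syms)];

	/* 0 on success; addrs[i] then holds the patchable address of syms[i] */
	return ftrace_lookup_symbols(syms, ARRAY_SIZE(syms), addrs);
}
#endif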
8702
8703#ifdef CONFIG_SYSCTL
8704
8705#ifdef CONFIG_DYNAMIC_FTRACE
8706static void ftrace_startup_sysctl(void)
8707{
8708	int command;
8709
8710	if (unlikely(ftrace_disabled))
8711		return;
8712
8713	/* Force update next time */
8714	saved_ftrace_func = NULL;
8715	/* ftrace_start_up is true if we want ftrace running */
8716	if (ftrace_start_up) {
8717		command = FTRACE_UPDATE_CALLS;
8718		if (ftrace_graph_active)
8719			command |= FTRACE_START_FUNC_RET;
8720		ftrace_startup_enable(command);
8721	}
8722}
8723
8724static void ftrace_shutdown_sysctl(void)
8725{
8726	int command;
8727
8728	if (unlikely(ftrace_disabled))
8729		return;
8730
8731	/* ftrace_start_up is true if ftrace is running */
8732	if (ftrace_start_up) {
8733		command = FTRACE_DISABLE_CALLS;
8734		if (ftrace_graph_active)
8735			command |= FTRACE_STOP_FUNC_RET;
8736		ftrace_run_update_code(command);
8737	}
8738}
8739#else
8740# define ftrace_startup_sysctl()       do { } while (0)
8741# define ftrace_shutdown_sysctl()      do { } while (0)
8742#endif /* CONFIG_DYNAMIC_FTRACE */
8743
8744static bool is_permanent_ops_registered(void)
8745{
8746	struct ftrace_ops *op;
8747
8748	do_for_each_ftrace_op(op, ftrace_ops_list) {
8749		if (op->flags & FTRACE_OPS_FL_PERMANENT)
8750			return true;
8751	} while_for_each_ftrace_op(op);
8752
8753	return false;
8754}
8755
8756static int
8757ftrace_enable_sysctl(const struct ctl_table *table, int write,
8758		     void *buffer, size_t *lenp, loff_t *ppos)
8759{
8760	int ret = -ENODEV;
8761
8762	mutex_lock(&ftrace_lock);
8763
8764	if (unlikely(ftrace_disabled))
8765		goto out;
8766
8767	ret = proc_dointvec(table, write, buffer, lenp, ppos);
8768
8769	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
8770		goto out;
8771
8772	if (ftrace_enabled) {
8773
8774		/* we are starting ftrace again */
8775		if (rcu_dereference_protected(ftrace_ops_list,
8776			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
8777			update_ftrace_function();
8778
8779		ftrace_startup_sysctl();
8780
8781	} else {
8782		if (is_permanent_ops_registered()) {
8783			ftrace_enabled = true;
8784			ret = -EBUSY;
8785			goto out;
8786		}
8787
8788		/* stopping ftrace calls (just send to ftrace_stub) */
8789		ftrace_trace_function = ftrace_stub;
8790
8791		ftrace_shutdown_sysctl();
8792	}
8793
8794	last_ftrace_enabled = !!ftrace_enabled;
8795 out:
8796	mutex_unlock(&ftrace_lock);
8797	return ret;
8798}
8799
8800static struct ctl_table ftrace_sysctls[] = {
8801	{
8802		.procname       = "ftrace_enabled",
8803		.data           = &ftrace_enabled,
8804		.maxlen         = sizeof(int),
8805		.mode           = 0644,
8806		.proc_handler   = ftrace_enable_sysctl,
8807	},
8808};
8809
8810static int __init ftrace_sysctl_init(void)
8811{
8812	register_sysctl_init("kernel", ftrace_sysctls);
8813	return 0;
8814}
8815late_initcall(ftrace_sysctl_init);
8816#endif
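/*
 * Illustrative userspace sketch (path assumes procfs mounted at /proc):
 * the sysctl registered above appears as kernel.ftrace_enabled. The
 * handler refuses to clear it while a PERMANENT ops is registered
 * (-EBUSY).
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/ftrace_enabled", "w");

	if (!f)
		return 1;
	fputs("0\n", f);	/* stop function tracing callbacks */
	return fclose(f) ? 1 : 0;
}
#endif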
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Infrastructure for profiling code inserted by 'gcc -pg'.
   4 *
   5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
   7 *
   8 * Originally ported from the -rt patch by:
   9 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
  10 *
  11 * Based on code in the latency_tracer, that is:
  12 *
  13 *  Copyright (C) 2004-2006 Ingo Molnar
  14 *  Copyright (C) 2004 Nadia Yvette Chambers
  15 */
  16
  17#include <linux/stop_machine.h>
  18#include <linux/clocksource.h>
  19#include <linux/sched/task.h>
  20#include <linux/kallsyms.h>
  21#include <linux/security.h>
  22#include <linux/seq_file.h>
  23#include <linux/tracefs.h>
  24#include <linux/hardirq.h>
  25#include <linux/kthread.h>
  26#include <linux/uaccess.h>
  27#include <linux/bsearch.h>
  28#include <linux/module.h>
  29#include <linux/ftrace.h>
  30#include <linux/sysctl.h>
  31#include <linux/slab.h>
  32#include <linux/ctype.h>
  33#include <linux/sort.h>
  34#include <linux/list.h>
  35#include <linux/hash.h>
  36#include <linux/rcupdate.h>
  37#include <linux/kprobes.h>
  38
  39#include <trace/events/sched.h>
  40
  41#include <asm/sections.h>
  42#include <asm/setup.h>
  43
  44#include "ftrace_internal.h"
  45#include "trace_output.h"
  46#include "trace_stat.h"
  47
 
 
 
 
 
 
  48#define FTRACE_WARN_ON(cond)			\
  49	({					\
  50		int ___r = cond;		\
  51		if (WARN_ON(___r))		\
  52			ftrace_kill();		\
  53		___r;				\
  54	})
  55
  56#define FTRACE_WARN_ON_ONCE(cond)		\
  57	({					\
  58		int ___r = cond;		\
  59		if (WARN_ON_ONCE(___r))		\
  60			ftrace_kill();		\
  61		___r;				\
  62	})
  63
  64/* hash bits for specific function selection */
  65#define FTRACE_HASH_BITS 7
  66#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
  67#define FTRACE_HASH_DEFAULT_BITS 10
  68#define FTRACE_HASH_MAX_BITS 12
  69
  70#ifdef CONFIG_DYNAMIC_FTRACE
  71#define INIT_OPS_HASH(opsname)	\
  72	.func_hash		= &opsname.local_hash,			\
  73	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
 
  74#else
  75#define INIT_OPS_HASH(opsname)
  76#endif
  77
  78enum {
  79	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
  80	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
  81};
  82
  83struct ftrace_ops ftrace_list_end __read_mostly = {
  84	.func		= ftrace_stub,
  85	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
  86	INIT_OPS_HASH(ftrace_list_end)
  87};
  88
  89/* ftrace_enabled is a method to turn ftrace on or off */
  90int ftrace_enabled __read_mostly;
  91static int last_ftrace_enabled;
  92
  93/* Current function tracing op */
  94struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
  95/* What to set function_trace_op to */
  96static struct ftrace_ops *set_function_trace_op;
  97
  98static bool ftrace_pids_enabled(struct ftrace_ops *ops)
  99{
 100	struct trace_array *tr;
 101
 102	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
 103		return false;
 104
 105	tr = ops->private;
 106
 107	return tr->function_pids != NULL;
 108}
 109
 110static void ftrace_update_trampoline(struct ftrace_ops *ops);
 111
 112/*
 113 * ftrace_disabled is set when an anomaly is discovered.
 114 * ftrace_disabled is much stronger than ftrace_enabled.
 115 */
 116static int ftrace_disabled __read_mostly;
 117
 118DEFINE_MUTEX(ftrace_lock);
 119
 120struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
 121ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 122struct ftrace_ops global_ops;
 123
 124#if ARCH_SUPPORTS_FTRACE_OPS
 125static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 126				 struct ftrace_ops *op, struct pt_regs *regs);
 127#else
 128/* See comment below, where ftrace_ops_list_func is defined */
 129static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 130#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 131#endif
 132
 133static inline void ftrace_ops_init(struct ftrace_ops *ops)
 134{
 135#ifdef CONFIG_DYNAMIC_FTRACE
 136	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
 137		mutex_init(&ops->local_hash.regex_lock);
 
 138		ops->func_hash = &ops->local_hash;
 139		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
 140	}
 141#endif
 142}
 143
 
 144static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
 145			    struct ftrace_ops *op, struct pt_regs *regs)
 146{
 147	struct trace_array *tr = op->private;
 
 148
 149	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
 150		return;
 
 
 
 
 
 
 151
 152	op->saved_func(ip, parent_ip, op, regs);
 153}
 154
 155static void ftrace_sync(struct work_struct *work)
 156{
 157	/*
 158	 * This function is just a stub to implement a hard force
 159	 * of synchronize_rcu(). This requires synchronizing
 160	 * tasks even in userspace and idle.
 161	 *
 162	 * Yes, function tracing is rude.
 163	 */
 164}
 165
 166static void ftrace_sync_ipi(void *data)
 167{
 168	/* Probably not needed, but do it anyway */
 169	smp_rmb();
 170}
 171
 172static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 173{
 174	/*
 175	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
 176	 * then it needs to call the list anyway.
 177	 */
 178	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
 179	    FTRACE_FORCE_LIST_FUNC)
 180		return ftrace_ops_list_func;
 181
 182	return ftrace_ops_get_func(ops);
 183}
 184
 185static void update_ftrace_function(void)
 186{
 187	ftrace_func_t func;
 188
 189	/*
 190	 * Prepare the ftrace_ops that the arch callback will use.
 191	 * If there's only one ftrace_ops registered, the ftrace_ops_list
 192	 * will point to the ops we want.
 193	 */
 194	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
 195						lockdep_is_held(&ftrace_lock));
 196
 197	/* If there's no ftrace_ops registered, just call the stub function */
 198	if (set_function_trace_op == &ftrace_list_end) {
 199		func = ftrace_stub;
 200
 201	/*
 202	 * If we are at the end of the list and this ops is
 203	 * recursion safe and not dynamic and the arch supports passing ops,
 204	 * then have the mcount trampoline call the function directly.
 205	 */
 206	} else if (rcu_dereference_protected(ftrace_ops_list->next,
 207			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
 208		func = ftrace_ops_get_list_func(ftrace_ops_list);
 209
 210	} else {
 211		/* Just use the default ftrace_ops */
 212		set_function_trace_op = &ftrace_list_end;
 213		func = ftrace_ops_list_func;
 214	}
 215
 216	update_function_graph_func();
 217
 218	/* If there's no change, then do nothing more here */
 219	if (ftrace_trace_function == func)
 220		return;
 221
 222	/*
 223	 * If we are using the list function, it doesn't care
 224	 * about the function_trace_ops.
 225	 */
 226	if (func == ftrace_ops_list_func) {
 227		ftrace_trace_function = func;
 228		/*
 229		 * Don't even bother setting function_trace_ops,
 230		 * it would be racy to do so anyway.
 231		 */
 232		return;
 233	}
 234
 235#ifndef CONFIG_DYNAMIC_FTRACE
 236	/*
 237	 * For static tracing, we need to be a bit more careful.
 238	 * The function change takes affect immediately. Thus,
 239	 * we need to coorditate the setting of the function_trace_ops
 240	 * with the setting of the ftrace_trace_function.
 241	 *
 242	 * Set the function to the list ops, which will call the
 243	 * function we want, albeit indirectly, but it handles the
 244	 * ftrace_ops and doesn't depend on function_trace_op.
 245	 */
 246	ftrace_trace_function = ftrace_ops_list_func;
 247	/*
 248	 * Make sure all CPUs see this. Yes this is slow, but static
 249	 * tracing is slow and nasty to have enabled.
 250	 */
 251	schedule_on_each_cpu(ftrace_sync);
 252	/* Now all cpus are using the list ops. */
 253	function_trace_op = set_function_trace_op;
 254	/* Make sure the function_trace_op is visible on all CPUs */
 255	smp_wmb();
 256	/* Nasty way to force a rmb on all cpus */
 257	smp_call_function(ftrace_sync_ipi, NULL, 1);
 258	/* OK, we are all set to update the ftrace_trace_function now! */
 259#endif /* !CONFIG_DYNAMIC_FTRACE */
 260
 261	ftrace_trace_function = func;
 262}
 263
 264static void add_ftrace_ops(struct ftrace_ops __rcu **list,
 265			   struct ftrace_ops *ops)
 266{
 267	rcu_assign_pointer(ops->next, *list);
 268
 269	/*
 270	 * We are entering ops into the list but another
 271	 * CPU might be walking that list. We need to make sure
 272	 * the ops->next pointer is valid before another CPU sees
 273	 * the ops pointer included into the list.
 274	 */
 275	rcu_assign_pointer(*list, ops);
 276}
 277
 278static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
 279			     struct ftrace_ops *ops)
 280{
 281	struct ftrace_ops **p;
 282
 283	/*
 284	 * If we are removing the last function, then simply point
 285	 * to the ftrace_stub.
 286	 */
 287	if (rcu_dereference_protected(*list,
 288			lockdep_is_held(&ftrace_lock)) == ops &&
 289	    rcu_dereference_protected(ops->next,
 290			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
 291		*list = &ftrace_list_end;
 292		return 0;
 293	}
 294
 295	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
 296		if (*p == ops)
 297			break;
 298
 299	if (*p != ops)
 300		return -1;
 301
 302	*p = (*p)->next;
 303	return 0;
 304}
 305
 306static void ftrace_update_trampoline(struct ftrace_ops *ops);
 307
 308int __register_ftrace_function(struct ftrace_ops *ops)
 309{
 310	if (ops->flags & FTRACE_OPS_FL_DELETED)
 311		return -EINVAL;
 312
 313	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 314		return -EBUSY;
 315
 316#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 317	/*
 318	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
 319	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
 320	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
 321	 */
 322	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
 323	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
 324		return -EINVAL;
 325
 326	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
 327		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
 328#endif
 
 
 329
 330	if (!core_kernel_data((unsigned long)ops))
 331		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 332
 333	add_ftrace_ops(&ftrace_ops_list, ops);
 334
 335	/* Always save the function, and reset at unregistering */
 336	ops->saved_func = ops->func;
 337
 338	if (ftrace_pids_enabled(ops))
 339		ops->func = ftrace_pid_func;
 340
 341	ftrace_update_trampoline(ops);
 342
 343	if (ftrace_enabled)
 344		update_ftrace_function();
 345
 346	return 0;
 347}
 348
 349int __unregister_ftrace_function(struct ftrace_ops *ops)
 350{
 351	int ret;
 352
 353	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
 354		return -EBUSY;
 355
 356	ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 357
 358	if (ret < 0)
 359		return ret;
 360
 361	if (ftrace_enabled)
 362		update_ftrace_function();
 363
 364	ops->func = ops->saved_func;
 365
 366	return 0;
 367}
 368
 369static void ftrace_update_pid_func(void)
 370{
 371	struct ftrace_ops *op;
 372
 373	/* Only do something if we are tracing something */
 374	if (ftrace_trace_function == ftrace_stub)
 375		return;
 376
 377	do_for_each_ftrace_op(op, ftrace_ops_list) {
 378		if (op->flags & FTRACE_OPS_FL_PID) {
 379			op->func = ftrace_pids_enabled(op) ?
 380				ftrace_pid_func : op->saved_func;
 381			ftrace_update_trampoline(op);
 382		}
 383	} while_for_each_ftrace_op(op);
 384
 
 
 385	update_ftrace_function();
 386}
 387
 388#ifdef CONFIG_FUNCTION_PROFILER
 389struct ftrace_profile {
 390	struct hlist_node		node;
 391	unsigned long			ip;
 392	unsigned long			counter;
 393#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 394	unsigned long long		time;
 395	unsigned long long		time_squared;
 396#endif
 397};
 398
 399struct ftrace_profile_page {
 400	struct ftrace_profile_page	*next;
 401	unsigned long			index;
 402	struct ftrace_profile		records[];
 403};
 404
 405struct ftrace_profile_stat {
 406	atomic_t			disabled;
 407	struct hlist_head		*hash;
 408	struct ftrace_profile_page	*pages;
 409	struct ftrace_profile_page	*start;
 410	struct tracer_stat		stat;
 411};
 412
 413#define PROFILE_RECORDS_SIZE						\
 414	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
 415
 416#define PROFILES_PER_PAGE					\
 417	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
 418
 419static int ftrace_profile_enabled __read_mostly;
 420
 421/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
 422static DEFINE_MUTEX(ftrace_profile_lock);
 423
 424static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
 425
 426#define FTRACE_PROFILE_HASH_BITS 10
 427#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
 428
 429static void *
 430function_stat_next(void *v, int idx)
 431{
 432	struct ftrace_profile *rec = v;
 433	struct ftrace_profile_page *pg;
 434
 435	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
 436
 437 again:
 438	if (idx != 0)
 439		rec++;
 440
 441	if ((void *)rec >= (void *)&pg->records[pg->index]) {
 442		pg = pg->next;
 443		if (!pg)
 444			return NULL;
 445		rec = &pg->records[0];
 446		if (!rec->counter)
 447			goto again;
 448	}
 449
 450	return rec;
 451}
 452
 453static void *function_stat_start(struct tracer_stat *trace)
 454{
 455	struct ftrace_profile_stat *stat =
 456		container_of(trace, struct ftrace_profile_stat, stat);
 457
 458	if (!stat || !stat->start)
 459		return NULL;
 460
 461	return function_stat_next(&stat->start->records[0], 0);
 462}
 463
 464#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 465/* function graph compares on total time */
 466static int function_stat_cmp(void *p1, void *p2)
 467{
 468	struct ftrace_profile *a = p1;
 469	struct ftrace_profile *b = p2;
 470
 471	if (a->time < b->time)
 472		return -1;
 473	if (a->time > b->time)
 474		return 1;
 475	else
 476		return 0;
 477}
 478#else
 479/* not function graph compares against hits */
 480static int function_stat_cmp(void *p1, void *p2)
 481{
 482	struct ftrace_profile *a = p1;
 483	struct ftrace_profile *b = p2;
 484
 485	if (a->counter < b->counter)
 486		return -1;
 487	if (a->counter > b->counter)
 488		return 1;
 489	else
 490		return 0;
 491}
 492#endif
 493
 494static int function_stat_headers(struct seq_file *m)
 495{
 496#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 497	seq_puts(m, "  Function                               "
 498		 "Hit    Time            Avg             s^2\n"
 499		    "  --------                               "
 500		 "---    ----            ---             ---\n");
 501#else
 502	seq_puts(m, "  Function                               Hit\n"
 503		    "  --------                               ---\n");
 504#endif
 505	return 0;
 506}
 507
 508static int function_stat_show(struct seq_file *m, void *v)
 509{
 510	struct ftrace_profile *rec = v;
 511	char str[KSYM_SYMBOL_LEN];
 512	int ret = 0;
 513#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 514	static struct trace_seq s;
 515	unsigned long long avg;
 516	unsigned long long stddev;
 
 517#endif
 518	mutex_lock(&ftrace_profile_lock);
 519
 520	/* we raced with function_profile_reset() */
 521	if (unlikely(rec->counter == 0)) {
 522		ret = -EBUSY;
 523		goto out;
 524	}
 525
 526#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 527	avg = rec->time;
 528	do_div(avg, rec->counter);
 529	if (tracing_thresh && (avg < tracing_thresh))
 530		goto out;
 531#endif
 532
 533	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 534	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
 535
 536#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 537	seq_puts(m, "    ");
 538
 539	/* Sample standard deviation (s^2) */
 540	if (rec->counter <= 1)
 541		stddev = 0;
 542	else {
 543		/*
 544		 * Apply Welford's method:
 545		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
 546		 */
 
 
 547		stddev = rec->counter * rec->time_squared -
 548			 rec->time * rec->time;
 549
 550		/*
 551		 * Divide only 1000 for ns^2 -> us^2 conversion.
 552		 * trace_print_graph_duration will divide 1000 again.
 553		 */
 554		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
 555	}
 556
 557	trace_seq_init(&s);
 558	trace_print_graph_duration(rec->time, &s);
 559	trace_seq_puts(&s, "    ");
 560	trace_print_graph_duration(avg, &s);
 561	trace_seq_puts(&s, "    ");
 562	trace_print_graph_duration(stddev, &s);
 563	trace_print_seq(m, &s);
 564#endif
 565	seq_putc(m, '\n');
 566out:
 567	mutex_unlock(&ftrace_profile_lock);
 568
 569	return ret;
 570}
 571
 572static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
 573{
 574	struct ftrace_profile_page *pg;
 575
 576	pg = stat->pages = stat->start;
 577
 578	while (pg) {
 579		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
 580		pg->index = 0;
 581		pg = pg->next;
 582	}
 583
 584	memset(stat->hash, 0,
 585	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
 586}
 587
 588int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 589{
 590	struct ftrace_profile_page *pg;
 591	int functions;
 592	int pages;
 593	int i;
 594
 595	/* If we already allocated, do nothing */
 596	if (stat->pages)
 597		return 0;
 598
 599	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
 600	if (!stat->pages)
 601		return -ENOMEM;
 602
 603#ifdef CONFIG_DYNAMIC_FTRACE
 604	functions = ftrace_update_tot_cnt;
 605#else
 606	/*
 607	 * We do not know the number of functions that exist because
 608	 * dynamic tracing is what counts them. With past experience
 609	 * we have around 20K functions. That should be more than enough.
 610	 * It is highly unlikely we will execute every function in
 611	 * the kernel.
 612	 */
 613	functions = 20000;
 614#endif
 615
 616	pg = stat->start = stat->pages;
 617
 618	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
 619
 620	for (i = 1; i < pages; i++) {
 621		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
 622		if (!pg->next)
 623			goto out_free;
 624		pg = pg->next;
 625	}
 626
 627	return 0;
 628
 629 out_free:
 630	pg = stat->start;
 631	while (pg) {
 632		unsigned long tmp = (unsigned long)pg;
 633
 634		pg = pg->next;
 635		free_page(tmp);
 636	}
 637
 638	stat->pages = NULL;
 639	stat->start = NULL;
 640
 641	return -ENOMEM;
 642}
 643
 644static int ftrace_profile_init_cpu(int cpu)
 645{
 646	struct ftrace_profile_stat *stat;
 647	int size;
 648
 649	stat = &per_cpu(ftrace_profile_stats, cpu);
 650
 651	if (stat->hash) {
 652		/* If the profile is already created, simply reset it */
 653		ftrace_profile_reset(stat);
 654		return 0;
 655	}
 656
 657	/*
 658	 * We are profiling all functions, but usually only a few thousand
 659	 * functions are hit. We'll make a hash of 1024 items.
 660	 */
 661	size = FTRACE_PROFILE_HASH_SIZE;
 662
 663	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);
 664
 665	if (!stat->hash)
 666		return -ENOMEM;
 667
 668	/* Preallocate the function profiling pages */
 669	if (ftrace_profile_pages_init(stat) < 0) {
 670		kfree(stat->hash);
 671		stat->hash = NULL;
 672		return -ENOMEM;
 673	}
 674
 675	return 0;
 676}
 677
 678static int ftrace_profile_init(void)
 679{
 680	int cpu;
 681	int ret = 0;
 682
 683	for_each_possible_cpu(cpu) {
 684		ret = ftrace_profile_init_cpu(cpu);
 685		if (ret)
 686			break;
 687	}
 688
 689	return ret;
 690}
 691
 692/* interrupts must be disabled */
 693static struct ftrace_profile *
 694ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 695{
 696	struct ftrace_profile *rec;
 697	struct hlist_head *hhd;
 698	unsigned long key;
 699
 700	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
 701	hhd = &stat->hash[key];
 702
 703	if (hlist_empty(hhd))
 704		return NULL;
 705
 706	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
 707		if (rec->ip == ip)
 708			return rec;
 709	}
 710
 711	return NULL;
 712}
 713
 714static void ftrace_add_profile(struct ftrace_profile_stat *stat,
 715			       struct ftrace_profile *rec)
 716{
 717	unsigned long key;
 718
 719	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
 720	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
 721}
 722
 723/*
 724 * The memory is already allocated, this simply finds a new record to use.
 725 */
 726static struct ftrace_profile *
 727ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 728{
 729	struct ftrace_profile *rec = NULL;
 730
 731	/* prevent recursion (from NMIs) */
 732	if (atomic_inc_return(&stat->disabled) != 1)
 733		goto out;
 734
 735	/*
 736	 * Try to find the function again since an NMI
 737	 * could have added it
 738	 */
 739	rec = ftrace_find_profiled_func(stat, ip);
 740	if (rec)
 741		goto out;
 742
 743	if (stat->pages->index == PROFILES_PER_PAGE) {
 744		if (!stat->pages->next)
 745			goto out;
 746		stat->pages = stat->pages->next;
 747	}
 748
 749	rec = &stat->pages->records[stat->pages->index++];
 750	rec->ip = ip;
 751	ftrace_add_profile(stat, rec);
 752
 753 out:
 754	atomic_dec(&stat->disabled);
 755
 756	return rec;
 757}
 758
 759static void
 760function_profile_call(unsigned long ip, unsigned long parent_ip,
 761		      struct ftrace_ops *ops, struct pt_regs *regs)
 762{
 763	struct ftrace_profile_stat *stat;
 764	struct ftrace_profile *rec;
 765	unsigned long flags;
 766
 767	if (!ftrace_profile_enabled)
 768		return;
 769
 770	local_irq_save(flags);
 771
 772	stat = this_cpu_ptr(&ftrace_profile_stats);
 773	if (!stat->hash || !ftrace_profile_enabled)
 774		goto out;
 775
 776	rec = ftrace_find_profiled_func(stat, ip);
 777	if (!rec) {
 778		rec = ftrace_profile_alloc(stat, ip);
 779		if (!rec)
 780			goto out;
 781	}
 782
 783	rec->counter++;
 784 out:
 785	local_irq_restore(flags);
 786}
 787
 788#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 789static bool fgraph_graph_time = true;
 790
 791void ftrace_graph_graph_time_control(bool enable)
 792{
 793	fgraph_graph_time = enable;
 794}
 795
 796static int profile_graph_entry(struct ftrace_graph_ent *trace)
 
 
 
 
 
 
 
 797{
 798	struct ftrace_ret_stack *ret_stack;
 799
 800	function_profile_call(trace->func, 0, NULL, NULL);
 801
 802	/* If function graph is shutting down, ret_stack can be NULL */
 803	if (!current->ret_stack)
 804		return 0;
 805
 806	ret_stack = ftrace_graph_get_ret_stack(current, 0);
 807	if (ret_stack)
 808		ret_stack->subtime = 0;
 
 
 
 
 809
 810	return 1;
 811}
 812
 813static void profile_graph_return(struct ftrace_graph_ret *trace)
 
 814{
 815	struct ftrace_ret_stack *ret_stack;
 816	struct ftrace_profile_stat *stat;
 817	unsigned long long calltime;
 
 818	struct ftrace_profile *rec;
 819	unsigned long flags;
 
 820
 821	local_irq_save(flags);
 822	stat = this_cpu_ptr(&ftrace_profile_stats);
 823	if (!stat->hash || !ftrace_profile_enabled)
 824		goto out;
 825
 
 
 826	/* If the calltime was zero'd ignore it */
 827	if (!trace->calltime)
 828		goto out;
 829
 830	calltime = trace->rettime - trace->calltime;
 
 
 
 
 
 831
 832	if (!fgraph_graph_time) {
 
 833
 834		/* Append this call time to the parent time to subtract */
 835		ret_stack = ftrace_graph_get_ret_stack(current, 1);
 836		if (ret_stack)
 837			ret_stack->subtime += calltime;
 838
 839		ret_stack = ftrace_graph_get_ret_stack(current, 0);
 840		if (ret_stack && ret_stack->subtime < calltime)
 841			calltime -= ret_stack->subtime;
 842		else
 843			calltime = 0;
 844	}
 845
 846	rec = ftrace_find_profiled_func(stat, trace->func);
 847	if (rec) {
 848		rec->time += calltime;
 849		rec->time_squared += calltime * calltime;
 850	}
 851
 852 out:
 853	local_irq_restore(flags);
 854}
 855
 856static struct fgraph_ops fprofiler_ops = {
 857	.entryfunc = &profile_graph_entry,
 858	.retfunc = &profile_graph_return,
 859};
 860
 861static int register_ftrace_profiler(void)
 862{
 
 863	return register_ftrace_graph(&fprofiler_ops);
 864}
 865
 866static void unregister_ftrace_profiler(void)
 867{
 868	unregister_ftrace_graph(&fprofiler_ops);
 869}
 870#else
 871static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 872	.func		= function_profile_call,
 873	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
 874	INIT_OPS_HASH(ftrace_profile_ops)
 875};
 876
 877static int register_ftrace_profiler(void)
 878{
 
 879	return register_ftrace_function(&ftrace_profile_ops);
 880}
 881
 882static void unregister_ftrace_profiler(void)
 883{
 884	unregister_ftrace_function(&ftrace_profile_ops);
 885}
 886#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 887
 888static ssize_t
 889ftrace_profile_write(struct file *filp, const char __user *ubuf,
 890		     size_t cnt, loff_t *ppos)
 891{
 892	unsigned long val;
 893	int ret;
 894
 895	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 896	if (ret)
 897		return ret;
 898
 899	val = !!val;
 900
 901	mutex_lock(&ftrace_profile_lock);
 902	if (ftrace_profile_enabled ^ val) {
 903		if (val) {
 904			ret = ftrace_profile_init();
 905			if (ret < 0) {
 906				cnt = ret;
 907				goto out;
 908			}
 909
 910			ret = register_ftrace_profiler();
 911			if (ret < 0) {
 912				cnt = ret;
 913				goto out;
 914			}
 915			ftrace_profile_enabled = 1;
 916		} else {
 917			ftrace_profile_enabled = 0;
 918			/*
 919			 * unregister_ftrace_profiler calls stop_machine
 920			 * so this acts like an synchronize_rcu.
 921			 */
 922			unregister_ftrace_profiler();
 923		}
 924	}
 925 out:
 926	mutex_unlock(&ftrace_profile_lock);
 927
 928	*ppos += cnt;
 929
 930	return cnt;
 931}
 932
 933static ssize_t
 934ftrace_profile_read(struct file *filp, char __user *ubuf,
 935		     size_t cnt, loff_t *ppos)
 936{
 937	char buf[64];		/* big enough to hold a number */
 938	int r;
 939
 940	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
 941	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 942}
 943
 944static const struct file_operations ftrace_profile_fops = {
 945	.open		= tracing_open_generic,
 946	.read		= ftrace_profile_read,
 947	.write		= ftrace_profile_write,
 948	.llseek		= default_llseek,
 949};
 950
 951/* used to initialize the real stat files */
 952static struct tracer_stat function_stats __initdata = {
 953	.name		= "functions",
 954	.stat_start	= function_stat_start,
 955	.stat_next	= function_stat_next,
 956	.stat_cmp	= function_stat_cmp,
 957	.stat_headers	= function_stat_headers,
 958	.stat_show	= function_stat_show
 959};
 960
 961static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
 962{
 963	struct ftrace_profile_stat *stat;
 964	struct dentry *entry;
 965	char *name;
 966	int ret;
 967	int cpu;
 968
 969	for_each_possible_cpu(cpu) {
 970		stat = &per_cpu(ftrace_profile_stats, cpu);
 971
 972		name = kasprintf(GFP_KERNEL, "function%d", cpu);
 973		if (!name) {
 974			/*
 975			 * The files created are permanent, if something happens
 976			 * we still do not free memory.
 977			 */
 978			WARN(1,
 979			     "Could not allocate stat file for cpu %d\n",
 980			     cpu);
 981			return;
 982		}
 983		stat->stat = function_stats;
 984		stat->stat.name = name;
 985		ret = register_stat_tracer(&stat->stat);
 986		if (ret) {
 987			WARN(1,
 988			     "Could not register function stat for cpu %d\n",
 989			     cpu);
 990			kfree(name);
 991			return;
 992		}
 993	}
 994
 995	entry = tracefs_create_file("function_profile_enabled", 0644,
 996				    d_tracer, NULL, &ftrace_profile_fops);
 997	if (!entry)
 998		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
 999}
1000
1001#else /* CONFIG_FUNCTION_PROFILER */
1002static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1003{
1004}
1005#endif /* CONFIG_FUNCTION_PROFILER */
1006
1007#ifdef CONFIG_DYNAMIC_FTRACE
1008
1009static struct ftrace_ops *removed_ops;
1010
1011/*
1012 * Set when doing a global update, like enabling all recs or disabling them.
1013 * It is not set when just updating a single ftrace_ops.
1014 */
1015static bool update_all_ops;
1016
1017#ifndef CONFIG_FTRACE_MCOUNT_RECORD
1018# error Dynamic ftrace depends on MCOUNT_RECORD
1019#endif
1020
1021struct ftrace_func_entry {
1022	struct hlist_node hlist;
1023	unsigned long ip;
1024};
1025
1026struct ftrace_func_probe {
1027	struct ftrace_probe_ops	*probe_ops;
1028	struct ftrace_ops	ops;
1029	struct trace_array	*tr;
1030	struct list_head	list;
1031	void			*data;
1032	int			ref;
1033};
1034
1035/*
1036 * We make these constant because no one should touch them,
1037 * but they are used as the default "empty hash", to avoid allocating
1038 * it all the time. These are in a read only section such that if
1039 * anyone does try to modify it, it will cause an exception.
1040 */
1041static const struct hlist_head empty_buckets[1];
1042static const struct ftrace_hash empty_hash = {
1043	.buckets = (struct hlist_head *)empty_buckets,
1044};
1045#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
1046
1047struct ftrace_ops global_ops = {
1048	.func				= ftrace_stub,
1049	.local_hash.notrace_hash	= EMPTY_HASH,
1050	.local_hash.filter_hash		= EMPTY_HASH,
1051	INIT_OPS_HASH(global_ops)
1052	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
1053					  FTRACE_OPS_FL_INITIALIZED |
1054					  FTRACE_OPS_FL_PID,
1055};
1056
1057/*
1058 * Used by the stack undwinder to know about dynamic ftrace trampolines.
1059 */
1060struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
1061{
1062	struct ftrace_ops *op = NULL;
1063
1064	/*
1065	 * Some of the ops may be dynamically allocated,
1066	 * they are freed after a synchronize_rcu().
1067	 */
1068	preempt_disable_notrace();
1069
1070	do_for_each_ftrace_op(op, ftrace_ops_list) {
1071		/*
1072		 * This is to check for dynamically allocated trampolines.
1073		 * Trampolines that are in kernel text will have
1074		 * core_kernel_text() return true.
1075		 */
1076		if (op->trampoline && op->trampoline_size)
1077			if (addr >= op->trampoline &&
1078			    addr < op->trampoline + op->trampoline_size) {
1079				preempt_enable_notrace();
1080				return op;
1081			}
1082	} while_for_each_ftrace_op(op);
1083	preempt_enable_notrace();
1084
1085	return NULL;
1086}
1087
1088/*
1089 * This is used by __kernel_text_address() to return true if the
1090 * address is on a dynamically allocated trampoline that would
1091 * not return true for either core_kernel_text() or
1092 * is_module_text_address().
1093 */
1094bool is_ftrace_trampoline(unsigned long addr)
1095{
1096	return ftrace_ops_trampoline(addr) != NULL;
1097}
1098
1099struct ftrace_page {
1100	struct ftrace_page	*next;
1101	struct dyn_ftrace	*records;
1102	int			index;
1103	int			size;
1104};
1105
1106#define ENTRY_SIZE sizeof(struct dyn_ftrace)
1107#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1108
1109/* estimate from running different kernels */
1110#define NR_TO_INIT		10000
1111
1112static struct ftrace_page	*ftrace_pages_start;
1113static struct ftrace_page	*ftrace_pages;
1114
1115static __always_inline unsigned long
1116ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1117{
1118	if (hash->size_bits > 0)
1119		return hash_long(ip, hash->size_bits);
1120
1121	return 0;
1122}
1123
1124/* Only use this function if ftrace_hash_empty() has already been tested */
1125static __always_inline struct ftrace_func_entry *
1126__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1127{
1128	unsigned long key;
1129	struct ftrace_func_entry *entry;
1130	struct hlist_head *hhd;
1131
1132	key = ftrace_hash_key(hash, ip);
1133	hhd = &hash->buckets[key];
1134
1135	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1136		if (entry->ip == ip)
1137			return entry;
1138	}
1139	return NULL;
1140}
1141
1142/**
1143 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1144 * @hash: The hash to look at
1145 * @ip: The instruction pointer to test
1146 *
1147 * Search a given @hash to see if a given instruction pointer (@ip)
1148 * exists in it.
1149 *
1150 * Returns the entry that holds the @ip if found. NULL otherwise.
1151 */
1152struct ftrace_func_entry *
1153ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1154{
1155	if (ftrace_hash_empty(hash))
1156		return NULL;
1157
1158	return __ftrace_lookup_ip(hash, ip);
1159}
1160
1161static void __add_hash_entry(struct ftrace_hash *hash,
1162			     struct ftrace_func_entry *entry)
1163{
1164	struct hlist_head *hhd;
1165	unsigned long key;
1166
1167	key = ftrace_hash_key(hash, entry->ip);
1168	hhd = &hash->buckets[key];
1169	hlist_add_head(&entry->hlist, hhd);
1170	hash->count++;
1171}
1172
1173static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1174{
1175	struct ftrace_func_entry *entry;
1176
1177	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1178	if (!entry)
1179		return -ENOMEM;
1180
1181	entry->ip = ip;
1182	__add_hash_entry(hash, entry);
1183
1184	return 0;
1185}
1186
1187static void
1188free_hash_entry(struct ftrace_hash *hash,
1189		  struct ftrace_func_entry *entry)
1190{
1191	hlist_del(&entry->hlist);
1192	kfree(entry);
1193	hash->count--;
1194}
1195
1196static void
1197remove_hash_entry(struct ftrace_hash *hash,
1198		  struct ftrace_func_entry *entry)
1199{
1200	hlist_del_rcu(&entry->hlist);
1201	hash->count--;
1202}
1203
1204static void ftrace_hash_clear(struct ftrace_hash *hash)
1205{
1206	struct hlist_head *hhd;
1207	struct hlist_node *tn;
1208	struct ftrace_func_entry *entry;
1209	int size = 1 << hash->size_bits;
1210	int i;
1211
1212	if (!hash->count)
1213		return;
1214
1215	for (i = 0; i < size; i++) {
1216		hhd = &hash->buckets[i];
1217		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1218			free_hash_entry(hash, entry);
1219	}
1220	FTRACE_WARN_ON(hash->count);
1221}
1222
1223static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
1224{
1225	list_del(&ftrace_mod->list);
1226	kfree(ftrace_mod->module);
1227	kfree(ftrace_mod->func);
1228	kfree(ftrace_mod);
1229}
1230
1231static void clear_ftrace_mod_list(struct list_head *head)
1232{
1233	struct ftrace_mod_load *p, *n;
1234
1235	/* stack tracer isn't supported yet */
1236	if (!head)
1237		return;
1238
1239	mutex_lock(&ftrace_lock);
1240	list_for_each_entry_safe(p, n, head, list)
1241		free_ftrace_mod(p);
1242	mutex_unlock(&ftrace_lock);
1243}
1244
1245static void free_ftrace_hash(struct ftrace_hash *hash)
1246{
1247	if (!hash || hash == EMPTY_HASH)
1248		return;
1249	ftrace_hash_clear(hash);
1250	kfree(hash->buckets);
1251	kfree(hash);
1252}
1253
1254static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1255{
1256	struct ftrace_hash *hash;
1257
1258	hash = container_of(rcu, struct ftrace_hash, rcu);
1259	free_ftrace_hash(hash);
1260}
1261
1262static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1263{
1264	if (!hash || hash == EMPTY_HASH)
1265		return;
1266	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
1267}
1268
1269void ftrace_free_filter(struct ftrace_ops *ops)
1270{
1271	ftrace_ops_init(ops);
1272	free_ftrace_hash(ops->func_hash->filter_hash);
1273	free_ftrace_hash(ops->func_hash->notrace_hash);
1274}
1275
1276static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1277{
1278	struct ftrace_hash *hash;
1279	int size;
1280
1281	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1282	if (!hash)
1283		return NULL;
1284
1285	size = 1 << size_bits;
1286	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1287
1288	if (!hash->buckets) {
1289		kfree(hash);
1290		return NULL;
1291	}
1292
1293	hash->size_bits = size_bits;
1294
1295	return hash;
1296}
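
/*
 * Illustrative sketch (not part of the original file): a minimal round
 * trip through the hash helpers above. The calling context and the
 * 'ip' variable are assumptions for demonstration; free_ftrace_hash_rcu()
 * would be used instead of free_ftrace_hash() if RCU readers may still
 * be traversing the hash.
 *
 *	struct ftrace_hash *hash;
 *
 *	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
 *	if (!hash)
 *		return -ENOMEM;
 *	if (add_hash_entry(hash, ip) < 0) {
 *		free_ftrace_hash(hash);
 *		return -ENOMEM;
 *	}
 *	...
 *	free_ftrace_hash(hash);
 */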
1297
1298
1299static int ftrace_add_mod(struct trace_array *tr,
1300			  const char *func, const char *module,
1301			  int enable)
1302{
1303	struct ftrace_mod_load *ftrace_mod;
1304	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
1305
1306	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
1307	if (!ftrace_mod)
1308		return -ENOMEM;
1309
1310	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
1311	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
1312	ftrace_mod->enable = enable;
1313
1314	if (!ftrace_mod->func || !ftrace_mod->module)
1315		goto out_free;
1316
1317	list_add(&ftrace_mod->list, mod_head);
1318
1319	return 0;
1320
1321 out_free:
1322	free_ftrace_mod(ftrace_mod);
1323
1324	return -ENOMEM;
1325}
1326
1327static struct ftrace_hash *
1328alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1329{
1330	struct ftrace_func_entry *entry;
1331	struct ftrace_hash *new_hash;
1332	int size;
1333	int ret;
1334	int i;
1335
1336	new_hash = alloc_ftrace_hash(size_bits);
1337	if (!new_hash)
1338		return NULL;
1339
1340	if (hash)
1341		new_hash->flags = hash->flags;
1342
1343	/* Empty hash? */
1344	if (ftrace_hash_empty(hash))
1345		return new_hash;
1346
1347	size = 1 << hash->size_bits;
1348	for (i = 0; i < size; i++) {
1349		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1350			ret = add_hash_entry(new_hash, entry->ip);
1351			if (ret < 0)
1352				goto free_hash;
1353		}
1354	}
1355
1356	FTRACE_WARN_ON(new_hash->count != hash->count);
1357
1358	return new_hash;
1359
1360 free_hash:
1361	free_ftrace_hash(new_hash);
1362	return NULL;
1363}
1364
1365static void
1366ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1367static void
1368ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1369
1370static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1371				       struct ftrace_hash *new_hash);
1372
1373static struct ftrace_hash *
1374__ftrace_hash_move(struct ftrace_hash *src)
1375{
1376	struct ftrace_func_entry *entry;
1377	struct hlist_node *tn;
1378	struct hlist_head *hhd;
1379	struct ftrace_hash *new_hash;
1380	int size = src->count;
1381	int bits = 0;
1382	int i;
1383
1384	/*
1385	 * If the new source is empty, just return the empty_hash.
1386	 */
1387	if (ftrace_hash_empty(src))
1388		return EMPTY_HASH;
1389
1390	/*
1391	 * Make the hash size about 1/2 the # found
1392	 */
1393	for (size /= 2; size; size >>= 1)
1394		bits++;
1395
1396	/* Don't allocate too much */
1397	if (bits > FTRACE_HASH_MAX_BITS)
1398		bits = FTRACE_HASH_MAX_BITS;
1399
1400	new_hash = alloc_ftrace_hash(bits);
1401	if (!new_hash)
1402		return NULL;
1403
1404	new_hash->flags = src->flags;
1405
1406	size = 1 << src->size_bits;
1407	for (i = 0; i < size; i++) {
1408		hhd = &src->buckets[i];
1409		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1410			remove_hash_entry(src, entry);
1411			__add_hash_entry(new_hash, entry);
1412		}
1413	}
1414
1415	return new_hash;
1416}
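
/*
 * A worked example of the sizing loop above (illustrative only): for
 * src->count == 100, size starts at 50 and is halved down to 1 over
 * six iterations, so bits == 6 and the new hash gets 64 buckets for
 * 100 entries -- between one and two entries per bucket, capped at
 * FTRACE_HASH_MAX_BITS.
 */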
1417
1418static int
1419ftrace_hash_move(struct ftrace_ops *ops, int enable,
1420		 struct ftrace_hash **dst, struct ftrace_hash *src)
1421{
1422	struct ftrace_hash *new_hash;
1423	int ret;
1424
1425	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
1426	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1427		return -EINVAL;
1428
1429	new_hash = __ftrace_hash_move(src);
1430	if (!new_hash)
1431		return -ENOMEM;
1432
1433	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1434	if (enable) {
1435		/* IPMODIFY should be updated only when filter_hash updating */
1436		ret = ftrace_hash_ipmodify_update(ops, new_hash);
1437		if (ret < 0) {
1438			free_ftrace_hash(new_hash);
1439			return ret;
1440		}
1441	}
1442
1443	/*
1444	 * Remove the current set, update the hash and add
1445	 * them back.
1446	 */
1447	ftrace_hash_rec_disable_modify(ops, enable);
1448
1449	rcu_assign_pointer(*dst, new_hash);
1450
1451	ftrace_hash_rec_enable_modify(ops, enable);
1452
1453	return 0;
1454}
1455
1456static bool hash_contains_ip(unsigned long ip,
1457			     struct ftrace_ops_hash *hash)
1458{
1459	/*
1460	 * The function record is a match if it exists in the filter
 1461	 * hash and not in the notrace hash. Note, an empty hash is
1462	 * considered a match for the filter hash, but an empty
1463	 * notrace hash is considered not in the notrace hash.
1464	 */
1465	return (ftrace_hash_empty(hash->filter_hash) ||
1466		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
1467		(ftrace_hash_empty(hash->notrace_hash) ||
1468		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
1469}
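
/*
 * The matching rules above, spelled out (illustrative summary):
 *
 *	filter_hash		notrace_hash		result
 *	empty			empty			match
 *	contains ip		does not contain ip	match
 *	non-empty, no ip	(anything)		no match
 *	(anything)		contains ip		no match
 */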
1470
1471/*
1472 * Test the hashes for this ops to see if we want to call
1473 * the ops->func or not.
1474 *
1475 * It's a match if the ip is in the ops->filter_hash or
1476 * the filter_hash does not exist or is empty,
1477 *  AND
1478 * the ip is not in the ops->notrace_hash.
1479 *
1480 * This needs to be called with preemption disabled as
1481 * the hashes are freed with call_rcu().
1482 */
1483int
1484ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1485{
1486	struct ftrace_ops_hash hash;
1487	int ret;
1488
1489#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1490	/*
 1491	 * There's a small race when adding ops where the ftrace handler
 1492	 * that wants regs may be called without them. We cannot
1493	 * allow that handler to be called if regs is NULL.
1494	 */
1495	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1496		return 0;
1497#endif
1498
1499	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
1500	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
1501
1502	if (hash_contains_ip(ip, &hash))
1503		ret = 1;
1504	else
1505		ret = 0;
1506
1507	return ret;
1508}
1509
1510/*
 1511 * This is a double for loop. Do not use 'break' to break out of the loop;
 1512 * you must use a goto.
1513 */
1514#define do_for_each_ftrace_rec(pg, rec)					\
1515	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
1516		int _____i;						\
1517		for (_____i = 0; _____i < pg->index; _____i++) {	\
1518			rec = &pg->records[_____i];
1519
1520#define while_for_each_ftrace_rec()		\
1521		}				\
1522	}
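
/*
 * Example usage (a minimal sketch; 'target_ip' and the 'found' label
 * are assumed names). Since the macro pair expands to two nested for
 * loops, a 'break' would only leave the inner loop:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	return rec;
 */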
1523
1524
1525static int ftrace_cmp_recs(const void *a, const void *b)
1526{
1527	const struct dyn_ftrace *key = a;
1528	const struct dyn_ftrace *rec = b;
1529
1530	if (key->flags < rec->ip)
1531		return -1;
1532	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1533		return 1;
1534	return 0;
1535}
1536
1537/**
1538 * ftrace_location_range - return the first address of a traced location
1539 *	if it touches the given ip range
1540 * @start: start of range to search.
1541 * @end: end of range to search (inclusive). @end points to the last byte
1542 *	to check.
1543 *
 1544 * Returns rec->ip if the related ftrace location is at least partly within
1545 * the given address range. That is, the first address of the instruction
1546 * that is either a NOP or call to the function tracer. It checks the ftrace
1547 * internal tables to determine if the address belongs or not.
1548 */
1549unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1550{
1551	struct ftrace_page *pg;
1552	struct dyn_ftrace *rec;
1553	struct dyn_ftrace key;
1554
1555	key.ip = start;
1556	key.flags = end;	/* overload flags, as it is unsigned long */
1557
1558	for (pg = ftrace_pages_start; pg; pg = pg->next) {
1559		if (end < pg->records[0].ip ||
1560		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1561			continue;
1562		rec = bsearch(&key, pg->records, pg->index,
1563			      sizeof(struct dyn_ftrace),
1564			      ftrace_cmp_recs);
1565		if (rec)
1566			return rec->ip;
1567	}
1568
1569	return 0;
1570}
1571
1572/**
 1573 * ftrace_location - return rec->ip if the given ip is a traced location
1574 * @ip: the instruction pointer to check
1575 *
1576 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1577 * That is, the instruction that is either a NOP or call to
1578 * the function tracer. It checks the ftrace internal tables to
1579 * determine if the address belongs or not.
1580 */
1581unsigned long ftrace_location(unsigned long ip)
1582{
1583	return ftrace_location_range(ip, ip);
1584}
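
/*
 * Example (illustrative, hedged): code that needs to know whether an
 * address may be patched by ftrace -- say, before placing a probe on
 * it -- could do the following, where 'addr' and 'handle_ftrace_site'
 * are assumed names, not part of this file:
 *
 *	if (ftrace_location(addr))
 *		handle_ftrace_site(addr);
 */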
1585
1586/**
1587 * ftrace_text_reserved - return true if range contains an ftrace location
1588 * @start: start of range to search
1589 * @end: end of range to search (inclusive). @end points to the last byte to check.
1590 *
 1591 * Returns 1 if @start and @end contain a ftrace location
1592 * That is, the instruction that is either a NOP or call to
1593 * the function tracer. It checks the ftrace internal tables to
1594 * determine if the address belongs or not.
1595 */
1596int ftrace_text_reserved(const void *start, const void *end)
1597{
1598	unsigned long ret;
1599
1600	ret = ftrace_location_range((unsigned long)start,
1601				    (unsigned long)end);
1602
1603	return (int)!!ret;
1604}
1605
1606/* Test if ops registered to this rec needs regs */
1607static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1608{
1609	struct ftrace_ops *ops;
1610	bool keep_regs = false;
1611
1612	for (ops = ftrace_ops_list;
1613	     ops != &ftrace_list_end; ops = ops->next) {
1614		/* pass rec in as regs to have non-NULL val */
1615		if (ftrace_ops_test(ops, rec->ip, rec)) {
1616			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1617				keep_regs = true;
1618				break;
1619			}
1620		}
1621	}
1622
1623	return  keep_regs;
1624}
1625
1626static struct ftrace_ops *
1627ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1628static struct ftrace_ops *
1629ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1630
1631static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1632				     int filter_hash,
1633				     bool inc)
1634{
1635	struct ftrace_hash *hash;
1636	struct ftrace_hash *other_hash;
1637	struct ftrace_page *pg;
1638	struct dyn_ftrace *rec;
1639	bool update = false;
1640	int count = 0;
1641	int all = false;
1642
1643	/* Only update if the ops has been registered */
1644	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1645		return false;
1646
1647	/*
1648	 * In the filter_hash case:
1649	 *   If the count is zero, we update all records.
1650	 *   Otherwise we just update the items in the hash.
1651	 *
1652	 * In the notrace_hash case:
1653	 *   We enable the update in the hash.
1654	 *   As disabling notrace means enabling the tracing,
1655	 *   and enabling notrace means disabling, the inc variable
 1656	 *   gets inverted.
1657	 */
1658	if (filter_hash) {
1659		hash = ops->func_hash->filter_hash;
1660		other_hash = ops->func_hash->notrace_hash;
1661		if (ftrace_hash_empty(hash))
1662			all = true;
1663	} else {
1664		inc = !inc;
1665		hash = ops->func_hash->notrace_hash;
1666		other_hash = ops->func_hash->filter_hash;
1667		/*
1668		 * If the notrace hash has no items,
1669		 * then there's nothing to do.
1670		 */
1671		if (ftrace_hash_empty(hash))
1672			return false;
1673	}
1674
1675	do_for_each_ftrace_rec(pg, rec) {
1676		int in_other_hash = 0;
1677		int in_hash = 0;
1678		int match = 0;
1679
1680		if (rec->flags & FTRACE_FL_DISABLED)
1681			continue;
1682
1683		if (all) {
1684			/*
1685			 * Only the filter_hash affects all records.
1686			 * Update if the record is not in the notrace hash.
1687			 */
1688			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1689				match = 1;
1690		} else {
1691			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1692			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1693
1694			/*
1695			 * If filter_hash is set, we want to match all functions
1696			 * that are in the hash but not in the other hash.
1697			 *
1698			 * If filter_hash is not set, then we are decrementing.
1699			 * That means we match anything that is in the hash
1700			 * and also in the other_hash. That is, we need to turn
1701			 * off functions in the other hash because they are disabled
1702			 * by this hash.
1703			 */
1704			if (filter_hash && in_hash && !in_other_hash)
1705				match = 1;
1706			else if (!filter_hash && in_hash &&
1707				 (in_other_hash || ftrace_hash_empty(other_hash)))
1708				match = 1;
1709		}
1710		if (!match)
1711			continue;
1712
1713		if (inc) {
1714			rec->flags++;
1715			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1716				return false;
1717
1718			/*
1719			 * If there's only a single callback registered to a
1720			 * function, and the ops has a trampoline registered
1721			 * for it, then we can call it directly.
1722			 */
1723			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1724				rec->flags |= FTRACE_FL_TRAMP;
1725			else
1726				/*
1727				 * If we are adding another function callback
1728				 * to this function, and the previous had a
1729				 * custom trampoline in use, then we need to go
1730				 * back to the default trampoline.
1731				 */
1732				rec->flags &= ~FTRACE_FL_TRAMP;
1733
1734			/*
1735			 * If any ops wants regs saved for this function
1736			 * then all ops will get saved regs.
1737			 */
1738			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1739				rec->flags |= FTRACE_FL_REGS;
1740		} else {
1741			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1742				return false;
1743			rec->flags--;
1744
1745			/*
1746			 * If the rec had REGS enabled and the ops that is
1747			 * being removed had REGS set, then see if there is
1748			 * still any ops for this record that wants regs.
1749			 * If not, we can stop recording them.
1750			 */
1751			if (ftrace_rec_count(rec) > 0 &&
1752			    rec->flags & FTRACE_FL_REGS &&
1753			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1754				if (!test_rec_ops_needs_regs(rec))
1755					rec->flags &= ~FTRACE_FL_REGS;
1756			}
1757
1758			/*
1759			 * The TRAMP needs to be set only if rec count
1760			 * is decremented to one, and the ops that is
 1761			 * left has a trampoline, as TRAMP can only be
1762			 * enabled if there is only a single ops attached
1763			 * to it.
1764			 */
1765			if (ftrace_rec_count(rec) == 1 &&
1766			    ftrace_find_tramp_ops_any(rec))
1767				rec->flags |= FTRACE_FL_TRAMP;
1768			else
1769				rec->flags &= ~FTRACE_FL_TRAMP;
1770
1771			/*
1772			 * flags will be cleared in ftrace_check_record()
1773			 * if rec count is zero.
1774			 */
1775		}
1776		count++;
1777
1778		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1779		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
1780
1781		/* Shortcut, if we handled all records, we are done. */
1782		if (!all && count == hash->count)
1783			return update;
1784	} while_for_each_ftrace_rec();
1785
1786	return update;
1787}
1788
1789static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
1790				    int filter_hash)
1791{
1792	return __ftrace_hash_rec_update(ops, filter_hash, 0);
1793}
1794
1795static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
1796				   int filter_hash)
1797{
1798	return __ftrace_hash_rec_update(ops, filter_hash, 1);
1799}
1800
1801static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1802					  int filter_hash, int inc)
1803{
1804	struct ftrace_ops *op;
1805
1806	__ftrace_hash_rec_update(ops, filter_hash, inc);
1807
1808	if (ops->func_hash != &global_ops.local_hash)
1809		return;
1810
1811	/*
1812	 * If the ops shares the global_ops hash, then we need to update
1813	 * all ops that are enabled and use this hash.
1814	 */
1815	do_for_each_ftrace_op(op, ftrace_ops_list) {
1816		/* Already done */
1817		if (op == ops)
1818			continue;
1819		if (op->func_hash == &global_ops.local_hash)
1820			__ftrace_hash_rec_update(op, filter_hash, inc);
1821	} while_for_each_ftrace_op(op);
1822}
1823
1824static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1825					   int filter_hash)
1826{
1827	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1828}
1829
1830static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1831					  int filter_hash)
1832{
1833	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1834}
1835
1836/*
 1837 * Try to update the IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 1838 * or no update is needed, -EBUSY if it detects a conflict of the flag
 1839 * on an ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 1840 * Note that old_hash and new_hash have the following meanings:
1841 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1842 *  - If the hash is EMPTY_HASH, it hits nothing
1843 *  - Anything else hits the recs which match the hash entries.
1844 */
1845static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1846					 struct ftrace_hash *old_hash,
1847					 struct ftrace_hash *new_hash)
1848{
1849	struct ftrace_page *pg;
1850	struct dyn_ftrace *rec, *end = NULL;
1851	int in_old, in_new;
1852
1853	/* Only update if the ops has been registered */
1854	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1855		return 0;
1856
1857	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1858		return 0;
1859
1860	/*
 1861	 * Since IPMODIFY is a very address-sensitive action, we do not
 1862	 * allow an ftrace_ops to set all functions to a new hash.
1863	 */
1864	if (!new_hash || !old_hash)
1865		return -EINVAL;
1866
1867	/* Update rec->flags */
1868	do_for_each_ftrace_rec(pg, rec) {
1869
1870		if (rec->flags & FTRACE_FL_DISABLED)
1871			continue;
1872
1873		/* We need to update only differences of filter_hash */
1874		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1875		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1876		if (in_old == in_new)
1877			continue;
1878
1879		if (in_new) {
1880			/* New entries must ensure no others are using it */
1881			if (rec->flags & FTRACE_FL_IPMODIFY)
1882				goto rollback;
1883			rec->flags |= FTRACE_FL_IPMODIFY;
1884		} else /* Removed entry */
1885			rec->flags &= ~FTRACE_FL_IPMODIFY;
1886	} while_for_each_ftrace_rec();
1887
1888	return 0;
1889
1890rollback:
1891	end = rec;
1892
1893	/* Roll back what we did above */
1894	do_for_each_ftrace_rec(pg, rec) {
1895
1896		if (rec->flags & FTRACE_FL_DISABLED)
1897			continue;
1898
1899		if (rec == end)
1900			goto err_out;
1901
1902		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1903		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1904		if (in_old == in_new)
1905			continue;
1906
1907		if (in_new)
1908			rec->flags &= ~FTRACE_FL_IPMODIFY;
1909		else
1910			rec->flags |= FTRACE_FL_IPMODIFY;
1911	} while_for_each_ftrace_rec();
1912
1913err_out:
1914	return -EBUSY;
1915}
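
/*
 * Concretely (an illustrative scenario): if two ftrace_ops with
 * FTRACE_OPS_FL_IPMODIFY both try to filter the same function, the
 * second call to __ftrace_hash_update_ipmodify() finds
 * FTRACE_FL_IPMODIFY already set on that rec, rolls back any flags it
 * set so far, and returns -EBUSY: only one ip-modifying ops may claim
 * a given function at a time.
 */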
1916
1917static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1918{
1919	struct ftrace_hash *hash = ops->func_hash->filter_hash;
1920
1921	if (ftrace_hash_empty(hash))
1922		hash = NULL;
1923
1924	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1925}
1926
1927/* Disabling always succeeds */
1928static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1929{
1930	struct ftrace_hash *hash = ops->func_hash->filter_hash;
1931
1932	if (ftrace_hash_empty(hash))
1933		hash = NULL;
1934
1935	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1936}
1937
1938static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1939				       struct ftrace_hash *new_hash)
1940{
1941	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1942
1943	if (ftrace_hash_empty(old_hash))
1944		old_hash = NULL;
1945
1946	if (ftrace_hash_empty(new_hash))
1947		new_hash = NULL;
1948
1949	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1950}
1951
1952static void print_ip_ins(const char *fmt, const unsigned char *p)
1953{
1954	int i;
1955
1956	printk(KERN_CONT "%s", fmt);
1957
1958	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1959		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1960}
1961
1962enum ftrace_bug_type ftrace_bug_type;
1963const void *ftrace_expected;
1964
1965static void print_bug_type(void)
1966{
1967	switch (ftrace_bug_type) {
1968	case FTRACE_BUG_UNKNOWN:
1969		break;
1970	case FTRACE_BUG_INIT:
1971		pr_info("Initializing ftrace call sites\n");
1972		break;
1973	case FTRACE_BUG_NOP:
1974		pr_info("Setting ftrace call site to NOP\n");
1975		break;
1976	case FTRACE_BUG_CALL:
1977		pr_info("Setting ftrace call site to call ftrace function\n");
1978		break;
1979	case FTRACE_BUG_UPDATE:
1980		pr_info("Updating ftrace call site to call a different ftrace function\n");
1981		break;
1982	}
1983}
1984
1985/**
1986 * ftrace_bug - report and shutdown function tracer
1987 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1988 * @rec: The record that failed
1989 *
1990 * The arch code that enables or disables the function tracing
1991 * can call ftrace_bug() when it has detected a problem in
 1992 * modifying the code. @failed should be one of:
1993 * EFAULT - if the problem happens on reading the @ip address
1994 * EINVAL - if what is read at @ip is not what was expected
1995 * EPERM - if the problem happens on writing to the @ip address
1996 */
1997void ftrace_bug(int failed, struct dyn_ftrace *rec)
1998{
1999	unsigned long ip = rec ? rec->ip : 0;
2000
2001	switch (failed) {
2002	case -EFAULT:
2003		FTRACE_WARN_ON_ONCE(1);
2004		pr_info("ftrace faulted on modifying ");
2005		print_ip_sym(ip);
2006		break;
2007	case -EINVAL:
2008		FTRACE_WARN_ON_ONCE(1);
2009		pr_info("ftrace failed to modify ");
2010		print_ip_sym(ip);
2011		print_ip_ins(" actual:   ", (unsigned char *)ip);
2012		pr_cont("\n");
2013		if (ftrace_expected) {
2014			print_ip_ins(" expected: ", ftrace_expected);
2015			pr_cont("\n");
2016		}
2017		break;
2018	case -EPERM:
2019		FTRACE_WARN_ON_ONCE(1);
2020		pr_info("ftrace faulted on writing ");
2021		print_ip_sym(ip);
2022		break;
2023	default:
2024		FTRACE_WARN_ON_ONCE(1);
2025		pr_info("ftrace faulted on unknown error ");
2026		print_ip_sym(ip);
2027	}
2028	print_bug_type();
2029	if (rec) {
2030		struct ftrace_ops *ops = NULL;
2031
2032		pr_info("ftrace record flags: %lx\n", rec->flags);
2033		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2034			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
2035		if (rec->flags & FTRACE_FL_TRAMP_EN) {
2036			ops = ftrace_find_tramp_ops_any(rec);
2037			if (ops) {
2038				do {
2039					pr_cont("\ttramp: %pS (%pS)",
2040						(void *)ops->trampoline,
2041						(void *)ops->func);
2042					ops = ftrace_find_tramp_ops_next(rec, ops);
2043				} while (ops);
2044			} else
2045				pr_cont("\ttramp: ERROR!");
2046
2047		}
2048		ip = ftrace_get_addr_curr(rec);
2049		pr_cont("\n expected tramp: %lx\n", ip);
2050	}
2051}
2052
2053static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
2054{
2055	unsigned long flag = 0UL;
2056
2057	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2058
2059	if (rec->flags & FTRACE_FL_DISABLED)
2060		return FTRACE_UPDATE_IGNORE;
2061
2062	/*
2063	 * If we are updating calls:
2064	 *
2065	 *   If the record has a ref count, then we need to enable it
2066	 *   because someone is using it.
2067	 *
 2068	 *   Otherwise we make sure it's disabled.
2069	 *
2070	 * If we are disabling calls, then disable all records that
2071	 * are enabled.
2072	 */
2073	if (enable && ftrace_rec_count(rec))
2074		flag = FTRACE_FL_ENABLED;
2075
2076	/*
2077	 * If enabling and the REGS flag does not match the REGS_EN, or
2078	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2079	 * this record. Set flags to fail the compare against ENABLED.
2080	 */
2081	if (flag) {
2082		if (!(rec->flags & FTRACE_FL_REGS) != 
2083		    !(rec->flags & FTRACE_FL_REGS_EN))
2084			flag |= FTRACE_FL_REGS;
2085
2086		if (!(rec->flags & FTRACE_FL_TRAMP) != 
2087		    !(rec->flags & FTRACE_FL_TRAMP_EN))
2088			flag |= FTRACE_FL_TRAMP;
2089	}
2090
2091	/* If the state of this record hasn't changed, then do nothing */
2092	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2093		return FTRACE_UPDATE_IGNORE;
2094
2095	if (flag) {
2096		/* Save off if rec is being enabled (for return value) */
2097		flag ^= rec->flags & FTRACE_FL_ENABLED;
2098
2099		if (update) {
2100			rec->flags |= FTRACE_FL_ENABLED;
2101			if (flag & FTRACE_FL_REGS) {
2102				if (rec->flags & FTRACE_FL_REGS)
2103					rec->flags |= FTRACE_FL_REGS_EN;
2104				else
2105					rec->flags &= ~FTRACE_FL_REGS_EN;
2106			}
2107			if (flag & FTRACE_FL_TRAMP) {
2108				if (rec->flags & FTRACE_FL_TRAMP)
2109					rec->flags |= FTRACE_FL_TRAMP_EN;
2110				else
2111					rec->flags &= ~FTRACE_FL_TRAMP_EN;
2112			}
2113		}
2114
2115		/*
2116		 * If this record is being updated from a nop, then
2117		 *   return UPDATE_MAKE_CALL.
2118		 * Otherwise,
2119		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2120		 *   from the save regs, to a non-save regs function or
2121		 *   vice versa, or from a trampoline call.
2122		 */
2123		if (flag & FTRACE_FL_ENABLED) {
2124			ftrace_bug_type = FTRACE_BUG_CALL;
2125			return FTRACE_UPDATE_MAKE_CALL;
2126		}
2127
2128		ftrace_bug_type = FTRACE_BUG_UPDATE;
2129		return FTRACE_UPDATE_MODIFY_CALL;
2130	}
2131
2132	if (update) {
2133		/* If there's no more users, clear all flags */
2134		if (!ftrace_rec_count(rec))
2135			rec->flags = 0;
2136		else
2137			/*
2138			 * Just disable the record, but keep the ops TRAMP
2139			 * and REGS states. The _EN flags must be disabled though.
2140			 */
2141			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2142					FTRACE_FL_REGS_EN);
2143	}
2144
2145	ftrace_bug_type = FTRACE_BUG_NOP;
2146	return FTRACE_UPDATE_MAKE_NOP;
2147}
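
/*
 * Summary of the transitions above (illustrative recap):
 *
 *	record state				return value
 *	disabled, or nothing changed		FTRACE_UPDATE_IGNORE
 *	nop -> traced				FTRACE_UPDATE_MAKE_CALL
 *	traced -> nop				FTRACE_UPDATE_MAKE_NOP
 *	stays traced, REGS/TRAMP changed	FTRACE_UPDATE_MODIFY_CALL
 */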
2148
2149/**
2150 * ftrace_update_record, set a record that now is tracing or not
2151 * @rec: the record to update
2152 * @enable: set to true if the record is tracing, false to force disable
2153 *
2154 * The records that represent all functions that can be traced need
2155 * to be updated when tracing has been enabled.
2156 */
2157int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
2158{
2159	return ftrace_check_record(rec, enable, true);
2160}
2161
2162/**
2163 * ftrace_test_record, check if the record has been enabled or not
2164 * @rec: the record to test
2165 * @enable: set to true to check if enabled, false if it is disabled
2166 *
2167 * The arch code may need to test if a record is already set to
2168 * tracing to determine how to modify the function code that it
2169 * represents.
2170 */
2171int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
2172{
2173	return ftrace_check_record(rec, enable, false);
2174}
2175
2176static struct ftrace_ops *
2177ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2178{
2179	struct ftrace_ops *op;
2180	unsigned long ip = rec->ip;
2181
2182	do_for_each_ftrace_op(op, ftrace_ops_list) {
2183
2184		if (!op->trampoline)
2185			continue;
2186
2187		if (hash_contains_ip(ip, op->func_hash))
2188			return op;
2189	} while_for_each_ftrace_op(op);
2190
2191	return NULL;
2192}
2193
2194static struct ftrace_ops *
2195ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2196			   struct ftrace_ops *op)
2197{
2198	unsigned long ip = rec->ip;
2199
2200	while_for_each_ftrace_op(op) {
2201
2202		if (!op->trampoline)
2203			continue;
2204
2205		if (hash_contains_ip(ip, op->func_hash))
2206			return op;
2207	} 
2208
2209	return NULL;
2210}
2211
2212static struct ftrace_ops *
2213ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2214{
2215	struct ftrace_ops *op;
2216	unsigned long ip = rec->ip;
2217
2218	/*
2219	 * Need to check removed ops first.
2220	 * If they are being removed, and this rec has a tramp,
2221	 * and this rec is in the ops list, then it would be the
2222	 * one with the tramp.
2223	 */
2224	if (removed_ops) {
2225		if (hash_contains_ip(ip, &removed_ops->old_hash))
2226			return removed_ops;
2227	}
2228
2229	/*
2230	 * Need to find the current trampoline for a rec.
2231	 * Now, a trampoline is only attached to a rec if there
2232	 * was a single 'ops' attached to it. But this can be called
2233	 * when we are adding another op to the rec or removing the
2234	 * current one. Thus, if the op is being added, we can
2235	 * ignore it because it hasn't attached itself to the rec
2236	 * yet.
2237	 *
2238	 * If an ops is being modified (hooking to different functions)
2239	 * then we don't care about the new functions that are being
2240	 * added, just the old ones (that are probably being removed).
2241	 *
2242	 * If we are adding an ops to a function that already is using
2243	 * a trampoline, it needs to be removed (trampolines are only
2244	 * for single ops connected), then an ops that is not being
2245	 * modified also needs to be checked.
2246	 */
2247	do_for_each_ftrace_op(op, ftrace_ops_list) {
2248
2249		if (!op->trampoline)
2250			continue;
2251
2252		/*
2253		 * If the ops is being added, it hasn't gotten to
2254		 * the point to be removed from this tree yet.
2255		 */
2256		if (op->flags & FTRACE_OPS_FL_ADDING)
2257			continue;
2258
2259
2260		/*
2261		 * If the ops is being modified and is in the old
2262		 * hash, then it is probably being removed from this
2263		 * function.
2264		 */
2265		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2266		    hash_contains_ip(ip, &op->old_hash))
2267			return op;
2268		/*
2269		 * If the ops is not being added or modified, and it's
2270		 * in its normal filter hash, then this must be the one
2271		 * we want!
2272		 */
2273		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2274		    hash_contains_ip(ip, op->func_hash))
2275			return op;
2276
2277	} while_for_each_ftrace_op(op);
2278
2279	return NULL;
2280}
2281
2282static struct ftrace_ops *
2283ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2284{
2285	struct ftrace_ops *op;
2286	unsigned long ip = rec->ip;
2287
2288	do_for_each_ftrace_op(op, ftrace_ops_list) {
2289		/* pass rec in as regs to have non-NULL val */
2290		if (hash_contains_ip(ip, op->func_hash))
2291			return op;
2292	} while_for_each_ftrace_op(op);
2293
2294	return NULL;
2295}
2296
2297/**
2298 * ftrace_get_addr_new - Get the call address to set to
2299 * @rec:  The ftrace record descriptor
2300 *
2301 * If the record has the FTRACE_FL_REGS set, that means that it
2302 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 2303 * is not set, then it wants to convert to the normal callback.
2304 *
2305 * Returns the address of the trampoline to set to
2306 */
2307unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2308{
2309	struct ftrace_ops *ops;
2310
2311	/* Trampolines take precedence over regs */
2312	if (rec->flags & FTRACE_FL_TRAMP) {
2313		ops = ftrace_find_tramp_ops_new(rec);
2314		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2315			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2316				(void *)rec->ip, (void *)rec->ip, rec->flags);
2317			/* Ftrace is shutting down, return anything */
2318			return (unsigned long)FTRACE_ADDR;
2319		}
2320		return ops->trampoline;
2321	}
2322
2323	if (rec->flags & FTRACE_FL_REGS)
2324		return (unsigned long)FTRACE_REGS_ADDR;
2325	else
2326		return (unsigned long)FTRACE_ADDR;
2327}
2328
2329/**
2330 * ftrace_get_addr_curr - Get the call address that is already there
2331 * @rec:  The ftrace record descriptor
2332 *
2333 * The FTRACE_FL_REGS_EN is set when the record already points to
2334 * a function that saves all the regs. Basically the '_EN' version
2335 * represents the current state of the function.
2336 *
2337 * Returns the address of the trampoline that is currently being called
2338 */
2339unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2340{
2341	struct ftrace_ops *ops;
2342
2343	/* Trampolines take precedence over regs */
2344	if (rec->flags & FTRACE_FL_TRAMP_EN) {
2345		ops = ftrace_find_tramp_ops_curr(rec);
2346		if (FTRACE_WARN_ON(!ops)) {
2347			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2348				(void *)rec->ip, (void *)rec->ip);
2349			/* Ftrace is shutting down, return anything */
2350			return (unsigned long)FTRACE_ADDR;
2351		}
2352		return ops->trampoline;
2353	}
2354
2355	if (rec->flags & FTRACE_FL_REGS_EN)
2356		return (unsigned long)FTRACE_REGS_ADDR;
2357	else
2358		return (unsigned long)FTRACE_ADDR;
2359}
2360
2361static int
2362__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
2363{
2364	unsigned long ftrace_old_addr;
2365	unsigned long ftrace_addr;
2366	int ret;
2367
2368	ftrace_addr = ftrace_get_addr_new(rec);
2369
2370	/* This needs to be done before we call ftrace_update_record */
2371	ftrace_old_addr = ftrace_get_addr_curr(rec);
2372
2373	ret = ftrace_update_record(rec, enable);
2374
2375	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2376
2377	switch (ret) {
2378	case FTRACE_UPDATE_IGNORE:
2379		return 0;
2380
2381	case FTRACE_UPDATE_MAKE_CALL:
2382		ftrace_bug_type = FTRACE_BUG_CALL;
2383		return ftrace_make_call(rec, ftrace_addr);
2384
2385	case FTRACE_UPDATE_MAKE_NOP:
2386		ftrace_bug_type = FTRACE_BUG_NOP;
2387		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2388
2389	case FTRACE_UPDATE_MODIFY_CALL:
2390		ftrace_bug_type = FTRACE_BUG_UPDATE;
2391		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2392	}
2393
2394	return -1; /* unknown ftrace bug */
2395}
2396
2397void __weak ftrace_replace_code(int mod_flags)
2398{
2399	struct dyn_ftrace *rec;
2400	struct ftrace_page *pg;
2401	bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2402	int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2403	int failed;
2404
2405	if (unlikely(ftrace_disabled))
2406		return;
2407
2408	do_for_each_ftrace_rec(pg, rec) {
2409
2410		if (rec->flags & FTRACE_FL_DISABLED)
2411			continue;
2412
2413		failed = __ftrace_replace_code(rec, enable);
2414		if (failed) {
2415			ftrace_bug(failed, rec);
2416			/* Stop processing */
2417			return;
2418		}
2419		if (schedulable)
2420			cond_resched();
2421	} while_for_each_ftrace_rec();
2422}
2423
2424struct ftrace_rec_iter {
2425	struct ftrace_page	*pg;
2426	int			index;
2427};
2428
2429/**
2430 * ftrace_rec_iter_start, start up iterating over traced functions
2431 *
2432 * Returns an iterator handle that is used to iterate over all
2433 * the records that represent address locations where functions
2434 * are traced.
2435 *
2436 * May return NULL if no records are available.
2437 */
2438struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2439{
2440	/*
2441	 * We only use a single iterator.
2442	 * Protected by the ftrace_lock mutex.
2443	 */
2444	static struct ftrace_rec_iter ftrace_rec_iter;
2445	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2446
2447	iter->pg = ftrace_pages_start;
2448	iter->index = 0;
2449
2450	/* Could have empty pages */
2451	while (iter->pg && !iter->pg->index)
2452		iter->pg = iter->pg->next;
2453
2454	if (!iter->pg)
2455		return NULL;
2456
2457	return iter;
2458}
2459
2460/**
2461 * ftrace_rec_iter_next, get the next record to process.
2462 * @iter: The handle to the iterator.
2463 *
2464 * Returns the next iterator after the given iterator @iter.
2465 */
2466struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2467{
2468	iter->index++;
2469
2470	if (iter->index >= iter->pg->index) {
2471		iter->pg = iter->pg->next;
2472		iter->index = 0;
2473
2474		/* Could have empty pages */
2475		while (iter->pg && !iter->pg->index)
2476			iter->pg = iter->pg->next;
2477	}
2478
2479	if (!iter->pg)
2480		return NULL;
2481
2482	return iter;
2483}
2484
2485/**
2486 * ftrace_rec_iter_record, get the record at the iterator location
2487 * @iter: The current iterator location
2488 *
2489 * Returns the record that the current @iter is at.
2490 */
2491struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2492{
2493	return &iter->pg->records[iter->index];
2494}
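
/*
 * Typical use of the iterator (a minimal sketch; process_rec() is an
 * assumed helper). This must run under ftrace_lock, as the iterator
 * itself is a single static instance:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		process_rec(rec);
 *	}
 */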
2495
2496static int
2497ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2498{
2499	int ret;
2500
2501	if (unlikely(ftrace_disabled))
2502		return 0;
2503
2504	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2505	if (ret) {
2506		ftrace_bug_type = FTRACE_BUG_INIT;
2507		ftrace_bug(ret, rec);
2508		return 0;
2509	}
2510	return 1;
2511}
2512
2513/*
2514 * archs can override this function if they must do something
2515 * before the modifying code is performed.
2516 */
2517int __weak ftrace_arch_code_modify_prepare(void)
2518{
2519	return 0;
2520}
2521
2522/*
2523 * archs can override this function if they must do something
2524 * after the modifying code is performed.
2525 */
2526int __weak ftrace_arch_code_modify_post_process(void)
2527{
2528	return 0;
2529}
2530
2531void ftrace_modify_all_code(int command)
2532{
2533	int update = command & FTRACE_UPDATE_TRACE_FUNC;
2534	int mod_flags = 0;
2535	int err = 0;
2536
2537	if (command & FTRACE_MAY_SLEEP)
2538		mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2539
2540	/*
 2541	 * If the ftrace_caller calls an ftrace_ops func directly,
 2542	 * we need to make sure that it only traces functions it
 2543	 * expects to trace. When doing the switch of functions,
 2544	 * we need to update to the ftrace_ops_list_func first
 2545	 * before the transition between old and new calls is made,
 2546	 * as the ftrace_ops_list_func will check the ops hashes
 2547	 * to make sure the ops have the right functions
 2548	 * traced.
2549	 */
2550	if (update) {
2551		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2552		if (FTRACE_WARN_ON(err))
2553			return;
2554	}
2555
2556	if (command & FTRACE_UPDATE_CALLS)
2557		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2558	else if (command & FTRACE_DISABLE_CALLS)
2559		ftrace_replace_code(mod_flags);
2560
2561	if (update && ftrace_trace_function != ftrace_ops_list_func) {
2562		function_trace_op = set_function_trace_op;
2563		smp_wmb();
2564		/* If irqs are disabled, we are in stop machine */
2565		if (!irqs_disabled())
2566			smp_call_function(ftrace_sync_ipi, NULL, 1);
2567		err = ftrace_update_ftrace_func(ftrace_trace_function);
2568		if (FTRACE_WARN_ON(err))
2569			return;
2570	}
2571
2572	if (command & FTRACE_START_FUNC_RET)
2573		err = ftrace_enable_ftrace_graph_caller();
2574	else if (command & FTRACE_STOP_FUNC_RET)
2575		err = ftrace_disable_ftrace_graph_caller();
2576	FTRACE_WARN_ON(err);
2577}
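
/*
 * Example command composition (illustrative): enabling call sites
 * together with the function-graph return hook combines bits as in
 *
 *	ftrace_modify_all_code(FTRACE_UPDATE_CALLS | FTRACE_START_FUNC_RET);
 *
 * while a teardown could pass FTRACE_DISABLE_CALLS | FTRACE_STOP_FUNC_RET.
 * The real combinations are built by callers such as ftrace_startup()
 * and ftrace_shutdown() below.
 */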
2578
2579static int __ftrace_modify_code(void *data)
2580{
2581	int *command = data;
2582
2583	ftrace_modify_all_code(*command);
2584
2585	return 0;
2586}
2587
2588/**
2589 * ftrace_run_stop_machine, go back to the stop machine method
2590 * @command: The command to tell ftrace what to do
2591 *
 2592 * If an arch needs to fall back to the stop machine method,
 2593 * it can call this function.
2594 */
2595void ftrace_run_stop_machine(int command)
2596{
2597	stop_machine(__ftrace_modify_code, &command, NULL);
2598}
2599
2600/**
2601 * arch_ftrace_update_code, modify the code to trace or not trace
2602 * @command: The command that needs to be done
2603 *
 2604 * Archs can override this function if they do not need to
 2605 * run stop_machine() to modify code.
2606 */
2607void __weak arch_ftrace_update_code(int command)
2608{
2609	ftrace_run_stop_machine(command);
2610}
2611
2612static void ftrace_run_update_code(int command)
2613{
2614	int ret;
2615
2616	ret = ftrace_arch_code_modify_prepare();
2617	FTRACE_WARN_ON(ret);
2618	if (ret)
2619		return;
2620
2621	/*
2622	 * By default we use stop_machine() to modify the code.
 2623	 * But archs can do whatever they want as long as it
 2624	 * is safe. The stop_machine() is the safest, but also
2625	 * produces the most overhead.
2626	 */
2627	arch_ftrace_update_code(command);
2628
2629	ret = ftrace_arch_code_modify_post_process();
2630	FTRACE_WARN_ON(ret);
2631}
2632
2633static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2634				   struct ftrace_ops_hash *old_hash)
2635{
2636	ops->flags |= FTRACE_OPS_FL_MODIFYING;
2637	ops->old_hash.filter_hash = old_hash->filter_hash;
2638	ops->old_hash.notrace_hash = old_hash->notrace_hash;
2639	ftrace_run_update_code(command);
2640	ops->old_hash.filter_hash = NULL;
2641	ops->old_hash.notrace_hash = NULL;
2642	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2643}
2644
2645static ftrace_func_t saved_ftrace_func;
2646static int ftrace_start_up;
2647
2648void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2649{
2650}
2651
2652static void ftrace_startup_enable(int command)
2653{
2654	if (saved_ftrace_func != ftrace_trace_function) {
2655		saved_ftrace_func = ftrace_trace_function;
2656		command |= FTRACE_UPDATE_TRACE_FUNC;
2657	}
2658
2659	if (!command || !ftrace_enabled)
2660		return;
2661
2662	ftrace_run_update_code(command);
2663}
2664
2665static void ftrace_startup_all(int command)
2666{
2667	update_all_ops = true;
2668	ftrace_startup_enable(command);
2669	update_all_ops = false;
2670}
2671
2672int ftrace_startup(struct ftrace_ops *ops, int command)
2673{
2674	int ret;
2675
2676	if (unlikely(ftrace_disabled))
2677		return -ENODEV;
2678
2679	ret = __register_ftrace_function(ops);
2680	if (ret)
2681		return ret;
2682
2683	ftrace_start_up++;
2684
2685	/*
 2686	 * Note that ftrace probes use this to start up
2687	 * and modify functions it will probe. But we still
2688	 * set the ADDING flag for modification, as probes
2689	 * do not have trampolines. If they add them in the
2690	 * future, then the probes will need to distinguish
2691	 * between adding and updating probes.
2692	 */
2693	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2694
2695	ret = ftrace_hash_ipmodify_enable(ops);
2696	if (ret < 0) {
2697		/* Rollback registration process */
2698		__unregister_ftrace_function(ops);
2699		ftrace_start_up--;
2700		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2701		return ret;
2702	}
2703
2704	if (ftrace_hash_rec_enable(ops, 1))
2705		command |= FTRACE_UPDATE_CALLS;
2706
2707	ftrace_startup_enable(command);
2708
2709	ops->flags &= ~FTRACE_OPS_FL_ADDING;
2710
2711	return 0;
2712}
2713
2714int ftrace_shutdown(struct ftrace_ops *ops, int command)
2715{
2716	int ret;
2717
2718	if (unlikely(ftrace_disabled))
2719		return -ENODEV;
2720
2721	ret = __unregister_ftrace_function(ops);
2722	if (ret)
2723		return ret;
2724
2725	ftrace_start_up--;
2726	/*
 2727	 * Just warn in case of unbalance; no need to kill ftrace, it's not
 2728	 * critical, but the ftrace_call callers may never be nopped again after
 2729	 * further ftrace uses.
2730	 */
2731	WARN_ON_ONCE(ftrace_start_up < 0);
2732
2733	/* Disabling ipmodify never fails */
2734	ftrace_hash_ipmodify_disable(ops);
2735
2736	if (ftrace_hash_rec_disable(ops, 1))
2737		command |= FTRACE_UPDATE_CALLS;
2738
2739	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2740
2741	if (saved_ftrace_func != ftrace_trace_function) {
2742		saved_ftrace_func = ftrace_trace_function;
2743		command |= FTRACE_UPDATE_TRACE_FUNC;
2744	}
2745
2746	if (!command || !ftrace_enabled) {
2747		/*
2748		 * If these are dynamic or per_cpu ops, they still
 2749		 * need their data freed. Since function tracing is
2750		 * not currently active, we can just free them
2751		 * without synchronizing all CPUs.
2752		 */
2753		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2754			goto free_ops;
2755
2756		return 0;
2757	}
2758
2759	/*
2760	 * If the ops uses a trampoline, then it needs to be
2761	 * tested first on update.
2762	 */
2763	ops->flags |= FTRACE_OPS_FL_REMOVING;
2764	removed_ops = ops;
2765
2766	/* The trampoline logic checks the old hashes */
2767	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2768	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2769
2770	ftrace_run_update_code(command);
2771
2772	/*
2773	 * If there's no more ops registered with ftrace, run a
2774	 * sanity check to make sure all rec flags are cleared.
2775	 */
2776	if (rcu_dereference_protected(ftrace_ops_list,
2777			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
2778		struct ftrace_page *pg;
2779		struct dyn_ftrace *rec;
2780
2781		do_for_each_ftrace_rec(pg, rec) {
2782			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
2783				pr_warn("  %pS flags:%lx\n",
2784					(void *)rec->ip, rec->flags);
2785		} while_for_each_ftrace_rec();
2786	}
2787
2788	ops->old_hash.filter_hash = NULL;
2789	ops->old_hash.notrace_hash = NULL;
2790
2791	removed_ops = NULL;
2792	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2793
2794	/*
2795	 * Dynamic ops may be freed, we must make sure that all
2796	 * callers are done before leaving this function.
2797	 * The same goes for freeing the per_cpu data of the per_cpu
2798	 * ops.
2799	 */
2800	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
2801		/*
2802		 * We need to do a hard force of sched synchronization.
2803		 * This is because we use preempt_disable() to do RCU, but
2804		 * the function tracers can be called where RCU is not watching
 2805		 * (like before user_exit()). We cannot rely on the RCU
2806		 * infrastructure to do the synchronization, thus we must do it
2807		 * ourselves.
2808		 */
2809		schedule_on_each_cpu(ftrace_sync);
2810
2811		/*
 2812		 * When the kernel is preemptible, tasks can be preempted
 2813		 * while on an ftrace trampoline. Just scheduling a task on
 2814		 * a CPU is not good enough to flush them. Calling
 2815		 * synchronize_rcu_tasks() will wait for those tasks to
2816		 * execute and either schedule voluntarily or enter user space.
2817		 */
2818		if (IS_ENABLED(CONFIG_PREEMPTION))
2819			synchronize_rcu_tasks();
2820
2821 free_ops:
2822		arch_ftrace_trampoline_free(ops);
2823	}
2824
2825	return 0;
2826}
2827
2828static void ftrace_startup_sysctl(void)
2829{
2830	int command;
2831
2832	if (unlikely(ftrace_disabled))
2833		return;
2834
2835	/* Force update next time */
2836	saved_ftrace_func = NULL;
2837	/* ftrace_start_up is true if we want ftrace running */
2838	if (ftrace_start_up) {
2839		command = FTRACE_UPDATE_CALLS;
2840		if (ftrace_graph_active)
2841			command |= FTRACE_START_FUNC_RET;
2842		ftrace_startup_enable(command);
2843	}
2844}
2845
2846static void ftrace_shutdown_sysctl(void)
2847{
2848	int command;
2849
2850	if (unlikely(ftrace_disabled))
2851		return;
2852
2853	/* ftrace_start_up is true if ftrace is running */
2854	if (ftrace_start_up) {
2855		command = FTRACE_DISABLE_CALLS;
2856		if (ftrace_graph_active)
2857			command |= FTRACE_STOP_FUNC_RET;
2858		ftrace_run_update_code(command);
2859	}
2860}
2861
2862static u64		ftrace_update_time;
2863unsigned long		ftrace_update_tot_cnt;
2864
2865static inline int ops_traces_mod(struct ftrace_ops *ops)
2866{
2867	/*
 2868	 * An empty filter_hash defaults to tracing the module,
 2869	 * but the notrace hash requires a test of individual module functions.
2870	 */
2871	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2872		ftrace_hash_empty(ops->func_hash->notrace_hash);
2873}
2874
2875/*
2876 * Check if the current ops references the record.
2877 *
2878 * If the ops traces all functions, then it was already accounted for.
2879 * If the ops does not trace the current record function, skip it.
2880 * If the ops ignores the function via notrace filter, skip it.
2881 */
2882static inline bool
2883ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2884{
2885	/* If ops isn't enabled, ignore it */
2886	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2887		return false;
2888
2889	/* If ops traces all then it includes this function */
2890	if (ops_traces_mod(ops))
2891		return true;
2892
2893	/* The function must be in the filter */
2894	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2895	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2896		return false;
2897
2898	/* If in notrace hash, we ignore it too */
2899	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2900		return false;
2901
2902	return true;
2903}
2904
2905static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2906{
2907	struct ftrace_page *pg;
2908	struct dyn_ftrace *p;
2909	u64 start, stop;
2910	unsigned long update_cnt = 0;
2911	unsigned long rec_flags = 0;
2912	int i;
2913
2914	start = ftrace_now(raw_smp_processor_id());
2915
2916	/*
2917	 * When a module is loaded, this function is called to convert
2918	 * the calls to mcount in its text to nops, and also to create
2919	 * an entry in the ftrace data. Now, if ftrace is activated
2920	 * after this call, but before the module sets its text to
2921	 * read-only, the modification of enabling ftrace can fail if
2922	 * the read-only is done while ftrace is converting the calls.
2923	 * To prevent this, the module's records are set as disabled
2924	 * and will be enabled after the call to set the module's text
2925	 * to read-only.
2926	 */
2927	if (mod)
2928		rec_flags |= FTRACE_FL_DISABLED;
2929
2930	for (pg = new_pgs; pg; pg = pg->next) {
2931
2932		for (i = 0; i < pg->index; i++) {
2933
2934			/* If something went wrong, bail without enabling anything */
2935			if (unlikely(ftrace_disabled))
2936				return -1;
2937
2938			p = &pg->records[i];
2939			p->flags = rec_flags;
2940
2941			/*
2942			 * Do the initial record conversion from mcount jump
2943			 * to the NOP instructions.
2944			 */
2945			if (!__is_defined(CC_USING_NOP_MCOUNT) &&
2946			    !ftrace_code_disable(mod, p))
2947				break;
2948
2949			update_cnt++;
2950		}
2951	}
2952
2953	stop = ftrace_now(raw_smp_processor_id());
2954	ftrace_update_time = stop - start;
2955	ftrace_update_tot_cnt += update_cnt;
2956
2957	return 0;
2958}
2959
2960static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2961{
2962	int order;
2963	int cnt;
2964
2965	if (WARN_ON(!count))
2966		return -EINVAL;
2967
2968	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2969
2970	/*
2971	 * We want to fill as much as possible. No more than a page
2972	 * may be empty.
2973	 */
2974	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2975		order--;
2976
2977 again:
2978	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2979
2980	if (!pg->records) {
2981		/* if we can't allocate this size, try something smaller */
2982		if (!order)
2983			return -ENOMEM;
2984		order >>= 1;
2985		goto again;
2986	}
2987
2988	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2989	pg->size = cnt;
2990
2991	if (cnt > count)
2992		cnt = count;
2993
2994	return cnt;
2995}
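
/*
 * A worked example of the order math above (illustrative; the sizes
 * are assumptions and vary by arch/config). With 4K pages and a
 * 16-byte struct dyn_ftrace, ENTRIES_PER_PAGE is 256. For count ==
 * 10000, DIV_ROUND_UP gives 40 pages, so order starts at 6 (64 pages).
 * The while loop then shrinks it: 64 pages hold 16384 records, still
 * >= 10000 + 256, so order drops to 5; 32 pages hold only 8192, so the
 * loop stops. The function returns cnt == 8192 and the caller loops
 * again for the remaining 1808 records.
 */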
2996
2997static struct ftrace_page *
2998ftrace_allocate_pages(unsigned long num_to_init)
2999{
3000	struct ftrace_page *start_pg;
3001	struct ftrace_page *pg;
3002	int order;
3003	int cnt;
3004
3005	if (!num_to_init)
3006		return NULL;
3007
3008	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3009	if (!pg)
3010		return NULL;
3011
3012	/*
 3013	 * Try to allocate as much as possible in one contiguous
 3014	 * location that fills in all of the space. We want to
3015	 * waste as little space as possible.
3016	 */
3017	for (;;) {
3018		cnt = ftrace_allocate_records(pg, num_to_init);
3019		if (cnt < 0)
3020			goto free_pages;
3021
3022		num_to_init -= cnt;
3023		if (!num_to_init)
3024			break;
3025
3026		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3027		if (!pg->next)
3028			goto free_pages;
3029
3030		pg = pg->next;
3031	}
3032
3033	return start_pg;
3034
3035 free_pages:
3036	pg = start_pg;
3037	while (pg) {
3038		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3039		free_pages((unsigned long)pg->records, order);
3040		start_pg = pg->next;
3041		kfree(pg);
3042		pg = start_pg;
3043	}
3044	pr_info("ftrace: FAILED to allocate memory for functions\n");
3045	return NULL;
3046}
3047
3048#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3049
3050struct ftrace_iterator {
3051	loff_t				pos;
3052	loff_t				func_pos;
3053	loff_t				mod_pos;
3054	struct ftrace_page		*pg;
3055	struct dyn_ftrace		*func;
3056	struct ftrace_func_probe	*probe;
3057	struct ftrace_func_entry	*probe_entry;
3058	struct trace_parser		parser;
3059	struct ftrace_hash		*hash;
3060	struct ftrace_ops		*ops;
3061	struct trace_array		*tr;
3062	struct list_head		*mod_list;
3063	int				pidx;
3064	int				idx;
3065	unsigned			flags;
3066};
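/*
 * The seq_file callbacks below walk three logical sections in a fixed
 * order -- function records (t_func_next), cached module filter strings
 * (t_mod_next), then probe hash entries (t_probe_next).  func_pos and
 * mod_pos remember where the earlier sections ended so that a seek can
 * be resolved against the right section.
 */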
3067
3068static void *
3069t_probe_next(struct seq_file *m, loff_t *pos)
3070{
3071	struct ftrace_iterator *iter = m->private;
3072	struct trace_array *tr = iter->ops->private;
3073	struct list_head *func_probes;
3074	struct ftrace_hash *hash;
3075	struct list_head *next;
3076	struct hlist_node *hnd = NULL;
3077	struct hlist_head *hhd;
3078	int size;
3079
3080	(*pos)++;
3081	iter->pos = *pos;
3082
3083	if (!tr)
3084		return NULL;
3085
3086	func_probes = &tr->func_probes;
3087	if (list_empty(func_probes))
3088		return NULL;
3089
3090	if (!iter->probe) {
3091		next = func_probes->next;
3092		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3093	}
3094
3095	if (iter->probe_entry)
3096		hnd = &iter->probe_entry->hlist;
3097
3098	hash = iter->probe->ops.func_hash->filter_hash;
3099
3100	/*
3101	 * A probe being registered may temporarily have an empty hash,
3102	 * and it will be at the end of the func_probes list.
3103	 */
3104	if (!hash || hash == EMPTY_HASH)
3105		return NULL;
3106
3107	size = 1 << hash->size_bits;
3108
3109 retry:
3110	if (iter->pidx >= size) {
3111		if (iter->probe->list.next == func_probes)
3112			return NULL;
3113		next = iter->probe->list.next;
3114		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3115		hash = iter->probe->ops.func_hash->filter_hash;
3116		size = 1 << hash->size_bits;
3117		iter->pidx = 0;
3118	}
3119
3120	hhd = &hash->buckets[iter->pidx];
3121
3122	if (hlist_empty(hhd)) {
3123		iter->pidx++;
3124		hnd = NULL;
3125		goto retry;
3126	}
3127
3128	if (!hnd)
3129		hnd = hhd->first;
3130	else {
3131		hnd = hnd->next;
3132		if (!hnd) {
3133			iter->pidx++;
3134			goto retry;
3135		}
3136	}
3137
3138	if (WARN_ON_ONCE(!hnd))
3139		return NULL;
3140
3141	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3142
3143	return iter;
3144}
3145
3146static void *t_probe_start(struct seq_file *m, loff_t *pos)
3147{
3148	struct ftrace_iterator *iter = m->private;
3149	void *p = NULL;
3150	loff_t l;
3151
3152	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3153		return NULL;
3154
3155	if (iter->mod_pos > *pos)
3156		return NULL;
3157
3158	iter->probe = NULL;
3159	iter->probe_entry = NULL;
3160	iter->pidx = 0;
3161	for (l = 0; l <= (*pos - iter->mod_pos); ) {
3162		p = t_probe_next(m, &l);
3163		if (!p)
3164			break;
3165	}
3166	if (!p)
3167		return NULL;
3168
3169	/* Only set this if we have an item */
3170	iter->flags |= FTRACE_ITER_PROBE;
3171
3172	return iter;
3173}
3174
3175static int
3176t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3177{
3178	struct ftrace_func_entry *probe_entry;
3179	struct ftrace_probe_ops *probe_ops;
3180	struct ftrace_func_probe *probe;
3181
3182	probe = iter->probe;
3183	probe_entry = iter->probe_entry;
3184
3185	if (WARN_ON_ONCE(!probe || !probe_entry))
3186		return -EIO;
3187
3188	probe_ops = probe->probe_ops;
3189
3190	if (probe_ops->print)
3191		return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3192
3193	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3194		   (void *)probe_ops->func);
3195
3196	return 0;
3197}
3198
3199static void *
3200t_mod_next(struct seq_file *m, loff_t *pos)
3201{
3202	struct ftrace_iterator *iter = m->private;
3203	struct trace_array *tr = iter->tr;
3204
3205	(*pos)++;
3206	iter->pos = *pos;
3207
3208	iter->mod_list = iter->mod_list->next;
3209
3210	if (iter->mod_list == &tr->mod_trace ||
3211	    iter->mod_list == &tr->mod_notrace) {
3212		iter->flags &= ~FTRACE_ITER_MOD;
3213		return NULL;
3214	}
3215
3216	iter->mod_pos = *pos;
3217
3218	return iter;
3219}
3220
3221static void *t_mod_start(struct seq_file *m, loff_t *pos)
3222{
3223	struct ftrace_iterator *iter = m->private;
3224	void *p = NULL;
3225	loff_t l;
3226
3227	if (iter->func_pos > *pos)
3228		return NULL;
3229
3230	iter->mod_pos = iter->func_pos;
3231
3232	/* probes are only available if tr is set */
3233	if (!iter->tr)
3234		return NULL;
3235
3236	for (l = 0; l <= (*pos - iter->func_pos); ) {
3237		p = t_mod_next(m, &l);
3238		if (!p)
3239			break;
3240	}
3241	if (!p) {
3242		iter->flags &= ~FTRACE_ITER_MOD;
3243		return t_probe_start(m, pos);
3244	}
3245
3246	/* Only set this if we have an item */
3247	iter->flags |= FTRACE_ITER_MOD;
3248
3249	return iter;
3250}
3251
3252static int
3253t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3254{
3255	struct ftrace_mod_load *ftrace_mod;
3256	struct trace_array *tr = iter->tr;
3257
3258	if (WARN_ON_ONCE(!iter->mod_list) ||
3259			 iter->mod_list == &tr->mod_trace ||
3260			 iter->mod_list == &tr->mod_notrace)
3261		return -EIO;
3262
3263	ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3264
3265	if (ftrace_mod->func)
3266		seq_printf(m, "%s", ftrace_mod->func);
3267	else
3268		seq_putc(m, '*');
3269
3270	seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3271
3272	return 0;
3273}
3274
3275static void *
3276t_func_next(struct seq_file *m, loff_t *pos)
3277{
3278	struct ftrace_iterator *iter = m->private;
3279	struct dyn_ftrace *rec = NULL;
3280
3281	(*pos)++;
3282
3283 retry:
3284	if (iter->idx >= iter->pg->index) {
3285		if (iter->pg->next) {
3286			iter->pg = iter->pg->next;
3287			iter->idx = 0;
3288			goto retry;
3289		}
3290	} else {
3291		rec = &iter->pg->records[iter->idx++];
3292		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3293		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3294
3295		    ((iter->flags & FTRACE_ITER_ENABLED) &&
3296		     !(rec->flags & FTRACE_FL_ENABLED))) {
3297
3298			rec = NULL;
3299			goto retry;
3300		}
3301	}
3302
3303	if (!rec)
3304		return NULL;
3305
3306	iter->pos = iter->func_pos = *pos;
3307	iter->func = rec;
3308
3309	return iter;
3310}
3311
3312static void *
3313t_next(struct seq_file *m, void *v, loff_t *pos)
3314{
3315	struct ftrace_iterator *iter = m->private;
3316	loff_t l = *pos; /* t_probe_start() must use original pos */
3317	void *ret;
3318
3319	if (unlikely(ftrace_disabled))
3320		return NULL;
3321
3322	if (iter->flags & FTRACE_ITER_PROBE)
3323		return t_probe_next(m, pos);
3324
3325	if (iter->flags & FTRACE_ITER_MOD)
3326		return t_mod_next(m, pos);
3327
3328	if (iter->flags & FTRACE_ITER_PRINTALL) {
3329		/* next must increment pos, and t_mod_start() does not */
3330		(*pos)++;
3331		return t_mod_start(m, &l);
3332	}
3333
3334	ret = t_func_next(m, pos);
3335
3336	if (!ret)
3337		return t_mod_start(m, &l);
3338
3339	return ret;
3340}
3341
3342static void reset_iter_read(struct ftrace_iterator *iter)
3343{
3344	iter->pos = 0;
3345	iter->func_pos = 0;
3346	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3347}
3348
3349static void *t_start(struct seq_file *m, loff_t *pos)
3350{
3351	struct ftrace_iterator *iter = m->private;
3352	void *p = NULL;
3353	loff_t l;
3354
3355	mutex_lock(&ftrace_lock);
3356
3357	if (unlikely(ftrace_disabled))
3358		return NULL;
3359
3360	/*
3361	 * If an lseek was done, then reset and start from beginning.
3362	 */
3363	if (*pos < iter->pos)
3364		reset_iter_read(iter);
3365
3366	/*
3367	 * For set_ftrace_filter reading, if we have the filter
3368	 * off, we can short cut and just print out that all
3369	 * functions are enabled.
3370	 */
3371	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3372	    ftrace_hash_empty(iter->hash)) {
3373		iter->func_pos = 1; /* Account for the message */
3374		if (*pos > 0)
3375			return t_mod_start(m, pos);
3376		iter->flags |= FTRACE_ITER_PRINTALL;
3377		/* reset in case of seek/pread */
3378		iter->flags &= ~FTRACE_ITER_PROBE;
3379		return iter;
3380	}
3381
3382	if (iter->flags & FTRACE_ITER_MOD)
3383		return t_mod_start(m, pos);
3384
3385	/*
3386	 * Unfortunately, we need to restart at ftrace_pages_start
3387	 * every time we let go of the ftrace_lock. This is because
3388	 * those pointers can change without the lock.
3389	 */
3390	iter->pg = ftrace_pages_start;
3391	iter->idx = 0;
3392	for (l = 0; l <= *pos; ) {
3393		p = t_func_next(m, &l);
3394		if (!p)
3395			break;
3396	}
3397
3398	if (!p)
3399		return t_mod_start(m, pos);
3400
3401	return iter;
3402}
3403
3404static void t_stop(struct seq_file *m, void *p)
3405{
3406	mutex_unlock(&ftrace_lock);
3407}
3408
3409void * __weak
3410arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3411{
3412	return NULL;
3413}
3414
3415static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3416				struct dyn_ftrace *rec)
3417{
3418	void *ptr;
3419
3420	ptr = arch_ftrace_trampoline_func(ops, rec);
3421	if (ptr)
3422		seq_printf(m, " ->%pS", ptr);
3423}
3424
3425static int t_show(struct seq_file *m, void *v)
3426{
3427	struct ftrace_iterator *iter = m->private;
3428	struct dyn_ftrace *rec;
3429
3430	if (iter->flags & FTRACE_ITER_PROBE)
3431		return t_probe_show(m, iter);
3432
3433	if (iter->flags & FTRACE_ITER_MOD)
3434		return t_mod_show(m, iter);
3435
3436	if (iter->flags & FTRACE_ITER_PRINTALL) {
3437		if (iter->flags & FTRACE_ITER_NOTRACE)
3438			seq_puts(m, "#### no functions disabled ####\n");
3439		else
3440			seq_puts(m, "#### all functions enabled ####\n");
3441		return 0;
3442	}
3443
3444	rec = iter->func;
3445
3446	if (!rec)
3447		return 0;
3448
3449	seq_printf(m, "%ps", (void *)rec->ip);
3450	if (iter->flags & FTRACE_ITER_ENABLED) {
3451		struct ftrace_ops *ops;
3452
3453		seq_printf(m, " (%ld)%s%s",
3454			   ftrace_rec_count(rec),
3455			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3456			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
3457		if (rec->flags & FTRACE_FL_TRAMP_EN) {
3458			ops = ftrace_find_tramp_ops_any(rec);
3459			if (ops) {
3460				do {
3461					seq_printf(m, "\ttramp: %pS (%pS)",
3462						   (void *)ops->trampoline,
3463						   (void *)ops->func);
3464					add_trampoline_func(m, ops, rec);
3465					ops = ftrace_find_tramp_ops_next(rec, ops);
3466				} while (ops);
3467			} else
3468				seq_puts(m, "\ttramp: ERROR!");
3469		} else {
3470			add_trampoline_func(m, NULL, rec);
3471		}
3472	}
3473
3474	seq_putc(m, '\n');
3475
3476	return 0;
3477}
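/*
 * Illustrative output shapes produced by t_show() (symbol names and
 * addresses below are made up):
 *
 *   set_ftrace_filter (FTRACE_ITER_FILTER):
 *       vfs_read
 *
 *   enabled_functions (FTRACE_ITER_ENABLED):
 *       vfs_read (1) R  I 	tramp: 0xffff... (my_func)
 *
 * "(1)" is ftrace_rec_count(), "R" marks a REGS user, "I" an IPMODIFY
 * user, and the tramp line only appears while a custom trampoline is
 * enabled for the record.
 */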
3478
3479static const struct seq_operations show_ftrace_seq_ops = {
3480	.start = t_start,
3481	.next = t_next,
3482	.stop = t_stop,
3483	.show = t_show,
3484};
3485
3486static int
3487ftrace_avail_open(struct inode *inode, struct file *file)
3488{
3489	struct ftrace_iterator *iter;
3490	int ret;
3491
3492	ret = security_locked_down(LOCKDOWN_TRACEFS);
3493	if (ret)
3494		return ret;
3495
3496	if (unlikely(ftrace_disabled))
3497		return -ENODEV;
3498
3499	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3500	if (!iter)
3501		return -ENOMEM;
3502
3503	iter->pg = ftrace_pages_start;
3504	iter->ops = &global_ops;
3505
3506	return 0;
3507}
3508
3509static int
3510ftrace_enabled_open(struct inode *inode, struct file *file)
3511{
3512	struct ftrace_iterator *iter;
3513
3514	/*
3515	 * This shows us what functions are currently being
3516	 * traced and by what. Not sure if we want lockdown
3517	 * to hide such critical information from an admin.
3518	 * Although, perhaps it can show information we don't
3519	 * want people to see, but if something is tracing
3520	 * something, we probably want to know about it.
3521	 */
3522
3523	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3524	if (!iter)
3525		return -ENOMEM;
3526
3527	iter->pg = ftrace_pages_start;
3528	iter->flags = FTRACE_ITER_ENABLED;
3529	iter->ops = &global_ops;
3530
3531	return 0;
3532}
3533
3534/**
3535 * ftrace_regex_open - initialize function tracer filter files
3536 * @ops: The ftrace_ops that hold the hash filters
3537 * @flag: The type of filter to process
3538 * @inode: The inode, usually passed in to your open routine
3539 * @file: The file, usually passed in to your open routine
3540 *
3541 * ftrace_regex_open() initializes the filter files for the
3542 * @ops. Depending on @flag it may process the filter hash or
3543 * the notrace hash of @ops. With this called from the open
3544 * routine, you can use ftrace_filter_write() for the write
3545 * routine if @flag has FTRACE_ITER_FILTER set, or
3546 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3547 * tracing_lseek() should be used as the lseek routine, and
3548 * release must call ftrace_regex_release().
3549 */
3550int
3551ftrace_regex_open(struct ftrace_ops *ops, int flag,
3552		  struct inode *inode, struct file *file)
3553{
3554	struct ftrace_iterator *iter;
3555	struct ftrace_hash *hash;
3556	struct list_head *mod_head;
3557	struct trace_array *tr = ops->private;
3558	int ret = -ENOMEM;
3559
3560	ftrace_ops_init(ops);
3561
3562	if (unlikely(ftrace_disabled))
3563		return -ENODEV;
3564
3565	if (tracing_check_open_get_tr(tr))
3566		return -ENODEV;
3567
3568	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3569	if (!iter)
3570		goto out;
3571
3572	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
3573		goto out;
3574
3575	iter->ops = ops;
3576	iter->flags = flag;
3577	iter->tr = tr;
3578
3579	mutex_lock(&ops->func_hash->regex_lock);
3580
3581	if (flag & FTRACE_ITER_NOTRACE) {
3582		hash = ops->func_hash->notrace_hash;
3583		mod_head = tr ? &tr->mod_notrace : NULL;
3584	} else {
3585		hash = ops->func_hash->filter_hash;
3586		mod_head = tr ? &tr->mod_trace : NULL;
3587	}
3588
3589	iter->mod_list = mod_head;
3590
3591	if (file->f_mode & FMODE_WRITE) {
3592		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3593
3594		if (file->f_flags & O_TRUNC) {
3595			iter->hash = alloc_ftrace_hash(size_bits);
3596			clear_ftrace_mod_list(mod_head);
3597		} else {
3598			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3599		}
3600
3601		if (!iter->hash) {
3602			trace_parser_put(&iter->parser);
3603			goto out_unlock;
3604		}
3605	} else
3606		iter->hash = hash;
3607
3608	ret = 0;
3609
3610	if (file->f_mode & FMODE_READ) {
3611		iter->pg = ftrace_pages_start;
3612
3613		ret = seq_open(file, &show_ftrace_seq_ops);
3614		if (!ret) {
3615			struct seq_file *m = file->private_data;
3616			m->private = iter;
3617		} else {
3618			/* Failed */
3619			free_ftrace_hash(iter->hash);
3620			trace_parser_put(&iter->parser);
3621		}
3622	} else
3623		file->private_data = iter;
3624
3625 out_unlock:
3626	mutex_unlock(&ops->func_hash->regex_lock);
3627
3628 out:
3629	if (ret) {
3630		kfree(iter);
3631		if (tr)
3632			trace_array_put(tr);
3633	}
3634
3635	return ret;
3636}
3637
3638static int
3639ftrace_filter_open(struct inode *inode, struct file *file)
3640{
3641	struct ftrace_ops *ops = inode->i_private;
3642
3643	/* Checks for tracefs lockdown */
3644	return ftrace_regex_open(ops,
3645			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3646			inode, file);
3647}
3648
3649static int
3650ftrace_notrace_open(struct inode *inode, struct file *file)
3651{
3652	struct ftrace_ops *ops = inode->i_private;
3653
3654	/* Checks for tracefs lockdown */
3655	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3656				 inode, file);
3657}
3658
3659/* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
3660struct ftrace_glob {
3661	char *search;
3662	unsigned len;
3663	int type;
3664};
3665
3666/*
3667 * If symbols in an architecture don't correspond exactly to the user-visible
3668 * name of what they represent, it is possible to define this function to
3669 * perform the necessary adjustments.
3670 */
3671char * __weak arch_ftrace_match_adjust(char *str, const char *search)
3672{
3673	return str;
3674}
3675
3676static int ftrace_match(char *str, struct ftrace_glob *g)
3677{
3678	int matched = 0;
3679	int slen;
3680
3681	str = arch_ftrace_match_adjust(str, g->search);
3682
3683	switch (g->type) {
3684	case MATCH_FULL:
3685		if (strcmp(str, g->search) == 0)
3686			matched = 1;
3687		break;
3688	case MATCH_FRONT_ONLY:
3689		if (strncmp(str, g->search, g->len) == 0)
3690			matched = 1;
3691		break;
3692	case MATCH_MIDDLE_ONLY:
3693		if (strstr(str, g->search))
3694			matched = 1;
3695		break;
3696	case MATCH_END_ONLY:
3697		slen = strlen(str);
3698		if (slen >= g->len &&
3699		    memcmp(str + slen - g->len, g->search, g->len) == 0)
3700			matched = 1;
3701		break;
3702	case MATCH_GLOB:
3703		if (glob_match(g->search, str))
3704			matched = 1;
3705		break;
3706	}
3707
3708	return matched;
3709}
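/*
 * How filter_parse_regex() typically maps user input onto the cases
 * above (examples are illustrative):
 *
 *   "vfs_read"   -> MATCH_FULL         exact compare
 *   "vfs_*"      -> MATCH_FRONT_ONLY   prefix compare
 *   "*_read"     -> MATCH_END_ONLY     suffix compare
 *   "*page*"     -> MATCH_MIDDLE_ONLY  substring search
 *   "vfs_*read"  -> MATCH_GLOB         full glob_match()
 */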
3710
3711static int
3712enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
3713{
3714	struct ftrace_func_entry *entry;
3715	int ret = 0;
3716
3717	entry = ftrace_lookup_ip(hash, rec->ip);
3718	if (clear_filter) {
3719		/* Do nothing if it doesn't exist */
3720		if (!entry)
3721			return 0;
3722
3723		free_hash_entry(hash, entry);
3724	} else {
3725		/* Do nothing if it exists */
3726		if (entry)
3727			return 0;
3728
3729		ret = add_hash_entry(hash, rec->ip);
3730	}
3731	return ret;
3732}
3733
3734static int
3735add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
3736		 int clear_filter)
3737{
3738	long index = simple_strtoul(func_g->search, NULL, 0);
3739	struct ftrace_page *pg;
3740	struct dyn_ftrace *rec;
3741
3742	/* The index starts at 1 */
3743	if (--index < 0)
3744		return 0;
3745
3746	do_for_each_ftrace_rec(pg, rec) {
3747		if (pg->index <= index) {
3748			index -= pg->index;
3749			/* this is a double loop, break goes to the next page */
3750			break;
3751		}
3752		rec = &pg->records[index];
3753		enter_record(hash, rec, clear_filter);
3754		return 1;
3755	} while_for_each_ftrace_rec();
3756	return 0;
3757}
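/*
 * MATCH_INDEX means the filter was a bare number, selecting a function
 * by its position in available_filter_functions, e.g. (illustrative):
 *
 *   echo 5 > set_ftrace_filter
 *
 * picks whatever function sits on line 5 of that file.
 */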
3758
3759static int
3760ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3761		struct ftrace_glob *mod_g, int exclude_mod)
3762{
3763	char str[KSYM_SYMBOL_LEN];
3764	char *modname;
3765
3766	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3767
3768	if (mod_g) {
3769		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
3770
3771		/* blank module name to match all modules */
3772		if (!mod_g->len) {
3773			/* blank module globbing: modname xor exclude_mod */
3774			if (!exclude_mod != !modname)
3775				goto func_match;
3776			return 0;
3777		}
3778
3779		/*
3780		 * exclude_mod is set to trace everything but the given
3781		 * module. If it is set and the module matches, then
3782		 * return 0. If it is not set, and the module doesn't match
3783		 * also return 0. Otherwise, check the function to see if
3784		 * that matches.
3785		 */
3786		if (!mod_matches == !exclude_mod)
3787			return 0;
3788func_match:
3789		/* blank search means to match all funcs in the mod */
3790		if (!func_g->len)
3791			return 1;
3792	}
3793
3794	return ftrace_match(str, func_g);
3795}
3796
3797static int
3798match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
3799{
3800	struct ftrace_page *pg;
3801	struct dyn_ftrace *rec;
3802	struct ftrace_glob func_g = { .type = MATCH_FULL };
3803	struct ftrace_glob mod_g = { .type = MATCH_FULL };
3804	struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
3805	int exclude_mod = 0;
3806	int found = 0;
3807	int ret;
3808	int clear_filter = 0;
3809
3810	if (func) {
3811		func_g.type = filter_parse_regex(func, len, &func_g.search,
3812						 &clear_filter);
3813		func_g.len = strlen(func_g.search);
3814	}
3815
3816	if (mod) {
3817		mod_g.type = filter_parse_regex(mod, strlen(mod),
3818				&mod_g.search, &exclude_mod);
3819		mod_g.len = strlen(mod_g.search);
3820	}
3821
3822	mutex_lock(&ftrace_lock);
3823
3824	if (unlikely(ftrace_disabled))
3825		goto out_unlock;
3826
3827	if (func_g.type == MATCH_INDEX) {
3828		found = add_rec_by_index(hash, &func_g, clear_filter);
3829		goto out_unlock;
3830	}
3831
3832	do_for_each_ftrace_rec(pg, rec) {
3833
3834		if (rec->flags & FTRACE_FL_DISABLED)
3835			continue;
3836
3837		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
3838			ret = enter_record(hash, rec, clear_filter);
3839			if (ret < 0) {
3840				found = ret;
3841				goto out_unlock;
3842			}
3843			found = 1;
3844		}
3845	} while_for_each_ftrace_rec();
3846 out_unlock:
3847	mutex_unlock(&ftrace_lock);
3848
3849	return found;
3850}
3851
3852static int
3853ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3854{
3855	return match_records(hash, buff, len, NULL);
3856}
3857
3858static void ftrace_ops_update_code(struct ftrace_ops *ops,
3859				   struct ftrace_ops_hash *old_hash)
3860{
3861	struct ftrace_ops *op;
3862
3863	if (!ftrace_enabled)
3864		return;
3865
3866	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
3867		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3868		return;
3869	}
3870
3871	/*
3872	 * If this is the shared global_ops filter, then we need to
3873	 * check if there is another ops that shares it and is enabled.
3874	 * If so, we still need to run the modify code.
3875	 */
3876	if (ops->func_hash != &global_ops.local_hash)
3877		return;
3878
3879	do_for_each_ftrace_op(op, ftrace_ops_list) {
3880		if (op->func_hash == &global_ops.local_hash &&
3881		    op->flags & FTRACE_OPS_FL_ENABLED) {
3882			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
3883			/* Only need to do this once */
3884			return;
3885		}
3886	} while_for_each_ftrace_op(op);
3887}
3888
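/*
 * The helper below swaps @hash in for *@orig_hash, updates the live
 * call sites via ftrace_ops_update_code(), and then frees the old hash
 * after an RCU grace period, so lockless readers of the hash are never
 * left with a stale pointer.
 */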
3889static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
3890					   struct ftrace_hash **orig_hash,
3891					   struct ftrace_hash *hash,
3892					   int enable)
3893{
3894	struct ftrace_ops_hash old_hash_ops;
3895	struct ftrace_hash *old_hash;
3896	int ret;
3897
3898	old_hash = *orig_hash;
3899	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
3900	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
3901	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3902	if (!ret) {
3903		ftrace_ops_update_code(ops, &old_hash_ops);
3904		free_ftrace_hash_rcu(old_hash);
3905	}
3906	return ret;
3907}
3908
3909static bool module_exists(const char *module)
3910{
3911	/* All modules have the symbol __this_module */
3912	static const char this_mod[] = "__this_module";
3913	char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
3914	unsigned long val;
3915	int n;
3916
3917	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
3918
3919	if (n > sizeof(modname) - 1)
3920		return false;
3921
3922	val = module_kallsyms_lookup_name(modname);
3923	return val != 0;
3924}
3925
3926static int cache_mod(struct trace_array *tr,
3927		     const char *func, char *module, int enable)
3928{
3929	struct ftrace_mod_load *ftrace_mod, *n;
3930	struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
3931	int ret;
3932
3933	mutex_lock(&ftrace_lock);
3934
3935	/* We do not cache inverse filters */
3936	if (func[0] == '!') {
3937		func++;
3938		ret = -EINVAL;
3939
3940		/* Look to remove this hash */
3941		list_for_each_entry_safe(ftrace_mod, n, head, list) {
3942			if (strcmp(ftrace_mod->module, module) != 0)
3943				continue;
3944
3945			/* no func matches all */
3946			if (strcmp(func, "*") == 0 ||
3947			    (ftrace_mod->func &&
3948			     strcmp(ftrace_mod->func, func) == 0)) {
3949				ret = 0;
3950				free_ftrace_mod(ftrace_mod);
3951				continue;
3952			}
3953		}
3954		goto out;
3955	}
3956
3957	ret = -EINVAL;
3958	/* We only care about modules that have not been loaded yet */
3959	if (module_exists(module))
3960		goto out;
3961
3962	/* Save this string off, and execute it when the module is loaded */
3963	ret = ftrace_add_mod(tr, func, module, enable);
3964 out:
3965	mutex_unlock(&ftrace_lock);
3966
3967	return ret;
3968}
3969
3970static int
3971ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3972		 int reset, int enable);
3973
3974#ifdef CONFIG_MODULES
3975static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
3976			     char *mod, bool enable)
3977{
3978	struct ftrace_mod_load *ftrace_mod, *n;
3979	struct ftrace_hash **orig_hash, *new_hash;
3980	LIST_HEAD(process_mods);
3981	char *func;
3982	int ret;
3983
3984	mutex_lock(&ops->func_hash->regex_lock);
3985
3986	if (enable)
3987		orig_hash = &ops->func_hash->filter_hash;
3988	else
3989		orig_hash = &ops->func_hash->notrace_hash;
3990
3991	new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
3992					      *orig_hash);
3993	if (!new_hash)
3994		goto out; /* warn? */
3995
3996	mutex_lock(&ftrace_lock);
3997
3998	list_for_each_entry_safe(ftrace_mod, n, head, list) {
3999
4000		if (strcmp(ftrace_mod->module, mod) != 0)
4001			continue;
4002
4003		if (ftrace_mod->func)
4004			func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4005		else
4006			func = kstrdup("*", GFP_KERNEL);
4007
4008		if (!func) /* warn? */
4009			continue;
4010
4011		list_del(&ftrace_mod->list);
4012		list_add(&ftrace_mod->list, &process_mods);
4013
4014		/* Use the newly allocated func, as it may be "*" */
4015		kfree(ftrace_mod->func);
4016		ftrace_mod->func = func;
4017	}
4018
4019	mutex_unlock(&ftrace_lock);
4020
4021	list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4022
4023		func = ftrace_mod->func;
4024
4025		/* Grabs ftrace_lock, which is why we have this extra step */
4026		match_records(new_hash, func, strlen(func), mod);
4027		free_ftrace_mod(ftrace_mod);
4028	}
4029
4030	if (enable && list_empty(head))
4031		new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4032
4033	mutex_lock(&ftrace_lock);
4034
4035	ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
4036					      new_hash, enable);
4037	mutex_unlock(&ftrace_lock);
4038
4039 out:
4040	mutex_unlock(&ops->func_hash->regex_lock);
4041
4042	free_ftrace_hash(new_hash);
4043}
4044
4045static void process_cached_mods(const char *mod_name)
4046{
4047	struct trace_array *tr;
4048	char *mod;
4049
4050	mod = kstrdup(mod_name, GFP_KERNEL);
4051	if (!mod)
4052		return;
4053
4054	mutex_lock(&trace_types_lock);
4055	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4056		if (!list_empty(&tr->mod_trace))
4057			process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4058		if (!list_empty(&tr->mod_notrace))
4059			process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4060	}
4061	mutex_unlock(&trace_types_lock);
4062
4063	kfree(mod);
4064}
4065#endif
4066
4067/*
4068 * We register the module command as a template to show others how
4069 * to register a command as well.
4070 */
4071
4072static int
4073ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4074		    char *func_orig, char *cmd, char *module, int enable)
4075{
4076	char *func;
4077	int ret;
4078
4079	/* match_records() modifies func, and we need the original */
4080	func = kstrdup(func_orig, GFP_KERNEL);
4081	if (!func)
4082		return -ENOMEM;
4083
4084	/*
4085	 * cmd == 'mod' because we only registered this func
4086	 * for the 'mod' ftrace_func_command.
4087	 * But if you register one func with multiple commands,
4088	 * you can tell which command was used by the cmd
4089	 * parameter.
4090	 */
4091	ret = match_records(hash, func, strlen(func), module);
4092	kfree(func);
4093
4094	if (!ret)
4095		return cache_mod(tr, func_orig, module, enable);
4096	if (ret < 0)
4097		return ret;
4098	return 0;
4099}
4100
4101static struct ftrace_func_command ftrace_mod_cmd = {
4102	.name			= "mod",
4103	.func			= ftrace_mod_callback,
4104};
4105
4106static int __init ftrace_mod_cmd_init(void)
4107{
4108	return register_ftrace_command(&ftrace_mod_cmd);
4109}
4110core_initcall(ftrace_mod_cmd_init);
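/*
 * Illustrative sketch, not compiled: the "mod" command above is used
 * as, e.g., "echo 'write*:mod:ext4' > set_ftrace_filter".  A new
 * command would be wired up the same way; everything named "my_*"
 * below is hypothetical.
 */
#if 0
static int
my_cmd_callback(struct trace_array *tr, struct ftrace_hash *hash,
		char *func, char *cmd, char *param, int enable)
{
	/* invoked for: echo 'func:mycmd:param' > set_ftrace_filter */
	return 0;
}

static struct ftrace_func_command my_cmd = {
	.name	= "mycmd",
	.func	= my_cmd_callback,
};

static int __init my_cmd_init(void)
{
	return register_ftrace_command(&my_cmd);
}
core_initcall(my_cmd_init);
#endif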
4111
4112static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4113				      struct ftrace_ops *op, struct pt_regs *pt_regs)
4114{
4115	struct ftrace_probe_ops *probe_ops;
4116	struct ftrace_func_probe *probe;
4117
4118	probe = container_of(op, struct ftrace_func_probe, ops);
4119	probe_ops = probe->probe_ops;
4120
4121	/*
4122	 * Disable preemption for these calls to prevent an RCU grace
4123	 * period. This syncs the hash iteration and freeing of items
4124	 * on the hash. rcu_read_lock is too dangerous here.
4125	 */
4126	preempt_disable_notrace();
4127	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4128	preempt_enable_notrace();
4129}
4130
4131struct ftrace_func_map {
4132	struct ftrace_func_entry	entry;
4133	void				*data;
4134};
4135
4136struct ftrace_func_mapper {
4137	struct ftrace_hash		hash;
4138};
4139
4140/**
4141 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4142 *
4143 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
4144 */
4145struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4146{
4147	struct ftrace_hash *hash;
4148
4149	/*
4150	 * The mapper is simply a ftrace_hash, but since the entries
4151	 * in the hash are not ftrace_func_entry type, we define it
4152	 * as a separate structure.
4153	 */
4154	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4155	return (struct ftrace_func_mapper *)hash;
4156}
4157
4158/**
4159 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4160 * @mapper: The mapper that has the ip maps
4161 * @ip: the instruction pointer to find the data for
4162 *
4163 * Returns the data mapped to @ip if found, otherwise NULL. The return
4164 * is actually the address of the mapper data pointer. The address is
4165 * returned for use cases where the data is no bigger than a long, and
4166 * the user can use the data pointer as its data instead of having to
4167 * allocate more memory for the reference.
4168 */
4169void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4170				  unsigned long ip)
4171{
4172	struct ftrace_func_entry *entry;
4173	struct ftrace_func_map *map;
4174
4175	entry = ftrace_lookup_ip(&mapper->hash, ip);
4176	if (!entry)
4177		return NULL;
4178
4179	map = (struct ftrace_func_map *)entry;
4180	return &map->data;
4181}
4182
4183/**
4184 * ftrace_func_mapper_add_ip - Map some data to an ip
4185 * @mapper: The mapper that has the ip maps
4186 * @ip: The instruction pointer address to map @data to
4187 * @data: The data to map to @ip
4188 *
4189 * Returns 0 on success, otherwise an error.
4190 */
4191int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4192			      unsigned long ip, void *data)
4193{
4194	struct ftrace_func_entry *entry;
4195	struct ftrace_func_map *map;
4196
4197	entry = ftrace_lookup_ip(&mapper->hash, ip);
4198	if (entry)
4199		return -EBUSY;
4200
4201	map = kmalloc(sizeof(*map), GFP_KERNEL);
4202	if (!map)
4203		return -ENOMEM;
4204
4205	map->entry.ip = ip;
4206	map->data = data;
4207
4208	__add_hash_entry(&mapper->hash, &map->entry);
4209
4210	return 0;
4211}
4212
4213/**
4214 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4215 * @mapper: The mapper that has the ip maps
4216 * @ip: The instruction pointer address to remove the data from
4217 *
4218 * Returns the data if it is found, otherwise NULL.
4219 * Note, if the data pointer is used as the data itself (see
4220 * ftrace_func_mapper_find_ip()), then the return value may be meaningless
4221 * if the data pointer was set to zero.
4222 */
4223void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4224				   unsigned long ip)
4225{
4226	struct ftrace_func_entry *entry;
4227	struct ftrace_func_map *map;
4228	void *data;
4229
4230	entry = ftrace_lookup_ip(&mapper->hash, ip);
4231	if (!entry)
4232		return NULL;
4233
4234	map = (struct ftrace_func_map *)entry;
4235	data = map->data;
4236
4237	remove_hash_entry(&mapper->hash, entry);
4238	kfree(entry);
4239
4240	return data;
4241}
4242
4243/**
4244 * free_ftrace_func_mapper - free a mapping of ips and data
4245 * @mapper: The mapper that has the ip maps
4246 * @free_func: A function to be called on each data item.
4247 *
4248 * This is used to free the function mapper. The @free_func is optional
4249 * and can be used if the data needs to be freed as well.
4250 */
4251void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4252			     ftrace_mapper_func free_func)
4253{
4254	struct ftrace_func_entry *entry;
4255	struct ftrace_func_map *map;
4256	struct hlist_head *hhd;
4257	int size, i;
4258
4259	if (!mapper)
4260		return;
4261
4262	if (free_func && mapper->hash.count) {
4263		size = 1 << mapper->hash.size_bits;
4264		for (i = 0; i < size; i++) {
4265			hhd = &mapper->hash.buckets[i];
4266			hlist_for_each_entry(entry, hhd, hlist) {
4267				map = (struct ftrace_func_map *)entry;
4268				free_func(map);
4269			}
4270		}
4271	}
4272	free_ftrace_hash(&mapper->hash);
4273}
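/*
 * Illustrative sketch, not compiled: the typical life cycle of a
 * func_mapper as used by probe implementations.  Here the data slot
 * itself stores a count, so no extra allocation is needed; the function
 * name is made up.
 */
#if 0
static void example_mapper_usage(unsigned long ip)
{
	struct ftrace_func_mapper *mapper;
	void **datap;

	mapper = allocate_ftrace_func_mapper();
	if (!mapper)
		return;

	ftrace_func_mapper_add_ip(mapper, ip, (void *)5UL);

	datap = ftrace_func_mapper_find_ip(mapper, ip);
	if (datap && (unsigned long)*datap)
		(*(unsigned long *)datap)--;	/* count down in place */

	free_ftrace_func_mapper(mapper, NULL);
}
#endif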
4274
4275static void release_probe(struct ftrace_func_probe *probe)
4276{
4277	struct ftrace_probe_ops *probe_ops;
4278
4279	mutex_lock(&ftrace_lock);
4280
4281	WARN_ON(probe->ref <= 0);
4282
4283	/* Subtract the ref that was used to protect this instance */
4284	probe->ref--;
4285
4286	if (!probe->ref) {
4287		probe_ops = probe->probe_ops;
4288		/*
4289		 * Sending zero as ip tells probe_ops to free
4290		 * the probe->data itself
4291		 */
4292		if (probe_ops->free)
4293			probe_ops->free(probe_ops, probe->tr, 0, probe->data);
4294		list_del(&probe->list);
4295		kfree(probe);
4296	}
4297	mutex_unlock(&ftrace_lock);
4298}
4299
4300static void acquire_probe_locked(struct ftrace_func_probe *probe)
4301{
4302	/*
4303	 * Add one ref to keep it from being freed when releasing the
4304	 * ftrace_lock mutex.
4305	 */
4306	probe->ref++;
4307}
4308
4309int
4310register_ftrace_function_probe(char *glob, struct trace_array *tr,
4311			       struct ftrace_probe_ops *probe_ops,
4312			       void *data)
4313{
4314	struct ftrace_func_entry *entry;
4315	struct ftrace_func_probe *probe;
4316	struct ftrace_hash **orig_hash;
4317	struct ftrace_hash *old_hash;
4318	struct ftrace_hash *hash;
4319	int count = 0;
4320	int size;
4321	int ret;
4322	int i;
4323
4324	if (WARN_ON(!tr))
4325		return -EINVAL;
4326
4327	/* We do not support '!' for function probes */
4328	if (WARN_ON(glob[0] == '!'))
4329		return -EINVAL;
4330
4331
4332	mutex_lock(&ftrace_lock);
4333	/* Check if the probe_ops is already registered */
4334	list_for_each_entry(probe, &tr->func_probes, list) {
4335		if (probe->probe_ops == probe_ops)
4336			break;
4337	}
4338	if (&probe->list == &tr->func_probes) {
4339		probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4340		if (!probe) {
4341			mutex_unlock(&ftrace_lock);
4342			return -ENOMEM;
4343		}
4344		probe->probe_ops = probe_ops;
4345		probe->ops.func = function_trace_probe_call;
4346		probe->tr = tr;
4347		ftrace_ops_init(&probe->ops);
4348		list_add(&probe->list, &tr->func_probes);
4349	}
4350
4351	acquire_probe_locked(probe);
4352
4353	mutex_unlock(&ftrace_lock);
4354
4355	/*
4356	 * Note, there's a small window here that the func_hash->filter_hash
4357	 * may be NULL or empty. Need to be careful when reading the loop.
4358	 */
4359	mutex_lock(&probe->ops.func_hash->regex_lock);
4360
4361	orig_hash = &probe->ops.func_hash->filter_hash;
4362	old_hash = *orig_hash;
4363	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4364
4365	if (!hash) {
4366		ret = -ENOMEM;
4367		goto out;
4368	}
4369
4370	ret = ftrace_match_records(hash, glob, strlen(glob));
4371
4372	/* Nothing found? */
4373	if (!ret)
4374		ret = -EINVAL;
4375
4376	if (ret < 0)
4377		goto out;
4378
4379	size = 1 << hash->size_bits;
4380	for (i = 0; i < size; i++) {
4381		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4382			if (ftrace_lookup_ip(old_hash, entry->ip))
4383				continue;
4384			/*
4385			 * The caller might want to do something special
4386			 * for each function we find. We call the callback
4387			 * to give the caller an opportunity to do so.
4388			 */
4389			if (probe_ops->init) {
4390				ret = probe_ops->init(probe_ops, tr,
4391						      entry->ip, data,
4392						      &probe->data);
4393				if (ret < 0) {
4394					if (probe_ops->free && count)
4395						probe_ops->free(probe_ops, tr,
4396								0, probe->data);
4397					probe->data = NULL;
4398					goto out;
4399				}
4400			}
4401			count++;
4402		}
4403	}
4404
4405	mutex_lock(&ftrace_lock);
4406
4407	if (!count) {
4408		/* Nothing was added? */
4409		ret = -EINVAL;
4410		goto out_unlock;
4411	}
4412
4413	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4414					      hash, 1);
4415	if (ret < 0)
4416		goto err_unlock;
4417
4418	/* One ref for each new function traced */
4419	probe->ref += count;
4420
4421	if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4422		ret = ftrace_startup(&probe->ops, 0);
4423
4424 out_unlock:
4425	mutex_unlock(&ftrace_lock);
4426
4427	if (!ret)
4428		ret = count;
4429 out:
4430	mutex_unlock(&probe->ops.func_hash->regex_lock);
4431	free_ftrace_hash(hash);
4432
4433	release_probe(probe);
4434
4435	return ret;
4436
4437 err_unlock:
4438	if (!probe_ops->free || !count)
4439		goto out_unlock;
4440
4441	/* Failed to do the move, need to call the free functions */
4442	for (i = 0; i < size; i++) {
4443		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4444			if (ftrace_lookup_ip(old_hash, entry->ip))
4445				continue;
4446			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4447		}
4448	}
4449	goto out_unlock;
4450}
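/*
 * Function probes back the command syntax of set_ftrace_filter, e.g.
 * (illustrative):
 *
 *   echo 'vfs_read:stacktrace' > set_ftrace_filter
 *   echo '!vfs_read:stacktrace' > set_ftrace_filter
 *
 * where the second form reaches the unregister path below; the glob
 * handed in here is the "vfs_read" part.
 */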
4451
4452int
4453unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4454				      struct ftrace_probe_ops *probe_ops)
4455{
4456	struct ftrace_ops_hash old_hash_ops;
4457	struct ftrace_func_entry *entry;
4458	struct ftrace_func_probe *probe;
4459	struct ftrace_glob func_g;
4460	struct ftrace_hash **orig_hash;
4461	struct ftrace_hash *old_hash;
4462	struct ftrace_hash *hash = NULL;
4463	struct hlist_node *tmp;
4464	struct hlist_head hhd;
4465	char str[KSYM_SYMBOL_LEN];
4466	int count = 0;
4467	int i, ret = -ENODEV;
4468	int size;
4469
4470	if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4471		func_g.search = NULL;
4472	else {
4473		int not;
4474
4475		func_g.type = filter_parse_regex(glob, strlen(glob),
4476						 &func_g.search, &not);
4477		func_g.len = strlen(func_g.search);
4478
4479		/* we do not support '!' for function probes */
4480		if (WARN_ON(not))
4481			return -EINVAL;
4482	}
4483
4484	mutex_lock(&ftrace_lock);
4485	/* Check if the probe_ops is already registered */
4486	list_for_each_entry(probe, &tr->func_probes, list) {
4487		if (probe->probe_ops == probe_ops)
4488			break;
4489	}
4490	if (&probe->list == &tr->func_probes)
4491		goto err_unlock_ftrace;
4492
4493	ret = -EINVAL;
4494	if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4495		goto err_unlock_ftrace;
4496
4497	acquire_probe_locked(probe);
4498
4499	mutex_unlock(&ftrace_lock);
4500
4501	mutex_lock(&probe->ops.func_hash->regex_lock);
4502
4503	orig_hash = &probe->ops.func_hash->filter_hash;
4504	old_hash = *orig_hash;
4505
4506	if (ftrace_hash_empty(old_hash))
4507		goto out_unlock;
4508
4509	old_hash_ops.filter_hash = old_hash;
4510	/* Probes only have filters */
4511	old_hash_ops.notrace_hash = NULL;
4512
4513	ret = -ENOMEM;
4514	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4515	if (!hash)
4516		goto out_unlock;
4517
4518	INIT_HLIST_HEAD(&hhd);
4519
4520	size = 1 << hash->size_bits;
4521	for (i = 0; i < size; i++) {
4522		hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4523
4524			if (func_g.search) {
4525				kallsyms_lookup(entry->ip, NULL, NULL,
4526						NULL, str);
4527				if (!ftrace_match(str, &func_g))
4528					continue;
4529			}
4530			count++;
4531			remove_hash_entry(hash, entry);
4532			hlist_add_head(&entry->hlist, &hhd);
4533		}
4534	}
4535
4536	/* Nothing found? */
4537	if (!count) {
4538		ret = -EINVAL;
4539		goto out_unlock;
4540	}
4541
4542	mutex_lock(&ftrace_lock);
4543
4544	WARN_ON(probe->ref < count);
4545
4546	probe->ref -= count;
4547
4548	if (ftrace_hash_empty(hash))
4549		ftrace_shutdown(&probe->ops, 0);
4550
4551	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4552					      hash, 1);
4553
4554	/* still need to update the function call sites */
4555	if (ftrace_enabled && !ftrace_hash_empty(hash))
4556		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4557				       &old_hash_ops);
4558	synchronize_rcu();
4559
4560	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4561		hlist_del(&entry->hlist);
4562		if (probe_ops->free)
4563			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4564		kfree(entry);
4565	}
4566	mutex_unlock(&ftrace_lock);
4567
4568 out_unlock:
4569	mutex_unlock(&probe->ops.func_hash->regex_lock);
4570	free_ftrace_hash(hash);
4571
4572	release_probe(probe);
4573
4574	return ret;
4575
4576 err_unlock_ftrace:
4577	mutex_unlock(&ftrace_lock);
4578	return ret;
4579}
4580
4581void clear_ftrace_function_probes(struct trace_array *tr)
4582{
4583	struct ftrace_func_probe *probe, *n;
4584
4585	list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4586		unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4587}
4588
4589static LIST_HEAD(ftrace_commands);
4590static DEFINE_MUTEX(ftrace_cmd_mutex);
4591
4592/*
4593 * Currently we only register ftrace commands from __init, so mark this
4594 * __init too.
4595 */
4596__init int register_ftrace_command(struct ftrace_func_command *cmd)
4597{
4598	struct ftrace_func_command *p;
4599	int ret = 0;
4600
4601	mutex_lock(&ftrace_cmd_mutex);
4602	list_for_each_entry(p, &ftrace_commands, list) {
4603		if (strcmp(cmd->name, p->name) == 0) {
4604			ret = -EBUSY;
4605			goto out_unlock;
4606		}
4607	}
4608	list_add(&cmd->list, &ftrace_commands);
4609 out_unlock:
4610	mutex_unlock(&ftrace_cmd_mutex);
4611
4612	return ret;
4613}
4614
4615/*
4616 * Currently we only unregister ftrace commands from __init, so mark
4617 * this __init too.
4618 */
4619__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
4620{
4621	struct ftrace_func_command *p, *n;
4622	int ret = -ENODEV;
4623
4624	mutex_lock(&ftrace_cmd_mutex);
4625	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4626		if (strcmp(cmd->name, p->name) == 0) {
4627			ret = 0;
4628			list_del_init(&p->list);
4629			goto out_unlock;
4630		}
4631	}
4632 out_unlock:
4633	mutex_unlock(&ftrace_cmd_mutex);
4634
4635	return ret;
4636}
4637
4638static int ftrace_process_regex(struct ftrace_iterator *iter,
4639				char *buff, int len, int enable)
4640{
4641	struct ftrace_hash *hash = iter->hash;
4642	struct trace_array *tr = iter->ops->private;
4643	char *func, *command, *next = buff;
4644	struct ftrace_func_command *p;
4645	int ret = -EINVAL;
4646
4647	func = strsep(&next, ":");
4648
4649	if (!next) {
4650		ret = ftrace_match_records(hash, func, len);
4651		if (!ret)
4652			ret = -EINVAL;
4653		if (ret < 0)
4654			return ret;
4655		return 0;
4656	}
4657
4658	/* command found */
4659
4660	command = strsep(&next, ":");
4661
4662	mutex_lock(&ftrace_cmd_mutex);
4663	list_for_each_entry(p, &ftrace_commands, list) {
4664		if (strcmp(p->name, command) == 0) {
4665			ret = p->func(tr, hash, func, command, next, enable);
4666			goto out_unlock;
4667		}
4668	}
4669 out_unlock:
4670	mutex_unlock(&ftrace_cmd_mutex);
4671
4672	return ret;
4673}
4674
4675static ssize_t
4676ftrace_regex_write(struct file *file, const char __user *ubuf,
4677		   size_t cnt, loff_t *ppos, int enable)
4678{
4679	struct ftrace_iterator *iter;
4680	struct trace_parser *parser;
4681	ssize_t ret, read;
4682
4683	if (!cnt)
4684		return 0;
4685
4686	if (file->f_mode & FMODE_READ) {
4687		struct seq_file *m = file->private_data;
4688		iter = m->private;
4689	} else
4690		iter = file->private_data;
4691
4692	if (unlikely(ftrace_disabled))
4693		return -ENODEV;
4694
4695	/* iter->hash is a local copy, so we don't need regex_lock */
4696
4697	parser = &iter->parser;
4698	read = trace_get_user(parser, ubuf, cnt, ppos);
4699
4700	if (read >= 0 && trace_parser_loaded(parser) &&
4701	    !trace_parser_cont(parser)) {
4702		ret = ftrace_process_regex(iter, parser->buffer,
4703					   parser->idx, enable);
4704		trace_parser_clear(parser);
4705		if (ret < 0)
4706			goto out;
4707	}
4708
4709	ret = read;
4710 out:
4711	return ret;
4712}
4713
4714ssize_t
4715ftrace_filter_write(struct file *file, const char __user *ubuf,
4716		    size_t cnt, loff_t *ppos)
4717{
4718	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4719}
4720
4721ssize_t
4722ftrace_notrace_write(struct file *file, const char __user *ubuf,
4723		     size_t cnt, loff_t *ppos)
4724{
4725	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4726}
4727
4728static int
4729ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4730{
4731	struct ftrace_func_entry *entry;
4732
4733	if (!ftrace_location(ip))
4734		return -EINVAL;
4735
4736	if (remove) {
4737		entry = ftrace_lookup_ip(hash, ip);
4738		if (!entry)
4739			return -ENOENT;
4740		free_hash_entry(hash, entry);
4741		return 0;
4742	}
4743
4744	return add_hash_entry(hash, ip);
4745}
4746
4747static int
4748ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4749		unsigned long ip, int remove, int reset, int enable)
4750{
4751	struct ftrace_hash **orig_hash;
4752	struct ftrace_hash *hash;
4753	int ret;
4754
4755	if (unlikely(ftrace_disabled))
4756		return -ENODEV;
4757
4758	mutex_lock(&ops->func_hash->regex_lock);
4759
4760	if (enable)
4761		orig_hash = &ops->func_hash->filter_hash;
4762	else
4763		orig_hash = &ops->func_hash->notrace_hash;
4764
4765	if (reset)
4766		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4767	else
4768		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4769
4770	if (!hash) {
4771		ret = -ENOMEM;
4772		goto out_regex_unlock;
4773	}
4774
4775	if (buf && !ftrace_match_records(hash, buf, len)) {
4776		ret = -EINVAL;
4777		goto out_regex_unlock;
4778	}
4779	if (ip) {
4780		ret = ftrace_match_addr(hash, ip, remove);
4781		if (ret < 0)
4782			goto out_regex_unlock;
4783	}
4784
4785	mutex_lock(&ftrace_lock);
4786	ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
4787	mutex_unlock(&ftrace_lock);
4788
4789 out_regex_unlock:
4790	mutex_unlock(&ops->func_hash->regex_lock);
4791
4792	free_ftrace_hash(hash);
4793	return ret;
4794}
4795
4796static int
4797ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4798		int reset, int enable)
4799{
4800	return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
4801}
4802
4803/**
4804 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
4805 * @ops: the ops to set the filter with
4806 * @ip: the address to add to or remove from the filter.
4807 * @remove: non zero to remove the ip from the filter
4808 * @reset: non zero to reset all filters before applying this filter.
4809 *
4810 * Filters denote which functions should be enabled when tracing is enabled.
4811 * If @ip is NULL, it fails to update the filter.
4812 */
4813int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
4814			 int remove, int reset)
4815{
4816	ftrace_ops_init(ops);
4817	return ftrace_set_addr(ops, ip, remove, reset, 1);
4818}
4819EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
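/*
 * Illustrative sketch, not compiled: how a caller might pin an ops to
 * a single function address before registering it.  All "my_*" names
 * are hypothetical, and @addr is assumed to be an address that
 * ftrace_location() recognizes.
 */
#if 0
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/* runs on entry to the single filtered function */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
};

static int my_attach(unsigned long addr)
{
	int ret = ftrace_set_filter_ip(&my_ops, addr, 0, 0);

	return ret ? ret : register_ftrace_function(&my_ops);
}
#endif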
4820
4821/**
4822 * ftrace_ops_set_global_filter - setup ops to use global filters
4823 * @ops: the ops which will use the global filters
4824 *
4825 * ftrace users who need global function trace filtering should call this.
4826 * It can set the global filter only if ops were not initialized before.
4827 */
4828void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
4829{
4830	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
4831		return;
4832
4833	ftrace_ops_init(ops);
4834	ops->func_hash = &global_ops.local_hash;
4835}
4836EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
4837
4838static int
4839ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4840		 int reset, int enable)
4841{
4842	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
4843}
4844
4845/**
4846 * ftrace_set_filter - set a function to filter on in ftrace
4847 * @ops: the ops to set the filter with
4848 * @buf: the string that holds the function filter text.
4849 * @len: the length of the string.
4850 * @reset: non zero to reset all filters before applying this filter.
4851 *
4852 * Filters denote which functions should be enabled when tracing is enabled.
4853 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4854 */
4855int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
4856		       int len, int reset)
4857{
4858	ftrace_ops_init(ops);
4859	return ftrace_set_regex(ops, buf, len, reset, 1);
4860}
4861EXPORT_SYMBOL_GPL(ftrace_set_filter);
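/*
 * For illustration: a tracer that only wants its callback on, say, the
 * VFS entry points could call
 *
 *	ftrace_set_filter(&my_ops, "vfs_*", 5, 1);
 *
 * before registering my_ops ("my_ops" is hypothetical; 5 is
 * strlen("vfs_*")).
 */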
4862
4863/**
4864 * ftrace_set_notrace - set a function to not trace in ftrace
4865 * @ops: the ops to set the notrace filter with
4866 * @buf: the string that holds the function notrace text.
4867 * @len: the length of the string.
4868 * @reset: non zero to reset all filters before applying this filter.
4869 *
4870 * Notrace Filters denote which functions should not be enabled when tracing
4871 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4872 * for tracing.
4873 */
4874int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
4875			int len, int reset)
4876{
4877	ftrace_ops_init(ops);
4878	return ftrace_set_regex(ops, buf, len, reset, 0);
4879}
4880EXPORT_SYMBOL_GPL(ftrace_set_notrace);
4881/**
4882 * ftrace_set_global_filter - set a function to filter on with global tracers
4883 * @buf: the string that holds the function filter text.
4884 * @len: the length of the string.
4885 * @reset: non zero to reset all filters before applying this filter.
4886 *
4887 * Filters denote which functions should be enabled when tracing is enabled.
4888 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4889 */
4890void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
4891{
4892	ftrace_set_regex(&global_ops, buf, len, reset, 1);
4893}
4894EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4895
4896/**
4897 * ftrace_set_global_notrace - set a function to not trace with global tracers
4898 * @buf: the string that holds the function notrace text.
4899 * @len: the length of the string.
4900 * @reset: non zero to reset all filters before applying this filter.
4901 *
4902 * Notrace Filters denote which functions should not be enabled when tracing
4903 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4904 * for tracing.
4905 */
4906void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
4907{
4908	ftrace_set_regex(&global_ops, buf, len, reset, 0);
4909}
4910EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
4911
4912/*
4913 * command line interface to allow users to set filters on boot up.
4914 */
4915#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
4916static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4917static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
4918
4919/* Used by the function selftest to skip testing when a filter is set */
4920bool ftrace_filter_param __initdata;
4921
4922static int __init set_ftrace_notrace(char *str)
4923{
4924	ftrace_filter_param = true;
4925	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
4926	return 1;
4927}
4928__setup("ftrace_notrace=", set_ftrace_notrace);
4929
4930static int __init set_ftrace_filter(char *str)
4931{
4932	ftrace_filter_param = true;
4933	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
4934	return 1;
4935}
4936__setup("ftrace_filter=", set_ftrace_filter);
4937
4938#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4939static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
4940static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4941static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
4942
4943static int __init set_graph_function(char *str)
4944{
4945	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
4946	return 1;
4947}
4948__setup("ftrace_graph_filter=", set_graph_function);
4949
4950static int __init set_graph_notrace_function(char *str)
4951{
4952	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
4953	return 1;
4954}
4955__setup("ftrace_graph_notrace=", set_graph_notrace_function);
4956
4957static int __init set_graph_max_depth_function(char *str)
4958{
4959	if (!str)
4960		return 0;
4961	fgraph_max_depth = simple_strtoul(str, NULL, 0);
4962	return 1;
4963}
4964__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
4965
4966static void __init set_ftrace_early_graph(char *buf, int enable)
4967{
4968	int ret;
4969	char *func;
4970	struct ftrace_hash *hash;
4971
4972	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4973	if (WARN_ON(!hash))
4974		return;
4975
4976	while (buf) {
4977		func = strsep(&buf, ",");
4978		/* we allow only one expression at a time */
4979		ret = ftrace_graph_set_hash(hash, func);
4980		if (ret)
4981			printk(KERN_DEBUG "ftrace: function %s not traceable\n",
4982			       func);
4983	}
4984
4985	if (enable)
4986		ftrace_graph_hash = hash;
4987	else
4988		ftrace_graph_notrace_hash = hash;
4989}
4990#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4991
4992void __init
4993ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
4994{
4995	char *func;
4996
4997	ftrace_ops_init(ops);
4998
4999	while (buf) {
5000		func = strsep(&buf, ",");
5001		ftrace_set_regex(ops, func, strlen(func), 0, enable);
5002	}
5003}
5004
5005static void __init set_ftrace_early_filters(void)
5006{
5007	if (ftrace_filter_buf[0])
5008		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
5009	if (ftrace_notrace_buf[0])
5010		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
5011#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5012	if (ftrace_graph_buf[0])
5013		set_ftrace_early_graph(ftrace_graph_buf, 1);
5014	if (ftrace_graph_notrace_buf[0])
5015		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
5016#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5017}
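/*
 * Illustrative boot-time equivalents of the tracefs filter files,
 * set on the kernel command line (function names are examples):
 *
 *   ftrace_filter=vfs_read,vfs_write ftrace_notrace=*spin*
 *   ftrace_graph_filter=do_sys_open ftrace_graph_max_depth=5
 */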
5018
5019int ftrace_regex_release(struct inode *inode, struct file *file)
5020{
5021	struct seq_file *m = (struct seq_file *)file->private_data;
5022	struct ftrace_iterator *iter;
5023	struct ftrace_hash **orig_hash;
5024	struct trace_parser *parser;
5025	int filter_hash;
5026	int ret;
5027
5028	if (file->f_mode & FMODE_READ) {
5029		iter = m->private;
5030		seq_release(inode, file);
5031	} else
5032		iter = file->private_data;
5033
5034	parser = &iter->parser;
5035	if (trace_parser_loaded(parser)) {
5036		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
5037	}
5038
5039	trace_parser_put(parser);
5040
5041	mutex_lock(&iter->ops->func_hash->regex_lock);
5042
5043	if (file->f_mode & FMODE_WRITE) {
5044		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
5045
5046		if (filter_hash) {
5047			orig_hash = &iter->ops->func_hash->filter_hash;
5048			if (iter->tr && !list_empty(&iter->tr->mod_trace))
5049				iter->hash->flags |= FTRACE_HASH_FL_MOD;
5050		} else
5051			orig_hash = &iter->ops->func_hash->notrace_hash;
5052
5053		mutex_lock(&ftrace_lock);
5054		ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
5055						      iter->hash, filter_hash);
5056		mutex_unlock(&ftrace_lock);
5057	} else {
5058		/* For read only, the hash is the ops hash */
5059		iter->hash = NULL;
5060	}
5061
5062	mutex_unlock(&iter->ops->func_hash->regex_lock);
5063	free_ftrace_hash(iter->hash);
5064	if (iter->tr)
5065		trace_array_put(iter->tr);
5066	kfree(iter);
5067
5068	return 0;
5069}
5070
5071static const struct file_operations ftrace_avail_fops = {
5072	.open = ftrace_avail_open,
5073	.read = seq_read,
5074	.llseek = seq_lseek,
5075	.release = seq_release_private,
5076};
5077
5078static const struct file_operations ftrace_enabled_fops = {
5079	.open = ftrace_enabled_open,
5080	.read = seq_read,
5081	.llseek = seq_lseek,
5082	.release = seq_release_private,
5083};
5084
5085static const struct file_operations ftrace_filter_fops = {
5086	.open = ftrace_filter_open,
5087	.read = seq_read,
5088	.write = ftrace_filter_write,
5089	.llseek = tracing_lseek,
5090	.release = ftrace_regex_release,
5091};
5092
5093static const struct file_operations ftrace_notrace_fops = {
5094	.open = ftrace_notrace_open,
5095	.read = seq_read,
5096	.write = ftrace_notrace_write,
5097	.llseek = tracing_lseek,
5098	.release = ftrace_regex_release,
5099};
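
/*
 * Illustrative tracefs usage of the two filter files above (a sketch;
 * paths assume tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo kmalloc   > /sys/kernel/tracing/set_ftrace_filter   # O_TRUNC: replace
 *   echo kfree    >> /sys/kernel/tracing/set_ftrace_filter   # append
 *   echo 'sched*'  > /sys/kernel/tracing/set_ftrace_notrace  # glob: never trace
 *
 * Writes are buffered by ftrace_filter_write()/ftrace_notrace_write()
 * and committed to the ops hash in ftrace_regex_release() on close.
 */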
5100
5101#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5102
5103static DEFINE_MUTEX(graph_lock);
5104
5105struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
5106struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
5107
5108enum graph_filter_type {
5109	GRAPH_FILTER_NOTRACE	= 0,
5110	GRAPH_FILTER_FUNCTION,
5111};
5112
5113#define FTRACE_GRAPH_EMPTY	((void *)1)
5114
5115struct ftrace_graph_data {
5116	struct ftrace_hash		*hash;
5117	struct ftrace_func_entry	*entry;
5118	int				idx;   /* for hash table iteration */
5119	enum graph_filter_type		type;
5120	struct ftrace_hash		*new_hash;
5121	const struct seq_operations	*seq_ops;
5122	struct trace_parser		parser;
5123};
5124
5125static void *
5126__g_next(struct seq_file *m, loff_t *pos)
5127{
5128	struct ftrace_graph_data *fgd = m->private;
5129	struct ftrace_func_entry *entry = fgd->entry;
5130	struct hlist_head *head;
5131	int i, idx = fgd->idx;
5132
5133	if (*pos >= fgd->hash->count)
5134		return NULL;
5135
5136	if (entry) {
5137		hlist_for_each_entry_continue(entry, hlist) {
5138			fgd->entry = entry;
5139			return entry;
5140		}
5141
5142		idx++;
5143	}
5144
5145	for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
5146		head = &fgd->hash->buckets[i];
5147		hlist_for_each_entry(entry, head, hlist) {
5148			fgd->entry = entry;
5149			fgd->idx = i;
5150			return entry;
5151		}
5152	}
5153	return NULL;
5154}
5155
5156static void *
5157g_next(struct seq_file *m, void *v, loff_t *pos)
5158{
5159	(*pos)++;
5160	return __g_next(m, pos);
5161}
5162
5163static void *g_start(struct seq_file *m, loff_t *pos)
5164{
5165	struct ftrace_graph_data *fgd = m->private;
5166
5167	mutex_lock(&graph_lock);
5168
5169	if (fgd->type == GRAPH_FILTER_FUNCTION)
5170		fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5171					lockdep_is_held(&graph_lock));
5172	else
5173		fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5174					lockdep_is_held(&graph_lock));
5175
5176	/* Hash is empty; tell g_show to print that all functions are enabled */
5177	if (ftrace_hash_empty(fgd->hash) && !*pos)
5178		return FTRACE_GRAPH_EMPTY;
5179
5180	fgd->idx = 0;
5181	fgd->entry = NULL;
5182	return __g_next(m, pos);
5183}
5184
5185static void g_stop(struct seq_file *m, void *p)
5186{
5187	mutex_unlock(&graph_lock);
5188}
5189
5190static int g_show(struct seq_file *m, void *v)
5191{
5192	struct ftrace_func_entry *entry = v;
5193
5194	if (!entry)
5195		return 0;
5196
5197	if (entry == FTRACE_GRAPH_EMPTY) {
5198		struct ftrace_graph_data *fgd = m->private;
5199
5200		if (fgd->type == GRAPH_FILTER_FUNCTION)
5201			seq_puts(m, "#### all functions enabled ####\n");
5202		else
5203			seq_puts(m, "#### no functions disabled ####\n");
5204		return 0;
5205	}
5206
5207	seq_printf(m, "%ps\n", (void *)entry->ip);
5208
5209	return 0;
5210}
5211
5212static const struct seq_operations ftrace_graph_seq_ops = {
5213	.start = g_start,
5214	.next = g_next,
5215	.stop = g_stop,
5216	.show = g_show,
5217};
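
/*
 * For reference, the seq_file core drives the ops above roughly as
 * (a sketch of the standard seq_file contract):
 *
 *   p = g_start(m, &pos);           // takes graph_lock
 *   while (p) {
 *           g_show(m, p);
 *           p = g_next(m, p, &pos);
 *   }
 *   g_stop(m, p);                   // drops graph_lock
 *
 * so the hash is only ever walked with graph_lock held.
 */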
5218
5219static int
5220__ftrace_graph_open(struct inode *inode, struct file *file,
5221		    struct ftrace_graph_data *fgd)
5222{
5223	int ret;
5224	struct ftrace_hash *new_hash = NULL;
5225
5226	ret = security_locked_down(LOCKDOWN_TRACEFS);
5227	if (ret)
5228		return ret;
5229
5230	if (file->f_mode & FMODE_WRITE) {
5231		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
5232
5233		if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
5234			return -ENOMEM;
5235
5236		if (file->f_flags & O_TRUNC)
5237			new_hash = alloc_ftrace_hash(size_bits);
5238		else
5239			new_hash = alloc_and_copy_ftrace_hash(size_bits,
5240							      fgd->hash);
5241		if (!new_hash) {
5242			ret = -ENOMEM;
5243			goto out;
5244		}
5245	}
5246
5247	if (file->f_mode & FMODE_READ) {
5248		ret = seq_open(file, &ftrace_graph_seq_ops);
5249		if (!ret) {
5250			struct seq_file *m = file->private_data;
5251			m->private = fgd;
5252		} else {
5253			/* Failed */
5254			free_ftrace_hash(new_hash);
5255			new_hash = NULL;
5256		}
5257	} else
5258		file->private_data = fgd;
5259
5260out:
5261	if (ret < 0 && file->f_mode & FMODE_WRITE)
5262		trace_parser_put(&fgd->parser);
5263
5264	fgd->new_hash = new_hash;
5265
5266	/*
5267	 * All uses of fgd->hash must be taken with the graph_lock
5268	 * held. The graph_lock is going to be released, so force
5269	 * fgd->hash to be reinitialized when it is taken again.
5270	 */
5271	fgd->hash = NULL;
5272
5273	return ret;
5274}
5275
5276static int
5277ftrace_graph_open(struct inode *inode, struct file *file)
5278{
5279	struct ftrace_graph_data *fgd;
5280	int ret;
5281
5282	if (unlikely(ftrace_disabled))
5283		return -ENODEV;
5284
5285	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5286	if (fgd == NULL)
5287		return -ENOMEM;
5288
5289	mutex_lock(&graph_lock);
5290
5291	fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5292					lockdep_is_held(&graph_lock));
5293	fgd->type = GRAPH_FILTER_FUNCTION;
5294	fgd->seq_ops = &ftrace_graph_seq_ops;
5295
5296	ret = __ftrace_graph_open(inode, file, fgd);
5297	if (ret < 0)
5298		kfree(fgd);
5299
5300	mutex_unlock(&graph_lock);
5301	return ret;
5302}
5303
5304static int
5305ftrace_graph_notrace_open(struct inode *inode, struct file *file)
5306{
5307	struct ftrace_graph_data *fgd;
5308	int ret;
5309
5310	if (unlikely(ftrace_disabled))
5311		return -ENODEV;
5312
5313	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5314	if (fgd == NULL)
5315		return -ENOMEM;
5316
5317	mutex_lock(&graph_lock);
5318
5319	fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5320					lockdep_is_held(&graph_lock));
5321	fgd->type = GRAPH_FILTER_NOTRACE;
5322	fgd->seq_ops = &ftrace_graph_seq_ops;
5323
5324	ret = __ftrace_graph_open(inode, file, fgd);
5325	if (ret < 0)
5326		kfree(fgd);
5327
5328	mutex_unlock(&graph_lock);
5329	return ret;
5330}
5331
5332static int
5333ftrace_graph_release(struct inode *inode, struct file *file)
5334{
5335	struct ftrace_graph_data *fgd;
5336	struct ftrace_hash *old_hash, *new_hash;
5337	struct trace_parser *parser;
5338	int ret = 0;
5339
5340	if (file->f_mode & FMODE_READ) {
5341		struct seq_file *m = file->private_data;
5342
5343		fgd = m->private;
5344		seq_release(inode, file);
5345	} else {
5346		fgd = file->private_data;
5347	}
5348
5349
5350	if (file->f_mode & FMODE_WRITE) {
5351
5352		parser = &fgd->parser;
5353
5354		if (trace_parser_loaded(parser)) {
5355			ret = ftrace_graph_set_hash(fgd->new_hash,
5356						    parser->buffer);
5357		}
5358
5359		trace_parser_put(parser);
5360
5361		new_hash = __ftrace_hash_move(fgd->new_hash);
5362		if (!new_hash) {
5363			ret = -ENOMEM;
5364			goto out;
5365		}
5366
5367		mutex_lock(&graph_lock);
5368
5369		if (fgd->type == GRAPH_FILTER_FUNCTION) {
5370			old_hash = rcu_dereference_protected(ftrace_graph_hash,
5371					lockdep_is_held(&graph_lock));
5372			rcu_assign_pointer(ftrace_graph_hash, new_hash);
5373		} else {
5374			old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5375					lockdep_is_held(&graph_lock));
5376			rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
5377		}
5378
5379		mutex_unlock(&graph_lock);
5380
5381		/* Wait till all users are no longer using the old hash */
5382		synchronize_rcu();
5383
5384		free_ftrace_hash(old_hash);
5385	}
5386
5387 out:
5388	free_ftrace_hash(fgd->new_hash);
5389	kfree(fgd);
5390
5391	return ret;
5392}
5393
5394static int
5395ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
5396{
5397	struct ftrace_glob func_g;
5398	struct dyn_ftrace *rec;
5399	struct ftrace_page *pg;
5400	struct ftrace_func_entry *entry;
5401	int fail = 1;
5402	int not;
5403
5404	/* decode regex */
5405	func_g.type = filter_parse_regex(buffer, strlen(buffer),
5406					 &func_g.search, &not);
5407
5408	func_g.len = strlen(func_g.search);
5409
5410	mutex_lock(&ftrace_lock);
5411
5412	if (unlikely(ftrace_disabled)) {
5413		mutex_unlock(&ftrace_lock);
5414		return -ENODEV;
5415	}
5416
5417	do_for_each_ftrace_rec(pg, rec) {
5418
5419		if (rec->flags & FTRACE_FL_DISABLED)
5420			continue;
5421
5422		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
5423			entry = ftrace_lookup_ip(hash, rec->ip);
5424
5425			if (!not) {
5426				fail = 0;
5427
5428				if (entry)
5429					continue;
5430				if (add_hash_entry(hash, rec->ip) < 0)
5431					goto out;
5432			} else {
5433				if (entry) {
5434					free_hash_entry(hash, entry);
5435					fail = 0;
5436				}
5437			}
5438		}
5439	} while_for_each_ftrace_rec();
5440out:
5441	mutex_unlock(&ftrace_lock);
5442
5443	if (fail)
5444		return -EINVAL;
5445
5446	return 0;
5447}
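
/*
 * Example of the "not" handling above (a sketch; the function names
 * are only for illustration):
 *
 *   echo 'kmem_*'           > set_graph_function   # add all matches
 *   echo '!kmem_cache_free' >> set_graph_function  # remove one again
 *
 * filter_parse_regex() strips the leading '!' and sets "not", so the
 * second write frees the matching entry instead of adding it.
 */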
5448
5449static ssize_t
5450ftrace_graph_write(struct file *file, const char __user *ubuf,
5451		   size_t cnt, loff_t *ppos)
5452{
5453	ssize_t read, ret = 0;
5454	struct ftrace_graph_data *fgd = file->private_data;
5455	struct trace_parser *parser;
5456
5457	if (!cnt)
5458		return 0;
5459
5460	/* Read mode uses seq functions */
5461	if (file->f_mode & FMODE_READ) {
5462		struct seq_file *m = file->private_data;
5463		fgd = m->private;
5464	}
5465
5466	parser = &fgd->parser;
5467
5468	read = trace_get_user(parser, ubuf, cnt, ppos);
5469
5470	if (read >= 0 && trace_parser_loaded(parser) &&
5471	    !trace_parser_cont(parser)) {
5472
5473		ret = ftrace_graph_set_hash(fgd->new_hash,
5474					    parser->buffer);
5475		trace_parser_clear(parser);
5476	}
5477
5478	if (!ret)
5479		ret = read;
5480
5481	return ret;
5482}
5483
5484static const struct file_operations ftrace_graph_fops = {
5485	.open		= ftrace_graph_open,
5486	.read		= seq_read,
5487	.write		= ftrace_graph_write,
5488	.llseek		= tracing_lseek,
5489	.release	= ftrace_graph_release,
5490};
5491
5492static const struct file_operations ftrace_graph_notrace_fops = {
5493	.open		= ftrace_graph_notrace_open,
5494	.read		= seq_read,
5495	.write		= ftrace_graph_write,
5496	.llseek		= tracing_lseek,
5497	.release	= ftrace_graph_release,
5498};
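
/*
 * Illustrative usage of the graph filter files registered below in
 * ftrace_init_dyn_tracefs() (a sketch):
 *
 *   echo do_sys_open > /sys/kernel/tracing/set_graph_function
 *   echo kfree       > /sys/kernel/tracing/set_graph_notrace
 *
 * The graph tracer then only follows do_sys_open() and its children,
 * while kfree() and everything it calls is suppressed.
 */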
5499#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5500
5501void ftrace_create_filter_files(struct ftrace_ops *ops,
5502				struct dentry *parent)
5503{
5504
5505	trace_create_file("set_ftrace_filter", 0644, parent,
5506			  ops, &ftrace_filter_fops);
5507
5508	trace_create_file("set_ftrace_notrace", 0644, parent,
5509			  ops, &ftrace_notrace_fops);
5510}
5511
5512/*
5513 * The name "destroy_filter_files" is really a misnomer. Although
5514 * in the future, it may actually delete the files, but this is
5515 * really intended to make sure the ops passed in are disabled
5516 * and that when this function returns, the caller is free to
5517 * free the ops.
5518 *
5519 * The "destroy" name is only to match the "create" name that this
5520 * should be paired with.
5521 */
5522void ftrace_destroy_filter_files(struct ftrace_ops *ops)
5523{
5524	mutex_lock(&ftrace_lock);
5525	if (ops->flags & FTRACE_OPS_FL_ENABLED)
5526		ftrace_shutdown(ops, 0);
5527	ops->flags |= FTRACE_OPS_FL_DELETED;
5528	ftrace_free_filter(ops);
5529	mutex_unlock(&ftrace_lock);
5530}
5531
5532static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
5533{
5534
5535	trace_create_file("available_filter_functions", 0444,
5536			d_tracer, NULL, &ftrace_avail_fops);
5537
5538	trace_create_file("enabled_functions", 0444,
5539			d_tracer, NULL, &ftrace_enabled_fops);
5540
5541	ftrace_create_filter_files(&global_ops, d_tracer);
5542
5543#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5544	trace_create_file("set_graph_function", 0644, d_tracer,
5545				    NULL,
5546				    &ftrace_graph_fops);
5547	trace_create_file("set_graph_notrace", 0644, d_tracer,
5548				    NULL,
5549				    &ftrace_graph_notrace_fops);
5550#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5551
5552	return 0;
5553}
5554
5555static int ftrace_cmp_ips(const void *a, const void *b)
5556{
5557	const unsigned long *ipa = a;
5558	const unsigned long *ipb = b;
5559
5560	if (*ipa > *ipb)
5561		return 1;
5562	if (*ipa < *ipb)
5563		return -1;
5564	return 0;
5565}
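
/*
 * ftrace_cmp_ips() orders addresses ascending; a sketch of what
 * ftrace_process_locs() below relies on:
 *
 *   unsigned long ips[] = { 0xc0f0, 0xc010, 0xc080 };
 *
 *   sort(ips, 3, sizeof(*ips), ftrace_cmp_ips, NULL);
 *   // ips is now { 0xc010, 0xc080, 0xc0f0 }
 *
 * Sorted records are what allow binary searches over the pages
 * elsewhere (e.g. the bsearch() in ftrace_free_mem() below) to work.
 */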
5566
5567static int ftrace_process_locs(struct module *mod,
5568			       unsigned long *start,
5569			       unsigned long *end)
5570{
5571	struct ftrace_page *start_pg;
5572	struct ftrace_page *pg;
5573	struct dyn_ftrace *rec;
5574	unsigned long count;
5575	unsigned long *p;
5576	unsigned long addr;
5577	unsigned long flags = 0; /* Shut up gcc */
5578	int ret = -ENOMEM;
5579
5580	count = end - start;
5581
5582	if (!count)
5583		return 0;
5584
5585	sort(start, count, sizeof(*start),
5586	     ftrace_cmp_ips, NULL);
5587
5588	start_pg = ftrace_allocate_pages(count);
5589	if (!start_pg)
5590		return -ENOMEM;
5591
5592	mutex_lock(&ftrace_lock);
5593
5594	/*
5595	 * The core kernel and each module need their own pages, as
5596	 * modules will free them when they are removed.
5597	 * Force a new page to be allocated for modules.
5598	 */
5599	if (!mod) {
5600		WARN_ON(ftrace_pages || ftrace_pages_start);
5601		/* First initialization */
5602		ftrace_pages = ftrace_pages_start = start_pg;
5603	} else {
5604		if (!ftrace_pages)
5605			goto out;
5606
5607		if (WARN_ON(ftrace_pages->next)) {
5608			/* Hmm, we have free pages? */
5609			while (ftrace_pages->next)
5610				ftrace_pages = ftrace_pages->next;
5611		}
5612
5613		ftrace_pages->next = start_pg;
5614	}
5615
5616	p = start;
5617	pg = start_pg;
5618	while (p < end) {
5619		addr = ftrace_call_adjust(*p++);
5620		/*
5621		 * Some architecture linkers will pad between
5622		 * the different mcount_loc sections of different
5623		 * object files to satisfy alignments.
5624		 * Skip any NULL pointers.
5625		 */
5626		if (!addr)
5627			continue;
5628
5629		if (pg->index == pg->size) {
5630			/* We should have allocated enough */
5631			if (WARN_ON(!pg->next))
5632				break;
5633			pg = pg->next;
5634		}
5635
5636		rec = &pg->records[pg->index++];
5637		rec->ip = addr;
5638	}
5639
5640	/* We should have used all pages */
5641	WARN_ON(pg->next);
5642
5643	/* Assign the last page to ftrace_pages */
5644	ftrace_pages = pg;
5645
5646	/*
5647	 * We only need to disable interrupts on start up
5648	 * because we are modifying code that an interrupt
5649	 * may execute, and the modification is not atomic.
5650	 * But for modules, nothing runs the code we modify
5651	 * until we are finished with it, and there's no
5652	 * reason to cause large interrupt latencies while we do it.
5653	 */
5654	if (!mod)
5655		local_irq_save(flags);
5656	ftrace_update_code(mod, start_pg);
5657	if (!mod)
5658		local_irq_restore(flags);
5659	ret = 0;
5660 out:
5661	mutex_unlock(&ftrace_lock);
5662
5663	return ret;
5664}
5665
5666struct ftrace_mod_func {
5667	struct list_head	list;
5668	char			*name;
5669	unsigned long		ip;
5670	unsigned int		size;
5671};
5672
5673struct ftrace_mod_map {
5674	struct rcu_head		rcu;
5675	struct list_head	list;
5676	struct module		*mod;
5677	unsigned long		start_addr;
5678	unsigned long		end_addr;
5679	struct list_head	funcs;
5680	unsigned int		num_funcs;
5681};
5682
5683#ifdef CONFIG_MODULES
5684
5685#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
5686
5687static LIST_HEAD(ftrace_mod_maps);
5688
5689static int referenced_filters(struct dyn_ftrace *rec)
5690{
5691	struct ftrace_ops *ops;
5692	int cnt = 0;
5693
5694	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
5695		if (ops_references_rec(ops, rec))
5696			cnt++;
5697	}
5698
5699	return cnt;
5700}
5701
5702static void
5703clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
5704{
5705	struct ftrace_func_entry *entry;
5706	struct dyn_ftrace *rec;
5707	int i;
5708
5709	if (ftrace_hash_empty(hash))
5710		return;
5711
5712	for (i = 0; i < pg->index; i++) {
5713		rec = &pg->records[i];
5714		entry = __ftrace_lookup_ip(hash, rec->ip);
5715		/*
5716		 * Do not allow this rec to match again.
5717		 * Yeah, it may waste some memory, but will be removed
5718		 * if/when the hash is modified again.
5719		 */
5720		if (entry)
5721			entry->ip = 0;
5722	}
5723}
5724
5725/* Clear any records from hashes */
5726static void clear_mod_from_hashes(struct ftrace_page *pg)
5727{
5728	struct trace_array *tr;
5729
5730	mutex_lock(&trace_types_lock);
5731	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5732		if (!tr->ops || !tr->ops->func_hash)
5733			continue;
5734		mutex_lock(&tr->ops->func_hash->regex_lock);
5735		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
5736		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
5737		mutex_unlock(&tr->ops->func_hash->regex_lock);
5738	}
5739	mutex_unlock(&trace_types_lock);
5740}
5741
5742static void ftrace_free_mod_map(struct rcu_head *rcu)
5743{
5744	struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
5745	struct ftrace_mod_func *mod_func;
5746	struct ftrace_mod_func *n;
5747
5748	/* All the contents of mod_map are now not visible to readers */
5749	list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
5750		kfree(mod_func->name);
5751		list_del(&mod_func->list);
5752		kfree(mod_func);
5753	}
5754
5755	kfree(mod_map);
5756}
5757
5758void ftrace_release_mod(struct module *mod)
5759{
5760	struct ftrace_mod_map *mod_map;
5761	struct ftrace_mod_map *n;
5762	struct dyn_ftrace *rec;
5763	struct ftrace_page **last_pg;
5764	struct ftrace_page *tmp_page = NULL;
5765	struct ftrace_page *pg;
5766	int order;
5767
5768	mutex_lock(&ftrace_lock);
5769
5770	if (ftrace_disabled)
5771		goto out_unlock;
5772
5773	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
5774		if (mod_map->mod == mod) {
5775			list_del_rcu(&mod_map->list);
5776			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
5777			break;
5778		}
5779	}
5780
5781	/*
5782	 * Each module has its own ftrace_pages, remove
5783	 * them from the list.
5784	 */
5785	last_pg = &ftrace_pages_start;
5786	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
5787		rec = &pg->records[0];
5788		if (within_module_core(rec->ip, mod) ||
5789		    within_module_init(rec->ip, mod)) {
5790			/*
5791			 * As core pages are first, the first
5792			 * page should never be a module page.
5793			 */
5794			if (WARN_ON(pg == ftrace_pages_start))
5795				goto out_unlock;
5796
5797			/* Check if we are deleting the last page */
5798			if (pg == ftrace_pages)
5799				ftrace_pages = next_to_ftrace_page(last_pg);
5800
5801			ftrace_update_tot_cnt -= pg->index;
5802			*last_pg = pg->next;
5803
5804			pg->next = tmp_page;
5805			tmp_page = pg;
5806		} else
5807			last_pg = &pg->next;
5808	}
5809 out_unlock:
5810	mutex_unlock(&ftrace_lock);
5811
5812	for (pg = tmp_page; pg; pg = tmp_page) {
5813
5814		/* Needs to be called outside of ftrace_lock */
5815		clear_mod_from_hashes(pg);
5816
5817		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
5818		free_pages((unsigned long)pg->records, order);
5819		tmp_page = pg->next;
5820		kfree(pg);
5821	}
5822}
5823
5824void ftrace_module_enable(struct module *mod)
5825{
5826	struct dyn_ftrace *rec;
5827	struct ftrace_page *pg;
5828
5829	mutex_lock(&ftrace_lock);
5830
5831	if (ftrace_disabled)
5832		goto out_unlock;
5833
5834	/*
5835	 * If the tracing is enabled, go ahead and enable the record.
5836	 *
5837	 * The reason not to enable the record immediately is the
5838	 * inherent check of ftrace_make_nop/ftrace_make_call for
5839	 * correct previous instructions.  Doing the NOP conversion
5840	 * first puts the module into the correct state, thus
5841	 * passing the ftrace_make_call check.
5842	 *
5843	 * We also delay this to after the module code already set the
5844	 * text to read-only, as we now need to set it back to read-write
5845	 * so that we can modify the text.
5846	 */
5847	if (ftrace_start_up)
5848		ftrace_arch_code_modify_prepare();
5849
5850	do_for_each_ftrace_rec(pg, rec) {
5851		int cnt;
5852		/*
5853		 * do_for_each_ftrace_rec() is a double loop.
5854		 * module text shares the pg. If a record is
5855		 * not part of this module, then skip this pg,
5856		 * which the "break" will do.
5857		 */
5858		if (!within_module_core(rec->ip, mod) &&
5859		    !within_module_init(rec->ip, mod))
5860			break;
5861
5862		cnt = 0;
5863
5864		/*
5865		 * When adding a module, we need to check if tracers are
5866		 * currently enabled and if they are, and can trace this record,
5867		 * we need to enable the module functions as well as update the
5868		 * reference counts for those function records.
5869		 */
5870		if (ftrace_start_up)
5871			cnt += referenced_filters(rec);
5872
5873		/* This clears FTRACE_FL_DISABLED */
5874		rec->flags = cnt;
5875
5876		if (ftrace_start_up && cnt) {
5877			int failed = __ftrace_replace_code(rec, 1);
5878			if (failed) {
5879				ftrace_bug(failed, rec);
5880				goto out_loop;
5881			}
5882		}
5883
5884	} while_for_each_ftrace_rec();
5885
5886 out_loop:
5887	if (ftrace_start_up)
5888		ftrace_arch_code_modify_post_process();
5889
5890 out_unlock:
5891	mutex_unlock(&ftrace_lock);
5892
5893	process_cached_mods(mod->name);
5894}
5895
5896void ftrace_module_init(struct module *mod)
5897{
5898	if (ftrace_disabled || !mod->num_ftrace_callsites)
5899		return;
5900
5901	ftrace_process_locs(mod, mod->ftrace_callsites,
5902			    mod->ftrace_callsites + mod->num_ftrace_callsites);
5903}
5904
5905static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
5906				struct dyn_ftrace *rec)
5907{
5908	struct ftrace_mod_func *mod_func;
5909	unsigned long symsize;
5910	unsigned long offset;
5911	char str[KSYM_SYMBOL_LEN];
5912	char *modname;
5913	const char *ret;
5914
5915	ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
5916	if (!ret)
5917		return;
5918
5919	mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
5920	if (!mod_func)
5921		return;
5922
5923	mod_func->name = kstrdup(str, GFP_KERNEL);
5924	if (!mod_func->name) {
5925		kfree(mod_func);
5926		return;
5927	}
5928
5929	mod_func->ip = rec->ip - offset;
5930	mod_func->size = symsize;
5931
5932	mod_map->num_funcs++;
5933
5934	list_add_rcu(&mod_func->list, &mod_map->funcs);
5935}
5936
5937static struct ftrace_mod_map *
5938allocate_ftrace_mod_map(struct module *mod,
5939			unsigned long start, unsigned long end)
5940{
5941	struct ftrace_mod_map *mod_map;
5942
5943	mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
5944	if (!mod_map)
5945		return NULL;
5946
5947	mod_map->mod = mod;
5948	mod_map->start_addr = start;
5949	mod_map->end_addr = end;
5950	mod_map->num_funcs = 0;
5951
5952	INIT_LIST_HEAD_RCU(&mod_map->funcs);
5953
5954	list_add_rcu(&mod_map->list, &ftrace_mod_maps);
5955
5956	return mod_map;
5957}
5958
5959static const char *
5960ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
5961			   unsigned long addr, unsigned long *size,
5962			   unsigned long *off, char *sym)
5963{
5964	struct ftrace_mod_func *found_func = NULL;
5965	struct ftrace_mod_func *mod_func;
5966
5967	list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
5968		if (addr >= mod_func->ip &&
5969		    addr < mod_func->ip + mod_func->size) {
5970			found_func = mod_func;
5971			break;
5972		}
5973	}
5974
5975	if (found_func) {
5976		if (size)
5977			*size = found_func->size;
5978		if (off)
5979			*off = addr - found_func->ip;
5980		if (sym)
5981			strlcpy(sym, found_func->name, KSYM_NAME_LEN);
5982
5983		return found_func->name;
5984	}
5985
5986	return NULL;
5987}
5988
5989const char *
5990ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
5991		   unsigned long *off, char **modname, char *sym)
5992{
5993	struct ftrace_mod_map *mod_map;
5994	const char *ret = NULL;
5995
5996	/* mod_map is freed via call_rcu() */
5997	preempt_disable();
5998	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
5999		ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
6000		if (ret) {
6001			if (modname)
6002				*modname = mod_map->mod->name;
6003			break;
6004		}
6005	}
6006	preempt_enable();
6007
6008	return ret;
6009}
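
/*
 * Note: the lookup above is what lets kallsyms consumers (e.g. stack
 * traces) still resolve symbols that lived in a module's freed init
 * text, using the mod_map entries saved by save_ftrace_mod_rec().
 */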
6010
6011int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
6012			   char *type, char *name,
6013			   char *module_name, int *exported)
6014{
6015	struct ftrace_mod_map *mod_map;
6016	struct ftrace_mod_func *mod_func;
6017
6018	preempt_disable();
6019	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
6020
6021		if (symnum >= mod_map->num_funcs) {
6022			symnum -= mod_map->num_funcs;
6023			continue;
6024		}
6025
6026		list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6027			if (symnum > 1) {
6028				symnum--;
6029				continue;
6030			}
6031
6032			*value = mod_func->ip;
6033			*type = 'T';
6034			strlcpy(name, mod_func->name, KSYM_NAME_LEN);
6035			strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
6036			*exported = 1;
6037			preempt_enable();
6038			return 0;
6039		}
6040		WARN_ON(1);
6041		break;
6042	}
6043	preempt_enable();
6044	return -ERANGE;
6045}
6046
6047#else
6048static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6049				struct dyn_ftrace *rec) { }
6050static inline struct ftrace_mod_map *
6051allocate_ftrace_mod_map(struct module *mod,
6052			unsigned long start, unsigned long end)
6053{
6054	return NULL;
6055}
6056#endif /* CONFIG_MODULES */
6057
6058struct ftrace_init_func {
6059	struct list_head list;
6060	unsigned long ip;
6061};
6062
6063/* Clear any init ips from hashes */
6064static void
6065clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
6066{
6067	struct ftrace_func_entry *entry;
6068
6069	entry = ftrace_lookup_ip(hash, func->ip);
6070	/*
6071	 * Do not allow this rec to match again.
6072	 * Yeah, it may waste some memory, but will be removed
6073	 * if/when the hash is modified again.
6074	 */
6075	if (entry)
6076		entry->ip = 0;
6077}
6078
6079static void
6080clear_func_from_hashes(struct ftrace_init_func *func)
6081{
6082	struct trace_array *tr;
6083
6084	mutex_lock(&trace_types_lock);
6085	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6086		if (!tr->ops || !tr->ops->func_hash)
6087			continue;
6088		mutex_lock(&tr->ops->func_hash->regex_lock);
6089		clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
6090		clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
6091		mutex_unlock(&tr->ops->func_hash->regex_lock);
6092	}
6093	mutex_unlock(&trace_types_lock);
6094}
6095
6096static void add_to_clear_hash_list(struct list_head *clear_list,
6097				   struct dyn_ftrace *rec)
6098{
6099	struct ftrace_init_func *func;
6100
6101	func = kmalloc(sizeof(*func), GFP_KERNEL);
6102	if (!func) {
6103		WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n");
6104		return;
6105	}
6106
6107	func->ip = rec->ip;
6108	list_add(&func->list, clear_list);
6109}
6110
6111void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
6112{
6113	unsigned long start = (unsigned long)(start_ptr);
6114	unsigned long end = (unsigned long)(end_ptr);
6115	struct ftrace_page **last_pg = &ftrace_pages_start;
6116	struct ftrace_page *pg;
6117	struct dyn_ftrace *rec;
6118	struct dyn_ftrace key;
6119	struct ftrace_mod_map *mod_map = NULL;
6120	struct ftrace_init_func *func, *func_next;
6121	struct list_head clear_hash;
6122	int order;
6123
6124	INIT_LIST_HEAD(&clear_hash);
6125
6126	key.ip = start;
6127	key.flags = end;	/* overload flags, as it is unsigned long */
6128
6129	mutex_lock(&ftrace_lock);
6130
6131	/*
6132	 * If we are freeing module init memory, then check if
6133	 * any tracer is active. If so, we need to save a mapping of
6134	 * the module functions being freed along with their addresses.
6135	 */
6136	if (mod && ftrace_ops_list != &ftrace_list_end)
6137		mod_map = allocate_ftrace_mod_map(mod, start, end);
6138
6139	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
6140		if (end < pg->records[0].ip ||
6141		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
6142			continue;
6143 again:
6144		rec = bsearch(&key, pg->records, pg->index,
6145			      sizeof(struct dyn_ftrace),
6146			      ftrace_cmp_recs);
6147		if (!rec)
6148			continue;
6149
6150		/* rec will be cleared from hashes after ftrace_lock unlock */
6151		add_to_clear_hash_list(&clear_hash, rec);
6152
6153		if (mod_map)
6154			save_ftrace_mod_rec(mod_map, rec);
6155
6156		pg->index--;
6157		ftrace_update_tot_cnt--;
6158		if (!pg->index) {
6159			*last_pg = pg->next;
6160			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
6161			free_pages((unsigned long)pg->records, order);
6162			kfree(pg);
6163			pg = container_of(last_pg, struct ftrace_page, next);
6164			if (!(*last_pg))
6165				ftrace_pages = pg;
6166			continue;
6167		}
6168		memmove(rec, rec + 1,
6169			(pg->index - (rec - pg->records)) * sizeof(*rec));
6170		/* More than one function may be in this block */
6171		goto again;
6172	}
6173	mutex_unlock(&ftrace_lock);
6174
6175	list_for_each_entry_safe(func, func_next, &clear_hash, list) {
6176		clear_func_from_hashes(func);
6177		kfree(func);
6178	}
6179}
6180
6181void __init ftrace_free_init_mem(void)
6182{
6183	void *start = (void *)(&__init_begin);
6184	void *end = (void *)(&__init_end);
6185
6186	ftrace_free_mem(NULL, start, end);
6187}
6188
6189void __init ftrace_init(void)
6190{
6191	extern unsigned long __start_mcount_loc[];
6192	extern unsigned long __stop_mcount_loc[];
6193	unsigned long count, flags;
6194	int ret;
6195
6196	local_irq_save(flags);
6197	ret = ftrace_dyn_arch_init();
6198	local_irq_restore(flags);
6199	if (ret)
6200		goto failed;
6201
6202	count = __stop_mcount_loc - __start_mcount_loc;
6203	if (!count) {
6204		pr_info("ftrace: No functions to be traced?\n");
6205		goto failed;
6206	}
6207
6208	pr_info("ftrace: allocating %ld entries in %ld pages\n",
6209		count, count / ENTRIES_PER_PAGE + 1);
6210
6211	last_ftrace_enabled = ftrace_enabled = 1;
6212
6213	ret = ftrace_process_locs(NULL,
6214				  __start_mcount_loc,
6215				  __stop_mcount_loc);
6216
6217	set_ftrace_early_filters();
6218
6219	return;
6220 failed:
6221	ftrace_disabled = 1;
6222}
6223
6224/* Do nothing if arch does not support this */
6225void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
6226{
6227}
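
/*
 * Architectures that allocate per-ops trampolines override the weak
 * stub above; e.g. x86 provides its own arch_ftrace_update_trampoline()
 * in arch/x86/kernel/ftrace.c (an illustrative pointer, not a
 * definition in this file).
 */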
6228
6229static void ftrace_update_trampoline(struct ftrace_ops *ops)
6230{
6231	arch_ftrace_update_trampoline(ops);
6232}
6233
6234void ftrace_init_trace_array(struct trace_array *tr)
6235{
6236	INIT_LIST_HEAD(&tr->func_probes);
6237	INIT_LIST_HEAD(&tr->mod_trace);
6238	INIT_LIST_HEAD(&tr->mod_notrace);
6239}
6240#else
6241
6242struct ftrace_ops global_ops = {
6243	.func			= ftrace_stub,
6244	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
6245				  FTRACE_OPS_FL_INITIALIZED |
6246				  FTRACE_OPS_FL_PID,
6247};
6248
6249static int __init ftrace_nodyn_init(void)
6250{
6251	ftrace_enabled = 1;
6252	return 0;
6253}
6254core_initcall(ftrace_nodyn_init);
6255
6256static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
6257static inline void ftrace_startup_enable(int command) { }
6258static inline void ftrace_startup_all(int command) { }
6259
6260# define ftrace_startup_sysctl()	do { } while (0)
6261# define ftrace_shutdown_sysctl()	do { } while (0)
6262
6263static void ftrace_update_trampoline(struct ftrace_ops *ops)
6264{
6265}
6266
6267#endif /* CONFIG_DYNAMIC_FTRACE */
6268
6269__init void ftrace_init_global_array_ops(struct trace_array *tr)
6270{
6271	tr->ops = &global_ops;
6272	tr->ops->private = tr;
6273	ftrace_init_trace_array(tr);
6274}
6275
6276void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
6277{
6278	/* If we filter on pids, update to use the pid function */
6279	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
6280		if (WARN_ON(tr->ops->func != ftrace_stub))
6281			printk("ftrace ops had %pS for function\n",
6282			       tr->ops->func);
6283	}
6284	tr->ops->func = func;
6285	tr->ops->private = tr;
6286}
6287
6288void ftrace_reset_array_ops(struct trace_array *tr)
6289{
6290	tr->ops->func = ftrace_stub;
6291}
6292
6293static nokprobe_inline void
6294__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6295		       struct ftrace_ops *ignored, struct pt_regs *regs)
6296{
6297	struct ftrace_ops *op;
6298	int bit;
6299
6300	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6301	if (bit < 0)
6302		return;
6303
6304	/*
6305	 * Some of the ops may be dynamically allocated;
6306	 * they must be freed after a synchronize_rcu().
6307	 */
6308	preempt_disable_notrace();
6309
6310	do_for_each_ftrace_op(op, ftrace_ops_list) {
6311		/* Stub functions don't need to be called or tested */
6312		if (op->flags & FTRACE_OPS_FL_STUB)
6313			continue;
6314		/*
6315		 * Check the following for each ops before calling their func:
6316		 *  if RCU flag is set, then rcu_is_watching() must be true
6317		 *  if PER_CPU is set, then ftrace_function_local_disable()
6318		 *                          must be false
6319		 *  Otherwise test if the ip matches the ops filter
6320		 *
6321		 * If any of the above fails then the op->func() is not executed.
6322		 */
6323		if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
6324		    ftrace_ops_test(op, ip, regs)) {
6325			if (FTRACE_WARN_ON(!op->func)) {
6326				pr_warn("op=%p %pS\n", op, op);
6327				goto out;
6328			}
6329			op->func(ip, parent_ip, op, regs);
6330		}
6331	} while_for_each_ftrace_op(op);
6332out:
6333	preempt_enable_notrace();
6334	trace_clear_recursion(bit);
6335}
6336
6337/*
6338 * Some archs only support passing ip and parent_ip. Even though
6339 * the list function ignores the op parameter, we do not want any
6340 * C side effects, where a function is called without the caller
6341 * sending a third parameter.
6342 * Archs should add support for regs and ftrace_ops at the same time:
6343 * if they support ftrace_ops, it is assumed they support regs.
6344 * If callbacks want to use regs, they must either check for regs
6345 * being NULL, or for CONFIG_DYNAMIC_FTRACE_WITH_REGS.
6346 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
6347 * An architecture can pass partial regs with ftrace_ops and still
6348 * set the ARCH_SUPPORTS_FTRACE_OPS.
6349 */
6350#if ARCH_SUPPORTS_FTRACE_OPS
6351static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6352				 struct ftrace_ops *op, struct pt_regs *regs)
6353{
6354	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
6355}
6356NOKPROBE_SYMBOL(ftrace_ops_list_func);
6357#else
6358static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
6359{
6360	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
6361}
6362NOKPROBE_SYMBOL(ftrace_ops_no_ops);
6363#endif
6364
6365/*
6366 * If there's only one function registered but it does not support
6367 * recursion, needs RCU protection and/or requires per cpu handling, then
6368 * this function will be called by the mcount trampoline.
6369 */
6370static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
6371				   struct ftrace_ops *op, struct pt_regs *regs)
6372{
6373	int bit;
6374
6375	if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
6376		return;
6377
6378	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6379	if (bit < 0)
6380		return;
6381
6382	preempt_disable_notrace();
6383
6384	op->func(ip, parent_ip, op, regs);
6385
6386	preempt_enable_notrace();
6387	trace_clear_recursion(bit);
6388}
6389NOKPROBE_SYMBOL(ftrace_ops_assist_func);
6390
6391/**
6392 * ftrace_ops_get_func - get the function a trampoline should call
6393 * @ops: the ops to get the function for
6394 *
6395 * Normally the mcount trampoline will call the ops->func, but there
6396 * are times that it should not. For example, if the ops does not
6397 * have its own recursion protection, then it should call the
6398 * ftrace_ops_assist_func() instead.
6399 *
6400 * Returns the function that the trampoline should call for @ops.
6401 */
6402ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
6403{
6404	/*
6405	 * If the function does not handle recursion, needs to be RCU safe,
6406	 * or does per cpu logic, then we need to call the assist handler.
6407	 */
6408	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
6409	    ops->flags & FTRACE_OPS_FL_RCU)
6410		return ftrace_ops_assist_func;
6411
6412	return ops->func;
6413}
6414
6415static void
6416ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
6417		    struct task_struct *prev, struct task_struct *next)
6418{
6419	struct trace_array *tr = data;
6420	struct trace_pid_list *pid_list;
6421
6422	pid_list = rcu_dereference_sched(tr->function_pids);
6423
6424	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
6425		       trace_ignore_this_task(pid_list, next));
6426}
6427
6428static void
6429ftrace_pid_follow_sched_process_fork(void *data,
6430				     struct task_struct *self,
6431				     struct task_struct *task)
6432{
6433	struct trace_pid_list *pid_list;
6434	struct trace_array *tr = data;
6435
6436	pid_list = rcu_dereference_sched(tr->function_pids);
6437	trace_filter_add_remove_task(pid_list, self, task);
6438}
6439
6440static void
6441ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
6442{
6443	struct trace_pid_list *pid_list;
6444	struct trace_array *tr = data;
6445
6446	pid_list = rcu_dereference_sched(tr->function_pids);
6447	trace_filter_add_remove_task(pid_list, NULL, task);
6448}
6449
6450void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
6451{
6452	if (enable) {
6453		register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6454						  tr);
6455		register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6456						  tr);
6457	} else {
6458		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6459						    tr);
6460		unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6461						    tr);
6462	}
6463}
6464
6465static void clear_ftrace_pids(struct trace_array *tr)
6466{
6467	struct trace_pid_list *pid_list;
6468	int cpu;
6469
6470	pid_list = rcu_dereference_protected(tr->function_pids,
6471					     lockdep_is_held(&ftrace_lock));
6472	if (!pid_list)
6473		return;
6474
6475	unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
6476
6477	for_each_possible_cpu(cpu)
6478		per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
6479
6480	rcu_assign_pointer(tr->function_pids, NULL);
6481
6482	/* Wait till all users are no longer using pid filtering */
6483	synchronize_rcu();
6484
6485	trace_free_pid_list(pid_list);
6486}
6487
6488void ftrace_clear_pids(struct trace_array *tr)
6489{
6490	mutex_lock(&ftrace_lock);
6491
6492	clear_ftrace_pids(tr);
6493
6494	mutex_unlock(&ftrace_lock);
6495}
6496
6497static void ftrace_pid_reset(struct trace_array *tr)
6498{
6499	mutex_lock(&ftrace_lock);
6500	clear_ftrace_pids(tr);
6501
6502	ftrace_update_pid_func();
6503	ftrace_startup_all(0);
6504
6505	mutex_unlock(&ftrace_lock);
6506}
6507
6508/* Greater than any max PID */
6509#define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)
6510
6511static void *fpid_start(struct seq_file *m, loff_t *pos)
6512	__acquires(RCU)
6513{
6514	struct trace_pid_list *pid_list;
6515	struct trace_array *tr = m->private;
6516
6517	mutex_lock(&ftrace_lock);
6518	rcu_read_lock_sched();
6519
6520	pid_list = rcu_dereference_sched(tr->function_pids);
6521
6522	if (!pid_list)
6523		return !(*pos) ? FTRACE_NO_PIDS : NULL;
6524
6525	return trace_pid_start(pid_list, pos);
6526}
6527
6528static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
6529{
6530	struct trace_array *tr = m->private;
6531	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
6532
6533	if (v == FTRACE_NO_PIDS)
6534		return NULL;
6535
6536	return trace_pid_next(pid_list, v, pos);
6537}
6538
6539static void fpid_stop(struct seq_file *m, void *p)
6540	__releases(RCU)
6541{
6542	rcu_read_unlock_sched();
6543	mutex_unlock(&ftrace_lock);
6544}
6545
6546static int fpid_show(struct seq_file *m, void *v)
6547{
6548	if (v == FTRACE_NO_PIDS) {
6549		seq_puts(m, "no pid\n");
6550		return 0;
6551	}
6552
6553	return trace_pid_show(m, v);
6554}
6555
6556static const struct seq_operations ftrace_pid_sops = {
6557	.start = fpid_start,
6558	.next = fpid_next,
6559	.stop = fpid_stop,
6560	.show = fpid_show,
6561};
6562
6563static int
6564ftrace_pid_open(struct inode *inode, struct file *file)
6565{
6566	struct trace_array *tr = inode->i_private;
6567	struct seq_file *m;
6568	int ret = 0;
6569
6570	ret = tracing_check_open_get_tr(tr);
6571	if (ret)
6572		return ret;
6573
6574	if ((file->f_mode & FMODE_WRITE) &&
6575	    (file->f_flags & O_TRUNC))
6576		ftrace_pid_reset(tr);
6577
6578	ret = seq_open(file, &ftrace_pid_sops);
6579	if (ret < 0) {
6580		trace_array_put(tr);
6581	} else {
6582		m = file->private_data;
6583		/* copy tr over to seq ops */
6584		m->private = tr;
6585	}
6586
6587	return ret;
6588}
6589
6590static void ignore_task_cpu(void *data)
6591{
6592	struct trace_array *tr = data;
6593	struct trace_pid_list *pid_list;
6594
6595	/*
6596	 * This function is called by on_each_cpu() while the
6597	 * ftrace_lock mutex is held.
6598	 */
6599	pid_list = rcu_dereference_protected(tr->function_pids,
6600					     mutex_is_locked(&ftrace_lock));
6601
6602	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
6603		       trace_ignore_this_task(pid_list, current));
6604}
6605
6606static ssize_t
6607ftrace_pid_write(struct file *filp, const char __user *ubuf,
6608		   size_t cnt, loff_t *ppos)
6609{
6610	struct seq_file *m = filp->private_data;
6611	struct trace_array *tr = m->private;
6612	struct trace_pid_list *filtered_pids = NULL;
6613	struct trace_pid_list *pid_list;
6614	ssize_t ret;
6615
6616	if (!cnt)
6617		return 0;
6618
6619	mutex_lock(&ftrace_lock);
6620
6621	filtered_pids = rcu_dereference_protected(tr->function_pids,
6622					     lockdep_is_held(&ftrace_lock));
6623
6624	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
6625	if (ret < 0)
6626		goto out;
6627
6628	rcu_assign_pointer(tr->function_pids, pid_list);
6629
6630	if (filtered_pids) {
6631		synchronize_rcu();
6632		trace_free_pid_list(filtered_pids);
6633	} else if (pid_list) {
6634		/* Register a probe to set whether to ignore the tracing of a task */
6635		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
6636	}
6637
6638	/*
6639	 * Ignoring of pids is done at task switch. But we have to
6640	 * check for those tasks that are currently running.
6641	 * Always do this in case a pid was appended or removed.
6642	 */
6643	on_each_cpu(ignore_task_cpu, tr, 1);
6644
6645	ftrace_update_pid_func();
6646	ftrace_startup_all(0);
6647 out:
6648	mutex_unlock(&ftrace_lock);
6649
6650	if (ret > 0)
6651		*ppos += ret;
6652
6653	return ret;
6654}
6655
6656static int
6657ftrace_pid_release(struct inode *inode, struct file *file)
6658{
6659	struct trace_array *tr = inode->i_private;
6660
6661	trace_array_put(tr);
6662
6663	return seq_release(inode, file);
6664}
6665
6666static const struct file_operations ftrace_pid_fops = {
6667	.open		= ftrace_pid_open,
6668	.write		= ftrace_pid_write,
6669	.read		= seq_read,
6670	.llseek		= tracing_lseek,
6671	.release	= ftrace_pid_release,
6672};
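
/*
 * Illustrative usage of the set_ftrace_pid file backed by the ops
 * above (a sketch):
 *
 *   echo 123  > /sys/kernel/tracing/set_ftrace_pid   # O_TRUNC: reset, then add 123
 *   echo 456 >> /sys/kernel/tracing/set_ftrace_pid   # append another PID
 *
 * Opening with O_TRUNC triggers ftrace_pid_reset(); each write lands
 * in ftrace_pid_write(), which swaps in a new pid_list under
 * ftrace_lock.
 */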
6673
6674void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6675{
6676	trace_create_file("set_ftrace_pid", 0644, d_tracer,
6677			    tr, &ftrace_pid_fops);
6678}
6679
6680void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
6681					 struct dentry *d_tracer)
6682{
6683	/* Only the top level directory has the dyn_tracefs and profile */
6684	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
6685
6686	ftrace_init_dyn_tracefs(d_tracer);
6687	ftrace_profile_tracefs(d_tracer);
6688}
6689
6690/**
6691 * ftrace_kill - kill ftrace
6692 *
6693 * This function should be used by panic code. It stops ftrace
6694 * but in a not so nice way. For a clean shutdown from a
6695 * non-atomic section, use unregister_ftrace_function() instead.
6696 */
6697void ftrace_kill(void)
6698{
6699	ftrace_disabled = 1;
6700	ftrace_enabled = 0;
6701	ftrace_trace_function = ftrace_stub;
6702}
6703
6704/**
6705 * ftrace_is_dead - Test if ftrace is dead or not.
6706 */
6707int ftrace_is_dead(void)
6708{
6709	return ftrace_disabled;
6710}
6711
6712/**
6713 * register_ftrace_function - register a function for profiling
6714 * @ops: ops structure that holds the function for profiling.
6715 *
6716 * Register a function to be called by all functions in the
6717 * kernel.
6718 *
6719 * Note: @ops->func and all the functions it calls must be labeled
6720 *       with "notrace", otherwise it will go into a
6721 *       recursive loop.
6722 */
6723int register_ftrace_function(struct ftrace_ops *ops)
6724{
6725	int ret = -1;
6726
6727	ftrace_ops_init(ops);
6728
6729	mutex_lock(&ftrace_lock);
6730
6731	ret = ftrace_startup(ops, 0);
6732
6733	mutex_unlock(&ftrace_lock);
6734
6735	return ret;
6736}
6737EXPORT_SYMBOL_GPL(register_ftrace_function);
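
/*
 * Minimal illustrative caller (hypothetical module code, not part of
 * this file), showing the "notrace" requirement documented above:
 *
 *   static void notrace my_trace_func(unsigned long ip,
 *                                     unsigned long parent_ip,
 *                                     struct ftrace_ops *op,
 *                                     struct pt_regs *regs)
 *   {
 *           // inspect ip/parent_ip; must not call traceable code
 *   }
 *
 *   static struct ftrace_ops my_ops = {
 *           .func  = my_trace_func,
 *           .flags = FTRACE_OPS_FL_RECURSION_SAFE,
 *   };
 *
 *   register_ftrace_function(&my_ops);
 *   ...
 *   unregister_ftrace_function(&my_ops);
 */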
6738
6739/**
6740 * unregister_ftrace_function - unregister a function for profiling.
6741 * @ops: ops structure that holds the function to unregister
6742 *
6743 * Unregister a function that was added to be called by ftrace profiling.
6744 */
6745int unregister_ftrace_function(struct ftrace_ops *ops)
6746{
6747	int ret;
6748
6749	mutex_lock(&ftrace_lock);
6750	ret = ftrace_shutdown(ops, 0);
6751	mutex_unlock(&ftrace_lock);
6752
6753	return ret;
6754}
6755EXPORT_SYMBOL_GPL(unregister_ftrace_function);
6756
6757int
6758ftrace_enable_sysctl(struct ctl_table *table, int write,
6759		     void __user *buffer, size_t *lenp,
6760		     loff_t *ppos)
6761{
6762	int ret = -ENODEV;
6763
6764	mutex_lock(&ftrace_lock);
6765
6766	if (unlikely(ftrace_disabled))
6767		goto out;
6768
6769	ret = proc_dointvec(table, write, buffer, lenp, ppos);
6770
6771	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
6772		goto out;
6773
6774	last_ftrace_enabled = !!ftrace_enabled;
6775
6776	if (ftrace_enabled) {
6777
6778		/* we are starting ftrace again */
6779		if (rcu_dereference_protected(ftrace_ops_list,
6780			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
6781			update_ftrace_function();
6782
6783		ftrace_startup_sysctl();
6784
6785	} else {
6786		/* stopping ftrace calls (just send to ftrace_stub) */
6787		ftrace_trace_function = ftrace_stub;
6788
6789		ftrace_shutdown_sysctl();
6790	}
6791
6792 out:
6793	mutex_unlock(&ftrace_lock);
6794	return ret;
6795}