Linux Audio

Check our new training course

Loading...
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#ifndef _LINUX_KERNEL_TRACE_H
   4#define _LINUX_KERNEL_TRACE_H
   5
   6#include <linux/fs.h>
   7#include <linux/atomic.h>
   8#include <linux/sched.h>
   9#include <linux/clocksource.h>
  10#include <linux/ring_buffer.h>
  11#include <linux/mmiotrace.h>
  12#include <linux/tracepoint.h>
  13#include <linux/ftrace.h>
 
  14#include <linux/hw_breakpoint.h>
  15#include <linux/trace_seq.h>
  16#include <linux/trace_events.h>
  17#include <linux/compiler.h>
  18#include <linux/glob.h>
 
 
  19
  20#ifdef CONFIG_FTRACE_SYSCALLS
  21#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
  22#include <asm/syscall.h>	/* some archs define it here */
  23#endif
  24
  25enum trace_type {
  26	__TRACE_FIRST_TYPE = 0,
  27
  28	TRACE_FN,
  29	TRACE_CTX,
  30	TRACE_WAKE,
  31	TRACE_STACK,
  32	TRACE_PRINT,
  33	TRACE_BPRINT,
  34	TRACE_MMIO_RW,
  35	TRACE_MMIO_MAP,
  36	TRACE_BRANCH,
  37	TRACE_GRAPH_RET,
  38	TRACE_GRAPH_ENT,
  39	TRACE_USER_STACK,
  40	TRACE_BLK,
  41	TRACE_BPUTS,
  42	TRACE_HWLAT,
  43	TRACE_RAW_DATA,
  44
  45	__TRACE_LAST_TYPE,
  46};
  47
  48
  49#undef __field
  50#define __field(type, item)		type	item;
  51
 
 
 
  52#undef __field_struct
  53#define __field_struct(type, item)	__field(type, item)
  54
  55#undef __field_desc
  56#define __field_desc(type, container, item)
  57
 
 
 
  58#undef __array
  59#define __array(type, item, size)	type	item[size];
  60
  61#undef __array_desc
  62#define __array_desc(type, container, item, size)
  63
  64#undef __dynamic_array
  65#define __dynamic_array(type, item)	type	item[];
  66
  67#undef F_STRUCT
  68#define F_STRUCT(args...)		args
  69
  70#undef FTRACE_ENTRY
  71#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
  72	struct struct_name {						\
  73		struct trace_entry	ent;				\
  74		tstruct							\
  75	}
  76
  77#undef FTRACE_ENTRY_DUP
  78#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
  79
  80#undef FTRACE_ENTRY_REG
  81#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
  82			 filter, regfn) \
  83	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
  84		     filter)
  85
  86#undef FTRACE_ENTRY_PACKED
  87#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print,	\
  88			    filter)					\
  89	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
  90		     filter) __packed
  91
  92#include "trace_entries.h"
  93
 
 
 
 
 
 
 
 
 
 
 
 
  94/*
  95 * syscalls are special, and need special handling, this is why
  96 * they are not included in trace_entries.h
  97 */
  98struct syscall_trace_enter {
  99	struct trace_entry	ent;
 100	int			nr;
 101	unsigned long		args[];
 102};
 103
 104struct syscall_trace_exit {
 105	struct trace_entry	ent;
 106	int			nr;
 107	long			ret;
 108};
 109
 110struct kprobe_trace_entry_head {
 111	struct trace_entry	ent;
 112	unsigned long		ip;
 113};
 114
 115struct kretprobe_trace_entry_head {
 116	struct trace_entry	ent;
 117	unsigned long		func;
 118	unsigned long		ret_ip;
 119};
 120
 121/*
 122 * trace_flag_type is an enumeration that holds different
 123 * states when a trace occurs. These are:
 124 *  IRQS_OFF		- interrupts were disabled
 125 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 126 *  NEED_RESCHED	- reschedule is requested
 127 *  HARDIRQ		- inside an interrupt handler
 128 *  SOFTIRQ		- inside a softirq handler
 129 */
 130enum trace_flag_type {
 131	TRACE_FLAG_IRQS_OFF		= 0x01,
 132	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
 133	TRACE_FLAG_NEED_RESCHED		= 0x04,
 134	TRACE_FLAG_HARDIRQ		= 0x08,
 135	TRACE_FLAG_SOFTIRQ		= 0x10,
 136	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
 137	TRACE_FLAG_NMI			= 0x40,
 138};
 139
 140#define TRACE_BUF_SIZE		1024
 141
 142struct trace_array;
 143
 144/*
 145 * The CPU trace array - it consists of thousands of trace entries
 146 * plus some other descriptor data: (for example which task started
 147 * the trace, etc.)
 148 */
 149struct trace_array_cpu {
 150	atomic_t		disabled;
 151	void			*buffer_page;	/* ring buffer spare */
 152
 153	unsigned long		entries;
 154	unsigned long		saved_latency;
 155	unsigned long		critical_start;
 156	unsigned long		critical_end;
 157	unsigned long		critical_sequence;
 158	unsigned long		nice;
 159	unsigned long		policy;
 160	unsigned long		rt_priority;
 161	unsigned long		skipped_entries;
 162	u64			preempt_timestamp;
 163	pid_t			pid;
 164	kuid_t			uid;
 165	char			comm[TASK_COMM_LEN];
 166
 167	bool			ignore_pid;
 168#ifdef CONFIG_FUNCTION_TRACER
 169	bool			ftrace_ignore_pid;
 170#endif
 
 171};
 172
 173struct tracer;
 174struct trace_option_dentry;
 175
 176struct trace_buffer {
 177	struct trace_array		*tr;
 178	struct ring_buffer		*buffer;
 179	struct trace_array_cpu __percpu	*data;
 180	u64				time_start;
 181	int				cpu;
 182};
 183
 184#define TRACE_FLAGS_MAX_SIZE		32
 185
 186struct trace_options {
 187	struct tracer			*tracer;
 188	struct trace_option_dentry	*topts;
 189};
 190
 191struct trace_pid_list {
 192	int				pid_max;
 193	unsigned long			*pids;
 194};
 195
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 196typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
 197
 198/**
 199 * struct cond_snapshot - conditional snapshot data and callback
 200 *
 201 * The cond_snapshot structure encapsulates a callback function and
 202 * data associated with the snapshot for a given tracing instance.
 203 *
 204 * When a snapshot is taken conditionally, by invoking
 205 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 206 * passed in turn to the cond_snapshot.update() function.  That data
 207 * can be compared by the update() implementation with the cond_data
 208 * contained wihin the struct cond_snapshot instance associated with
 209 * the trace_array.  Because the tr->max_lock is held throughout the
 210 * update() call, the update() function can directly retrieve the
 211 * cond_snapshot and cond_data associated with the per-instance
 212 * snapshot associated with the trace_array.
 213 *
 214 * The cond_snapshot.update() implementation can save data to be
 215 * associated with the snapshot if it decides to, and returns 'true'
 216 * in that case, or it returns 'false' if the conditional snapshot
 217 * shouldn't be taken.
 218 *
 219 * The cond_snapshot instance is created and associated with the
 220 * user-defined cond_data by tracing_cond_snapshot_enable().
 221 * Likewise, the cond_snapshot instance is destroyed and is no longer
 222 * associated with the trace instance by
 223 * tracing_cond_snapshot_disable().
 224 *
 225 * The method below is required.
 226 *
 227 * @update: When a conditional snapshot is invoked, the update()
 228 *	callback function is invoked with the tr->max_lock held.  The
 229 *	update() implementation signals whether or not to actually
 230 *	take the snapshot, by returning 'true' if so, 'false' if no
 231 *	snapshot should be taken.  Because the max_lock is held for
 232 *	the duration of update(), the implementation is safe to
 233 *	directly retrieven and save any implementation data it needs
 234 *	to in association with the snapshot.
 235 */
 236struct cond_snapshot {
 237	void				*cond_data;
 238	cond_update_fn_t		update;
 239};
 240
 241/*
 242 * The trace array - an array of per-CPU trace arrays. This is the
 243 * highest level data structure that individual tracers deal with.
 244 * They have on/off state as well:
 245 */
 246struct trace_array {
 247	struct list_head	list;
 248	char			*name;
 249	struct trace_buffer	trace_buffer;
 250#ifdef CONFIG_TRACER_MAX_TRACE
 251	/*
 252	 * The max_buffer is used to snapshot the trace when a maximum
 253	 * latency is reached, or when the user initiates a snapshot.
 254	 * Some tracers will use this to store a maximum trace while
 255	 * it continues examining live traces.
 256	 *
 257	 * The buffers for the max_buffer are set up the same as the trace_buffer
 258	 * When a snapshot is taken, the buffer of the max_buffer is swapped
 259	 * with the buffer of the trace_buffer and the buffers are reset for
 260	 * the trace_buffer so the tracing can continue.
 261	 */
 262	struct trace_buffer	max_buffer;
 263	bool			allocated_snapshot;
 264#endif
 265#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
 266	unsigned long		max_latency;
 
 
 
 
 
 267#endif
 268	struct trace_pid_list	__rcu *filtered_pids;
 
 269	/*
 270	 * max_lock is used to protect the swapping of buffers
 271	 * when taking a max snapshot. The buffers themselves are
 272	 * protected by per_cpu spinlocks. But the action of the swap
 273	 * needs its own lock.
 274	 *
 275	 * This is defined as a arch_spinlock_t in order to help
 276	 * with performance when lockdep debugging is enabled.
 277	 *
 278	 * It is also used in other places outside the update_max_tr
 279	 * so it needs to be defined outside of the
 280	 * CONFIG_TRACER_MAX_TRACE.
 281	 */
 282	arch_spinlock_t		max_lock;
 283	int			buffer_disabled;
 284#ifdef CONFIG_FTRACE_SYSCALLS
 285	int			sys_refcount_enter;
 286	int			sys_refcount_exit;
 287	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
 288	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
 289#endif
 290	int			stop_count;
 291	int			clock_id;
 292	int			nr_topts;
 293	bool			clear_trace;
 294	int			buffer_percent;
 295	unsigned int		n_err_log_entries;
 296	struct tracer		*current_trace;
 297	unsigned int		trace_flags;
 298	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
 299	unsigned int		flags;
 300	raw_spinlock_t		start_lock;
 301	struct list_head	err_log;
 302	struct dentry		*dir;
 303	struct dentry		*options;
 304	struct dentry		*percpu_dir;
 305	struct dentry		*event_dir;
 306	struct trace_options	*topts;
 307	struct list_head	systems;
 308	struct list_head	events;
 309	struct trace_event_file *trace_marker_file;
 310	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
 311	int			ref;
 
 312#ifdef CONFIG_FUNCTION_TRACER
 313	struct ftrace_ops	*ops;
 314	struct trace_pid_list	__rcu *function_pids;
 
 315#ifdef CONFIG_DYNAMIC_FTRACE
 316	/* All of these are protected by the ftrace_lock */
 317	struct list_head	func_probes;
 318	struct list_head	mod_trace;
 319	struct list_head	mod_notrace;
 320#endif
 321	/* function tracing enabled */
 322	int			function_enabled;
 323#endif
 324	int			time_stamp_abs_ref;
 325	struct list_head	hist_vars;
 326#ifdef CONFIG_TRACER_SNAPSHOT
 327	struct cond_snapshot	*cond_snapshot;
 328#endif
 329};
 330
 331enum {
 332	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
 333};
 334
 335extern struct list_head ftrace_trace_arrays;
 336
 337extern struct mutex trace_types_lock;
 338
 339extern int trace_array_get(struct trace_array *tr);
 340extern void trace_array_put(struct trace_array *tr);
 341extern int tracing_check_open_get_tr(struct trace_array *tr);
 
 
 342
 343extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
 344extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
 345
 346extern bool trace_clock_in_ns(struct trace_array *tr);
 347
 348/*
 349 * The global tracer (top) should be the first trace array added,
 350 * but we check the flag anyway.
 351 */
 352static inline struct trace_array *top_trace_array(void)
 353{
 354	struct trace_array *tr;
 355
 356	if (list_empty(&ftrace_trace_arrays))
 357		return NULL;
 358
 359	tr = list_entry(ftrace_trace_arrays.prev,
 360			typeof(*tr), list);
 361	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
 362	return tr;
 363}
 364
 365#define FTRACE_CMP_TYPE(var, type) \
 366	__builtin_types_compatible_p(typeof(var), type *)
 367
 368#undef IF_ASSIGN
 369#define IF_ASSIGN(var, entry, etype, id)			\
 370	if (FTRACE_CMP_TYPE(var, etype)) {			\
 371		var = (typeof(var))(entry);			\
 372		WARN_ON(id != 0 && (entry)->type != id);	\
 373		break;						\
 374	}
 375
 376/* Will cause compile errors if type is not found. */
 377extern void __ftrace_bad_type(void);
 378
 379/*
 380 * The trace_assign_type is a verifier that the entry type is
 381 * the same as the type being assigned. To add new types simply
 382 * add a line with the following format:
 383 *
 384 * IF_ASSIGN(var, ent, type, id);
 385 *
 386 *  Where "type" is the trace type that includes the trace_entry
 387 *  as the "ent" item. And "id" is the trace identifier that is
 388 *  used in the trace_type enum.
 389 *
 390 *  If the type can have more than one id, then use zero.
 391 */
 392#define trace_assign_type(var, ent)					\
 393	do {								\
 394		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
 395		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
 396		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
 397		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 398		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
 399		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
 400		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
 401		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
 402		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
 403		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
 404			  TRACE_MMIO_RW);				\
 405		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
 406			  TRACE_MMIO_MAP);				\
 407		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
 408		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
 409			  TRACE_GRAPH_ENT);		\
 410		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
 411			  TRACE_GRAPH_RET);		\
 412		__ftrace_bad_type();					\
 413	} while (0)
 414
 415/*
 416 * An option specific to a tracer. This is a boolean value.
 417 * The bit is the bit index that sets its value on the
 418 * flags value in struct tracer_flags.
 419 */
 420struct tracer_opt {
 421	const char	*name; /* Will appear on the trace_options file */
 422	u32		bit; /* Mask assigned in val field in tracer_flags */
 423};
 424
 425/*
 426 * The set of specific options for a tracer. Your tracer
 427 * have to set the initial value of the flags val.
 428 */
 429struct tracer_flags {
 430	u32			val;
 431	struct tracer_opt	*opts;
 432	struct tracer		*trace;
 433};
 434
 435/* Makes more easy to define a tracer opt */
 436#define TRACER_OPT(s, b)	.name = #s, .bit = b
 437
 438
 439struct trace_option_dentry {
 440	struct tracer_opt		*opt;
 441	struct tracer_flags		*flags;
 442	struct trace_array		*tr;
 443	struct dentry			*entry;
 444};
 445
 446/**
 447 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 448 * @name: the name chosen to select it on the available_tracers file
 449 * @init: called when one switches to this tracer (echo name > current_tracer)
 450 * @reset: called when one switches to another tracer
 451 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 452 * @stop: called when tracing is paused (echo 0 > tracing_on)
 453 * @update_thresh: called when tracing_thresh is updated
 454 * @open: called when the trace file is opened
 455 * @pipe_open: called when the trace_pipe file is opened
 456 * @close: called when the trace file is released
 457 * @pipe_close: called when the trace_pipe file is released
 458 * @read: override the default read callback on trace_pipe
 459 * @splice_read: override the default splice_read callback on trace_pipe
 460 * @selftest: selftest to run on boot (see trace_selftest.c)
 461 * @print_headers: override the first lines that describe your columns
 462 * @print_line: callback that prints a trace
 463 * @set_flag: signals one of your private flags changed (trace_options file)
 464 * @flags: your private flags
 465 */
 466struct tracer {
 467	const char		*name;
 468	int			(*init)(struct trace_array *tr);
 469	void			(*reset)(struct trace_array *tr);
 470	void			(*start)(struct trace_array *tr);
 471	void			(*stop)(struct trace_array *tr);
 472	int			(*update_thresh)(struct trace_array *tr);
 473	void			(*open)(struct trace_iterator *iter);
 474	void			(*pipe_open)(struct trace_iterator *iter);
 475	void			(*close)(struct trace_iterator *iter);
 476	void			(*pipe_close)(struct trace_iterator *iter);
 477	ssize_t			(*read)(struct trace_iterator *iter,
 478					struct file *filp, char __user *ubuf,
 479					size_t cnt, loff_t *ppos);
 480	ssize_t			(*splice_read)(struct trace_iterator *iter,
 481					       struct file *filp,
 482					       loff_t *ppos,
 483					       struct pipe_inode_info *pipe,
 484					       size_t len,
 485					       unsigned int flags);
 486#ifdef CONFIG_FTRACE_STARTUP_TEST
 487	int			(*selftest)(struct tracer *trace,
 488					    struct trace_array *tr);
 489#endif
 490	void			(*print_header)(struct seq_file *m);
 491	enum print_line_t	(*print_line)(struct trace_iterator *iter);
 492	/* If you handled the flag setting, return 0 */
 493	int			(*set_flag)(struct trace_array *tr,
 494					    u32 old_flags, u32 bit, int set);
 495	/* Return 0 if OK with change, else return non-zero */
 496	int			(*flag_changed)(struct trace_array *tr,
 497						u32 mask, int set);
 498	struct tracer		*next;
 499	struct tracer_flags	*flags;
 500	int			enabled;
 501	int			ref;
 502	bool			print_max;
 503	bool			allow_instances;
 504#ifdef CONFIG_TRACER_MAX_TRACE
 505	bool			use_max_tr;
 506#endif
 507	/* True if tracer cannot be enabled in kernel param */
 508	bool			noboot;
 509};
 510
 511
 512/* Only current can touch trace_recursion */
 513
 514/*
 515 * For function tracing recursion:
 516 *  The order of these bits are important.
 517 *
 518 *  When function tracing occurs, the following steps are made:
 519 *   If arch does not support a ftrace feature:
 520 *    call internal function (uses INTERNAL bits) which calls...
 521 *   If callback is registered to the "global" list, the list
 522 *    function is called and recursion checks the GLOBAL bits.
 523 *    then this function calls...
 524 *   The function callback, which can use the FTRACE bits to
 525 *    check for recursion.
 526 *
 527 * Now if the arch does not suppport a feature, and it calls
 528 * the global list function which calls the ftrace callback
 529 * all three of these steps will do a recursion protection.
 530 * There's no reason to do one if the previous caller already
 531 * did. The recursion that we are protecting against will
 532 * go through the same steps again.
 533 *
 534 * To prevent the multiple recursion checks, if a recursion
 535 * bit is set that is higher than the MAX bit of the current
 536 * check, then we know that the check was made by the previous
 537 * caller, and we can skip the current check.
 538 */
 539enum {
 540	TRACE_BUFFER_BIT,
 541	TRACE_BUFFER_NMI_BIT,
 542	TRACE_BUFFER_IRQ_BIT,
 543	TRACE_BUFFER_SIRQ_BIT,
 544
 545	/* Start of function recursion bits */
 546	TRACE_FTRACE_BIT,
 547	TRACE_FTRACE_NMI_BIT,
 548	TRACE_FTRACE_IRQ_BIT,
 549	TRACE_FTRACE_SIRQ_BIT,
 550
 551	/* INTERNAL_BITs must be greater than FTRACE_BITs */
 552	TRACE_INTERNAL_BIT,
 553	TRACE_INTERNAL_NMI_BIT,
 554	TRACE_INTERNAL_IRQ_BIT,
 555	TRACE_INTERNAL_SIRQ_BIT,
 556
 557	TRACE_BRANCH_BIT,
 558/*
 559 * Abuse of the trace_recursion.
 560 * As we need a way to maintain state if we are tracing the function
 561 * graph in irq because we want to trace a particular function that
 562 * was called in irq context but we have irq tracing off. Since this
 563 * can only be modified by current, we can reuse trace_recursion.
 564 */
 565	TRACE_IRQ_BIT,
 566
 567	/* Set if the function is in the set_graph_function file */
 568	TRACE_GRAPH_BIT,
 569
 570	/*
 571	 * In the very unlikely case that an interrupt came in
 572	 * at a start of graph tracing, and we want to trace
 573	 * the function in that interrupt, the depth can be greater
 574	 * than zero, because of the preempted start of a previous
 575	 * trace. In an even more unlikely case, depth could be 2
 576	 * if a softirq interrupted the start of graph tracing,
 577	 * followed by an interrupt preempting a start of graph
 578	 * tracing in the softirq, and depth can even be 3
 579	 * if an NMI came in at the start of an interrupt function
 580	 * that preempted a softirq start of a function that
 581	 * preempted normal context!!!! Luckily, it can't be
 582	 * greater than 3, so the next two bits are a mask
 583	 * of what the depth is when we set TRACE_GRAPH_BIT
 584	 */
 585
 586	TRACE_GRAPH_DEPTH_START_BIT,
 587	TRACE_GRAPH_DEPTH_END_BIT,
 588
 589	/*
 590	 * To implement set_graph_notrace, if this bit is set, we ignore
 591	 * function graph tracing of called functions, until the return
 592	 * function is called to clear it.
 593	 */
 594	TRACE_GRAPH_NOTRACE_BIT,
 595};
 596
 597#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
 598#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
 599#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))
 600
 601#define trace_recursion_depth() \
 602	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
 603#define trace_recursion_set_depth(depth) \
 604	do {								\
 605		current->trace_recursion &=				\
 606			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
 607		current->trace_recursion |=				\
 608			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
 609	} while (0)
 610
 611#define TRACE_CONTEXT_BITS	4
 612
 613#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
 614#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
 615
 616#define TRACE_LIST_START	TRACE_INTERNAL_BIT
 617#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
 618
 619#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
 620
 621static __always_inline int trace_get_context_bit(void)
 622{
 623	int bit;
 624
 625	if (in_interrupt()) {
 626		if (in_nmi())
 627			bit = 0;
 628
 629		else if (in_irq())
 630			bit = 1;
 631		else
 632			bit = 2;
 633	} else
 634		bit = 3;
 635
 636	return bit;
 637}
 638
 639static __always_inline int trace_test_and_set_recursion(int start, int max)
 640{
 641	unsigned int val = current->trace_recursion;
 642	int bit;
 643
 644	/* A previous recursion check was made */
 645	if ((val & TRACE_CONTEXT_MASK) > max)
 646		return 0;
 647
 648	bit = trace_get_context_bit() + start;
 649	if (unlikely(val & (1 << bit)))
 650		return -1;
 651
 652	val |= 1 << bit;
 653	current->trace_recursion = val;
 654	barrier();
 655
 656	return bit;
 657}
 658
 659static __always_inline void trace_clear_recursion(int bit)
 660{
 661	unsigned int val = current->trace_recursion;
 662
 663	if (!bit)
 664		return;
 665
 666	bit = 1 << bit;
 667	val &= ~bit;
 668
 669	barrier();
 670	current->trace_recursion = val;
 671}
 672
 673static inline struct ring_buffer_iter *
 674trace_buffer_iter(struct trace_iterator *iter, int cpu)
 675{
 676	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
 677}
 678
 679int tracer_init(struct tracer *t, struct trace_array *tr);
 680int tracing_is_enabled(void);
 681void tracing_reset_online_cpus(struct trace_buffer *buf);
 682void tracing_reset_current(int cpu);
 683void tracing_reset_all_online_cpus(void);
 684int tracing_open_generic(struct inode *inode, struct file *filp);
 685int tracing_open_generic_tr(struct inode *inode, struct file *filp);
 686bool tracing_is_disabled(void);
 687bool tracer_tracing_is_on(struct trace_array *tr);
 688void tracer_tracing_on(struct trace_array *tr);
 689void tracer_tracing_off(struct trace_array *tr);
 690struct dentry *trace_create_file(const char *name,
 691				 umode_t mode,
 692				 struct dentry *parent,
 693				 void *data,
 694				 const struct file_operations *fops);
 695
 696struct dentry *tracing_init_dentry(void);
 697
 698struct ring_buffer_event;
 699
 700struct ring_buffer_event *
 701trace_buffer_lock_reserve(struct ring_buffer *buffer,
 702			  int type,
 703			  unsigned long len,
 704			  unsigned long flags,
 705			  int pc);
 706
 707struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 708						struct trace_array_cpu *data);
 709
 710struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 711					  int *ent_cpu, u64 *ent_ts);
 712
 713void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
 714					struct ring_buffer_event *event);
 715
 716int trace_empty(struct trace_iterator *iter);
 717
 718void *trace_find_next_entry_inc(struct trace_iterator *iter);
 719
 720void trace_init_global_iter(struct trace_iterator *iter);
 721
 722void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 723
 724unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
 725unsigned long trace_total_entries(struct trace_array *tr);
 726
 727void trace_function(struct trace_array *tr,
 728		    unsigned long ip,
 729		    unsigned long parent_ip,
 730		    unsigned long flags, int pc);
 731void trace_graph_function(struct trace_array *tr,
 732		    unsigned long ip,
 733		    unsigned long parent_ip,
 734		    unsigned long flags, int pc);
 735void trace_latency_header(struct seq_file *m);
 736void trace_default_header(struct seq_file *m);
 737void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
 738int trace_empty(struct trace_iterator *iter);
 739
 740void trace_graph_return(struct ftrace_graph_ret *trace);
 741int trace_graph_entry(struct ftrace_graph_ent *trace);
 742void set_graph_array(struct trace_array *tr);
 743
 744void tracing_start_cmdline_record(void);
 745void tracing_stop_cmdline_record(void);
 746void tracing_start_tgid_record(void);
 747void tracing_stop_tgid_record(void);
 748
 749int register_tracer(struct tracer *type);
 750int is_tracing_stopped(void);
 751
 752loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
 753
 754extern cpumask_var_t __read_mostly tracing_buffer_mask;
 755
 756#define for_each_tracing_cpu(cpu)	\
 757	for_each_cpu(cpu, tracing_buffer_mask)
 758
 759extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 760
 761extern unsigned long tracing_thresh;
 762
 763/* PID filtering */
 764
 765extern int pid_max;
 766
 767bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
 768			     pid_t search_pid);
 769bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
 
 770			    struct task_struct *task);
 771void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
 772				  struct task_struct *self,
 773				  struct task_struct *task);
 774void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
 775void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
 776int trace_pid_show(struct seq_file *m, void *v);
 777void trace_free_pid_list(struct trace_pid_list *pid_list);
 778int trace_pid_write(struct trace_pid_list *filtered_pids,
 779		    struct trace_pid_list **new_pid_list,
 780		    const char __user *ubuf, size_t cnt);
 781
 782#ifdef CONFIG_TRACER_MAX_TRACE
 783void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
 784		   void *cond_data);
 785void update_max_tr_single(struct trace_array *tr,
 786			  struct task_struct *tsk, int cpu);
 787#endif /* CONFIG_TRACER_MAX_TRACE */
 788
 
 
 
 
 
 
 
 
 
 
 
 789#ifdef CONFIG_STACKTRACE
 790void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 791		   int pc);
 792#else
 793static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
 794				 int skip, int pc)
 795{
 796}
 797#endif /* CONFIG_STACKTRACE */
 798
 799extern u64 ftrace_now(int cpu);
 800
 801extern void trace_find_cmdline(int pid, char comm[]);
 802extern int trace_find_tgid(int pid);
 803extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
 804
 805#ifdef CONFIG_DYNAMIC_FTRACE
 806extern unsigned long ftrace_update_tot_cnt;
 
 
 807void ftrace_init_trace_array(struct trace_array *tr);
 808#else
 809static inline void ftrace_init_trace_array(struct trace_array *tr) { }
 810#endif
 811#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
 812extern int DYN_FTRACE_TEST_NAME(void);
 813#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
 814extern int DYN_FTRACE_TEST_NAME2(void);
 815
 816extern bool ring_buffer_expanded;
 817extern bool tracing_selftest_disabled;
 818
 819#ifdef CONFIG_FTRACE_STARTUP_TEST
 820extern int trace_selftest_startup_function(struct tracer *trace,
 821					   struct trace_array *tr);
 822extern int trace_selftest_startup_function_graph(struct tracer *trace,
 823						 struct trace_array *tr);
 824extern int trace_selftest_startup_irqsoff(struct tracer *trace,
 825					  struct trace_array *tr);
 826extern int trace_selftest_startup_preemptoff(struct tracer *trace,
 827					     struct trace_array *tr);
 828extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
 829						 struct trace_array *tr);
 830extern int trace_selftest_startup_wakeup(struct tracer *trace,
 831					 struct trace_array *tr);
 832extern int trace_selftest_startup_nop(struct tracer *trace,
 833					 struct trace_array *tr);
 834extern int trace_selftest_startup_branch(struct tracer *trace,
 835					 struct trace_array *tr);
 836/*
 837 * Tracer data references selftest functions that only occur
 838 * on boot up. These can be __init functions. Thus, when selftests
 839 * are enabled, then the tracers need to reference __init functions.
 840 */
 841#define __tracer_data		__refdata
 842#else
 843/* Tracers are seldom changed. Optimize when selftests are disabled. */
 844#define __tracer_data		__read_mostly
 845#endif /* CONFIG_FTRACE_STARTUP_TEST */
 846
 847extern void *head_page(struct trace_array_cpu *data);
 848extern unsigned long long ns2usecs(u64 nsec);
 849extern int
 850trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
 851extern int
 852trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 853extern int
 854trace_array_vprintk(struct trace_array *tr,
 855		    unsigned long ip, const char *fmt, va_list args);
 856int trace_array_printk(struct trace_array *tr,
 857		       unsigned long ip, const char *fmt, ...);
 858int trace_array_printk_buf(struct ring_buffer *buffer,
 859			   unsigned long ip, const char *fmt, ...);
 860void trace_printk_seq(struct trace_seq *s);
 861enum print_line_t print_trace_line(struct trace_iterator *iter);
 862
 863extern char trace_find_mark(unsigned long long duration);
 864
 865struct ftrace_hash;
 866
 867struct ftrace_mod_load {
 868	struct list_head	list;
 869	char			*func;
 870	char			*module;
 871	int			 enable;
 872};
 873
 874enum {
 875	FTRACE_HASH_FL_MOD	= (1 << 0),
 876};
 877
 878struct ftrace_hash {
 879	unsigned long		size_bits;
 880	struct hlist_head	*buckets;
 881	unsigned long		count;
 882	unsigned long		flags;
 883	struct rcu_head		rcu;
 884};
 885
 886struct ftrace_func_entry *
 887ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
 888
 889static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
 890{
 891	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
 892}
 893
 894/* Standard output formatting function used for function return traces */
 895#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 896
 897/* Flag options */
 898#define TRACE_GRAPH_PRINT_OVERRUN       0x1
 899#define TRACE_GRAPH_PRINT_CPU           0x2
 900#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
 901#define TRACE_GRAPH_PRINT_PROC          0x8
 902#define TRACE_GRAPH_PRINT_DURATION      0x10
 903#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
 904#define TRACE_GRAPH_PRINT_REL_TIME      0x40
 905#define TRACE_GRAPH_PRINT_IRQS          0x80
 906#define TRACE_GRAPH_PRINT_TAIL          0x100
 907#define TRACE_GRAPH_SLEEP_TIME          0x200
 908#define TRACE_GRAPH_GRAPH_TIME          0x400
 909#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
 910#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
 911
 912extern void ftrace_graph_sleep_time_control(bool enable);
 913
 914#ifdef CONFIG_FUNCTION_PROFILER
 915extern void ftrace_graph_graph_time_control(bool enable);
 916#else
 917static inline void ftrace_graph_graph_time_control(bool enable) { }
 918#endif
 919
 920extern enum print_line_t
 921print_graph_function_flags(struct trace_iterator *iter, u32 flags);
 922extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
 923extern void
 924trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
 925extern void graph_trace_open(struct trace_iterator *iter);
 926extern void graph_trace_close(struct trace_iterator *iter);
 927extern int __trace_graph_entry(struct trace_array *tr,
 928			       struct ftrace_graph_ent *trace,
 929			       unsigned long flags, int pc);
 930extern void __trace_graph_return(struct trace_array *tr,
 931				 struct ftrace_graph_ret *trace,
 932				 unsigned long flags, int pc);
 933
 934#ifdef CONFIG_DYNAMIC_FTRACE
 935extern struct ftrace_hash *ftrace_graph_hash;
 936extern struct ftrace_hash *ftrace_graph_notrace_hash;
 937
 938static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
 939{
 940	unsigned long addr = trace->func;
 941	int ret = 0;
 
 942
 943	preempt_disable_notrace();
 944
 945	if (ftrace_hash_empty(ftrace_graph_hash)) {
 
 
 
 
 
 
 
 
 946		ret = 1;
 947		goto out;
 948	}
 949
 950	if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
 951
 952		/*
 953		 * This needs to be cleared on the return functions
 954		 * when the depth is zero.
 955		 */
 956		trace_recursion_set(TRACE_GRAPH_BIT);
 957		trace_recursion_set_depth(trace->depth);
 958
 959		/*
 960		 * If no irqs are to be traced, but a set_graph_function
 961		 * is set, and called by an interrupt handler, we still
 962		 * want to trace it.
 963		 */
 964		if (in_irq())
 965			trace_recursion_set(TRACE_IRQ_BIT);
 966		else
 967			trace_recursion_clear(TRACE_IRQ_BIT);
 968		ret = 1;
 969	}
 970
 971out:
 972	preempt_enable_notrace();
 973	return ret;
 974}
 975
 976static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
 977{
 978	if (trace_recursion_test(TRACE_GRAPH_BIT) &&
 979	    trace->depth == trace_recursion_depth())
 980		trace_recursion_clear(TRACE_GRAPH_BIT);
 981}
 982
 983static inline int ftrace_graph_notrace_addr(unsigned long addr)
 984{
 985	int ret = 0;
 
 986
 987	preempt_disable_notrace();
 988
 989	if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
 
 
 
 
 
 
 
 
 
 990		ret = 1;
 991
 992	preempt_enable_notrace();
 993	return ret;
 994}
 995#else
 996static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
 997{
 998	return 1;
 999}
1000
1001static inline int ftrace_graph_notrace_addr(unsigned long addr)
1002{
1003	return 0;
1004}
1005static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
1006{ }
1007#endif /* CONFIG_DYNAMIC_FTRACE */
1008
1009extern unsigned int fgraph_max_depth;
1010
1011static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
1012{
1013	/* trace it when it is-nested-in or is a function enabled. */
1014	return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
1015		 ftrace_graph_addr(trace)) ||
1016		(trace->depth < 0) ||
1017		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
1018}
1019
1020#else /* CONFIG_FUNCTION_GRAPH_TRACER */
1021static inline enum print_line_t
1022print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1023{
1024	return TRACE_TYPE_UNHANDLED;
1025}
1026#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1027
1028extern struct list_head ftrace_pids;
1029
1030#ifdef CONFIG_FUNCTION_TRACER
 
 
 
 
1031struct ftrace_func_command {
1032	struct list_head	list;
1033	char			*name;
1034	int			(*func)(struct trace_array *tr,
1035					struct ftrace_hash *hash,
1036					char *func, char *cmd,
1037					char *params, int enable);
1038};
1039extern bool ftrace_filter_param __initdata;
1040static inline int ftrace_trace_task(struct trace_array *tr)
1041{
1042	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
 
1043}
1044extern int ftrace_is_dead(void);
1045int ftrace_create_function_files(struct trace_array *tr,
1046				 struct dentry *parent);
1047void ftrace_destroy_function_files(struct trace_array *tr);
1048void ftrace_init_global_array_ops(struct trace_array *tr);
1049void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
1050void ftrace_reset_array_ops(struct trace_array *tr);
1051void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
1052void ftrace_init_tracefs_toplevel(struct trace_array *tr,
1053				  struct dentry *d_tracer);
1054void ftrace_clear_pids(struct trace_array *tr);
1055int init_function_trace(void);
1056void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
1057#else
1058static inline int ftrace_trace_task(struct trace_array *tr)
1059{
1060	return 1;
1061}
1062static inline int ftrace_is_dead(void) { return 0; }
1063static inline int
1064ftrace_create_function_files(struct trace_array *tr,
1065			     struct dentry *parent)
1066{
1067	return 0;
1068}
1069static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
1070static inline __init void
1071ftrace_init_global_array_ops(struct trace_array *tr) { }
1072static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
1073static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
1074static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
1075static inline void ftrace_clear_pids(struct trace_array *tr) { }
1076static inline int init_function_trace(void) { return 0; }
1077static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
1078/* ftace_func_t type is not defined, use macro instead of static inline */
1079#define ftrace_init_array_ops(tr, func) do { } while (0)
1080#endif /* CONFIG_FUNCTION_TRACER */
1081
1082#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
1083
1084struct ftrace_probe_ops {
1085	void			(*func)(unsigned long ip,
1086					unsigned long parent_ip,
1087					struct trace_array *tr,
1088					struct ftrace_probe_ops *ops,
1089					void *data);
1090	int			(*init)(struct ftrace_probe_ops *ops,
1091					struct trace_array *tr,
1092					unsigned long ip, void *init_data,
1093					void **data);
1094	void			(*free)(struct ftrace_probe_ops *ops,
1095					struct trace_array *tr,
1096					unsigned long ip, void *data);
1097	int			(*print)(struct seq_file *m,
1098					 unsigned long ip,
1099					 struct ftrace_probe_ops *ops,
1100					 void *data);
1101};
1102
1103struct ftrace_func_mapper;
1104typedef int (*ftrace_mapper_func)(void *data);
1105
1106struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
1107void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
1108					   unsigned long ip);
1109int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1110			       unsigned long ip, void *data);
1111void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1112				   unsigned long ip);
1113void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1114			     ftrace_mapper_func free_func);
1115
1116extern int
1117register_ftrace_function_probe(char *glob, struct trace_array *tr,
1118			       struct ftrace_probe_ops *ops, void *data);
1119extern int
1120unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1121				      struct ftrace_probe_ops *ops);
1122extern void clear_ftrace_function_probes(struct trace_array *tr);
1123
1124int register_ftrace_command(struct ftrace_func_command *cmd);
1125int unregister_ftrace_command(struct ftrace_func_command *cmd);
1126
1127void ftrace_create_filter_files(struct ftrace_ops *ops,
1128				struct dentry *parent);
1129void ftrace_destroy_filter_files(struct ftrace_ops *ops);
 
 
 
 
 
1130#else
1131struct ftrace_func_command;
1132
1133static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
1134{
1135	return -EINVAL;
1136}
1137static inline __init int unregister_ftrace_command(char *cmd_name)
1138{
1139	return -EINVAL;
1140}
1141static inline void clear_ftrace_function_probes(struct trace_array *tr)
1142{
1143}
1144
1145/*
1146 * The ops parameter passed in is usually undefined.
1147 * This must be a macro.
1148 */
1149#define ftrace_create_filter_files(ops, parent) do { } while (0)
1150#define ftrace_destroy_filter_files(ops) do { } while (0)
1151#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
1152
1153bool ftrace_event_is_function(struct trace_event_call *call);
1154
1155/*
1156 * struct trace_parser - servers for reading the user input separated by spaces
1157 * @cont: set if the input is not complete - no final space char was found
1158 * @buffer: holds the parsed user input
1159 * @idx: user input length
1160 * @size: buffer size
1161 */
1162struct trace_parser {
1163	bool		cont;
1164	char		*buffer;
1165	unsigned	idx;
1166	unsigned	size;
1167};
1168
1169static inline bool trace_parser_loaded(struct trace_parser *parser)
1170{
1171	return (parser->idx != 0);
1172}
1173
1174static inline bool trace_parser_cont(struct trace_parser *parser)
1175{
1176	return parser->cont;
1177}
1178
1179static inline void trace_parser_clear(struct trace_parser *parser)
1180{
1181	parser->cont = false;
1182	parser->idx = 0;
1183}
1184
1185extern int trace_parser_get_init(struct trace_parser *parser, int size);
1186extern void trace_parser_put(struct trace_parser *parser);
1187extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1188	size_t cnt, loff_t *ppos);
1189
1190/*
1191 * Only create function graph options if function graph is configured.
1192 */
1193#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1194# define FGRAPH_FLAGS						\
1195		C(DISPLAY_GRAPH,	"display-graph"),
1196#else
1197# define FGRAPH_FLAGS
1198#endif
1199
1200#ifdef CONFIG_BRANCH_TRACER
1201# define BRANCH_FLAGS					\
1202		C(BRANCH,		"branch"),
1203#else
1204# define BRANCH_FLAGS
1205#endif
1206
1207#ifdef CONFIG_FUNCTION_TRACER
1208# define FUNCTION_FLAGS						\
1209		C(FUNCTION,		"function-trace"),	\
1210		C(FUNC_FORK,		"function-fork"),
1211# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
1212#else
1213# define FUNCTION_FLAGS
1214# define FUNCTION_DEFAULT_FLAGS		0UL
1215# define TRACE_ITER_FUNC_FORK		0UL
1216#endif
1217
1218#ifdef CONFIG_STACKTRACE
1219# define STACK_FLAGS				\
1220		C(STACKTRACE,		"stacktrace"),
1221#else
1222# define STACK_FLAGS
1223#endif
1224
1225/*
1226 * trace_iterator_flags is an enumeration that defines bit
1227 * positions into trace_flags that controls the output.
1228 *
1229 * NOTE: These bits must match the trace_options array in
1230 *       trace.c (this macro guarantees it).
1231 */
1232#define TRACE_FLAGS						\
1233		C(PRINT_PARENT,		"print-parent"),	\
1234		C(SYM_OFFSET,		"sym-offset"),		\
1235		C(SYM_ADDR,		"sym-addr"),		\
1236		C(VERBOSE,		"verbose"),		\
1237		C(RAW,			"raw"),			\
1238		C(HEX,			"hex"),			\
1239		C(BIN,			"bin"),			\
1240		C(BLOCK,		"block"),		\
1241		C(PRINTK,		"trace_printk"),	\
1242		C(ANNOTATE,		"annotate"),		\
1243		C(USERSTACKTRACE,	"userstacktrace"),	\
1244		C(SYM_USEROBJ,		"sym-userobj"),		\
1245		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
1246		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
1247		C(LATENCY_FMT,		"latency-format"),	\
1248		C(RECORD_CMD,		"record-cmd"),		\
1249		C(RECORD_TGID,		"record-tgid"),		\
1250		C(OVERWRITE,		"overwrite"),		\
1251		C(STOP_ON_FREE,		"disable_on_free"),	\
1252		C(IRQ_INFO,		"irq-info"),		\
1253		C(MARKERS,		"markers"),		\
1254		C(EVENT_FORK,		"event-fork"),		\
 
1255		FUNCTION_FLAGS					\
1256		FGRAPH_FLAGS					\
1257		STACK_FLAGS					\
1258		BRANCH_FLAGS
1259
1260/*
1261 * By defining C, we can make TRACE_FLAGS a list of bit names
1262 * that will define the bits for the flag masks.
1263 */
1264#undef C
1265#define C(a, b) TRACE_ITER_##a##_BIT
1266
1267enum trace_iterator_bits {
1268	TRACE_FLAGS
1269	/* Make sure we don't go more than we have bits for */
1270	TRACE_ITER_LAST_BIT
1271};
1272
1273/*
1274 * By redefining C, we can make TRACE_FLAGS a list of masks that
1275 * use the bits as defined above.
1276 */
1277#undef C
1278#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1279
1280enum trace_iterator_flags { TRACE_FLAGS };
1281
1282/*
1283 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1284 * control the output of kernel symbols.
1285 */
1286#define TRACE_ITER_SYM_MASK \
1287	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1288
1289extern struct tracer nop_trace;
1290
1291#ifdef CONFIG_BRANCH_TRACER
1292extern int enable_branch_tracing(struct trace_array *tr);
1293extern void disable_branch_tracing(void);
1294static inline int trace_branch_enable(struct trace_array *tr)
1295{
1296	if (tr->trace_flags & TRACE_ITER_BRANCH)
1297		return enable_branch_tracing(tr);
1298	return 0;
1299}
1300static inline void trace_branch_disable(void)
1301{
1302	/* due to races, always disable */
1303	disable_branch_tracing();
1304}
1305#else
1306static inline int trace_branch_enable(struct trace_array *tr)
1307{
1308	return 0;
1309}
1310static inline void trace_branch_disable(void)
1311{
1312}
1313#endif /* CONFIG_BRANCH_TRACER */
1314
1315/* set ring buffers to default size if not already done so */
1316int tracing_update_buffers(void);
1317
1318struct ftrace_event_field {
1319	struct list_head	link;
1320	const char		*name;
1321	const char		*type;
1322	int			filter_type;
1323	int			offset;
1324	int			size;
1325	int			is_signed;
1326};
1327
1328struct prog_entry;
1329
1330struct event_filter {
1331	struct prog_entry __rcu	*prog;
1332	char			*filter_string;
1333};
1334
1335struct event_subsystem {
1336	struct list_head	list;
1337	const char		*name;
1338	struct event_filter	*filter;
1339	int			ref_count;
1340};
1341
1342struct trace_subsystem_dir {
1343	struct list_head		list;
1344	struct event_subsystem		*subsystem;
1345	struct trace_array		*tr;
1346	struct dentry			*entry;
1347	int				ref_count;
1348	int				nr_events;
1349};
1350
1351extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1352				     struct ring_buffer *buffer,
1353				     struct ring_buffer_event *event);
1354
1355void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1356				     struct ring_buffer *buffer,
1357				     struct ring_buffer_event *event,
1358				     unsigned long flags, int pc,
1359				     struct pt_regs *regs);
1360
1361static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1362					      struct ring_buffer *buffer,
1363					      struct ring_buffer_event *event,
1364					      unsigned long flags, int pc)
1365{
1366	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
1367}
1368
1369DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1370DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1371void trace_buffered_event_disable(void);
1372void trace_buffered_event_enable(void);
1373
1374static inline void
1375__trace_event_discard_commit(struct ring_buffer *buffer,
1376			     struct ring_buffer_event *event)
1377{
1378	if (this_cpu_read(trace_buffered_event) == event) {
1379		/* Simply release the temp buffer */
1380		this_cpu_dec(trace_buffered_event_cnt);
1381		return;
1382	}
1383	ring_buffer_discard_commit(buffer, event);
1384}
1385
1386/*
1387 * Helper function for event_trigger_unlock_commit{_regs}().
1388 * If there are event triggers attached to this event that requires
1389 * filtering against its fields, then they wil be called as the
1390 * entry already holds the field information of the current event.
1391 *
1392 * It also checks if the event should be discarded or not.
1393 * It is to be discarded if the event is soft disabled and the
1394 * event was only recorded to process triggers, or if the event
1395 * filter is active and this event did not match the filters.
1396 *
1397 * Returns true if the event is discarded, false otherwise.
1398 */
1399static inline bool
1400__event_trigger_test_discard(struct trace_event_file *file,
1401			     struct ring_buffer *buffer,
1402			     struct ring_buffer_event *event,
1403			     void *entry,
1404			     enum event_trigger_type *tt)
1405{
1406	unsigned long eflags = file->flags;
1407
1408	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1409		*tt = event_triggers_call(file, entry, event);
1410
1411	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
1412	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
1413	     !filter_match_preds(file->filter, entry))) {
1414		__trace_event_discard_commit(buffer, event);
1415		return true;
1416	}
1417
1418	return false;
1419}
1420
1421/**
1422 * event_trigger_unlock_commit - handle triggers and finish event commit
1423 * @file: The file pointer assoctiated to the event
1424 * @buffer: The ring buffer that the event is being written to
1425 * @event: The event meta data in the ring buffer
1426 * @entry: The event itself
1427 * @irq_flags: The state of the interrupts at the start of the event
1428 * @pc: The state of the preempt count at the start of the event.
1429 *
1430 * This is a helper function to handle triggers that require data
1431 * from the event itself. It also tests the event against filters and
1432 * if the event is soft disabled and should be discarded.
1433 */
1434static inline void
1435event_trigger_unlock_commit(struct trace_event_file *file,
1436			    struct ring_buffer *buffer,
1437			    struct ring_buffer_event *event,
1438			    void *entry, unsigned long irq_flags, int pc)
1439{
1440	enum event_trigger_type tt = ETT_NONE;
1441
1442	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1443		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
1444
1445	if (tt)
1446		event_triggers_post_call(file, tt);
1447}
1448
1449/**
1450 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
1451 * @file: The file pointer assoctiated to the event
1452 * @buffer: The ring buffer that the event is being written to
1453 * @event: The event meta data in the ring buffer
1454 * @entry: The event itself
1455 * @irq_flags: The state of the interrupts at the start of the event
1456 * @pc: The state of the preempt count at the start of the event.
1457 *
1458 * This is a helper function to handle triggers that require data
1459 * from the event itself. It also tests the event against filters and
1460 * if the event is soft disabled and should be discarded.
1461 *
1462 * Same as event_trigger_unlock_commit() but calls
1463 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
1464 */
1465static inline void
1466event_trigger_unlock_commit_regs(struct trace_event_file *file,
1467				 struct ring_buffer *buffer,
1468				 struct ring_buffer_event *event,
1469				 void *entry, unsigned long irq_flags, int pc,
1470				 struct pt_regs *regs)
1471{
1472	enum event_trigger_type tt = ETT_NONE;
1473
1474	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1475		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
1476						irq_flags, pc, regs);
1477
1478	if (tt)
1479		event_triggers_post_call(file, tt);
1480}
1481
1482#define FILTER_PRED_INVALID	((unsigned short)-1)
1483#define FILTER_PRED_IS_RIGHT	(1 << 15)
1484#define FILTER_PRED_FOLD	(1 << 15)
1485
1486/*
1487 * The max preds is the size of unsigned short with
1488 * two flags at the MSBs. One bit is used for both the IS_RIGHT
1489 * and FOLD flags. The other is reserved.
1490 *
1491 * 2^14 preds is way more than enough.
1492 */
1493#define MAX_FILTER_PRED		16384
1494
1495struct filter_pred;
1496struct regex;
1497
1498typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1499
1500typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1501
1502enum regex_type {
1503	MATCH_FULL = 0,
1504	MATCH_FRONT_ONLY,
1505	MATCH_MIDDLE_ONLY,
1506	MATCH_END_ONLY,
1507	MATCH_GLOB,
1508	MATCH_INDEX,
1509};
1510
1511struct regex {
1512	char			pattern[MAX_FILTER_STR_VAL];
1513	int			len;
1514	int			field_len;
1515	regex_match_func	match;
1516};
1517
1518struct filter_pred {
1519	filter_pred_fn_t 	fn;
1520	u64 			val;
1521	struct regex		regex;
1522	unsigned short		*ops;
1523	struct ftrace_event_field *field;
1524	int 			offset;
1525	int			not;
1526	int 			op;
1527};
1528
1529static inline bool is_string_field(struct ftrace_event_field *field)
1530{
1531	return field->filter_type == FILTER_DYN_STRING ||
1532	       field->filter_type == FILTER_STATIC_STRING ||
1533	       field->filter_type == FILTER_PTR_STRING ||
1534	       field->filter_type == FILTER_COMM;
1535}
1536
1537static inline bool is_function_field(struct ftrace_event_field *field)
1538{
1539	return field->filter_type == FILTER_TRACE_FN;
1540}
1541
1542extern enum regex_type
1543filter_parse_regex(char *buff, int len, char **search, int *not);
1544extern void print_event_filter(struct trace_event_file *file,
1545			       struct trace_seq *s);
1546extern int apply_event_filter(struct trace_event_file *file,
1547			      char *filter_string);
1548extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1549					char *filter_string);
1550extern void print_subsystem_event_filter(struct event_subsystem *system,
1551					 struct trace_seq *s);
1552extern int filter_assign_type(const char *type);
1553extern int create_event_filter(struct trace_array *tr,
1554			       struct trace_event_call *call,
1555			       char *filter_str, bool set_str,
1556			       struct event_filter **filterp);
1557extern void free_event_filter(struct event_filter *filter);
1558
1559struct ftrace_event_field *
1560trace_find_event_field(struct trace_event_call *call, char *name);
1561
1562extern void trace_event_enable_cmd_record(bool enable);
1563extern void trace_event_enable_tgid_record(bool enable);
1564
1565extern int event_trace_init(void);
1566extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1567extern int event_trace_del_tracer(struct trace_array *tr);
1568
1569extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1570						  const char *system,
1571						  const char *event);
1572extern struct trace_event_file *find_event_file(struct trace_array *tr,
1573						const char *system,
1574						const char *event);
1575
1576static inline void *event_file_data(struct file *filp)
1577{
1578	return READ_ONCE(file_inode(filp)->i_private);
1579}
1580
1581extern struct mutex event_mutex;
1582extern struct list_head ftrace_events;
1583
1584extern const struct file_operations event_trigger_fops;
1585extern const struct file_operations event_hist_fops;
 
 
1586
1587#ifdef CONFIG_HIST_TRIGGERS
1588extern int register_trigger_hist_cmd(void);
1589extern int register_trigger_hist_enable_disable_cmds(void);
1590#else
1591static inline int register_trigger_hist_cmd(void) { return 0; }
1592static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1593#endif
1594
1595extern int register_trigger_cmds(void);
1596extern void clear_event_triggers(struct trace_array *tr);
1597
1598struct event_trigger_data {
1599	unsigned long			count;
1600	int				ref;
1601	struct event_trigger_ops	*ops;
1602	struct event_command		*cmd_ops;
1603	struct event_filter __rcu	*filter;
1604	char				*filter_str;
1605	void				*private_data;
1606	bool				paused;
1607	bool				paused_tmp;
1608	struct list_head		list;
1609	char				*name;
1610	struct list_head		named_list;
1611	struct event_trigger_data	*named_data;
1612};
1613
1614/* Avoid typos */
1615#define ENABLE_EVENT_STR	"enable_event"
1616#define DISABLE_EVENT_STR	"disable_event"
1617#define ENABLE_HIST_STR		"enable_hist"
1618#define DISABLE_HIST_STR	"disable_hist"
1619
1620struct enable_trigger_data {
1621	struct trace_event_file		*file;
1622	bool				enable;
1623	bool				hist;
1624};
1625
1626extern int event_enable_trigger_print(struct seq_file *m,
1627				      struct event_trigger_ops *ops,
1628				      struct event_trigger_data *data);
1629extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1630				      struct event_trigger_data *data);
1631extern int event_enable_trigger_func(struct event_command *cmd_ops,
1632				     struct trace_event_file *file,
1633				     char *glob, char *cmd, char *param);
1634extern int event_enable_register_trigger(char *glob,
1635					 struct event_trigger_ops *ops,
1636					 struct event_trigger_data *data,
1637					 struct trace_event_file *file);
1638extern void event_enable_unregister_trigger(char *glob,
1639					    struct event_trigger_ops *ops,
1640					    struct event_trigger_data *test,
1641					    struct trace_event_file *file);
1642extern void trigger_data_free(struct event_trigger_data *data);
1643extern int event_trigger_init(struct event_trigger_ops *ops,
1644			      struct event_trigger_data *data);
1645extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1646					      int trigger_enable);
1647extern void update_cond_flag(struct trace_event_file *file);
1648extern int set_trigger_filter(char *filter_str,
1649			      struct event_trigger_data *trigger_data,
1650			      struct trace_event_file *file);
1651extern struct event_trigger_data *find_named_trigger(const char *name);
1652extern bool is_named_trigger(struct event_trigger_data *test);
1653extern int save_named_trigger(const char *name,
1654			      struct event_trigger_data *data);
1655extern void del_named_trigger(struct event_trigger_data *data);
1656extern void pause_named_trigger(struct event_trigger_data *data);
1657extern void unpause_named_trigger(struct event_trigger_data *data);
1658extern void set_named_trigger_data(struct event_trigger_data *data,
1659				   struct event_trigger_data *named_data);
1660extern struct event_trigger_data *
1661get_named_trigger_data(struct event_trigger_data *data);
1662extern int register_event_command(struct event_command *cmd);
1663extern int unregister_event_command(struct event_command *cmd);
1664extern int register_trigger_hist_enable_disable_cmds(void);
1665
1666/**
1667 * struct event_trigger_ops - callbacks for trace event triggers
1668 *
1669 * The methods in this structure provide per-event trigger hooks for
1670 * various trigger operations.
1671 *
1672 * All the methods below, except for @init() and @free(), must be
1673 * implemented.
1674 *
1675 * @func: The trigger 'probe' function called when the triggering
1676 *	event occurs.  The data passed into this callback is the data
1677 *	that was supplied to the event_command @reg() function that
1678 *	registered the trigger (see struct event_command) along with
1679 *	the trace record, rec.
1680 *
1681 * @init: An optional initialization function called for the trigger
1682 *	when the trigger is registered (via the event_command reg()
1683 *	function).  This can be used to perform per-trigger
1684 *	initialization such as incrementing a per-trigger reference
1685 *	count, for instance.  This is usually implemented by the
1686 *	generic utility function @event_trigger_init() (see
1687 *	trace_event_triggers.c).
1688 *
1689 * @free: An optional de-initialization function called for the
1690 *	trigger when the trigger is unregistered (via the
1691 *	event_command @reg() function).  This can be used to perform
1692 *	per-trigger de-initialization such as decrementing a
1693 *	per-trigger reference count and freeing corresponding trigger
1694 *	data, for instance.  This is usually implemented by the
1695 *	generic utility function @event_trigger_free() (see
1696 *	trace_event_triggers.c).
1697 *
1698 * @print: The callback function invoked to have the trigger print
1699 *	itself.  This is usually implemented by a wrapper function
1700 *	that calls the generic utility function @event_trigger_print()
1701 *	(see trace_event_triggers.c).
1702 */
1703struct event_trigger_ops {
1704	void			(*func)(struct event_trigger_data *data,
1705					void *rec,
1706					struct ring_buffer_event *rbe);
1707	int			(*init)(struct event_trigger_ops *ops,
1708					struct event_trigger_data *data);
1709	void			(*free)(struct event_trigger_ops *ops,
1710					struct event_trigger_data *data);
1711	int			(*print)(struct seq_file *m,
1712					 struct event_trigger_ops *ops,
1713					 struct event_trigger_data *data);
1714};
1715
1716/**
1717 * struct event_command - callbacks and data members for event commands
1718 *
1719 * Event commands are invoked by users by writing the command name
1720 * into the 'trigger' file associated with a trace event.  The
1721 * parameters associated with a specific invocation of an event
1722 * command are used to create an event trigger instance, which is
1723 * added to the list of trigger instances associated with that trace
1724 * event.  When the event is hit, the set of triggers associated with
1725 * that event is invoked.
1726 *
1727 * The data members in this structure provide per-event command data
1728 * for various event commands.
1729 *
1730 * All the data members below, except for @post_trigger, must be set
1731 * for each event command.
1732 *
1733 * @name: The unique name that identifies the event command.  This is
1734 *	the name used when setting triggers via trigger files.
1735 *
1736 * @trigger_type: A unique id that identifies the event command
1737 *	'type'.  This value has two purposes, the first to ensure that
1738 *	only one trigger of the same type can be set at a given time
1739 *	for a particular event e.g. it doesn't make sense to have both
1740 *	a traceon and traceoff trigger attached to a single event at
1741 *	the same time, so traceon and traceoff have the same type
1742 *	though they have different names.  The @trigger_type value is
1743 *	also used as a bit value for deferring the actual trigger
1744 *	action until after the current event is finished.  Some
1745 *	commands need to do this if they themselves log to the trace
1746 *	buffer (see the @post_trigger() member below).  @trigger_type
1747 *	values are defined by adding new values to the trigger_type
1748 *	enum in include/linux/trace_events.h.
1749 *
1750 * @flags: See the enum event_command_flags below.
1751 *
1752 * All the methods below, except for @set_filter() and @unreg_all(),
1753 * must be implemented.
1754 *
1755 * @func: The callback function responsible for parsing and
1756 *	registering the trigger written to the 'trigger' file by the
1757 *	user.  It allocates the trigger instance and registers it with
1758 *	the appropriate trace event.  It makes use of the other
1759 *	event_command callback functions to orchestrate this, and is
1760 *	usually implemented by the generic utility function
1761 *	@event_trigger_callback() (see trace_event_triggers.c).
1762 *
1763 * @reg: Adds the trigger to the list of triggers associated with the
1764 *	event, and enables the event trigger itself, after
1765 *	initializing it (via the event_trigger_ops @init() function).
1766 *	This is also where commands can use the @trigger_type value to
1767 *	make the decision as to whether or not multiple instances of
1768 *	the trigger should be allowed.  This is usually implemented by
1769 *	the generic utility function @register_trigger() (see
1770 *	trace_event_triggers.c).
1771 *
1772 * @unreg: Removes the trigger from the list of triggers associated
1773 *	with the event, and disables the event trigger itself, after
1774 *	initializing it (via the event_trigger_ops @free() function).
1775 *	This is usually implemented by the generic utility function
1776 *	@unregister_trigger() (see trace_event_triggers.c).
1777 *
1778 * @unreg_all: An optional function called to remove all the triggers
1779 *	from the list of triggers associated with the event.  Called
1780 *	when a trigger file is opened in truncate mode.
1781 *
1782 * @set_filter: An optional function called to parse and set a filter
1783 *	for the trigger.  If no @set_filter() method is set for the
1784 *	event command, filters set by the user for the command will be
1785 *	ignored.  This is usually implemented by the generic utility
1786 *	function @set_trigger_filter() (see trace_event_triggers.c).
1787 *
1788 * @get_trigger_ops: The callback function invoked to retrieve the
1789 *	event_trigger_ops implementation associated with the command.
1790 */
1791struct event_command {
1792	struct list_head	list;
1793	char			*name;
1794	enum event_trigger_type	trigger_type;
1795	int			flags;
1796	int			(*func)(struct event_command *cmd_ops,
1797					struct trace_event_file *file,
1798					char *glob, char *cmd, char *params);
1799	int			(*reg)(char *glob,
1800				       struct event_trigger_ops *ops,
1801				       struct event_trigger_data *data,
1802				       struct trace_event_file *file);
1803	void			(*unreg)(char *glob,
1804					 struct event_trigger_ops *ops,
1805					 struct event_trigger_data *data,
1806					 struct trace_event_file *file);
1807	void			(*unreg_all)(struct trace_event_file *file);
1808	int			(*set_filter)(char *filter_str,
1809					      struct event_trigger_data *data,
1810					      struct trace_event_file *file);
1811	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1812};
1813
1814/**
1815 * enum event_command_flags - flags for struct event_command
1816 *
1817 * @POST_TRIGGER: A flag that says whether or not this command needs
1818 *	to have its action delayed until after the current event has
1819 *	been closed.  Some triggers need to avoid being invoked while
1820 *	an event is currently in the process of being logged, since
1821 *	the trigger may itself log data into the trace buffer.  Thus
1822 *	we make sure the current event is committed before invoking
1823 *	those triggers.  To do that, the trigger invocation is split
1824 *	in two - the first part checks the filter using the current
1825 *	trace record; if a command has the @post_trigger flag set, it
1826 *	sets a bit for itself in the return value, otherwise it
1827 *	directly invokes the trigger.  Once all commands have been
1828 *	either invoked or set their return flag, the current record is
1829 *	either committed or discarded.  At that point, if any commands
1830 *	have deferred their triggers, those commands are finally
1831 *	invoked following the close of the current event.  In other
1832 *	words, if the event_trigger_ops @func() probe implementation
1833 *	itself logs to the trace buffer, this flag should be set,
1834 *	otherwise it can be left unspecified.
1835 *
1836 * @NEEDS_REC: A flag that says whether or not this command needs
1837 *	access to the trace record in order to perform its function,
1838 *	regardless of whether or not it has a filter associated with
1839 *	it (filters make a trigger require access to the trace record
1840 *	but are not always present).
1841 */
1842enum event_command_flags {
1843	EVENT_CMD_FL_POST_TRIGGER	= 1,
1844	EVENT_CMD_FL_NEEDS_REC		= 2,
1845};
1846
1847static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1848{
1849	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1850}
1851
1852static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1853{
1854	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1855}
1856
1857extern int trace_event_enable_disable(struct trace_event_file *file,
1858				      int enable, int soft_disable);
1859extern int tracing_alloc_snapshot(void);
1860extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1861extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1862
1863extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1864extern void *tracing_cond_snapshot_data(struct trace_array *tr);
1865
1866extern const char *__start___trace_bprintk_fmt[];
1867extern const char *__stop___trace_bprintk_fmt[];
1868
1869extern const char *__start___tracepoint_str[];
1870extern const char *__stop___tracepoint_str[];
1871
1872void trace_printk_control(bool enabled);
1873void trace_printk_init_buffers(void);
1874void trace_printk_start_comm(void);
1875int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1876int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1877
 
 
 
 
 
 
 
 
 
1878#define MAX_EVENT_NAME_LEN	64
1879
1880extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
1881extern ssize_t trace_parse_run_command(struct file *file,
1882		const char __user *buffer, size_t count, loff_t *ppos,
1883		int (*createfn)(int, char**));
1884
1885extern unsigned int err_pos(char *cmd, const char *str);
1886extern void tracing_log_err(struct trace_array *tr,
1887			    const char *loc, const char *cmd,
1888			    const char **errs, u8 type, u8 pos);
1889
1890/*
1891 * Normal trace_printk() and friends allocates special buffers
1892 * to do the manipulation, as well as saves the print formats
1893 * into sections to display. But the trace infrastructure wants
1894 * to use these without the added overhead at the price of being
1895 * a bit slower (used mainly for warnings, where we don't care
1896 * about performance). The internal_trace_puts() is for such
1897 * a purpose.
1898 */
1899#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1900
1901#undef FTRACE_ENTRY
1902#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
1903	extern struct trace_event_call					\
1904	__aligned(4) event_##call;
1905#undef FTRACE_ENTRY_DUP
1906#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
1907	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1908		     filter)
1909#undef FTRACE_ENTRY_PACKED
1910#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
1911	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1912		     filter)
1913
1914#include "trace_entries.h"
1915
1916#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1917int perf_ftrace_event_register(struct trace_event_call *call,
1918			       enum trace_reg type, void *data);
1919#else
1920#define perf_ftrace_event_register NULL
1921#endif
1922
1923#ifdef CONFIG_FTRACE_SYSCALLS
1924void init_ftrace_syscalls(void);
1925const char *get_syscall_name(int syscall);
1926#else
1927static inline void init_ftrace_syscalls(void) { }
1928static inline const char *get_syscall_name(int syscall)
1929{
1930	return NULL;
1931}
1932#endif
1933
1934#ifdef CONFIG_EVENT_TRACING
1935void trace_event_init(void);
1936void trace_event_eval_update(struct trace_eval_map **map, int len);
 
 
 
1937#else
1938static inline void __init trace_event_init(void) { }
1939static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
1940#endif
1941
1942#ifdef CONFIG_TRACER_SNAPSHOT
1943void tracing_snapshot_instance(struct trace_array *tr);
1944int tracing_alloc_snapshot_instance(struct trace_array *tr);
1945#else
1946static inline void tracing_snapshot_instance(struct trace_array *tr) { }
1947static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
1948{
1949	return 0;
1950}
1951#endif
1952
1953#ifdef CONFIG_PREEMPT_TRACER
1954void tracer_preempt_on(unsigned long a0, unsigned long a1);
1955void tracer_preempt_off(unsigned long a0, unsigned long a1);
1956#else
1957static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
1958static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
1959#endif
1960#ifdef CONFIG_IRQSOFF_TRACER
1961void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
1962void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
1963#else
1964static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
1965static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
1966#endif
1967
1968extern struct trace_iterator *tracepoint_print_iter;
1969
1970/*
1971 * Reset the state of the trace_iterator so that it can read consumed data.
1972 * Normally, the trace_iterator is used for reading the data when it is not
1973 * consumed, and must retain state.
1974 */
1975static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
1976{
1977	const size_t offset = offsetof(struct trace_iterator, seq);
1978
1979	/*
1980	 * Keep gcc from complaining about overwriting more than just one
1981	 * member in the structure.
1982	 */
1983	memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
1984
1985	iter->pos = -1;
1986}
1987
1988#endif /* _LINUX_KERNEL_TRACE_H */
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#ifndef _LINUX_KERNEL_TRACE_H
   4#define _LINUX_KERNEL_TRACE_H
   5
   6#include <linux/fs.h>
   7#include <linux/atomic.h>
   8#include <linux/sched.h>
   9#include <linux/clocksource.h>
  10#include <linux/ring_buffer.h>
  11#include <linux/mmiotrace.h>
  12#include <linux/tracepoint.h>
  13#include <linux/ftrace.h>
  14#include <linux/trace.h>
  15#include <linux/hw_breakpoint.h>
  16#include <linux/trace_seq.h>
  17#include <linux/trace_events.h>
  18#include <linux/compiler.h>
  19#include <linux/glob.h>
  20#include <linux/irq_work.h>
  21#include <linux/workqueue.h>
  22
  23#ifdef CONFIG_FTRACE_SYSCALLS
  24#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
  25#include <asm/syscall.h>	/* some archs define it here */
  26#endif
  27
  28enum trace_type {
  29	__TRACE_FIRST_TYPE = 0,
  30
  31	TRACE_FN,
  32	TRACE_CTX,
  33	TRACE_WAKE,
  34	TRACE_STACK,
  35	TRACE_PRINT,
  36	TRACE_BPRINT,
  37	TRACE_MMIO_RW,
  38	TRACE_MMIO_MAP,
  39	TRACE_BRANCH,
  40	TRACE_GRAPH_RET,
  41	TRACE_GRAPH_ENT,
  42	TRACE_USER_STACK,
  43	TRACE_BLK,
  44	TRACE_BPUTS,
  45	TRACE_HWLAT,
  46	TRACE_RAW_DATA,
  47
  48	__TRACE_LAST_TYPE,
  49};
  50
  51
  52#undef __field
  53#define __field(type, item)		type	item;
  54
  55#undef __field_fn
  56#define __field_fn(type, item)		type	item;
  57
  58#undef __field_struct
  59#define __field_struct(type, item)	__field(type, item)
  60
  61#undef __field_desc
  62#define __field_desc(type, container, item)
  63
  64#undef __field_packed
  65#define __field_packed(type, container, item)
  66
  67#undef __array
  68#define __array(type, item, size)	type	item[size];
  69
  70#undef __array_desc
  71#define __array_desc(type, container, item, size)
  72
  73#undef __dynamic_array
  74#define __dynamic_array(type, item)	type	item[];
  75
  76#undef F_STRUCT
  77#define F_STRUCT(args...)		args
  78
  79#undef FTRACE_ENTRY
  80#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)		\
  81	struct struct_name {						\
  82		struct trace_entry	ent;				\
  83		tstruct							\
  84	}
  85
  86#undef FTRACE_ENTRY_DUP
  87#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
  88
  89#undef FTRACE_ENTRY_REG
  90#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	regfn)	\
  91	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
 
 
  92
  93#undef FTRACE_ENTRY_PACKED
  94#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print)	\
  95	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed
 
 
  96
  97#include "trace_entries.h"
  98
  99/* Use this for memory failure errors */
 100#define MEM_FAIL(condition, fmt, ...) ({			\
 101	static bool __section(.data.once) __warned;		\
 102	int __ret_warn_once = !!(condition);			\
 103								\
 104	if (unlikely(__ret_warn_once && !__warned)) {		\
 105		__warned = true;				\
 106		pr_err("ERROR: " fmt, ##__VA_ARGS__);		\
 107	}							\
 108	unlikely(__ret_warn_once);				\
 109})
 110
 111/*
 112 * syscalls are special, and need special handling, this is why
 113 * they are not included in trace_entries.h
 114 */
 115struct syscall_trace_enter {
 116	struct trace_entry	ent;
 117	int			nr;
 118	unsigned long		args[];
 119};
 120
 121struct syscall_trace_exit {
 122	struct trace_entry	ent;
 123	int			nr;
 124	long			ret;
 125};
 126
 127struct kprobe_trace_entry_head {
 128	struct trace_entry	ent;
 129	unsigned long		ip;
 130};
 131
 132struct kretprobe_trace_entry_head {
 133	struct trace_entry	ent;
 134	unsigned long		func;
 135	unsigned long		ret_ip;
 136};
 137
 138/*
 139 * trace_flag_type is an enumeration that holds different
 140 * states when a trace occurs. These are:
 141 *  IRQS_OFF		- interrupts were disabled
 142 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 143 *  NEED_RESCHED	- reschedule is requested
 144 *  HARDIRQ		- inside an interrupt handler
 145 *  SOFTIRQ		- inside a softirq handler
 146 */
 147enum trace_flag_type {
 148	TRACE_FLAG_IRQS_OFF		= 0x01,
 149	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
 150	TRACE_FLAG_NEED_RESCHED		= 0x04,
 151	TRACE_FLAG_HARDIRQ		= 0x08,
 152	TRACE_FLAG_SOFTIRQ		= 0x10,
 153	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
 154	TRACE_FLAG_NMI			= 0x40,
 155};
 156
 157#define TRACE_BUF_SIZE		1024
 158
 159struct trace_array;
 160
 161/*
 162 * The CPU trace array - it consists of thousands of trace entries
 163 * plus some other descriptor data: (for example which task started
 164 * the trace, etc.)
 165 */
 166struct trace_array_cpu {
 167	atomic_t		disabled;
 168	void			*buffer_page;	/* ring buffer spare */
 169
 170	unsigned long		entries;
 171	unsigned long		saved_latency;
 172	unsigned long		critical_start;
 173	unsigned long		critical_end;
 174	unsigned long		critical_sequence;
 175	unsigned long		nice;
 176	unsigned long		policy;
 177	unsigned long		rt_priority;
 178	unsigned long		skipped_entries;
 179	u64			preempt_timestamp;
 180	pid_t			pid;
 181	kuid_t			uid;
 182	char			comm[TASK_COMM_LEN];
 183
 
 184#ifdef CONFIG_FUNCTION_TRACER
 185	int			ftrace_ignore_pid;
 186#endif
 187	bool			ignore_pid;
 188};
 189
 190struct tracer;
 191struct trace_option_dentry;
 192
 193struct array_buffer {
 194	struct trace_array		*tr;
 195	struct trace_buffer		*buffer;
 196	struct trace_array_cpu __percpu	*data;
 197	u64				time_start;
 198	int				cpu;
 199};
 200
 201#define TRACE_FLAGS_MAX_SIZE		32
 202
 203struct trace_options {
 204	struct tracer			*tracer;
 205	struct trace_option_dentry	*topts;
 206};
 207
 208struct trace_pid_list {
 209	int				pid_max;
 210	unsigned long			*pids;
 211};
 212
 213enum {
 214	TRACE_PIDS		= BIT(0),
 215	TRACE_NO_PIDS		= BIT(1),
 216};
 217
 218static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list,
 219				    struct trace_pid_list *no_pid_list)
 220{
 221	/* Return true if the pid list in type has pids */
 222	return ((type & TRACE_PIDS) && pid_list) ||
 223		((type & TRACE_NO_PIDS) && no_pid_list);
 224}
 225
 226static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list,
 227					 struct trace_pid_list *no_pid_list)
 228{
 229	/*
 230	 * Turning off what is in @type, return true if the "other"
 231	 * pid list, still has pids in it.
 232	 */
 233	return (!(type & TRACE_PIDS) && pid_list) ||
 234		(!(type & TRACE_NO_PIDS) && no_pid_list);
 235}
 236
 237typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);
 238
 239/**
 240 * struct cond_snapshot - conditional snapshot data and callback
 241 *
 242 * The cond_snapshot structure encapsulates a callback function and
 243 * data associated with the snapshot for a given tracing instance.
 244 *
 245 * When a snapshot is taken conditionally, by invoking
 246 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 247 * passed in turn to the cond_snapshot.update() function.  That data
 248 * can be compared by the update() implementation with the cond_data
 249 * contained wihin the struct cond_snapshot instance associated with
 250 * the trace_array.  Because the tr->max_lock is held throughout the
 251 * update() call, the update() function can directly retrieve the
 252 * cond_snapshot and cond_data associated with the per-instance
 253 * snapshot associated with the trace_array.
 254 *
 255 * The cond_snapshot.update() implementation can save data to be
 256 * associated with the snapshot if it decides to, and returns 'true'
 257 * in that case, or it returns 'false' if the conditional snapshot
 258 * shouldn't be taken.
 259 *
 260 * The cond_snapshot instance is created and associated with the
 261 * user-defined cond_data by tracing_cond_snapshot_enable().
 262 * Likewise, the cond_snapshot instance is destroyed and is no longer
 263 * associated with the trace instance by
 264 * tracing_cond_snapshot_disable().
 265 *
 266 * The method below is required.
 267 *
 268 * @update: When a conditional snapshot is invoked, the update()
 269 *	callback function is invoked with the tr->max_lock held.  The
 270 *	update() implementation signals whether or not to actually
 271 *	take the snapshot, by returning 'true' if so, 'false' if no
 272 *	snapshot should be taken.  Because the max_lock is held for
 273 *	the duration of update(), the implementation is safe to
 274 *	directly retrieven and save any implementation data it needs
 275 *	to in association with the snapshot.
 276 */
 277struct cond_snapshot {
 278	void				*cond_data;
 279	cond_update_fn_t		update;
 280};
 281
 282/*
 283 * The trace array - an array of per-CPU trace arrays. This is the
 284 * highest level data structure that individual tracers deal with.
 285 * They have on/off state as well:
 286 */
 287struct trace_array {
 288	struct list_head	list;
 289	char			*name;
 290	struct array_buffer	array_buffer;
 291#ifdef CONFIG_TRACER_MAX_TRACE
 292	/*
 293	 * The max_buffer is used to snapshot the trace when a maximum
 294	 * latency is reached, or when the user initiates a snapshot.
 295	 * Some tracers will use this to store a maximum trace while
 296	 * it continues examining live traces.
 297	 *
 298	 * The buffers for the max_buffer are set up the same as the array_buffer
 299	 * When a snapshot is taken, the buffer of the max_buffer is swapped
 300	 * with the buffer of the array_buffer and the buffers are reset for
 301	 * the array_buffer so the tracing can continue.
 302	 */
 303	struct array_buffer	max_buffer;
 304	bool			allocated_snapshot;
 305#endif
 306#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
 307	unsigned long		max_latency;
 308#ifdef CONFIG_FSNOTIFY
 309	struct dentry		*d_max_latency;
 310	struct work_struct	fsnotify_work;
 311	struct irq_work		fsnotify_irqwork;
 312#endif
 313#endif
 314	struct trace_pid_list	__rcu *filtered_pids;
 315	struct trace_pid_list	__rcu *filtered_no_pids;
 316	/*
 317	 * max_lock is used to protect the swapping of buffers
 318	 * when taking a max snapshot. The buffers themselves are
 319	 * protected by per_cpu spinlocks. But the action of the swap
 320	 * needs its own lock.
 321	 *
 322	 * This is defined as a arch_spinlock_t in order to help
 323	 * with performance when lockdep debugging is enabled.
 324	 *
 325	 * It is also used in other places outside the update_max_tr
 326	 * so it needs to be defined outside of the
 327	 * CONFIG_TRACER_MAX_TRACE.
 328	 */
 329	arch_spinlock_t		max_lock;
 330	int			buffer_disabled;
 331#ifdef CONFIG_FTRACE_SYSCALLS
 332	int			sys_refcount_enter;
 333	int			sys_refcount_exit;
 334	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
 335	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
 336#endif
 337	int			stop_count;
 338	int			clock_id;
 339	int			nr_topts;
 340	bool			clear_trace;
 341	int			buffer_percent;
 342	unsigned int		n_err_log_entries;
 343	struct tracer		*current_trace;
 344	unsigned int		trace_flags;
 345	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
 346	unsigned int		flags;
 347	raw_spinlock_t		start_lock;
 348	struct list_head	err_log;
 349	struct dentry		*dir;
 350	struct dentry		*options;
 351	struct dentry		*percpu_dir;
 352	struct dentry		*event_dir;
 353	struct trace_options	*topts;
 354	struct list_head	systems;
 355	struct list_head	events;
 356	struct trace_event_file *trace_marker_file;
 357	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
 358	int			ref;
 359	int			trace_ref;
 360#ifdef CONFIG_FUNCTION_TRACER
 361	struct ftrace_ops	*ops;
 362	struct trace_pid_list	__rcu *function_pids;
 363	struct trace_pid_list	__rcu *function_no_pids;
 364#ifdef CONFIG_DYNAMIC_FTRACE
 365	/* All of these are protected by the ftrace_lock */
 366	struct list_head	func_probes;
 367	struct list_head	mod_trace;
 368	struct list_head	mod_notrace;
 369#endif
 370	/* function tracing enabled */
 371	int			function_enabled;
 372#endif
 373	int			time_stamp_abs_ref;
 374	struct list_head	hist_vars;
 375#ifdef CONFIG_TRACER_SNAPSHOT
 376	struct cond_snapshot	*cond_snapshot;
 377#endif
 378};
 379
 380enum {
 381	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
 382};
 383
 384extern struct list_head ftrace_trace_arrays;
 385
 386extern struct mutex trace_types_lock;
 387
 388extern int trace_array_get(struct trace_array *tr);
 
 389extern int tracing_check_open_get_tr(struct trace_array *tr);
 390extern struct trace_array *trace_array_find(const char *instance);
 391extern struct trace_array *trace_array_find_get(const char *instance);
 392
 393extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
 394extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
 395
 396extern bool trace_clock_in_ns(struct trace_array *tr);
 397
 398/*
 399 * The global tracer (top) should be the first trace array added,
 400 * but we check the flag anyway.
 401 */
 402static inline struct trace_array *top_trace_array(void)
 403{
 404	struct trace_array *tr;
 405
 406	if (list_empty(&ftrace_trace_arrays))
 407		return NULL;
 408
 409	tr = list_entry(ftrace_trace_arrays.prev,
 410			typeof(*tr), list);
 411	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
 412	return tr;
 413}
 414
 415#define FTRACE_CMP_TYPE(var, type) \
 416	__builtin_types_compatible_p(typeof(var), type *)
 417
 418#undef IF_ASSIGN
 419#define IF_ASSIGN(var, entry, etype, id)			\
 420	if (FTRACE_CMP_TYPE(var, etype)) {			\
 421		var = (typeof(var))(entry);			\
 422		WARN_ON(id != 0 && (entry)->type != id);	\
 423		break;						\
 424	}
 425
 426/* Will cause compile errors if type is not found. */
 427extern void __ftrace_bad_type(void);
 428
 429/*
 430 * The trace_assign_type is a verifier that the entry type is
 431 * the same as the type being assigned. To add new types simply
 432 * add a line with the following format:
 433 *
 434 * IF_ASSIGN(var, ent, type, id);
 435 *
 436 *  Where "type" is the trace type that includes the trace_entry
 437 *  as the "ent" item. And "id" is the trace identifier that is
 438 *  used in the trace_type enum.
 439 *
 440 *  If the type can have more than one id, then use zero.
 441 */
 442#define trace_assign_type(var, ent)					\
 443	do {								\
 444		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
 445		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
 446		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
 447		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 448		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
 449		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
 450		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
 451		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
 452		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
 453		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
 454			  TRACE_MMIO_RW);				\
 455		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
 456			  TRACE_MMIO_MAP);				\
 457		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
 458		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
 459			  TRACE_GRAPH_ENT);		\
 460		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
 461			  TRACE_GRAPH_RET);		\
 462		__ftrace_bad_type();					\
 463	} while (0)
 464
 465/*
 466 * An option specific to a tracer. This is a boolean value.
 467 * The bit is the bit index that sets its value on the
 468 * flags value in struct tracer_flags.
 469 */
 470struct tracer_opt {
 471	const char	*name; /* Will appear on the trace_options file */
 472	u32		bit; /* Mask assigned in val field in tracer_flags */
 473};
 474
 475/*
 476 * The set of specific options for a tracer. Your tracer
 477 * have to set the initial value of the flags val.
 478 */
 479struct tracer_flags {
 480	u32			val;
 481	struct tracer_opt	*opts;
 482	struct tracer		*trace;
 483};
 484
 485/* Makes more easy to define a tracer opt */
 486#define TRACER_OPT(s, b)	.name = #s, .bit = b
 487
 488
 489struct trace_option_dentry {
 490	struct tracer_opt		*opt;
 491	struct tracer_flags		*flags;
 492	struct trace_array		*tr;
 493	struct dentry			*entry;
 494};
 495
 496/**
 497 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 498 * @name: the name chosen to select it on the available_tracers file
 499 * @init: called when one switches to this tracer (echo name > current_tracer)
 500 * @reset: called when one switches to another tracer
 501 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 502 * @stop: called when tracing is paused (echo 0 > tracing_on)
 503 * @update_thresh: called when tracing_thresh is updated
 504 * @open: called when the trace file is opened
 505 * @pipe_open: called when the trace_pipe file is opened
 506 * @close: called when the trace file is released
 507 * @pipe_close: called when the trace_pipe file is released
 508 * @read: override the default read callback on trace_pipe
 509 * @splice_read: override the default splice_read callback on trace_pipe
 510 * @selftest: selftest to run on boot (see trace_selftest.c)
 511 * @print_headers: override the first lines that describe your columns
 512 * @print_line: callback that prints a trace
 513 * @set_flag: signals one of your private flags changed (trace_options file)
 514 * @flags: your private flags
 515 */
 516struct tracer {
 517	const char		*name;
 518	int			(*init)(struct trace_array *tr);
 519	void			(*reset)(struct trace_array *tr);
 520	void			(*start)(struct trace_array *tr);
 521	void			(*stop)(struct trace_array *tr);
 522	int			(*update_thresh)(struct trace_array *tr);
 523	void			(*open)(struct trace_iterator *iter);
 524	void			(*pipe_open)(struct trace_iterator *iter);
 525	void			(*close)(struct trace_iterator *iter);
 526	void			(*pipe_close)(struct trace_iterator *iter);
 527	ssize_t			(*read)(struct trace_iterator *iter,
 528					struct file *filp, char __user *ubuf,
 529					size_t cnt, loff_t *ppos);
 530	ssize_t			(*splice_read)(struct trace_iterator *iter,
 531					       struct file *filp,
 532					       loff_t *ppos,
 533					       struct pipe_inode_info *pipe,
 534					       size_t len,
 535					       unsigned int flags);
 536#ifdef CONFIG_FTRACE_STARTUP_TEST
 537	int			(*selftest)(struct tracer *trace,
 538					    struct trace_array *tr);
 539#endif
 540	void			(*print_header)(struct seq_file *m);
 541	enum print_line_t	(*print_line)(struct trace_iterator *iter);
 542	/* If you handled the flag setting, return 0 */
 543	int			(*set_flag)(struct trace_array *tr,
 544					    u32 old_flags, u32 bit, int set);
 545	/* Return 0 if OK with change, else return non-zero */
 546	int			(*flag_changed)(struct trace_array *tr,
 547						u32 mask, int set);
 548	struct tracer		*next;
 549	struct tracer_flags	*flags;
 550	int			enabled;
 
 551	bool			print_max;
 552	bool			allow_instances;
 553#ifdef CONFIG_TRACER_MAX_TRACE
 554	bool			use_max_tr;
 555#endif
 556	/* True if tracer cannot be enabled in kernel param */
 557	bool			noboot;
 558};
 559
 560
 561/* Only current can touch trace_recursion */
 562
 563/*
 564 * For function tracing recursion:
 565 *  The order of these bits are important.
 566 *
 567 *  When function tracing occurs, the following steps are made:
 568 *   If arch does not support a ftrace feature:
 569 *    call internal function (uses INTERNAL bits) which calls...
 570 *   If callback is registered to the "global" list, the list
 571 *    function is called and recursion checks the GLOBAL bits.
 572 *    then this function calls...
 573 *   The function callback, which can use the FTRACE bits to
 574 *    check for recursion.
 575 *
 576 * Now if the arch does not suppport a feature, and it calls
 577 * the global list function which calls the ftrace callback
 578 * all three of these steps will do a recursion protection.
 579 * There's no reason to do one if the previous caller already
 580 * did. The recursion that we are protecting against will
 581 * go through the same steps again.
 582 *
 583 * To prevent the multiple recursion checks, if a recursion
 584 * bit is set that is higher than the MAX bit of the current
 585 * check, then we know that the check was made by the previous
 586 * caller, and we can skip the current check.
 587 */
 588enum {
 589	/* Function recursion bits */
 
 
 
 
 
 590	TRACE_FTRACE_BIT,
 591	TRACE_FTRACE_NMI_BIT,
 592	TRACE_FTRACE_IRQ_BIT,
 593	TRACE_FTRACE_SIRQ_BIT,
 594
 595	/* INTERNAL_BITs must be greater than FTRACE_BITs */
 596	TRACE_INTERNAL_BIT,
 597	TRACE_INTERNAL_NMI_BIT,
 598	TRACE_INTERNAL_IRQ_BIT,
 599	TRACE_INTERNAL_SIRQ_BIT,
 600
 601	TRACE_BRANCH_BIT,
 602/*
 603 * Abuse of the trace_recursion.
 604 * As we need a way to maintain state if we are tracing the function
 605 * graph in irq because we want to trace a particular function that
 606 * was called in irq context but we have irq tracing off. Since this
 607 * can only be modified by current, we can reuse trace_recursion.
 608 */
 609	TRACE_IRQ_BIT,
 610
 611	/* Set if the function is in the set_graph_function file */
 612	TRACE_GRAPH_BIT,
 613
 614	/*
 615	 * In the very unlikely case that an interrupt came in
 616	 * at a start of graph tracing, and we want to trace
 617	 * the function in that interrupt, the depth can be greater
 618	 * than zero, because of the preempted start of a previous
 619	 * trace. In an even more unlikely case, depth could be 2
 620	 * if a softirq interrupted the start of graph tracing,
 621	 * followed by an interrupt preempting a start of graph
 622	 * tracing in the softirq, and depth can even be 3
 623	 * if an NMI came in at the start of an interrupt function
 624	 * that preempted a softirq start of a function that
 625	 * preempted normal context!!!! Luckily, it can't be
 626	 * greater than 3, so the next two bits are a mask
 627	 * of what the depth is when we set TRACE_GRAPH_BIT
 628	 */
 629
 630	TRACE_GRAPH_DEPTH_START_BIT,
 631	TRACE_GRAPH_DEPTH_END_BIT,
 632
 633	/*
 634	 * To implement set_graph_notrace, if this bit is set, we ignore
 635	 * function graph tracing of called functions, until the return
 636	 * function is called to clear it.
 637	 */
 638	TRACE_GRAPH_NOTRACE_BIT,
 639};
 640
 641#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
 642#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
 643#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))
 644
 645#define trace_recursion_depth() \
 646	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
 647#define trace_recursion_set_depth(depth) \
 648	do {								\
 649		current->trace_recursion &=				\
 650			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
 651		current->trace_recursion |=				\
 652			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
 653	} while (0)
 654
 655#define TRACE_CONTEXT_BITS	4
 656
 657#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
 658#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
 659
 660#define TRACE_LIST_START	TRACE_INTERNAL_BIT
 661#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
 662
 663#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
 664
 665static __always_inline int trace_get_context_bit(void)
 666{
 667	int bit;
 668
 669	if (in_interrupt()) {
 670		if (in_nmi())
 671			bit = 0;
 672
 673		else if (in_irq())
 674			bit = 1;
 675		else
 676			bit = 2;
 677	} else
 678		bit = 3;
 679
 680	return bit;
 681}
 682
 683static __always_inline int trace_test_and_set_recursion(int start, int max)
 684{
 685	unsigned int val = current->trace_recursion;
 686	int bit;
 687
 688	/* A previous recursion check was made */
 689	if ((val & TRACE_CONTEXT_MASK) > max)
 690		return 0;
 691
 692	bit = trace_get_context_bit() + start;
 693	if (unlikely(val & (1 << bit)))
 694		return -1;
 695
 696	val |= 1 << bit;
 697	current->trace_recursion = val;
 698	barrier();
 699
 700	return bit;
 701}
 702
 703static __always_inline void trace_clear_recursion(int bit)
 704{
 705	unsigned int val = current->trace_recursion;
 706
 707	if (!bit)
 708		return;
 709
 710	bit = 1 << bit;
 711	val &= ~bit;
 712
 713	barrier();
 714	current->trace_recursion = val;
 715}
 716
 717static inline struct ring_buffer_iter *
 718trace_buffer_iter(struct trace_iterator *iter, int cpu)
 719{
 720	return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
 721}
 722
 723int tracer_init(struct tracer *t, struct trace_array *tr);
 724int tracing_is_enabled(void);
 725void tracing_reset_online_cpus(struct array_buffer *buf);
 726void tracing_reset_current(int cpu);
 727void tracing_reset_all_online_cpus(void);
 728int tracing_open_generic(struct inode *inode, struct file *filp);
 729int tracing_open_generic_tr(struct inode *inode, struct file *filp);
 730bool tracing_is_disabled(void);
 731bool tracer_tracing_is_on(struct trace_array *tr);
 732void tracer_tracing_on(struct trace_array *tr);
 733void tracer_tracing_off(struct trace_array *tr);
 734struct dentry *trace_create_file(const char *name,
 735				 umode_t mode,
 736				 struct dentry *parent,
 737				 void *data,
 738				 const struct file_operations *fops);
 739
 740struct dentry *tracing_init_dentry(void);
 741
 742struct ring_buffer_event;
 743
 744struct ring_buffer_event *
 745trace_buffer_lock_reserve(struct trace_buffer *buffer,
 746			  int type,
 747			  unsigned long len,
 748			  unsigned long flags,
 749			  int pc);
 750
 751struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 752						struct trace_array_cpu *data);
 753
 754struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 755					  int *ent_cpu, u64 *ent_ts);
 756
 757void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 758					struct ring_buffer_event *event);
 759
 760int trace_empty(struct trace_iterator *iter);
 761
 762void *trace_find_next_entry_inc(struct trace_iterator *iter);
 763
 764void trace_init_global_iter(struct trace_iterator *iter);
 765
 766void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 767
 768unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
 769unsigned long trace_total_entries(struct trace_array *tr);
 770
 771void trace_function(struct trace_array *tr,
 772		    unsigned long ip,
 773		    unsigned long parent_ip,
 774		    unsigned long flags, int pc);
 775void trace_graph_function(struct trace_array *tr,
 776		    unsigned long ip,
 777		    unsigned long parent_ip,
 778		    unsigned long flags, int pc);
 779void trace_latency_header(struct seq_file *m);
 780void trace_default_header(struct seq_file *m);
 781void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
 782int trace_empty(struct trace_iterator *iter);
 783
 784void trace_graph_return(struct ftrace_graph_ret *trace);
 785int trace_graph_entry(struct ftrace_graph_ent *trace);
 786void set_graph_array(struct trace_array *tr);
 787
 788void tracing_start_cmdline_record(void);
 789void tracing_stop_cmdline_record(void);
 790void tracing_start_tgid_record(void);
 791void tracing_stop_tgid_record(void);
 792
 793int register_tracer(struct tracer *type);
 794int is_tracing_stopped(void);
 795
 796loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
 797
 798extern cpumask_var_t __read_mostly tracing_buffer_mask;
 799
 800#define for_each_tracing_cpu(cpu)	\
 801	for_each_cpu(cpu, tracing_buffer_mask)
 802
 803extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 804
 805extern unsigned long tracing_thresh;
 806
 807/* PID filtering */
 808
 809extern int pid_max;
 810
 811bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
 812			     pid_t search_pid);
 813bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
 814			    struct trace_pid_list *filtered_no_pids,
 815			    struct task_struct *task);
 816void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
 817				  struct task_struct *self,
 818				  struct task_struct *task);
 819void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
 820void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
 821int trace_pid_show(struct seq_file *m, void *v);
 822void trace_free_pid_list(struct trace_pid_list *pid_list);
 823int trace_pid_write(struct trace_pid_list *filtered_pids,
 824		    struct trace_pid_list **new_pid_list,
 825		    const char __user *ubuf, size_t cnt);
 826
 827#ifdef CONFIG_TRACER_MAX_TRACE
 828void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
 829		   void *cond_data);
 830void update_max_tr_single(struct trace_array *tr,
 831			  struct task_struct *tsk, int cpu);
 832#endif /* CONFIG_TRACER_MAX_TRACE */
 833
 834#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
 835	defined(CONFIG_FSNOTIFY)
 836
 837void latency_fsnotify(struct trace_array *tr);
 838
 839#else
 840
 841static inline void latency_fsnotify(struct trace_array *tr) { }
 842
 843#endif
 844
 845#ifdef CONFIG_STACKTRACE
 846void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 847		   int pc);
 848#else
 849static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
 850				 int skip, int pc)
 851{
 852}
 853#endif /* CONFIG_STACKTRACE */
 854
 855extern u64 ftrace_now(int cpu);
 856
 857extern void trace_find_cmdline(int pid, char comm[]);
 858extern int trace_find_tgid(int pid);
 859extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
 860
 861#ifdef CONFIG_DYNAMIC_FTRACE
 862extern unsigned long ftrace_update_tot_cnt;
 863extern unsigned long ftrace_number_of_pages;
 864extern unsigned long ftrace_number_of_groups;
 865void ftrace_init_trace_array(struct trace_array *tr);
 866#else
 867static inline void ftrace_init_trace_array(struct trace_array *tr) { }
 868#endif
 869#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
 870extern int DYN_FTRACE_TEST_NAME(void);
 871#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
 872extern int DYN_FTRACE_TEST_NAME2(void);
 873
 874extern bool ring_buffer_expanded;
 875extern bool tracing_selftest_disabled;
 876
 877#ifdef CONFIG_FTRACE_STARTUP_TEST
 878extern int trace_selftest_startup_function(struct tracer *trace,
 879					   struct trace_array *tr);
 880extern int trace_selftest_startup_function_graph(struct tracer *trace,
 881						 struct trace_array *tr);
 882extern int trace_selftest_startup_irqsoff(struct tracer *trace,
 883					  struct trace_array *tr);
 884extern int trace_selftest_startup_preemptoff(struct tracer *trace,
 885					     struct trace_array *tr);
 886extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
 887						 struct trace_array *tr);
 888extern int trace_selftest_startup_wakeup(struct tracer *trace,
 889					 struct trace_array *tr);
 890extern int trace_selftest_startup_nop(struct tracer *trace,
 891					 struct trace_array *tr);
 892extern int trace_selftest_startup_branch(struct tracer *trace,
 893					 struct trace_array *tr);
 894/*
 895 * Tracer data references selftest functions that only occur
 896 * on boot up. These can be __init functions. Thus, when selftests
 897 * are enabled, then the tracers need to reference __init functions.
 898 */
 899#define __tracer_data		__refdata
 900#else
 901/* Tracers are seldom changed. Optimize when selftests are disabled. */
 902#define __tracer_data		__read_mostly
 903#endif /* CONFIG_FTRACE_STARTUP_TEST */
 904
 905extern void *head_page(struct trace_array_cpu *data);
 906extern unsigned long long ns2usecs(u64 nsec);
 907extern int
 908trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
 909extern int
 910trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 911extern int
 912trace_array_vprintk(struct trace_array *tr,
 913		    unsigned long ip, const char *fmt, va_list args);
 914int trace_array_printk_buf(struct trace_buffer *buffer,
 
 
 915			   unsigned long ip, const char *fmt, ...);
 916void trace_printk_seq(struct trace_seq *s);
 917enum print_line_t print_trace_line(struct trace_iterator *iter);
 918
 919extern char trace_find_mark(unsigned long long duration);
 920
 921struct ftrace_hash;
 922
 923struct ftrace_mod_load {
 924	struct list_head	list;
 925	char			*func;
 926	char			*module;
 927	int			 enable;
 928};
 929
 930enum {
 931	FTRACE_HASH_FL_MOD	= (1 << 0),
 932};
 933
 934struct ftrace_hash {
 935	unsigned long		size_bits;
 936	struct hlist_head	*buckets;
 937	unsigned long		count;
 938	unsigned long		flags;
 939	struct rcu_head		rcu;
 940};
 941
 942struct ftrace_func_entry *
 943ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
 944
 945static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
 946{
 947	return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
 948}
 949
 950/* Standard output formatting function used for function return traces */
 951#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 952
 953/* Flag options */
 954#define TRACE_GRAPH_PRINT_OVERRUN       0x1
 955#define TRACE_GRAPH_PRINT_CPU           0x2
 956#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
 957#define TRACE_GRAPH_PRINT_PROC          0x8
 958#define TRACE_GRAPH_PRINT_DURATION      0x10
 959#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
 960#define TRACE_GRAPH_PRINT_REL_TIME      0x40
 961#define TRACE_GRAPH_PRINT_IRQS          0x80
 962#define TRACE_GRAPH_PRINT_TAIL          0x100
 963#define TRACE_GRAPH_SLEEP_TIME          0x200
 964#define TRACE_GRAPH_GRAPH_TIME          0x400
 965#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
 966#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
 967
 968extern void ftrace_graph_sleep_time_control(bool enable);
 969
 970#ifdef CONFIG_FUNCTION_PROFILER
 971extern void ftrace_graph_graph_time_control(bool enable);
 972#else
 973static inline void ftrace_graph_graph_time_control(bool enable) { }
 974#endif
 975
 976extern enum print_line_t
 977print_graph_function_flags(struct trace_iterator *iter, u32 flags);
 978extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
 979extern void
 980trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
 981extern void graph_trace_open(struct trace_iterator *iter);
 982extern void graph_trace_close(struct trace_iterator *iter);
 983extern int __trace_graph_entry(struct trace_array *tr,
 984			       struct ftrace_graph_ent *trace,
 985			       unsigned long flags, int pc);
 986extern void __trace_graph_return(struct trace_array *tr,
 987				 struct ftrace_graph_ret *trace,
 988				 unsigned long flags, int pc);
 989
 990#ifdef CONFIG_DYNAMIC_FTRACE
 991extern struct ftrace_hash __rcu *ftrace_graph_hash;
 992extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
 993
 994static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
 995{
 996	unsigned long addr = trace->func;
 997	int ret = 0;
 998	struct ftrace_hash *hash;
 999
1000	preempt_disable_notrace();
1001
1002	/*
1003	 * Have to open code "rcu_dereference_sched()" because the
1004	 * function graph tracer can be called when RCU is not
1005	 * "watching".
1006	 * Protected with schedule_on_each_cpu(ftrace_sync)
1007	 */
1008	hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible());
1009
1010	if (ftrace_hash_empty(hash)) {
1011		ret = 1;
1012		goto out;
1013	}
1014
1015	if (ftrace_lookup_ip(hash, addr)) {
1016
1017		/*
1018		 * This needs to be cleared on the return functions
1019		 * when the depth is zero.
1020		 */
1021		trace_recursion_set(TRACE_GRAPH_BIT);
1022		trace_recursion_set_depth(trace->depth);
1023
1024		/*
1025		 * If no irqs are to be traced, but a set_graph_function
1026		 * is set, and called by an interrupt handler, we still
1027		 * want to trace it.
1028		 */
1029		if (in_irq())
1030			trace_recursion_set(TRACE_IRQ_BIT);
1031		else
1032			trace_recursion_clear(TRACE_IRQ_BIT);
1033		ret = 1;
1034	}
1035
1036out:
1037	preempt_enable_notrace();
1038	return ret;
1039}
1040
1041static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
1042{
1043	if (trace_recursion_test(TRACE_GRAPH_BIT) &&
1044	    trace->depth == trace_recursion_depth())
1045		trace_recursion_clear(TRACE_GRAPH_BIT);
1046}
1047
1048static inline int ftrace_graph_notrace_addr(unsigned long addr)
1049{
1050	int ret = 0;
1051	struct ftrace_hash *notrace_hash;
1052
1053	preempt_disable_notrace();
1054
1055	/*
1056	 * Have to open code "rcu_dereference_sched()" because the
1057	 * function graph tracer can be called when RCU is not
1058	 * "watching".
1059	 * Protected with schedule_on_each_cpu(ftrace_sync)
1060	 */
1061	notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
1062						 !preemptible());
1063
1064	if (ftrace_lookup_ip(notrace_hash, addr))
1065		ret = 1;
1066
1067	preempt_enable_notrace();
1068	return ret;
1069}
1070#else
1071static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
1072{
1073	return 1;
1074}
1075
1076static inline int ftrace_graph_notrace_addr(unsigned long addr)
1077{
1078	return 0;
1079}
1080static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
1081{ }
1082#endif /* CONFIG_DYNAMIC_FTRACE */
1083
1084extern unsigned int fgraph_max_depth;
1085
1086static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
1087{
1088	/* trace it when it is-nested-in or is a function enabled. */
1089	return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
1090		 ftrace_graph_addr(trace)) ||
1091		(trace->depth < 0) ||
1092		(fgraph_max_depth && trace->depth >= fgraph_max_depth);
1093}
1094
1095#else /* CONFIG_FUNCTION_GRAPH_TRACER */
1096static inline enum print_line_t
1097print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1098{
1099	return TRACE_TYPE_UNHANDLED;
1100}
1101#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1102
1103extern struct list_head ftrace_pids;
1104
1105#ifdef CONFIG_FUNCTION_TRACER
1106
1107#define FTRACE_PID_IGNORE	-1
1108#define FTRACE_PID_TRACE	-2
1109
1110struct ftrace_func_command {
1111	struct list_head	list;
1112	char			*name;
1113	int			(*func)(struct trace_array *tr,
1114					struct ftrace_hash *hash,
1115					char *func, char *cmd,
1116					char *params, int enable);
1117};
1118extern bool ftrace_filter_param __initdata;
1119static inline int ftrace_trace_task(struct trace_array *tr)
1120{
1121	return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) !=
1122		FTRACE_PID_IGNORE;
1123}
1124extern int ftrace_is_dead(void);
1125int ftrace_create_function_files(struct trace_array *tr,
1126				 struct dentry *parent);
1127void ftrace_destroy_function_files(struct trace_array *tr);
1128void ftrace_init_global_array_ops(struct trace_array *tr);
1129void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
1130void ftrace_reset_array_ops(struct trace_array *tr);
1131void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
1132void ftrace_init_tracefs_toplevel(struct trace_array *tr,
1133				  struct dentry *d_tracer);
1134void ftrace_clear_pids(struct trace_array *tr);
1135int init_function_trace(void);
1136void ftrace_pid_follow_fork(struct trace_array *tr, bool enable);
1137#else
1138static inline int ftrace_trace_task(struct trace_array *tr)
1139{
1140	return 1;
1141}
1142static inline int ftrace_is_dead(void) { return 0; }
1143static inline int
1144ftrace_create_function_files(struct trace_array *tr,
1145			     struct dentry *parent)
1146{
1147	return 0;
1148}
1149static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
1150static inline __init void
1151ftrace_init_global_array_ops(struct trace_array *tr) { }
1152static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
1153static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
1154static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
1155static inline void ftrace_clear_pids(struct trace_array *tr) { }
1156static inline int init_function_trace(void) { return 0; }
1157static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { }
1158/* ftace_func_t type is not defined, use macro instead of static inline */
1159#define ftrace_init_array_ops(tr, func) do { } while (0)
1160#endif /* CONFIG_FUNCTION_TRACER */
1161
1162#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
1163
1164struct ftrace_probe_ops {
1165	void			(*func)(unsigned long ip,
1166					unsigned long parent_ip,
1167					struct trace_array *tr,
1168					struct ftrace_probe_ops *ops,
1169					void *data);
1170	int			(*init)(struct ftrace_probe_ops *ops,
1171					struct trace_array *tr,
1172					unsigned long ip, void *init_data,
1173					void **data);
1174	void			(*free)(struct ftrace_probe_ops *ops,
1175					struct trace_array *tr,
1176					unsigned long ip, void *data);
1177	int			(*print)(struct seq_file *m,
1178					 unsigned long ip,
1179					 struct ftrace_probe_ops *ops,
1180					 void *data);
1181};
1182
1183struct ftrace_func_mapper;
1184typedef int (*ftrace_mapper_func)(void *data);
1185
1186struct ftrace_func_mapper *allocate_ftrace_func_mapper(void);
1187void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
1188					   unsigned long ip);
1189int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
1190			       unsigned long ip, void *data);
1191void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
1192				   unsigned long ip);
1193void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
1194			     ftrace_mapper_func free_func);
1195
1196extern int
1197register_ftrace_function_probe(char *glob, struct trace_array *tr,
1198			       struct ftrace_probe_ops *ops, void *data);
1199extern int
1200unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
1201				      struct ftrace_probe_ops *ops);
1202extern void clear_ftrace_function_probes(struct trace_array *tr);
1203
1204int register_ftrace_command(struct ftrace_func_command *cmd);
1205int unregister_ftrace_command(struct ftrace_func_command *cmd);
1206
1207void ftrace_create_filter_files(struct ftrace_ops *ops,
1208				struct dentry *parent);
1209void ftrace_destroy_filter_files(struct ftrace_ops *ops);
1210
1211extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
1212			     int len, int reset);
1213extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
1214			      int len, int reset);
1215#else
1216struct ftrace_func_command;
1217
1218static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
1219{
1220	return -EINVAL;
1221}
1222static inline __init int unregister_ftrace_command(char *cmd_name)
1223{
1224	return -EINVAL;
1225}
1226static inline void clear_ftrace_function_probes(struct trace_array *tr)
1227{
1228}
1229
1230/*
1231 * The ops parameter passed in is usually undefined.
1232 * This must be a macro.
1233 */
1234#define ftrace_create_filter_files(ops, parent) do { } while (0)
1235#define ftrace_destroy_filter_files(ops) do { } while (0)
1236#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
1237
1238bool ftrace_event_is_function(struct trace_event_call *call);
1239
1240/*
1241 * struct trace_parser - servers for reading the user input separated by spaces
1242 * @cont: set if the input is not complete - no final space char was found
1243 * @buffer: holds the parsed user input
1244 * @idx: user input length
1245 * @size: buffer size
1246 */
1247struct trace_parser {
1248	bool		cont;
1249	char		*buffer;
1250	unsigned	idx;
1251	unsigned	size;
1252};
1253
1254static inline bool trace_parser_loaded(struct trace_parser *parser)
1255{
1256	return (parser->idx != 0);
1257}
1258
1259static inline bool trace_parser_cont(struct trace_parser *parser)
1260{
1261	return parser->cont;
1262}
1263
1264static inline void trace_parser_clear(struct trace_parser *parser)
1265{
1266	parser->cont = false;
1267	parser->idx = 0;
1268}
1269
1270extern int trace_parser_get_init(struct trace_parser *parser, int size);
1271extern void trace_parser_put(struct trace_parser *parser);
1272extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1273	size_t cnt, loff_t *ppos);
1274
1275/*
1276 * Only create function graph options if function graph is configured.
1277 */
1278#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1279# define FGRAPH_FLAGS						\
1280		C(DISPLAY_GRAPH,	"display-graph"),
1281#else
1282# define FGRAPH_FLAGS
1283#endif
1284
1285#ifdef CONFIG_BRANCH_TRACER
1286# define BRANCH_FLAGS					\
1287		C(BRANCH,		"branch"),
1288#else
1289# define BRANCH_FLAGS
1290#endif
1291
1292#ifdef CONFIG_FUNCTION_TRACER
1293# define FUNCTION_FLAGS						\
1294		C(FUNCTION,		"function-trace"),	\
1295		C(FUNC_FORK,		"function-fork"),
1296# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
1297#else
1298# define FUNCTION_FLAGS
1299# define FUNCTION_DEFAULT_FLAGS		0UL
1300# define TRACE_ITER_FUNC_FORK		0UL
1301#endif
1302
1303#ifdef CONFIG_STACKTRACE
1304# define STACK_FLAGS				\
1305		C(STACKTRACE,		"stacktrace"),
1306#else
1307# define STACK_FLAGS
1308#endif
1309
1310/*
1311 * trace_iterator_flags is an enumeration that defines bit
1312 * positions into trace_flags that controls the output.
1313 *
1314 * NOTE: These bits must match the trace_options array in
1315 *       trace.c (this macro guarantees it).
1316 */
1317#define TRACE_FLAGS						\
1318		C(PRINT_PARENT,		"print-parent"),	\
1319		C(SYM_OFFSET,		"sym-offset"),		\
1320		C(SYM_ADDR,		"sym-addr"),		\
1321		C(VERBOSE,		"verbose"),		\
1322		C(RAW,			"raw"),			\
1323		C(HEX,			"hex"),			\
1324		C(BIN,			"bin"),			\
1325		C(BLOCK,		"block"),		\
1326		C(PRINTK,		"trace_printk"),	\
1327		C(ANNOTATE,		"annotate"),		\
1328		C(USERSTACKTRACE,	"userstacktrace"),	\
1329		C(SYM_USEROBJ,		"sym-userobj"),		\
1330		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
1331		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
1332		C(LATENCY_FMT,		"latency-format"),	\
1333		C(RECORD_CMD,		"record-cmd"),		\
1334		C(RECORD_TGID,		"record-tgid"),		\
1335		C(OVERWRITE,		"overwrite"),		\
1336		C(STOP_ON_FREE,		"disable_on_free"),	\
1337		C(IRQ_INFO,		"irq-info"),		\
1338		C(MARKERS,		"markers"),		\
1339		C(EVENT_FORK,		"event-fork"),		\
1340		C(PAUSE_ON_TRACE,	"pause-on-trace"),	\
1341		FUNCTION_FLAGS					\
1342		FGRAPH_FLAGS					\
1343		STACK_FLAGS					\
1344		BRANCH_FLAGS
1345
1346/*
1347 * By defining C, we can make TRACE_FLAGS a list of bit names
1348 * that will define the bits for the flag masks.
1349 */
1350#undef C
1351#define C(a, b) TRACE_ITER_##a##_BIT
1352
1353enum trace_iterator_bits {
1354	TRACE_FLAGS
1355	/* Make sure we don't go more than we have bits for */
1356	TRACE_ITER_LAST_BIT
1357};
1358
1359/*
1360 * By redefining C, we can make TRACE_FLAGS a list of masks that
1361 * use the bits as defined above.
1362 */
1363#undef C
1364#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)
1365
1366enum trace_iterator_flags { TRACE_FLAGS };
1367
1368/*
1369 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
1370 * control the output of kernel symbols.
1371 */
1372#define TRACE_ITER_SYM_MASK \
1373	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
1374
1375extern struct tracer nop_trace;
1376
1377#ifdef CONFIG_BRANCH_TRACER
1378extern int enable_branch_tracing(struct trace_array *tr);
1379extern void disable_branch_tracing(void);
1380static inline int trace_branch_enable(struct trace_array *tr)
1381{
1382	if (tr->trace_flags & TRACE_ITER_BRANCH)
1383		return enable_branch_tracing(tr);
1384	return 0;
1385}
1386static inline void trace_branch_disable(void)
1387{
1388	/* due to races, always disable */
1389	disable_branch_tracing();
1390}
1391#else
1392static inline int trace_branch_enable(struct trace_array *tr)
1393{
1394	return 0;
1395}
1396static inline void trace_branch_disable(void)
1397{
1398}
1399#endif /* CONFIG_BRANCH_TRACER */
1400
1401/* set ring buffers to default size if not already done so */
1402int tracing_update_buffers(void);
1403
1404struct ftrace_event_field {
1405	struct list_head	link;
1406	const char		*name;
1407	const char		*type;
1408	int			filter_type;
1409	int			offset;
1410	int			size;
1411	int			is_signed;
1412};
1413
1414struct prog_entry;
1415
1416struct event_filter {
1417	struct prog_entry __rcu	*prog;
1418	char			*filter_string;
1419};
1420
1421struct event_subsystem {
1422	struct list_head	list;
1423	const char		*name;
1424	struct event_filter	*filter;
1425	int			ref_count;
1426};
1427
1428struct trace_subsystem_dir {
1429	struct list_head		list;
1430	struct event_subsystem		*subsystem;
1431	struct trace_array		*tr;
1432	struct dentry			*entry;
1433	int				ref_count;
1434	int				nr_events;
1435};
1436
1437extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
1438				     struct trace_buffer *buffer,
1439				     struct ring_buffer_event *event);
1440
1441void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1442				     struct trace_buffer *buffer,
1443				     struct ring_buffer_event *event,
1444				     unsigned long flags, int pc,
1445				     struct pt_regs *regs);
1446
1447static inline void trace_buffer_unlock_commit(struct trace_array *tr,
1448					      struct trace_buffer *buffer,
1449					      struct ring_buffer_event *event,
1450					      unsigned long flags, int pc)
1451{
1452	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
1453}
1454
1455DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
1456DECLARE_PER_CPU(int, trace_buffered_event_cnt);
1457void trace_buffered_event_disable(void);
1458void trace_buffered_event_enable(void);
1459
1460static inline void
1461__trace_event_discard_commit(struct trace_buffer *buffer,
1462			     struct ring_buffer_event *event)
1463{
1464	if (this_cpu_read(trace_buffered_event) == event) {
1465		/* Simply release the temp buffer */
1466		this_cpu_dec(trace_buffered_event_cnt);
1467		return;
1468	}
1469	ring_buffer_discard_commit(buffer, event);
1470}
1471
1472/*
1473 * Helper function for event_trigger_unlock_commit{_regs}().
1474 * If there are event triggers attached to this event that requires
1475 * filtering against its fields, then they wil be called as the
1476 * entry already holds the field information of the current event.
1477 *
1478 * It also checks if the event should be discarded or not.
1479 * It is to be discarded if the event is soft disabled and the
1480 * event was only recorded to process triggers, or if the event
1481 * filter is active and this event did not match the filters.
1482 *
1483 * Returns true if the event is discarded, false otherwise.
1484 */
1485static inline bool
1486__event_trigger_test_discard(struct trace_event_file *file,
1487			     struct trace_buffer *buffer,
1488			     struct ring_buffer_event *event,
1489			     void *entry,
1490			     enum event_trigger_type *tt)
1491{
1492	unsigned long eflags = file->flags;
1493
1494	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
1495		*tt = event_triggers_call(file, entry, event);
1496
1497	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
1498	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
1499	     !filter_match_preds(file->filter, entry))) {
1500		__trace_event_discard_commit(buffer, event);
1501		return true;
1502	}
1503
1504	return false;
1505}
1506
1507/**
1508 * event_trigger_unlock_commit - handle triggers and finish event commit
1509 * @file: The file pointer assoctiated to the event
1510 * @buffer: The ring buffer that the event is being written to
1511 * @event: The event meta data in the ring buffer
1512 * @entry: The event itself
1513 * @irq_flags: The state of the interrupts at the start of the event
1514 * @pc: The state of the preempt count at the start of the event.
1515 *
1516 * This is a helper function to handle triggers that require data
1517 * from the event itself. It also tests the event against filters and
1518 * if the event is soft disabled and should be discarded.
1519 */
1520static inline void
1521event_trigger_unlock_commit(struct trace_event_file *file,
1522			    struct trace_buffer *buffer,
1523			    struct ring_buffer_event *event,
1524			    void *entry, unsigned long irq_flags, int pc)
1525{
1526	enum event_trigger_type tt = ETT_NONE;
1527
1528	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1529		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
1530
1531	if (tt)
1532		event_triggers_post_call(file, tt);
1533}
1534
1535/**
1536 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
1537 * @file: The file pointer assoctiated to the event
1538 * @buffer: The ring buffer that the event is being written to
1539 * @event: The event meta data in the ring buffer
1540 * @entry: The event itself
1541 * @irq_flags: The state of the interrupts at the start of the event
1542 * @pc: The state of the preempt count at the start of the event.
1543 *
1544 * This is a helper function to handle triggers that require data
1545 * from the event itself. It also tests the event against filters and
1546 * if the event is soft disabled and should be discarded.
1547 *
1548 * Same as event_trigger_unlock_commit() but calls
1549 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
1550 */
1551static inline void
1552event_trigger_unlock_commit_regs(struct trace_event_file *file,
1553				 struct trace_buffer *buffer,
1554				 struct ring_buffer_event *event,
1555				 void *entry, unsigned long irq_flags, int pc,
1556				 struct pt_regs *regs)
1557{
1558	enum event_trigger_type tt = ETT_NONE;
1559
1560	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
1561		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
1562						irq_flags, pc, regs);
1563
1564	if (tt)
1565		event_triggers_post_call(file, tt);
1566}
1567
1568#define FILTER_PRED_INVALID	((unsigned short)-1)
1569#define FILTER_PRED_IS_RIGHT	(1 << 15)
1570#define FILTER_PRED_FOLD	(1 << 15)
1571
1572/*
1573 * The max preds is the size of unsigned short with
1574 * two flags at the MSBs. One bit is used for both the IS_RIGHT
1575 * and FOLD flags. The other is reserved.
1576 *
1577 * 2^14 preds is way more than enough.
1578 */
1579#define MAX_FILTER_PRED		16384
1580
1581struct filter_pred;
1582struct regex;
1583
1584typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1585
1586typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1587
1588enum regex_type {
1589	MATCH_FULL = 0,
1590	MATCH_FRONT_ONLY,
1591	MATCH_MIDDLE_ONLY,
1592	MATCH_END_ONLY,
1593	MATCH_GLOB,
1594	MATCH_INDEX,
1595};
1596
1597struct regex {
1598	char			pattern[MAX_FILTER_STR_VAL];
1599	int			len;
1600	int			field_len;
1601	regex_match_func	match;
1602};
1603
1604struct filter_pred {
1605	filter_pred_fn_t 	fn;
1606	u64 			val;
1607	struct regex		regex;
1608	unsigned short		*ops;
1609	struct ftrace_event_field *field;
1610	int 			offset;
1611	int			not;
1612	int 			op;
1613};
1614
1615static inline bool is_string_field(struct ftrace_event_field *field)
1616{
1617	return field->filter_type == FILTER_DYN_STRING ||
1618	       field->filter_type == FILTER_STATIC_STRING ||
1619	       field->filter_type == FILTER_PTR_STRING ||
1620	       field->filter_type == FILTER_COMM;
1621}
1622
1623static inline bool is_function_field(struct ftrace_event_field *field)
1624{
1625	return field->filter_type == FILTER_TRACE_FN;
1626}
1627
1628extern enum regex_type
1629filter_parse_regex(char *buff, int len, char **search, int *not);
1630extern void print_event_filter(struct trace_event_file *file,
1631			       struct trace_seq *s);
1632extern int apply_event_filter(struct trace_event_file *file,
1633			      char *filter_string);
1634extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
1635					char *filter_string);
1636extern void print_subsystem_event_filter(struct event_subsystem *system,
1637					 struct trace_seq *s);
1638extern int filter_assign_type(const char *type);
1639extern int create_event_filter(struct trace_array *tr,
1640			       struct trace_event_call *call,
1641			       char *filter_str, bool set_str,
1642			       struct event_filter **filterp);
1643extern void free_event_filter(struct event_filter *filter);
1644
1645struct ftrace_event_field *
1646trace_find_event_field(struct trace_event_call *call, char *name);
1647
1648extern void trace_event_enable_cmd_record(bool enable);
1649extern void trace_event_enable_tgid_record(bool enable);
1650
1651extern int event_trace_init(void);
1652extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1653extern int event_trace_del_tracer(struct trace_array *tr);
1654
1655extern struct trace_event_file *__find_event_file(struct trace_array *tr,
1656						  const char *system,
1657						  const char *event);
1658extern struct trace_event_file *find_event_file(struct trace_array *tr,
1659						const char *system,
1660						const char *event);
1661
1662static inline void *event_file_data(struct file *filp)
1663{
1664	return READ_ONCE(file_inode(filp)->i_private);
1665}
1666
1667extern struct mutex event_mutex;
1668extern struct list_head ftrace_events;
1669
1670extern const struct file_operations event_trigger_fops;
1671extern const struct file_operations event_hist_fops;
1672extern const struct file_operations event_hist_debug_fops;
1673extern const struct file_operations event_inject_fops;
1674
1675#ifdef CONFIG_HIST_TRIGGERS
1676extern int register_trigger_hist_cmd(void);
1677extern int register_trigger_hist_enable_disable_cmds(void);
1678#else
1679static inline int register_trigger_hist_cmd(void) { return 0; }
1680static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
1681#endif
1682
1683extern int register_trigger_cmds(void);
1684extern void clear_event_triggers(struct trace_array *tr);
1685
1686struct event_trigger_data {
1687	unsigned long			count;
1688	int				ref;
1689	struct event_trigger_ops	*ops;
1690	struct event_command		*cmd_ops;
1691	struct event_filter __rcu	*filter;
1692	char				*filter_str;
1693	void				*private_data;
1694	bool				paused;
1695	bool				paused_tmp;
1696	struct list_head		list;
1697	char				*name;
1698	struct list_head		named_list;
1699	struct event_trigger_data	*named_data;
1700};
1701
1702/* Avoid typos */
1703#define ENABLE_EVENT_STR	"enable_event"
1704#define DISABLE_EVENT_STR	"disable_event"
1705#define ENABLE_HIST_STR		"enable_hist"
1706#define DISABLE_HIST_STR	"disable_hist"
1707
1708struct enable_trigger_data {
1709	struct trace_event_file		*file;
1710	bool				enable;
1711	bool				hist;
1712};
1713
1714extern int event_enable_trigger_print(struct seq_file *m,
1715				      struct event_trigger_ops *ops,
1716				      struct event_trigger_data *data);
1717extern void event_enable_trigger_free(struct event_trigger_ops *ops,
1718				      struct event_trigger_data *data);
1719extern int event_enable_trigger_func(struct event_command *cmd_ops,
1720				     struct trace_event_file *file,
1721				     char *glob, char *cmd, char *param);
1722extern int event_enable_register_trigger(char *glob,
1723					 struct event_trigger_ops *ops,
1724					 struct event_trigger_data *data,
1725					 struct trace_event_file *file);
1726extern void event_enable_unregister_trigger(char *glob,
1727					    struct event_trigger_ops *ops,
1728					    struct event_trigger_data *test,
1729					    struct trace_event_file *file);
1730extern void trigger_data_free(struct event_trigger_data *data);
1731extern int event_trigger_init(struct event_trigger_ops *ops,
1732			      struct event_trigger_data *data);
1733extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
1734					      int trigger_enable);
1735extern void update_cond_flag(struct trace_event_file *file);
1736extern int set_trigger_filter(char *filter_str,
1737			      struct event_trigger_data *trigger_data,
1738			      struct trace_event_file *file);
1739extern struct event_trigger_data *find_named_trigger(const char *name);
1740extern bool is_named_trigger(struct event_trigger_data *test);
1741extern int save_named_trigger(const char *name,
1742			      struct event_trigger_data *data);
1743extern void del_named_trigger(struct event_trigger_data *data);
1744extern void pause_named_trigger(struct event_trigger_data *data);
1745extern void unpause_named_trigger(struct event_trigger_data *data);
1746extern void set_named_trigger_data(struct event_trigger_data *data,
1747				   struct event_trigger_data *named_data);
1748extern struct event_trigger_data *
1749get_named_trigger_data(struct event_trigger_data *data);
1750extern int register_event_command(struct event_command *cmd);
1751extern int unregister_event_command(struct event_command *cmd);
1752extern int register_trigger_hist_enable_disable_cmds(void);
1753
1754/**
1755 * struct event_trigger_ops - callbacks for trace event triggers
1756 *
1757 * The methods in this structure provide per-event trigger hooks for
1758 * various trigger operations.
1759 *
1760 * All the methods below, except for @init() and @free(), must be
1761 * implemented.
1762 *
1763 * @func: The trigger 'probe' function called when the triggering
1764 *	event occurs.  The data passed into this callback is the data
1765 *	that was supplied to the event_command @reg() function that
1766 *	registered the trigger (see struct event_command) along with
1767 *	the trace record, rec.
1768 *
1769 * @init: An optional initialization function called for the trigger
1770 *	when the trigger is registered (via the event_command reg()
1771 *	function).  This can be used to perform per-trigger
1772 *	initialization such as incrementing a per-trigger reference
1773 *	count, for instance.  This is usually implemented by the
1774 *	generic utility function @event_trigger_init() (see
1775 *	trace_event_triggers.c).
1776 *
1777 * @free: An optional de-initialization function called for the
1778 *	trigger when the trigger is unregistered (via the
1779 *	event_command @reg() function).  This can be used to perform
1780 *	per-trigger de-initialization such as decrementing a
1781 *	per-trigger reference count and freeing corresponding trigger
1782 *	data, for instance.  This is usually implemented by the
1783 *	generic utility function @event_trigger_free() (see
1784 *	trace_event_triggers.c).
1785 *
1786 * @print: The callback function invoked to have the trigger print
1787 *	itself.  This is usually implemented by a wrapper function
1788 *	that calls the generic utility function @event_trigger_print()
1789 *	(see trace_event_triggers.c).
1790 */
1791struct event_trigger_ops {
1792	void			(*func)(struct event_trigger_data *data,
1793					void *rec,
1794					struct ring_buffer_event *rbe);
1795	int			(*init)(struct event_trigger_ops *ops,
1796					struct event_trigger_data *data);
1797	void			(*free)(struct event_trigger_ops *ops,
1798					struct event_trigger_data *data);
1799	int			(*print)(struct seq_file *m,
1800					 struct event_trigger_ops *ops,
1801					 struct event_trigger_data *data);
1802};
1803
1804/**
1805 * struct event_command - callbacks and data members for event commands
1806 *
1807 * Event commands are invoked by users by writing the command name
1808 * into the 'trigger' file associated with a trace event.  The
1809 * parameters associated with a specific invocation of an event
1810 * command are used to create an event trigger instance, which is
1811 * added to the list of trigger instances associated with that trace
1812 * event.  When the event is hit, the set of triggers associated with
1813 * that event is invoked.
1814 *
1815 * The data members in this structure provide per-event command data
1816 * for various event commands.
1817 *
1818 * All the data members below, except for @post_trigger, must be set
1819 * for each event command.
1820 *
1821 * @name: The unique name that identifies the event command.  This is
1822 *	the name used when setting triggers via trigger files.
1823 *
1824 * @trigger_type: A unique id that identifies the event command
1825 *	'type'.  This value has two purposes, the first to ensure that
1826 *	only one trigger of the same type can be set at a given time
1827 *	for a particular event e.g. it doesn't make sense to have both
1828 *	a traceon and traceoff trigger attached to a single event at
1829 *	the same time, so traceon and traceoff have the same type
1830 *	though they have different names.  The @trigger_type value is
1831 *	also used as a bit value for deferring the actual trigger
1832 *	action until after the current event is finished.  Some
1833 *	commands need to do this if they themselves log to the trace
1834 *	buffer (see the @post_trigger() member below).  @trigger_type
1835 *	values are defined by adding new values to the trigger_type
1836 *	enum in include/linux/trace_events.h.
1837 *
1838 * @flags: See the enum event_command_flags below.
1839 *
1840 * All the methods below, except for @set_filter() and @unreg_all(),
1841 * must be implemented.
1842 *
1843 * @func: The callback function responsible for parsing and
1844 *	registering the trigger written to the 'trigger' file by the
1845 *	user.  It allocates the trigger instance and registers it with
1846 *	the appropriate trace event.  It makes use of the other
1847 *	event_command callback functions to orchestrate this, and is
1848 *	usually implemented by the generic utility function
1849 *	@event_trigger_callback() (see trace_event_triggers.c).
1850 *
1851 * @reg: Adds the trigger to the list of triggers associated with the
1852 *	event, and enables the event trigger itself, after
1853 *	initializing it (via the event_trigger_ops @init() function).
1854 *	This is also where commands can use the @trigger_type value to
1855 *	make the decision as to whether or not multiple instances of
1856 *	the trigger should be allowed.  This is usually implemented by
1857 *	the generic utility function @register_trigger() (see
1858 *	trace_event_triggers.c).
1859 *
1860 * @unreg: Removes the trigger from the list of triggers associated
1861 *	with the event, and disables the event trigger itself, after
1862 *	initializing it (via the event_trigger_ops @free() function).
1863 *	This is usually implemented by the generic utility function
1864 *	@unregister_trigger() (see trace_event_triggers.c).
1865 *
1866 * @unreg_all: An optional function called to remove all the triggers
1867 *	from the list of triggers associated with the event.  Called
1868 *	when a trigger file is opened in truncate mode.
1869 *
1870 * @set_filter: An optional function called to parse and set a filter
1871 *	for the trigger.  If no @set_filter() method is set for the
1872 *	event command, filters set by the user for the command will be
1873 *	ignored.  This is usually implemented by the generic utility
1874 *	function @set_trigger_filter() (see trace_event_triggers.c).
1875 *
1876 * @get_trigger_ops: The callback function invoked to retrieve the
1877 *	event_trigger_ops implementation associated with the command.
1878 */
1879struct event_command {
1880	struct list_head	list;
1881	char			*name;
1882	enum event_trigger_type	trigger_type;
1883	int			flags;
1884	int			(*func)(struct event_command *cmd_ops,
1885					struct trace_event_file *file,
1886					char *glob, char *cmd, char *params);
1887	int			(*reg)(char *glob,
1888				       struct event_trigger_ops *ops,
1889				       struct event_trigger_data *data,
1890				       struct trace_event_file *file);
1891	void			(*unreg)(char *glob,
1892					 struct event_trigger_ops *ops,
1893					 struct event_trigger_data *data,
1894					 struct trace_event_file *file);
1895	void			(*unreg_all)(struct trace_event_file *file);
1896	int			(*set_filter)(char *filter_str,
1897					      struct event_trigger_data *data,
1898					      struct trace_event_file *file);
1899	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1900};
1901
1902/**
1903 * enum event_command_flags - flags for struct event_command
1904 *
1905 * @POST_TRIGGER: A flag that says whether or not this command needs
1906 *	to have its action delayed until after the current event has
1907 *	been closed.  Some triggers need to avoid being invoked while
1908 *	an event is currently in the process of being logged, since
1909 *	the trigger may itself log data into the trace buffer.  Thus
1910 *	we make sure the current event is committed before invoking
1911 *	those triggers.  To do that, the trigger invocation is split
1912 *	in two - the first part checks the filter using the current
1913 *	trace record; if a command has the @post_trigger flag set, it
1914 *	sets a bit for itself in the return value, otherwise it
1915 *	directly invokes the trigger.  Once all commands have been
1916 *	either invoked or set their return flag, the current record is
1917 *	either committed or discarded.  At that point, if any commands
1918 *	have deferred their triggers, those commands are finally
1919 *	invoked following the close of the current event.  In other
1920 *	words, if the event_trigger_ops @func() probe implementation
1921 *	itself logs to the trace buffer, this flag should be set,
1922 *	otherwise it can be left unspecified.
1923 *
1924 * @NEEDS_REC: A flag that says whether or not this command needs
1925 *	access to the trace record in order to perform its function,
1926 *	regardless of whether or not it has a filter associated with
1927 *	it (filters make a trigger require access to the trace record
1928 *	but are not always present).
1929 */
1930enum event_command_flags {
1931	EVENT_CMD_FL_POST_TRIGGER	= 1,
1932	EVENT_CMD_FL_NEEDS_REC		= 2,
1933};
1934
1935static inline bool event_command_post_trigger(struct event_command *cmd_ops)
1936{
1937	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
1938}
1939
1940static inline bool event_command_needs_rec(struct event_command *cmd_ops)
1941{
1942	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
1943}
1944
1945extern int trace_event_enable_disable(struct trace_event_file *file,
1946				      int enable, int soft_disable);
1947extern int tracing_alloc_snapshot(void);
1948extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
1949extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update);
1950
1951extern int tracing_snapshot_cond_disable(struct trace_array *tr);
1952extern void *tracing_cond_snapshot_data(struct trace_array *tr);
1953
1954extern const char *__start___trace_bprintk_fmt[];
1955extern const char *__stop___trace_bprintk_fmt[];
1956
1957extern const char *__start___tracepoint_str[];
1958extern const char *__stop___tracepoint_str[];
1959
1960void trace_printk_control(bool enabled);
 
1961void trace_printk_start_comm(void);
1962int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1963int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1964
1965/* Used from boot time tracer */
1966extern int trace_set_options(struct trace_array *tr, char *option);
1967extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
1968extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
1969					  unsigned long size, int cpu_id);
1970extern int tracing_set_cpumask(struct trace_array *tr,
1971				cpumask_var_t tracing_cpumask_new);
1972
1973
1974#define MAX_EVENT_NAME_LEN	64
1975
1976extern int trace_run_command(const char *buf, int (*createfn)(int, char**));
1977extern ssize_t trace_parse_run_command(struct file *file,
1978		const char __user *buffer, size_t count, loff_t *ppos,
1979		int (*createfn)(int, char**));
1980
1981extern unsigned int err_pos(char *cmd, const char *str);
1982extern void tracing_log_err(struct trace_array *tr,
1983			    const char *loc, const char *cmd,
1984			    const char **errs, u8 type, u8 pos);
1985
1986/*
1987 * Normal trace_printk() and friends allocates special buffers
1988 * to do the manipulation, as well as saves the print formats
1989 * into sections to display. But the trace infrastructure wants
1990 * to use these without the added overhead at the price of being
1991 * a bit slower (used mainly for warnings, where we don't care
1992 * about performance). The internal_trace_puts() is for such
1993 * a purpose.
1994 */
1995#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1996
1997#undef FTRACE_ENTRY
1998#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)	\
1999	extern struct trace_event_call					\
2000	__aligned(4) event_##call;
2001#undef FTRACE_ENTRY_DUP
2002#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
2003	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 
2004#undef FTRACE_ENTRY_PACKED
2005#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \
2006	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
 
2007
2008#include "trace_entries.h"
2009
2010#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
2011int perf_ftrace_event_register(struct trace_event_call *call,
2012			       enum trace_reg type, void *data);
2013#else
2014#define perf_ftrace_event_register NULL
2015#endif
2016
2017#ifdef CONFIG_FTRACE_SYSCALLS
2018void init_ftrace_syscalls(void);
2019const char *get_syscall_name(int syscall);
2020#else
2021static inline void init_ftrace_syscalls(void) { }
2022static inline const char *get_syscall_name(int syscall)
2023{
2024	return NULL;
2025}
2026#endif
2027
2028#ifdef CONFIG_EVENT_TRACING
2029void trace_event_init(void);
2030void trace_event_eval_update(struct trace_eval_map **map, int len);
2031/* Used from boot time tracer */
2032extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
2033extern int trigger_process_regex(struct trace_event_file *file, char *buff);
2034#else
2035static inline void __init trace_event_init(void) { }
2036static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
2037#endif
2038
2039#ifdef CONFIG_TRACER_SNAPSHOT
2040void tracing_snapshot_instance(struct trace_array *tr);
2041int tracing_alloc_snapshot_instance(struct trace_array *tr);
2042#else
2043static inline void tracing_snapshot_instance(struct trace_array *tr) { }
2044static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
2045{
2046	return 0;
2047}
2048#endif
2049
2050#ifdef CONFIG_PREEMPT_TRACER
2051void tracer_preempt_on(unsigned long a0, unsigned long a1);
2052void tracer_preempt_off(unsigned long a0, unsigned long a1);
2053#else
2054static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
2055static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
2056#endif
2057#ifdef CONFIG_IRQSOFF_TRACER
2058void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
2059void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
2060#else
2061static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
2062static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
2063#endif
2064
2065extern struct trace_iterator *tracepoint_print_iter;
2066
2067/*
2068 * Reset the state of the trace_iterator so that it can read consumed data.
2069 * Normally, the trace_iterator is used for reading the data when it is not
2070 * consumed, and must retain state.
2071 */
2072static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
2073{
2074	const size_t offset = offsetof(struct trace_iterator, seq);
2075
2076	/*
2077	 * Keep gcc from complaining about overwriting more than just one
2078	 * member in the structure.
2079	 */
2080	memset((char *)iter + offset, 0, sizeof(struct trace_iterator) - offset);
2081
2082	iter->pos = -1;
2083}
2084
2085#endif /* _LINUX_KERNEL_TRACE_H */