v3.15
   1
   2#ifndef _LINUX_KERNEL_TRACE_H
   3#define _LINUX_KERNEL_TRACE_H
   4
   5#include <linux/fs.h>
   6#include <linux/atomic.h>
   7#include <linux/sched.h>
   8#include <linux/clocksource.h>
   9#include <linux/ring_buffer.h>
  10#include <linux/mmiotrace.h>
  11#include <linux/tracepoint.h>
  12#include <linux/ftrace.h>
  13#include <linux/hw_breakpoint.h>
  14#include <linux/trace_seq.h>
  15#include <linux/ftrace_event.h>
  16#include <linux/compiler.h>
  17
  18#ifdef CONFIG_FTRACE_SYSCALLS
  19#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
  20#include <asm/syscall.h>	/* some archs define it here */
  21#endif
  22
  23enum trace_type {
  24	__TRACE_FIRST_TYPE = 0,
  25
  26	TRACE_FN,
  27	TRACE_CTX,
  28	TRACE_WAKE,
  29	TRACE_STACK,
  30	TRACE_PRINT,
  31	TRACE_BPRINT,
  32	TRACE_MMIO_RW,
  33	TRACE_MMIO_MAP,
  34	TRACE_BRANCH,
  35	TRACE_GRAPH_RET,
  36	TRACE_GRAPH_ENT,
  37	TRACE_USER_STACK,
  38	TRACE_BLK,
  39	TRACE_BPUTS,
  40
  41	__TRACE_LAST_TYPE,
  42};
  43
  44
  45#undef __field
  46#define __field(type, item)		type	item;
  47
  48#undef __field_struct
  49#define __field_struct(type, item)	__field(type, item)
  50
  51#undef __field_desc
  52#define __field_desc(type, container, item)
  53
  54#undef __array
  55#define __array(type, item, size)	type	item[size];
  56
  57#undef __array_desc
  58#define __array_desc(type, container, item, size)
  59
  60#undef __dynamic_array
  61#define __dynamic_array(type, item)	type	item[];
  62
  63#undef F_STRUCT
  64#define F_STRUCT(args...)		args
  65
  66#undef FTRACE_ENTRY
  67#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
  68	struct struct_name {						\
  69		struct trace_entry	ent;				\
  70		tstruct							\
  71	}
  72
  73#undef TP_ARGS
  74#define TP_ARGS(args...)	args
  75
  76#undef FTRACE_ENTRY_DUP
  77#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
  78
  79#undef FTRACE_ENTRY_REG
  80#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
  81			 filter, regfn) \
  82	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
  83		     filter)
  84
  85#include "trace_entries.h"
  86
  87/*
  88 * Syscalls are special and need special handling; this is why
  89 * they are not included in trace_entries.h.
  90 */
  91struct syscall_trace_enter {
  92	struct trace_entry	ent;
  93	int			nr;
  94	unsigned long		args[];
  95};
  96
  97struct syscall_trace_exit {
  98	struct trace_entry	ent;
  99	int			nr;
 100	long			ret;
 101};
 102
 103struct kprobe_trace_entry_head {
 104	struct trace_entry	ent;
 105	unsigned long		ip;
 106};
 107
 108struct kretprobe_trace_entry_head {
 109	struct trace_entry	ent;
 110	unsigned long		func;
 111	unsigned long		ret_ip;
 112};
 113
 114/*
 115 * trace_flag_type is an enumeration that holds different
 116 * states when a trace occurs. These are:
 117 *  IRQS_OFF		- interrupts were disabled
 118 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 119 *  NEED_RESCHED	- reschedule is requested
 120 *  HARDIRQ		- inside an interrupt handler
 121 *  SOFTIRQ		- inside a softirq handler
 122 */
 123enum trace_flag_type {
 124	TRACE_FLAG_IRQS_OFF		= 0x01,
 125	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
 126	TRACE_FLAG_NEED_RESCHED		= 0x04,
 127	TRACE_FLAG_HARDIRQ		= 0x08,
 128	TRACE_FLAG_SOFTIRQ		= 0x10,
 129	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
 130};
 131
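These flag bits end up in the flags field of each trace_entry. As a rough, simplified sketch of how they are typically packed (loosely following what tracing_generic_entry_update() in trace.c does; the helper name below is made up and details such as the CONFIG_TRACE_IRQFLAGS_SUPPORT fallback are omitted):

static inline unsigned char example_pack_trace_flags(unsigned long irq_flags, int pc)
{
	/* pc is the preempt count snapshot; irq_flags comes from local_save_flags() */
	return (irqs_disabled_flags(irq_flags) ? TRACE_FLAG_IRQS_OFF : 0) |
	       ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
	       ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
	       (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}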
 132#define TRACE_BUF_SIZE		1024
 133
 134struct trace_array;
 135
 136/*
 137 * The CPU trace array - it consists of thousands of trace entries
 138 * plus some other descriptor data (for example, which task started
 139 * the trace, etc.)
 140 */
 141struct trace_array_cpu {
 142	atomic_t		disabled;
 143	void			*buffer_page;	/* ring buffer spare */
 144
 145	unsigned long		entries;
 146	unsigned long		saved_latency;
 147	unsigned long		critical_start;
 148	unsigned long		critical_end;
 149	unsigned long		critical_sequence;
 150	unsigned long		nice;
 151	unsigned long		policy;
 152	unsigned long		rt_priority;
 153	unsigned long		skipped_entries;
 154	cycle_t			preempt_timestamp;
 155	pid_t			pid;
 156	kuid_t			uid;
 157	char			comm[TASK_COMM_LEN];
 158};
 159
 160struct tracer;
 161
 162struct trace_buffer {
 163	struct trace_array		*tr;
 164	struct ring_buffer		*buffer;
 165	struct trace_array_cpu __percpu	*data;
 166	cycle_t				time_start;
 167	int				cpu;
 168};
 169
 170/*
 171 * The trace array - an array of per-CPU trace arrays. This is the
 172 * highest level data structure that individual tracers deal with.
 173 * They have on/off state as well:
 174 */
 175struct trace_array {
 176	struct list_head	list;
 177	char			*name;
 178	struct trace_buffer	trace_buffer;
 179#ifdef CONFIG_TRACER_MAX_TRACE
 180	/*
 181	 * The max_buffer is used to snapshot the trace when a maximum
 182	 * latency is reached, or when the user initiates a snapshot.
 183	 * Some tracers will use this to store a maximum trace while
 184	 * it continues examining live traces.
 185	 *
 186	 * The buffers for the max_buffer are set up the same as those of the trace_buffer.
 187	 * When a snapshot is taken, the buffer of the max_buffer is swapped
 188	 * with the buffer of the trace_buffer and the buffers are reset for
 189	 * the trace_buffer so the tracing can continue.
 190	 */
 191	struct trace_buffer	max_buffer;
 192	bool			allocated_snapshot;
 193#endif
 194	int			buffer_disabled;
 195#ifdef CONFIG_FTRACE_SYSCALLS
 196	int			sys_refcount_enter;
 197	int			sys_refcount_exit;
 198	struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls];
 199	struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls];
 200#endif
 201	int			stop_count;
 202	int			clock_id;
 203	struct tracer		*current_trace;
 204	unsigned int		flags;
 205	raw_spinlock_t		start_lock;
 206	struct dentry		*dir;
 207	struct dentry		*options;
 208	struct dentry		*percpu_dir;
 209	struct dentry		*event_dir;
 210	struct list_head	systems;
 211	struct list_head	events;
 212	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
 213	int			ref;
 214#ifdef CONFIG_FUNCTION_TRACER
 215	struct ftrace_ops	*ops;
 216	/* function tracing enabled */
 217	int			function_enabled;
 218#endif
 219};
 220
 221enum {
 222	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
 223};
 224
 225extern struct list_head ftrace_trace_arrays;
 226
 227extern struct mutex trace_types_lock;
 228
 229extern int trace_array_get(struct trace_array *tr);
 230extern void trace_array_put(struct trace_array *tr);
 231
 232/*
 233 * The global tracer (top) should be the first trace array added,
 234 * but we check the flag anyway.
 235 */
 236static inline struct trace_array *top_trace_array(void)
 237{
 238	struct trace_array *tr;
 239
 240	tr = list_entry(ftrace_trace_arrays.prev,
 241			typeof(*tr), list);
 242	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
 243	return tr;
 244}
 245
 246#define FTRACE_CMP_TYPE(var, type) \
 247	__builtin_types_compatible_p(typeof(var), type *)
 248
 249#undef IF_ASSIGN
 250#define IF_ASSIGN(var, entry, etype, id)		\
 251	if (FTRACE_CMP_TYPE(var, etype)) {		\
 252		var = (typeof(var))(entry);		\
 253		WARN_ON(id && (entry)->type != id);	\
 254		break;					\
 255	}
 256
 257/* Will cause compile errors if type is not found. */
 258extern void __ftrace_bad_type(void);
 259
 260/*
 261 * The trace_assign_type is a verifier that the entry type is
 262 * the same as the type being assigned. To add new types simply
 263 * add a line with the following format:
 264 *
 265 * IF_ASSIGN(var, ent, type, id);
 266 *
 267 *  Where "type" is the trace type that includes the trace_entry
 268 *  as the "ent" item. And "id" is the trace identifier that is
 269 *  used in the trace_type enum.
 270 *
 271 *  If the type can have more than one id, then use zero.
 272 */
 273#define trace_assign_type(var, ent)					\
 274	do {								\
 275		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
 276		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
 277		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
 278		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
 279		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
 280		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
 281		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
 282		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
 283			  TRACE_MMIO_RW);				\
 284		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
 285			  TRACE_MMIO_MAP);				\
 286		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
 287		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
 288			  TRACE_GRAPH_ENT);		\
 289		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
 290			  TRACE_GRAPH_RET);		\
 291		__ftrace_bad_type();					\
 292	} while (0)
 293
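For illustration, here is a sketch of how an output handler typically uses trace_assign_type(); the function name is hypothetical, but the pattern mirrors the handlers in trace_output.c:

static enum print_line_t example_fn_output(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct ftrace_entry *field;

	/* assigns field, and WARNs if entry->type is not TRACE_FN */
	trace_assign_type(field, entry);

	if (!trace_seq_printf(&iter->seq, "%pS <- %pS\n",
			      (void *)field->ip, (void *)field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}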
 294/*
 295 * An option specific to a tracer. This is a boolean value.
 296 * The bit is the bit index that sets its value on the
 297 * flags value in struct tracer_flags.
 298 */
 299struct tracer_opt {
 300	const char	*name; /* Will appear on the trace_options file */
 301	u32		bit; /* Mask assigned in val field in tracer_flags */
 302};
 303
 304/*
 305 * The set of specific options for a tracer. Your tracer
 306 * has to set the initial value of the flags val.
 307 */
 308struct tracer_flags {
 309	u32			val;
 310	struct tracer_opt	*opts;
 311};
 312
 313/* Makes it easier to define a tracer opt */
 314#define TRACER_OPT(s, b)	.name = #s, .bit = b
 315
 316
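As a minimal sketch of how a tracer declares its private options (the option names here are hypothetical; the pattern follows existing users of TRACER_OPT such as the function_graph tracer):

#define EXAMPLE_OPT_FOO		0x1
#define EXAMPLE_OPT_BAR		0x2

static struct tracer_opt example_opts[] = {
	/* these show up as "foo" and "bar" in the trace_options file */
	{ TRACER_OPT(foo, EXAMPLE_OPT_FOO) },
	{ TRACER_OPT(bar, EXAMPLE_OPT_BAR) },
	{ }	/* terminating empty entry */
};

static struct tracer_flags example_flags = {
	.val  = EXAMPLE_OPT_FOO,	/* initial value of the option bits */
	.opts = example_opts,
};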
 317/**
 318 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 319 * @name: the name chosen to select it on the available_tracers file
 320 * @init: called when one switches to this tracer (echo name > current_tracer)
 321 * @reset: called when one switches to another tracer
 322 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 323 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 324 * @open: called when the trace file is opened
 325 * @pipe_open: called when the trace_pipe file is opened
 326 * @wait_pipe: override how the user waits for traces on trace_pipe
 327 * @close: called when the trace file is released
 328 * @pipe_close: called when the trace_pipe file is released
 329 * @read: override the default read callback on trace_pipe
 330 * @splice_read: override the default splice_read callback on trace_pipe
 331 * @selftest: selftest to run on boot (see trace_selftest.c)
 332 * @print_header: override the first lines that describe your columns
 333 * @print_line: callback that prints a trace
 334 * @set_flag: signals one of your private flags changed (trace_options file)
 335 * @flags: your private flags
 336 */
 337struct tracer {
 338	const char		*name;
 339	int			(*init)(struct trace_array *tr);
 340	void			(*reset)(struct trace_array *tr);
 341	void			(*start)(struct trace_array *tr);
 342	void			(*stop)(struct trace_array *tr);
 343	void			(*open)(struct trace_iterator *iter);
 344	void			(*pipe_open)(struct trace_iterator *iter);
 345	void			(*wait_pipe)(struct trace_iterator *iter);
 346	void			(*close)(struct trace_iterator *iter);
 347	void			(*pipe_close)(struct trace_iterator *iter);
 348	ssize_t			(*read)(struct trace_iterator *iter,
 349					struct file *filp, char __user *ubuf,
 350					size_t cnt, loff_t *ppos);
 351	ssize_t			(*splice_read)(struct trace_iterator *iter,
 352					       struct file *filp,
 353					       loff_t *ppos,
 354					       struct pipe_inode_info *pipe,
 355					       size_t len,
 356					       unsigned int flags);
 357#ifdef CONFIG_FTRACE_STARTUP_TEST
 358	int			(*selftest)(struct tracer *trace,
 359					    struct trace_array *tr);
 360#endif
 361	void			(*print_header)(struct seq_file *m);
 362	enum print_line_t	(*print_line)(struct trace_iterator *iter);
 363	/* If you handled the flag setting, return 0 */
 364	int			(*set_flag)(struct trace_array *tr,
 365					    u32 old_flags, u32 bit, int set);
 366	/* Return 0 if OK with change, else return non-zero */
 367	int			(*flag_changed)(struct trace_array *tr,
 368						u32 mask, int set);
 369	struct tracer		*next;
 370	struct tracer_flags	*flags;
 371	int			enabled;
 372	bool			print_max;
 373	bool			allow_instances;
 374#ifdef CONFIG_TRACER_MAX_TRACE
 375	bool			use_max_tr;
 376#endif
 377};
 378
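For reference, a minimal tracer built on the callbacks above might look like the sketch below; all names are hypothetical, the shape follows the nop tracer in trace_nop.c, and registration normally happens once from an __init function at boot:

static int example_tracer_init(struct trace_array *tr)
{
	/* allocate per-tracer state, attach callbacks, etc. */
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
	/* undo whatever init set up */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
	/* .flags, .set_flag, .print_line, ... are optional */
};

static __init int init_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(init_example_tracer);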
 379
 380/* Only current can touch trace_recursion */
 381
 382/*
 383 * For function tracing recursion:
 384 *  The order of these bits is important.
 385 *
 386 *  When function tracing occurs, the following steps are taken:
 387 *   If the arch does not support an ftrace feature:
 388 *    call an internal function (uses the INTERNAL bits) which calls...
 389 *   If the callback is registered to the "global" list, the list
 390 *    function is called and recursion checks the GLOBAL bits,
 391 *    then this function calls...
 392 *   The function callback, which can use the FTRACE bits to
 393 *    check for recursion.
 394 *
 395 * Now if the arch does not support a feature, and it calls
 396 * the global list function which calls the ftrace callback,
 397 * all three of these steps will do recursion protection.
 398 * There's no reason to do one if the previous caller already
 399 * did. The recursion that we are protecting against will
 400 * go through the same steps again.
 401 *
 402 * To prevent the multiple recursion checks, if a recursion
 403 * bit is set that is higher than the MAX bit of the current
 404 * check, then we know that the check was made by the previous
 405 * caller, and we can skip the current check.
 406 */
 407enum {
 408	TRACE_BUFFER_BIT,
 409	TRACE_BUFFER_NMI_BIT,
 410	TRACE_BUFFER_IRQ_BIT,
 411	TRACE_BUFFER_SIRQ_BIT,
 412
 413	/* Start of function recursion bits */
 414	TRACE_FTRACE_BIT,
 415	TRACE_FTRACE_NMI_BIT,
 416	TRACE_FTRACE_IRQ_BIT,
 417	TRACE_FTRACE_SIRQ_BIT,
 418
 419	/* GLOBAL_BITs must be greater than FTRACE_BITs */
 420	TRACE_GLOBAL_BIT,
 421	TRACE_GLOBAL_NMI_BIT,
 422	TRACE_GLOBAL_IRQ_BIT,
 423	TRACE_GLOBAL_SIRQ_BIT,
 424
 425	/* INTERNAL_BITs must be greater than GLOBAL_BITs */
 426	TRACE_INTERNAL_BIT,
 427	TRACE_INTERNAL_NMI_BIT,
 428	TRACE_INTERNAL_IRQ_BIT,
 429	TRACE_INTERNAL_SIRQ_BIT,
 430
 431	TRACE_CONTROL_BIT,
 432
 433/*
 434 * Abuse of the trace_recursion.
 435 * We need a way to maintain state if we are tracing the function
 436 * graph in irq, because we want to trace a particular function that
 437 * was called in irq context even though irq tracing is off. Since this
 438 * can only be modified by current, we can reuse trace_recursion.
 439 */
 440	TRACE_IRQ_BIT,
 441};
 442
 443#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
 444#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
 445#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))
 446
 447#define TRACE_CONTEXT_BITS	4
 448
 449#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
 450#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
 451
 452#define TRACE_GLOBAL_START	TRACE_GLOBAL_BIT
 453#define TRACE_GLOBAL_MAX	((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)
 454
 455#define TRACE_LIST_START	TRACE_INTERNAL_BIT
 456#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
 457
 458#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX
 459
 460static __always_inline int trace_get_context_bit(void)
 461{
 462	int bit;
 463
 464	if (in_interrupt()) {
 465		if (in_nmi())
 466			bit = 0;
 467
 468		else if (in_irq())
 469			bit = 1;
 470		else
 471			bit = 2;
 472	} else
 473		bit = 3;
 474
 475	return bit;
 476}
 477
 478static __always_inline int trace_test_and_set_recursion(int start, int max)
 479{
 480	unsigned int val = current->trace_recursion;
 481	int bit;
 482
 483	/* A previous recursion check was made */
 484	if ((val & TRACE_CONTEXT_MASK) > max)
 485		return 0;
 486
 487	bit = trace_get_context_bit() + start;
 488	if (unlikely(val & (1 << bit)))
 489		return -1;
 490
 491	val |= 1 << bit;
 492	current->trace_recursion = val;
 493	barrier();
 494
 495	return bit;
 496}
 497
 498static __always_inline void trace_clear_recursion(int bit)
 499{
 500	unsigned int val = current->trace_recursion;
 501
 502	if (!bit)
 503		return;
 504
 505	bit = 1 << bit;
 506	val &= ~bit;
 507
 508	barrier();
 509	current->trace_recursion = val;
 510}
 511
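To make the intended use of the helpers above concrete, here is a sketch of a function-trace callback guarded by them; the callback name and body are hypothetical, and the flow follows function_trace_call() in trace_functions.c:

static void example_func_call(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	int bit;

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		return;		/* recursion detected in this context: bail out */

	/* ... record the event; re-entry into this callback is now caught ... */

	trace_clear_recursion(bit);
}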
 512static inline struct ring_buffer_iter *
 513trace_buffer_iter(struct trace_iterator *iter, int cpu)
 514{
 515	if (iter->buffer_iter && iter->buffer_iter[cpu])
 516		return iter->buffer_iter[cpu];
 517	return NULL;
 518}
 519
 520int tracer_init(struct tracer *t, struct trace_array *tr);
 521int tracing_is_enabled(void);
 522void tracing_reset(struct trace_buffer *buf, int cpu);
 523void tracing_reset_online_cpus(struct trace_buffer *buf);
 524void tracing_reset_current(int cpu);
 525void tracing_reset_all_online_cpus(void);
 526int tracing_open_generic(struct inode *inode, struct file *filp);
 527bool tracing_is_disabled(void);
 528struct dentry *trace_create_file(const char *name,
 529				 umode_t mode,
 530				 struct dentry *parent,
 531				 void *data,
 532				 const struct file_operations *fops);
 533
 534struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
 535struct dentry *tracing_init_dentry(void);
 536
 537struct ring_buffer_event;
 538
 539struct ring_buffer_event *
 540trace_buffer_lock_reserve(struct ring_buffer *buffer,
 541			  int type,
 542			  unsigned long len,
 543			  unsigned long flags,
 544			  int pc);
 545
 546struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 547						struct trace_array_cpu *data);
 548
 549struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 550					  int *ent_cpu, u64 *ent_ts);
 551
 552void __buffer_unlock_commit(struct ring_buffer *buffer,
 553			    struct ring_buffer_event *event);
 554
 555int trace_empty(struct trace_iterator *iter);
 556
 557void *trace_find_next_entry_inc(struct trace_iterator *iter);
 558
 559void trace_init_global_iter(struct trace_iterator *iter);
 560
 561void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 562
 563void poll_wait_pipe(struct trace_iterator *iter);
 564
 565void tracing_sched_switch_trace(struct trace_array *tr,
 566				struct task_struct *prev,
 567				struct task_struct *next,
 568				unsigned long flags, int pc);
 569
 570void tracing_sched_wakeup_trace(struct trace_array *tr,
 571				struct task_struct *wakee,
 572				struct task_struct *cur,
 573				unsigned long flags, int pc);
 574void trace_function(struct trace_array *tr,
 575		    unsigned long ip,
 576		    unsigned long parent_ip,
 577		    unsigned long flags, int pc);
 578void trace_graph_function(struct trace_array *tr,
 579		    unsigned long ip,
 580		    unsigned long parent_ip,
 581		    unsigned long flags, int pc);
 582void trace_latency_header(struct seq_file *m);
 583void trace_default_header(struct seq_file *m);
 584void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
 585int trace_empty(struct trace_iterator *iter);
 586
 587void trace_graph_return(struct ftrace_graph_ret *trace);
 588int trace_graph_entry(struct ftrace_graph_ent *trace);
 589void set_graph_array(struct trace_array *tr);
 590
 591void tracing_start_cmdline_record(void);
 592void tracing_stop_cmdline_record(void);
 593void tracing_sched_switch_assign_trace(struct trace_array *tr);
 594void tracing_stop_sched_switch_record(void);
 595void tracing_start_sched_switch_record(void);
 596int register_tracer(struct tracer *type);
 597int is_tracing_stopped(void);
 598
 599loff_t tracing_lseek(struct file *file, loff_t offset, int whence);
 600
 601extern cpumask_var_t __read_mostly tracing_buffer_mask;
 602
 603#define for_each_tracing_cpu(cpu)	\
 604	for_each_cpu(cpu, tracing_buffer_mask)
 605
 606extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 607
 608extern unsigned long tracing_thresh;
 609
 610#ifdef CONFIG_TRACER_MAX_TRACE
 611extern unsigned long tracing_max_latency;
 612
 613void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 614void update_max_tr_single(struct trace_array *tr,
 615			  struct task_struct *tsk, int cpu);
 616#endif /* CONFIG_TRACER_MAX_TRACE */
 617
 618#ifdef CONFIG_STACKTRACE
 619void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
 620			int skip, int pc);
 621
 622void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
 623			     int skip, int pc, struct pt_regs *regs);
 624
 625void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
 626			    int pc);
 627
 628void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 629		   int pc);
 630#else
 631static inline void ftrace_trace_stack(struct ring_buffer *buffer,
 632				      unsigned long flags, int skip, int pc)
 633{
 634}
 635
 636static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
 637					   unsigned long flags, int skip,
 638					   int pc, struct pt_regs *regs)
 639{
 640}
 641
 642static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
 643					  unsigned long flags, int pc)
 644{
 645}
 646
 647static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
 648				 int skip, int pc)
 649{
 650}
 651#endif /* CONFIG_STACKTRACE */
 652
 653extern cycle_t ftrace_now(int cpu);
 654
 655extern void trace_find_cmdline(int pid, char comm[]);
 656
 657#ifdef CONFIG_DYNAMIC_FTRACE
 658extern unsigned long ftrace_update_tot_cnt;
 659#endif
 660#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
 661extern int DYN_FTRACE_TEST_NAME(void);
 662#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
 663extern int DYN_FTRACE_TEST_NAME2(void);
 664
 665extern bool ring_buffer_expanded;
 666extern bool tracing_selftest_disabled;
 667DECLARE_PER_CPU(int, ftrace_cpu_disabled);
 668
 669#ifdef CONFIG_FTRACE_STARTUP_TEST
 670extern int trace_selftest_startup_function(struct tracer *trace,
 671					   struct trace_array *tr);
 672extern int trace_selftest_startup_function_graph(struct tracer *trace,
 673						 struct trace_array *tr);
 674extern int trace_selftest_startup_irqsoff(struct tracer *trace,
 675					  struct trace_array *tr);
 676extern int trace_selftest_startup_preemptoff(struct tracer *trace,
 677					     struct trace_array *tr);
 678extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
 679						 struct trace_array *tr);
 680extern int trace_selftest_startup_wakeup(struct tracer *trace,
 681					 struct trace_array *tr);
 682extern int trace_selftest_startup_nop(struct tracer *trace,
 683					 struct trace_array *tr);
 684extern int trace_selftest_startup_sched_switch(struct tracer *trace,
 685					       struct trace_array *tr);
 686extern int trace_selftest_startup_branch(struct tracer *trace,
 687					 struct trace_array *tr);
 688/*
 689 * Tracer data references selftest functions that only occur
 690 * on boot up. These can be __init functions. Thus, when selftests
 691 * are enabled, the tracers need to reference __init functions.
 692 */
 693#define __tracer_data		__refdata
 694#else
 695/* Tracers are seldom changed. Optimize when selftests are disabled. */
 696#define __tracer_data		__read_mostly
 697#endif /* CONFIG_FTRACE_STARTUP_TEST */
 698
 699extern void *head_page(struct trace_array_cpu *data);
 700extern unsigned long long ns2usecs(cycle_t nsec);
 701extern int
 702trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
 703extern int
 704trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 705extern int
 706trace_array_vprintk(struct trace_array *tr,
 707		    unsigned long ip, const char *fmt, va_list args);
 708int trace_array_printk(struct trace_array *tr,
 709		       unsigned long ip, const char *fmt, ...);
 710int trace_array_printk_buf(struct ring_buffer *buffer,
 711			   unsigned long ip, const char *fmt, ...);
 712void trace_printk_seq(struct trace_seq *s);
 713enum print_line_t print_trace_line(struct trace_iterator *iter);
 714
 715extern unsigned long trace_flags;
 716
 717/* Standard output formatting function used for function return traces */
 718#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 719
 720/* Flag options */
 721#define TRACE_GRAPH_PRINT_OVERRUN       0x1
 722#define TRACE_GRAPH_PRINT_CPU           0x2
 723#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
 724#define TRACE_GRAPH_PRINT_PROC          0x8
 725#define TRACE_GRAPH_PRINT_DURATION      0x10
 726#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
 727#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
 728#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
 729
 730extern enum print_line_t
 731print_graph_function_flags(struct trace_iterator *iter, u32 flags);
 732extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
 733extern enum print_line_t
 734trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
 735extern void graph_trace_open(struct trace_iterator *iter);
 736extern void graph_trace_close(struct trace_iterator *iter);
 737extern int __trace_graph_entry(struct trace_array *tr,
 738			       struct ftrace_graph_ent *trace,
 739			       unsigned long flags, int pc);
 740extern void __trace_graph_return(struct trace_array *tr,
 741				 struct ftrace_graph_ret *trace,
 742				 unsigned long flags, int pc);
 743
 744
 745#ifdef CONFIG_DYNAMIC_FTRACE
 746/* TODO: make this variable */
 747#define FTRACE_GRAPH_MAX_FUNCS		32
 748extern int ftrace_graph_count;
 749extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
 750extern int ftrace_graph_notrace_count;
 751extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
 752
 753static inline int ftrace_graph_addr(unsigned long addr)
 754{
 755	int i;
 756
 757	if (!ftrace_graph_count)
 758		return 1;
 759
 760	for (i = 0; i < ftrace_graph_count; i++) {
 761		if (addr == ftrace_graph_funcs[i]) {
 762			/*
 763			 * If no irqs are to be traced, but a set_graph_function
 764			 * is set, and called by an interrupt handler, we still
 765			 * want to trace it.
 766			 */
 767			if (in_irq())
 768				trace_recursion_set(TRACE_IRQ_BIT);
 769			else
 770				trace_recursion_clear(TRACE_IRQ_BIT);
 771			return 1;
 772		}
 773	}
 774
 775	return 0;
 776}
 777
 778static inline int ftrace_graph_notrace_addr(unsigned long addr)
 779{
 780	int i;
 781
 782	if (!ftrace_graph_notrace_count)
 783		return 0;
 784
 785	for (i = 0; i < ftrace_graph_notrace_count; i++) {
 786		if (addr == ftrace_graph_notrace_funcs[i])
 787			return 1;
 788	}
 789
 790	return 0;
 791}
 792#else
 793static inline int ftrace_graph_addr(unsigned long addr)
 794{
 795	return 1;
 796}
 797
 798static inline int ftrace_graph_notrace_addr(unsigned long addr)
 799{
 800	return 0;
 801}
 802#endif /* CONFIG_DYNAMIC_FTRACE */
 803#else /* CONFIG_FUNCTION_GRAPH_TRACER */
 804static inline enum print_line_t
 805print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 806{
 807	return TRACE_TYPE_UNHANDLED;
 808}
 809#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 810
 811extern struct list_head ftrace_pids;
 812
 813#ifdef CONFIG_FUNCTION_TRACER
 814extern bool ftrace_filter_param __initdata;
 815static inline int ftrace_trace_task(struct task_struct *task)
 816{
 817	if (list_empty(&ftrace_pids))
 818		return 1;
 819
 820	return test_tsk_trace_trace(task);
 821}
 822extern int ftrace_is_dead(void);
 823int ftrace_create_function_files(struct trace_array *tr,
 824				 struct dentry *parent);
 825void ftrace_destroy_function_files(struct trace_array *tr);
 826#else
 827static inline int ftrace_trace_task(struct task_struct *task)
 828{
 829	return 1;
 830}
 831static inline int ftrace_is_dead(void) { return 0; }
 832static inline int
 833ftrace_create_function_files(struct trace_array *tr,
 834			     struct dentry *parent)
 835{
 836	return 0;
 837}
 838static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
 839#endif /* CONFIG_FUNCTION_TRACER */
 840
 841#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
 842void ftrace_create_filter_files(struct ftrace_ops *ops,
 843				struct dentry *parent);
 844void ftrace_destroy_filter_files(struct ftrace_ops *ops);
 845#else
 846/*
 847 * The ops parameter passed in is usually undefined.
 848 * This must be a macro.
 849 */
 850#define ftrace_create_filter_files(ops, parent) do { } while (0)
 851#define ftrace_destroy_filter_files(ops) do { } while (0)
 852#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */
 853
 854int ftrace_event_is_function(struct ftrace_event_call *call);
 855
 856/*
 857 * struct trace_parser - serves for reading the user input separated by spaces
 858 * @cont: set if the input is not complete - no final space char was found
 859 * @buffer: holds the parsed user input
 860 * @idx: user input length
 861 * @size: buffer size
 862 */
 863struct trace_parser {
 864	bool		cont;
 865	char		*buffer;
 866	unsigned	idx;
 867	unsigned	size;
 868};
 869
 870static inline bool trace_parser_loaded(struct trace_parser *parser)
 871{
 872	return (parser->idx != 0);
 873}
 874
 875static inline bool trace_parser_cont(struct trace_parser *parser)
 876{
 877	return parser->cont;
 878}
 879
 880static inline void trace_parser_clear(struct trace_parser *parser)
 881{
 882	parser->cont = false;
 883	parser->idx = 0;
 884}
 885
 886extern int trace_parser_get_init(struct trace_parser *parser, int size);
 887extern void trace_parser_put(struct trace_parser *parser);
 888extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 889	size_t cnt, loff_t *ppos);
 890
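As a sketch of the usual calling pattern (the write handler and example_handle_token() are hypothetical, and the 128-byte size is arbitrary; the flow mirrors handlers such as ftrace_regex_write() in ftrace.c):

static void example_handle_token(const char *buf, unsigned int len);	/* hypothetical consumer */

static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, 128))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	/* act only once a complete, whitespace-terminated token is buffered */
	if (read >= 0 && trace_parser_loaded(&parser) &&
	    !trace_parser_cont(&parser)) {
		example_handle_token(parser.buffer, parser.idx);
		trace_parser_clear(&parser);
	}

	trace_parser_put(&parser);
	return read;
}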
 891/*
 892 * trace_iterator_flags is an enumeration that defines bit
 893 * positions into trace_flags that control the output.
 894 *
 895 * NOTE: These bits must match the trace_options array in
 896 *       trace.c.
 897 */
 898enum trace_iterator_flags {
 899	TRACE_ITER_PRINT_PARENT		= 0x01,
 900	TRACE_ITER_SYM_OFFSET		= 0x02,
 901	TRACE_ITER_SYM_ADDR		= 0x04,
 902	TRACE_ITER_VERBOSE		= 0x08,
 903	TRACE_ITER_RAW			= 0x10,
 904	TRACE_ITER_HEX			= 0x20,
 905	TRACE_ITER_BIN			= 0x40,
 906	TRACE_ITER_BLOCK		= 0x80,
 907	TRACE_ITER_STACKTRACE		= 0x100,
 908	TRACE_ITER_PRINTK		= 0x200,
 909	TRACE_ITER_PREEMPTONLY		= 0x400,
 910	TRACE_ITER_BRANCH		= 0x800,
 911	TRACE_ITER_ANNOTATE		= 0x1000,
 912	TRACE_ITER_USERSTACKTRACE       = 0x2000,
 913	TRACE_ITER_SYM_USEROBJ          = 0x4000,
 914	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
 915	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
 916	TRACE_ITER_LATENCY_FMT		= 0x20000,
 917	TRACE_ITER_SLEEP_TIME		= 0x40000,
 918	TRACE_ITER_GRAPH_TIME		= 0x80000,
 919	TRACE_ITER_RECORD_CMD		= 0x100000,
 920	TRACE_ITER_OVERWRITE		= 0x200000,
 921	TRACE_ITER_STOP_ON_FREE		= 0x400000,
 922	TRACE_ITER_IRQ_INFO		= 0x800000,
 923	TRACE_ITER_MARKERS		= 0x1000000,
 924	TRACE_ITER_FUNCTION		= 0x2000000,
 925};
 926
 927/*
 928 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 929 * control the output of kernel symbols.
 930 */
 931#define TRACE_ITER_SYM_MASK \
 932	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
 933
 934extern struct tracer nop_trace;
 935
 936#ifdef CONFIG_BRANCH_TRACER
 937extern int enable_branch_tracing(struct trace_array *tr);
 938extern void disable_branch_tracing(void);
 939static inline int trace_branch_enable(struct trace_array *tr)
 940{
 941	if (trace_flags & TRACE_ITER_BRANCH)
 942		return enable_branch_tracing(tr);
 943	return 0;
 944}
 945static inline void trace_branch_disable(void)
 946{
 947	/* due to races, always disable */
 948	disable_branch_tracing();
 949}
 950#else
 951static inline int trace_branch_enable(struct trace_array *tr)
 952{
 953	return 0;
 954}
 955static inline void trace_branch_disable(void)
 956{
 957}
 958#endif /* CONFIG_BRANCH_TRACER */
 959
 960/* set ring buffers to default size if not already done */
 961int tracing_update_buffers(void);
 962
 963struct ftrace_event_field {
 964	struct list_head	link;
 965	const char		*name;
 966	const char		*type;
 967	int			filter_type;
 968	int			offset;
 969	int			size;
 970	int			is_signed;
 971};
 972
 973struct event_filter {
 974	int			n_preds;	/* Number assigned */
 975	int			a_preds;	/* allocated */
 976	struct filter_pred	*preds;
 977	struct filter_pred	*root;
 978	char			*filter_string;
 979};
 980
 981struct event_subsystem {
 982	struct list_head	list;
 983	const char		*name;
 984	struct event_filter	*filter;
 985	int			ref_count;
 986};
 987
 988struct ftrace_subsystem_dir {
 989	struct list_head		list;
 990	struct event_subsystem		*subsystem;
 991	struct trace_array		*tr;
 992	struct dentry			*entry;
 993	int				ref_count;
 994	int				nr_events;
 995};
 996
 997#define FILTER_PRED_INVALID	((unsigned short)-1)
 998#define FILTER_PRED_IS_RIGHT	(1 << 15)
 999#define FILTER_PRED_FOLD	(1 << 15)
1000
1001/*
1002 * The max preds is the size of unsigned short with
1003 * two flags at the MSBs. One bit is used for both the IS_RIGHT
1004 * and FOLD flags. The other is reserved.
1005 *
1006 * 2^14 preds is way more than enough.
1007 */
1008#define MAX_FILTER_PRED		16384
1009
1010struct filter_pred;
1011struct regex;
1012
1013typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
1014
1015typedef int (*regex_match_func)(char *str, struct regex *r, int len);
1016
1017enum regex_type {
1018	MATCH_FULL = 0,
1019	MATCH_FRONT_ONLY,
1020	MATCH_MIDDLE_ONLY,
1021	MATCH_END_ONLY,
1022};
1023
1024struct regex {
1025	char			pattern[MAX_FILTER_STR_VAL];
1026	int			len;
1027	int			field_len;
1028	regex_match_func	match;
1029};
1030
1031struct filter_pred {
1032	filter_pred_fn_t 	fn;
1033	u64 			val;
1034	struct regex		regex;
1035	unsigned short		*ops;
1036	struct ftrace_event_field *field;
1037	int 			offset;
1038	int 			not;
1039	int 			op;
1040	unsigned short		index;
1041	unsigned short		parent;
1042	unsigned short		left;
1043	unsigned short		right;
1044};
1045
1046extern enum regex_type
1047filter_parse_regex(char *buff, int len, char **search, int *not);
1048extern void print_event_filter(struct ftrace_event_file *file,
1049			       struct trace_seq *s);
1050extern int apply_event_filter(struct ftrace_event_file *file,
1051			      char *filter_string);
1052extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
1053					char *filter_string);
1054extern void print_subsystem_event_filter(struct event_subsystem *system,
1055					 struct trace_seq *s);
1056extern int filter_assign_type(const char *type);
1057extern int create_event_filter(struct ftrace_event_call *call,
1058			       char *filter_str, bool set_str,
1059			       struct event_filter **filterp);
1060extern void free_event_filter(struct event_filter *filter);
1061
1062struct ftrace_event_field *
1063trace_find_event_field(struct ftrace_event_call *call, char *name);
1064
1065extern void trace_event_enable_cmd_record(bool enable);
1066extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
1067extern int event_trace_del_tracer(struct trace_array *tr);
1068
1069extern struct ftrace_event_file *find_event_file(struct trace_array *tr,
1070						 const char *system,
1071						 const char *event);
1072
1073static inline void *event_file_data(struct file *filp)
1074{
1075	return ACCESS_ONCE(file_inode(filp)->i_private);
1076}
1077
1078extern struct mutex event_mutex;
1079extern struct list_head ftrace_events;
1080
1081extern const struct file_operations event_trigger_fops;
1082
1083extern int register_trigger_cmds(void);
1084extern void clear_event_triggers(struct trace_array *tr);
1085
1086struct event_trigger_data {
1087	unsigned long			count;
1088	int				ref;
1089	struct event_trigger_ops	*ops;
1090	struct event_command		*cmd_ops;
1091	struct event_filter __rcu	*filter;
1092	char				*filter_str;
1093	void				*private_data;
1094	struct list_head		list;
1095};
1096
1097/**
1098 * struct event_trigger_ops - callbacks for trace event triggers
1099 *
1100 * The methods in this structure provide per-event trigger hooks for
1101 * various trigger operations.
1102 *
1103 * All the methods below, except for @init() and @free(), must be
1104 * implemented.
1105 *
1106 * @func: The trigger 'probe' function called when the triggering
1107 *	event occurs.  The data passed into this callback is the data
1108 *	that was supplied to the event_command @reg() function that
1109 *	registered the trigger (see struct event_command).
1110 *
1111 * @init: An optional initialization function called for the trigger
1112 *	when the trigger is registered (via the event_command reg()
1113 *	function).  This can be used to perform per-trigger
1114 *	initialization such as incrementing a per-trigger reference
1115 *	count, for instance.  This is usually implemented by the
1116 *	generic utility function @event_trigger_init() (see
1117 *	trace_event_triggers.c).
1118 *
1119 * @free: An optional de-initialization function called for the
1120 *	trigger when the trigger is unregistered (via the
1121 *	event_command @reg() function).  This can be used to perform
1122 *	per-trigger de-initialization such as decrementing a
1123 *	per-trigger reference count and freeing corresponding trigger
1124 *	data, for instance.  This is usually implemented by the
1125 *	generic utility function @event_trigger_free() (see
1126 *	trace_event_triggers.c).
1127 *
1128 * @print: The callback function invoked to have the trigger print
1129 *	itself.  This is usually implemented by a wrapper function
1130 *	that calls the generic utility function @event_trigger_print()
1131 *	(see trace_event_triggers.c).
1132 */
1133struct event_trigger_ops {
1134	void			(*func)(struct event_trigger_data *data);
1135	int			(*init)(struct event_trigger_ops *ops,
1136					struct event_trigger_data *data);
1137	void			(*free)(struct event_trigger_ops *ops,
1138					struct event_trigger_data *data);
1139	int			(*print)(struct seq_file *m,
1140					 struct event_trigger_ops *ops,
1141					 struct event_trigger_data *data);
1142};
1143
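A minimal event_trigger_ops instance might look like the sketch below; all names and bodies here are hypothetical, and real commands usually point @init/@free/@print at the generic helpers referenced in the comment above rather than providing their own:

static void example_trigger(struct event_trigger_data *data)
{
	/* runs each time the associated trace event fires and its filter matches */
}

static int example_trigger_print(struct seq_file *m,
				 struct event_trigger_ops *ops,
				 struct event_trigger_data *data)
{
	return seq_printf(m, "example:count=%lu\n", data->count);
}

static struct event_trigger_ops example_trigger_ops = {
	.func	= example_trigger,
	.print	= example_trigger_print,
	/* .init and .free are optional, see above */
};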
1144/**
1145 * struct event_command - callbacks and data members for event commands
1146 *
1147 * Event commands are invoked by users by writing the command name
1148 * into the 'trigger' file associated with a trace event.  The
1149 * parameters associated with a specific invocation of an event
1150 * command are used to create an event trigger instance, which is
1151 * added to the list of trigger instances associated with that trace
1152 * event.  When the event is hit, the set of triggers associated with
1153 * that event is invoked.
1154 *
1155 * The data members in this structure provide per-event command data
1156 * for various event commands.
1157 *
1158 * All the data members below, except for @post_trigger, must be set
1159 * for each event command.
1160 *
1161 * @name: The unique name that identifies the event command.  This is
1162 *	the name used when setting triggers via trigger files.
1163 *
1164 * @trigger_type: A unique id that identifies the event command
1165 *	'type'.  This value has two purposes, the first to ensure that
1166 *	only one trigger of the same type can be set at a given time
1167 *	for a particular event e.g. it doesn't make sense to have both
1168 *	a traceon and traceoff trigger attached to a single event at
1169 *	the same time, so traceon and traceoff have the same type
1170 *	though they have different names.  The @trigger_type value is
1171 *	also used as a bit value for deferring the actual trigger
1172 *	action until after the current event is finished.  Some
1173 *	commands need to do this if they themselves log to the trace
1174 *	buffer (see the @post_trigger() member below).  @trigger_type
1175 *	values are defined by adding new values to the trigger_type
1176 *	enum in include/linux/ftrace_event.h.
1177 *
1178 * @post_trigger: A flag that says whether or not this command needs
1179 *	to have its action delayed until after the current event has
1180 *	been closed.  Some triggers need to avoid being invoked while
1181 *	an event is currently in the process of being logged, since
1182 *	the trigger may itself log data into the trace buffer.  Thus
1183 *	we make sure the current event is committed before invoking
1184 *	those triggers.  To do that, the trigger invocation is split
1185 *	in two - the first part checks the filter using the current
1186 *	trace record; if a command has the @post_trigger flag set, it
1187 *	sets a bit for itself in the return value, otherwise it
1188 *	directly invokes the trigger.  Once all commands have been
1189 *	either invoked or set their return flag, the current record is
1190 *	either committed or discarded.  At that point, if any commands
1191 *	have deferred their triggers, those commands are finally
1192 *	invoked following the close of the current event.  In other
1193 *	words, if the event_trigger_ops @func() probe implementation
1194 *	itself logs to the trace buffer, this flag should be set,
1195 *	otherwise it can be left unspecified.
1196 *
1197 * All the methods below, except for @set_filter(), must be
1198 * implemented.
1199 *
1200 * @func: The callback function responsible for parsing and
1201 *	registering the trigger written to the 'trigger' file by the
1202 *	user.  It allocates the trigger instance and registers it with
1203 *	the appropriate trace event.  It makes use of the other
1204 *	event_command callback functions to orchestrate this, and is
1205 *	usually implemented by the generic utility function
1206 *	@event_trigger_callback() (see trace_event_triggers.c).
1207 *
1208 * @reg: Adds the trigger to the list of triggers associated with the
1209 *	event, and enables the event trigger itself, after
1210 *	initializing it (via the event_trigger_ops @init() function).
1211 *	This is also where commands can use the @trigger_type value to
1212 *	make the decision as to whether or not multiple instances of
1213 *	the trigger should be allowed.  This is usually implemented by
1214 *	the generic utility function @register_trigger() (see
1215 *	trace_event_triggers.c).
1216 *
1217 * @unreg: Removes the trigger from the list of triggers associated
1218 *	with the event, and disables the event trigger itself, after
1219 *	initializing it (via the event_trigger_ops @free() function).
1220 *	This is usually implemented by the generic utility function
1221 *	@unregister_trigger() (see trace_event_triggers.c).
1222 *
1223 * @set_filter: An optional function called to parse and set a filter
1224 *	for the trigger.  If no @set_filter() method is set for the
1225 *	event command, filters set by the user for the command will be
1226 *	ignored.  This is usually implemented by the generic utility
1227 *	function @set_trigger_filter() (see trace_event_triggers.c).
1228 *
1229 * @get_trigger_ops: The callback function invoked to retrieve the
1230 *	event_trigger_ops implementation associated with the command.
1231 */
1232struct event_command {
1233	struct list_head	list;
1234	char			*name;
1235	enum event_trigger_type	trigger_type;
1236	bool			post_trigger;
1237	int			(*func)(struct event_command *cmd_ops,
1238					struct ftrace_event_file *file,
1239					char *glob, char *cmd, char *params);
1240	int			(*reg)(char *glob,
1241				       struct event_trigger_ops *ops,
1242				       struct event_trigger_data *data,
1243				       struct ftrace_event_file *file);
1244	void			(*unreg)(char *glob,
1245					 struct event_trigger_ops *ops,
1246					 struct event_trigger_data *data,
1247					 struct ftrace_event_file *file);
1248	int			(*set_filter)(char *filter_str,
1249					      struct event_trigger_data *data,
1250					      struct ftrace_event_file *file);
1251	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
1252};
1253
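Tying the pieces together, the sketch below shows roughly how a command would be declared next to the existing trigger commands; the command name and ETT_EXAMPLE are hypothetical (a real trigger_type bit would be added to enum event_trigger_type in include/linux/ftrace_event.h), and the parse/register callbacks shown are the generic helpers named in the comment above:

static struct event_trigger_ops example_trigger_ops;	/* as in the earlier sketch */

static struct event_trigger_ops *
example_get_trigger_ops(char *cmd, char *param)
{
	return &example_trigger_ops;
}

static struct event_command trigger_example_cmd = {
	.name			= "example",
	.trigger_type		= ETT_EXAMPLE,		/* hypothetical trigger_type bit */
	.post_trigger		= false,		/* this trigger does not write to the trace buffer */
	.func			= event_trigger_callback,	/* generic parse/register helper */
	.reg			= register_trigger,
	.unreg			= unregister_trigger,
	.set_filter		= set_trigger_filter,		/* optional "... if <filter>" support */
	.get_trigger_ops	= example_get_trigger_ops,
};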
1254extern int trace_event_enable_disable(struct ftrace_event_file *file,
1255				      int enable, int soft_disable);
1256extern int tracing_alloc_snapshot(void);
1257
1258extern const char *__start___trace_bprintk_fmt[];
1259extern const char *__stop___trace_bprintk_fmt[];
1260
1261extern const char *__start___tracepoint_str[];
1262extern const char *__stop___tracepoint_str[];
1263
1264void trace_printk_init_buffers(void);
1265void trace_printk_start_comm(void);
1266int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
1267int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
1268
1269/*
1270 * Normal trace_printk() and friends allocate special buffers
1271 * to do the manipulation, as well as save the print formats
1272 * into sections to display. But the trace infrastructure wants
1273 * to use these without the added overhead at the price of being
1274 * a bit slower (used mainly for warnings, where we don't care
1275 * about performance). The internal_trace_puts() is for such
1276 * a purpose.
1277 */
1278#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
1279
1280#undef FTRACE_ENTRY
1281#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
1282	extern struct ftrace_event_call					\
1283	__aligned(4) event_##call;
1284#undef FTRACE_ENTRY_DUP
1285#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
1286	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
1287		     filter)
1288#include "trace_entries.h"
1289
1290#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
1291int perf_ftrace_event_register(struct ftrace_event_call *call,
1292			       enum trace_reg type, void *data);
1293#else
1294#define perf_ftrace_event_register NULL
1295#endif
1296
1297#endif /* _LINUX_KERNEL_TRACE_H */
v3.1
 
  1#ifndef _LINUX_KERNEL_TRACE_H
  2#define _LINUX_KERNEL_TRACE_H
  3
  4#include <linux/fs.h>
  5#include <linux/atomic.h>
  6#include <linux/sched.h>
  7#include <linux/clocksource.h>
  8#include <linux/ring_buffer.h>
  9#include <linux/mmiotrace.h>
 10#include <linux/tracepoint.h>
 11#include <linux/ftrace.h>
 12#include <linux/hw_breakpoint.h>
 13#include <linux/trace_seq.h>
 14#include <linux/ftrace_event.h>
 
 
 
 
 
 
 15
 16enum trace_type {
 17	__TRACE_FIRST_TYPE = 0,
 18
 19	TRACE_FN,
 20	TRACE_CTX,
 21	TRACE_WAKE,
 22	TRACE_STACK,
 23	TRACE_PRINT,
 24	TRACE_BPRINT,
 25	TRACE_MMIO_RW,
 26	TRACE_MMIO_MAP,
 27	TRACE_BRANCH,
 28	TRACE_GRAPH_RET,
 29	TRACE_GRAPH_ENT,
 30	TRACE_USER_STACK,
 31	TRACE_BLK,
 
 32
 33	__TRACE_LAST_TYPE,
 34};
 35
 36
 37#undef __field
 38#define __field(type, item)		type	item;
 39
 40#undef __field_struct
 41#define __field_struct(type, item)	__field(type, item)
 42
 43#undef __field_desc
 44#define __field_desc(type, container, item)
 45
 46#undef __array
 47#define __array(type, item, size)	type	item[size];
 48
 49#undef __array_desc
 50#define __array_desc(type, container, item, size)
 51
 52#undef __dynamic_array
 53#define __dynamic_array(type, item)	type	item[];
 54
 55#undef F_STRUCT
 56#define F_STRUCT(args...)		args
 57
 58#undef FTRACE_ENTRY
 59#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)	\
 60	struct struct_name {					\
 61		struct trace_entry	ent;			\
 62		tstruct						\
 63	}
 64
 65#undef TP_ARGS
 66#define TP_ARGS(args...)	args
 67
 68#undef FTRACE_ENTRY_DUP
 69#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
 
 
 
 
 
 
 70
 71#include "trace_entries.h"
 72
 73/*
 74 * syscalls are special, and need special handling, this is why
 75 * they are not included in trace_entries.h
 76 */
 77struct syscall_trace_enter {
 78	struct trace_entry	ent;
 79	int			nr;
 80	unsigned long		args[];
 81};
 82
 83struct syscall_trace_exit {
 84	struct trace_entry	ent;
 85	int			nr;
 86	long			ret;
 87};
 88
 89struct kprobe_trace_entry_head {
 90	struct trace_entry	ent;
 91	unsigned long		ip;
 92};
 93
 94struct kretprobe_trace_entry_head {
 95	struct trace_entry	ent;
 96	unsigned long		func;
 97	unsigned long		ret_ip;
 98};
 99
100/*
101 * trace_flag_type is an enumeration that holds different
102 * states when a trace occurs. These are:
103 *  IRQS_OFF		- interrupts were disabled
104 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
105 *  NEED_RESCHED	- reschedule is requested
106 *  HARDIRQ		- inside an interrupt handler
107 *  SOFTIRQ		- inside a softirq handler
108 */
109enum trace_flag_type {
110	TRACE_FLAG_IRQS_OFF		= 0x01,
111	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
112	TRACE_FLAG_NEED_RESCHED		= 0x04,
113	TRACE_FLAG_HARDIRQ		= 0x08,
114	TRACE_FLAG_SOFTIRQ		= 0x10,
 
115};
116
117#define TRACE_BUF_SIZE		1024
118
 
 
119/*
120 * The CPU trace array - it consists of thousands of trace entries
121 * plus some other descriptor data: (for example which task started
122 * the trace, etc.)
123 */
124struct trace_array_cpu {
125	atomic_t		disabled;
126	void			*buffer_page;	/* ring buffer spare */
127
 
128	unsigned long		saved_latency;
129	unsigned long		critical_start;
130	unsigned long		critical_end;
131	unsigned long		critical_sequence;
132	unsigned long		nice;
133	unsigned long		policy;
134	unsigned long		rt_priority;
135	unsigned long		skipped_entries;
136	cycle_t			preempt_timestamp;
137	pid_t			pid;
138	uid_t			uid;
139	char			comm[TASK_COMM_LEN];
140};
141
 
 
 
 
 
 
 
 
 
 
142/*
143 * The trace array - an array of per-CPU trace arrays. This is the
144 * highest level data structure that individual tracers deal with.
145 * They have on/off state as well:
146 */
147struct trace_array {
148	struct ring_buffer	*buffer;
149	unsigned long		entries;
150	int			cpu;
151	cycle_t			time_start;
152	struct task_struct	*waiter;
153	struct trace_array_cpu	*data[NR_CPUS];
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154};
155
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156#define FTRACE_CMP_TYPE(var, type) \
157	__builtin_types_compatible_p(typeof(var), type *)
158
159#undef IF_ASSIGN
160#define IF_ASSIGN(var, entry, etype, id)		\
161	if (FTRACE_CMP_TYPE(var, etype)) {		\
162		var = (typeof(var))(entry);		\
163		WARN_ON(id && (entry)->type != id);	\
164		break;					\
165	}
166
167/* Will cause compile errors if type is not found. */
168extern void __ftrace_bad_type(void);
169
170/*
171 * The trace_assign_type is a verifier that the entry type is
172 * the same as the type being assigned. To add new types simply
173 * add a line with the following format:
174 *
175 * IF_ASSIGN(var, ent, type, id);
176 *
177 *  Where "type" is the trace type that includes the trace_entry
178 *  as the "ent" item. And "id" is the trace identifier that is
179 *  used in the trace_type enum.
180 *
181 *  If the type can have more than one id, then use zero.
182 */
183#define trace_assign_type(var, ent)					\
184	do {								\
185		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
186		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
187		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
188		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
189		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
190		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
 
191		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
192			  TRACE_MMIO_RW);				\
193		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
194			  TRACE_MMIO_MAP);				\
195		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
196		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
197			  TRACE_GRAPH_ENT);		\
198		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
199			  TRACE_GRAPH_RET);		\
200		__ftrace_bad_type();					\
201	} while (0)
202
203/*
204 * An option specific to a tracer. This is a boolean value.
205 * The bit is the bit index that sets its value on the
206 * flags value in struct tracer_flags.
207 */
208struct tracer_opt {
209	const char	*name; /* Will appear on the trace_options file */
210	u32		bit; /* Mask assigned in val field in tracer_flags */
211};
212
213/*
214 * The set of specific options for a tracer. Your tracer
215 * have to set the initial value of the flags val.
216 */
217struct tracer_flags {
218	u32			val;
219	struct tracer_opt	*opts;
220};
221
222/* Makes more easy to define a tracer opt */
223#define TRACER_OPT(s, b)	.name = #s, .bit = b
224
225
226/**
227 * struct tracer - a specific tracer and its callbacks to interact with debugfs
228 * @name: the name chosen to select it on the available_tracers file
229 * @init: called when one switches to this tracer (echo name > current_tracer)
230 * @reset: called when one switches to another tracer
231 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
232 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
233 * @open: called when the trace file is opened
234 * @pipe_open: called when the trace_pipe file is opened
235 * @wait_pipe: override how the user waits for traces on trace_pipe
236 * @close: called when the trace file is released
237 * @pipe_close: called when the trace_pipe file is released
238 * @read: override the default read callback on trace_pipe
239 * @splice_read: override the default splice_read callback on trace_pipe
240 * @selftest: selftest to run on boot (see trace_selftest.c)
241 * @print_headers: override the first lines that describe your columns
242 * @print_line: callback that prints a trace
243 * @set_flag: signals one of your private flags changed (trace_options file)
244 * @flags: your private flags
245 */
246struct tracer {
247	const char		*name;
248	int			(*init)(struct trace_array *tr);
249	void			(*reset)(struct trace_array *tr);
250	void			(*start)(struct trace_array *tr);
251	void			(*stop)(struct trace_array *tr);
252	void			(*open)(struct trace_iterator *iter);
253	void			(*pipe_open)(struct trace_iterator *iter);
254	void			(*wait_pipe)(struct trace_iterator *iter);
255	void			(*close)(struct trace_iterator *iter);
256	void			(*pipe_close)(struct trace_iterator *iter);
257	ssize_t			(*read)(struct trace_iterator *iter,
258					struct file *filp, char __user *ubuf,
259					size_t cnt, loff_t *ppos);
260	ssize_t			(*splice_read)(struct trace_iterator *iter,
261					       struct file *filp,
262					       loff_t *ppos,
263					       struct pipe_inode_info *pipe,
264					       size_t len,
265					       unsigned int flags);
266#ifdef CONFIG_FTRACE_STARTUP_TEST
267	int			(*selftest)(struct tracer *trace,
268					    struct trace_array *tr);
269#endif
270	void			(*print_header)(struct seq_file *m);
271	enum print_line_t	(*print_line)(struct trace_iterator *iter);
272	/* If you handled the flag setting, return 0 */
273	int			(*set_flag)(u32 old_flags, u32 bit, int set);
 
 
 
 
274	struct tracer		*next;
275	struct tracer_flags	*flags;
276	int			print_max;
277	int			use_max_tr;
 
 
 
 
278};
279
280
281/* Only current can touch trace_recursion */
282#define trace_recursion_inc() do { (current)->trace_recursion++; } while (0)
283#define trace_recursion_dec() do { (current)->trace_recursion--; } while (0)
284
285/* Ring buffer has the 10 LSB bits to count */
286#define trace_recursion_buffer() ((current)->trace_recursion & 0x3ff)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
287
288/* for function tracing recursion */
289#define TRACE_INTERNAL_BIT		(1<<11)
290#define TRACE_GLOBAL_BIT		(1<<12)
291/*
292 * Abuse of the trace_recursion.
293 * As we need a way to maintain state if we are tracing the function
294 * graph in irq because we want to trace a particular function that
295 * was called in irq context but we have irq tracing off. Since this
296 * can only be modified by current, we can reuse trace_recursion.
297 */
298#define TRACE_IRQ_BIT			(1<<13)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
299
300#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (bit); } while (0)
301#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(bit); } while (0)
302#define trace_recursion_test(bit)	((current)->trace_recursion & (bit))
303
304#define TRACE_PIPE_ALL_CPU	-1
 
 
 
 
 
 
 
 
 
 
 
 
 
305
306int tracer_init(struct tracer *t, struct trace_array *tr);
307int tracing_is_enabled(void);
308void trace_wake_up(void);
309void tracing_reset(struct trace_array *tr, int cpu);
310void tracing_reset_online_cpus(struct trace_array *tr);
311void tracing_reset_current(int cpu);
312void tracing_reset_current_online_cpus(void);
313int tracing_open_generic(struct inode *inode, struct file *filp);
 
314struct dentry *trace_create_file(const char *name,
315				 mode_t mode,
316				 struct dentry *parent,
317				 void *data,
318				 const struct file_operations *fops);
319
 
320struct dentry *tracing_init_dentry(void);
321
322struct ring_buffer_event;
323
324struct ring_buffer_event *
325trace_buffer_lock_reserve(struct ring_buffer *buffer,
326			  int type,
327			  unsigned long len,
328			  unsigned long flags,
329			  int pc);
330void trace_buffer_unlock_commit(struct ring_buffer *buffer,
331				struct ring_buffer_event *event,
332				unsigned long flags, int pc);
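/*
 * Illustrative sketch (loosely modelled on the writers in trace.c) of the
 * reserve/fill/commit pattern these two helpers implement.  "entry" here
 * is a struct ftrace_entry as generated from trace_entries.h.
 *
 *	struct ring_buffer_event *event;
 *	struct ftrace_entry *entry;
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip	 = ip;
 *	entry->parent_ip = parent_ip;
 *	trace_buffer_unlock_commit(buffer, event, flags, pc);
 */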
333
334struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
335						struct trace_array_cpu *data);
336
337struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
338					  int *ent_cpu, u64 *ent_ts);
339
340int trace_empty(struct trace_iterator *iter);
341
342void *trace_find_next_entry_inc(struct trace_iterator *iter);
343
344void trace_init_global_iter(struct trace_iterator *iter);
345
346void tracing_iter_reset(struct trace_iterator *iter, int cpu);
347
348void default_wait_pipe(struct trace_iterator *iter);
349void poll_wait_pipe(struct trace_iterator *iter);
350
351void ftrace(struct trace_array *tr,
352			    struct trace_array_cpu *data,
353			    unsigned long ip,
354			    unsigned long parent_ip,
355			    unsigned long flags, int pc);
356void tracing_sched_switch_trace(struct trace_array *tr,
357				struct task_struct *prev,
358				struct task_struct *next,
359				unsigned long flags, int pc);
360
361void tracing_sched_wakeup_trace(struct trace_array *tr,
362				struct task_struct *wakee,
363				struct task_struct *cur,
364				unsigned long flags, int pc);
365void trace_function(struct trace_array *tr,
366		    unsigned long ip,
367		    unsigned long parent_ip,
368		    unsigned long flags, int pc);
369void trace_graph_function(struct trace_array *tr,
370		    unsigned long ip,
371		    unsigned long parent_ip,
372		    unsigned long flags, int pc);
373void trace_default_header(struct seq_file *m);
374void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
375int trace_empty(struct trace_iterator *iter);
376
377void trace_graph_return(struct ftrace_graph_ret *trace);
378int trace_graph_entry(struct ftrace_graph_ent *trace);
379void set_graph_array(struct trace_array *tr);
380
381void tracing_start_cmdline_record(void);
382void tracing_stop_cmdline_record(void);
383void tracing_sched_switch_assign_trace(struct trace_array *tr);
384void tracing_stop_sched_switch_record(void);
385void tracing_start_sched_switch_record(void);
386int register_tracer(struct tracer *type);
387void unregister_tracer(struct tracer *type);
388int is_tracing_stopped(void);
389enum trace_file_type {
390	TRACE_FILE_LAT_FMT	= 1,
391	TRACE_FILE_ANNOTATE	= 2,
392};
393
394extern cpumask_var_t __read_mostly tracing_buffer_mask;
395
396#define for_each_tracing_cpu(cpu)	\
397	for_each_cpu(cpu, tracing_buffer_mask)
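/*
 * Illustrative sketch: walk every CPU that has a trace buffer, e.g. to
 * reposition an iterator on each of them.
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		tracing_iter_reset(iter, cpu);
 */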
398
399extern unsigned long nsecs_to_usecs(unsigned long nsecs);
400
401extern unsigned long tracing_thresh;
402
403#ifdef CONFIG_TRACER_MAX_TRACE
404extern unsigned long tracing_max_latency;
405
406void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
407void update_max_tr_single(struct trace_array *tr,
408			  struct task_struct *tsk, int cpu);
409#endif /* CONFIG_TRACER_MAX_TRACE */
410
411#ifdef CONFIG_STACKTRACE
412void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
413			int skip, int pc);
414
415void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
416			     int skip, int pc, struct pt_regs *regs);
417
418void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
419			    int pc);
420
421void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
422		   int pc);
423#else
424static inline void ftrace_trace_stack(struct ring_buffer *buffer,
425				      unsigned long flags, int skip, int pc)
426{
427}
428
429static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
430					   unsigned long flags, int skip,
431					   int pc, struct pt_regs *regs)
432{
433}
434
435static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
436					  unsigned long flags, int pc)
437{
438}
439
440static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
441				 int skip, int pc)
442{
443}
444#endif /* CONFIG_STACKTRACE */
445
446extern cycle_t ftrace_now(int cpu);
447
448extern void trace_find_cmdline(int pid, char comm[]);
449
450#ifdef CONFIG_DYNAMIC_FTRACE
451extern unsigned long ftrace_update_tot_cnt;
452#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
453extern int DYN_FTRACE_TEST_NAME(void);
454#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
455extern int DYN_FTRACE_TEST_NAME2(void);
456#endif
457
458extern int ring_buffer_expanded;
459extern bool tracing_selftest_disabled;
460DECLARE_PER_CPU(int, ftrace_cpu_disabled);
461
462#ifdef CONFIG_FTRACE_STARTUP_TEST
463extern int trace_selftest_startup_function(struct tracer *trace,
464					   struct trace_array *tr);
465extern int trace_selftest_startup_function_graph(struct tracer *trace,
466						 struct trace_array *tr);
467extern int trace_selftest_startup_irqsoff(struct tracer *trace,
468					  struct trace_array *tr);
469extern int trace_selftest_startup_preemptoff(struct tracer *trace,
470					     struct trace_array *tr);
471extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
472						 struct trace_array *tr);
473extern int trace_selftest_startup_wakeup(struct tracer *trace,
474					 struct trace_array *tr);
475extern int trace_selftest_startup_nop(struct tracer *trace,
476					 struct trace_array *tr);
477extern int trace_selftest_startup_sched_switch(struct tracer *trace,
478					       struct trace_array *tr);
479extern int trace_selftest_startup_branch(struct tracer *trace,
480					 struct trace_array *tr);
481#endif /* CONFIG_FTRACE_STARTUP_TEST */
482
483extern void *head_page(struct trace_array_cpu *data);
484extern unsigned long long ns2usecs(cycle_t nsec);
485extern int
486trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
487extern int
488trace_vprintk(unsigned long ip, const char *fmt, va_list args);
489extern int
490trace_array_vprintk(struct trace_array *tr,
491		    unsigned long ip, const char *fmt, va_list args);
492int trace_array_printk(struct trace_array *tr,
493		       unsigned long ip, const char *fmt, ...);
494void trace_printk_seq(struct trace_seq *s);
495enum print_line_t print_trace_line(struct trace_iterator *iter);
496
497extern unsigned long trace_flags;
498
499extern int trace_clock_id;
500
501/* Standard output formatting function used for function return traces */
502#ifdef CONFIG_FUNCTION_GRAPH_TRACER
503
504/* Flag options */
505#define TRACE_GRAPH_PRINT_OVERRUN       0x1
506#define TRACE_GRAPH_PRINT_CPU           0x2
507#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
508#define TRACE_GRAPH_PRINT_PROC          0x8
509#define TRACE_GRAPH_PRINT_DURATION      0x10
510#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
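/*
 * Illustrative sketch: the flag bits above are OR'ed together and passed
 * to the graph output helpers declared below, e.g.:
 *
 *	u32 flags = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_DURATION;
 *
 *	print_graph_headers_flags(s, flags);
 *	return print_graph_function_flags(iter, flags);
 */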
511
512extern enum print_line_t
513print_graph_function_flags(struct trace_iterator *iter, u32 flags);
514extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
515extern enum print_line_t
516trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
517extern void graph_trace_open(struct trace_iterator *iter);
518extern void graph_trace_close(struct trace_iterator *iter);
519extern int __trace_graph_entry(struct trace_array *tr,
520			       struct ftrace_graph_ent *trace,
521			       unsigned long flags, int pc);
522extern void __trace_graph_return(struct trace_array *tr,
523				 struct ftrace_graph_ret *trace,
524				 unsigned long flags, int pc);
525
526
527#ifdef CONFIG_DYNAMIC_FTRACE
528/* TODO: make this variable */
529#define FTRACE_GRAPH_MAX_FUNCS		32
530extern int ftrace_graph_filter_enabled;
531extern int ftrace_graph_count;
532extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
533
534static inline int ftrace_graph_addr(unsigned long addr)
535{
536	int i;
537
538	if (!ftrace_graph_filter_enabled)
539		return 1;
540
541	for (i = 0; i < ftrace_graph_count; i++) {
542		if (addr == ftrace_graph_funcs[i]) {
543			/*
544			 * If irqs are not being traced, but a function in
545			 * set_graph_function is called from an interrupt
546			 * handler, we still want to trace it.
547			 */
548			if (in_irq())
549				trace_recursion_set(TRACE_IRQ_BIT);
550			else
551				trace_recursion_clear(TRACE_IRQ_BIT);
552			return 1;
553		}
554	}
555
556	return 0;
557}
558#else
559static inline int ftrace_graph_addr(unsigned long addr)
560{
561	return 1;
562}
563#endif /* CONFIG_DYNAMIC_FTRACE */
564#else /* CONFIG_FUNCTION_GRAPH_TRACER */
565static inline enum print_line_t
566print_graph_function_flags(struct trace_iterator *iter, u32 flags)
567{
568	return TRACE_TYPE_UNHANDLED;
569}
570#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
571
572extern struct list_head ftrace_pids;
573
574#ifdef CONFIG_FUNCTION_TRACER
575static inline int ftrace_trace_task(struct task_struct *task)
576{
577	if (list_empty(&ftrace_pids))
578		return 1;
579
580	return test_tsk_trace_trace(task);
581}
582#else
583static inline int ftrace_trace_task(struct task_struct *task)
584{
585	return 1;
586}
587#endif
588
589/*
590 * struct trace_parser - helper for reading user input separated by spaces
591 * @cont: set if the input is not complete - no final space char was found
592 * @buffer: holds the parsed user input
593 * @idx: user input length
594 * @size: buffer size
595 */
596struct trace_parser {
597	bool		cont;
598	char		*buffer;
599	unsigned	idx;
600	unsigned	size;
601};
602
603static inline bool trace_parser_loaded(struct trace_parser *parser)
604{
605	return (parser->idx != 0);
606}
607
608static inline bool trace_parser_cont(struct trace_parser *parser)
609{
610	return parser->cont;
611}
612
613static inline void trace_parser_clear(struct trace_parser *parser)
614{
615	parser->cont = false;
616	parser->idx = 0;
617}
618
619extern int trace_parser_get_init(struct trace_parser *parser, int size);
620extern void trace_parser_put(struct trace_parser *parser);
621extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
622	size_t cnt, loff_t *ppos);
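/*
 * Illustrative sketch of a ->write() handler built on the parser helpers
 * above (modelled on the filter/option writers elsewhere in kernel/trace).
 * MY_BUF_LEN and handle_token() are hypothetical names for the example.
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, MY_BUF_LEN + 1))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		handle_token(parser.buffer);
 *		trace_parser_clear(&parser);
 *	}
 *
 *	trace_parser_put(&parser);
 *	return read;
 */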
623
624/*
625 * trace_iterator_flags is an enumeration that defines bit
626 * positions in trace_flags that control the output.
627 *
628 * NOTE: These bits must match the trace_options array in
629 *       trace.c.
630 */
631enum trace_iterator_flags {
632	TRACE_ITER_PRINT_PARENT		= 0x01,
633	TRACE_ITER_SYM_OFFSET		= 0x02,
634	TRACE_ITER_SYM_ADDR		= 0x04,
635	TRACE_ITER_VERBOSE		= 0x08,
636	TRACE_ITER_RAW			= 0x10,
637	TRACE_ITER_HEX			= 0x20,
638	TRACE_ITER_BIN			= 0x40,
639	TRACE_ITER_BLOCK		= 0x80,
640	TRACE_ITER_STACKTRACE		= 0x100,
641	TRACE_ITER_PRINTK		= 0x200,
642	TRACE_ITER_PREEMPTONLY		= 0x400,
643	TRACE_ITER_BRANCH		= 0x800,
644	TRACE_ITER_ANNOTATE		= 0x1000,
645	TRACE_ITER_USERSTACKTRACE       = 0x2000,
646	TRACE_ITER_SYM_USEROBJ          = 0x4000,
647	TRACE_ITER_PRINTK_MSGONLY	= 0x8000,
648	TRACE_ITER_CONTEXT_INFO		= 0x10000, /* Print pid/cpu/time */
649	TRACE_ITER_LATENCY_FMT		= 0x20000,
650	TRACE_ITER_SLEEP_TIME		= 0x40000,
651	TRACE_ITER_GRAPH_TIME		= 0x80000,
652	TRACE_ITER_RECORD_CMD		= 0x100000,
653	TRACE_ITER_OVERWRITE		= 0x200000,
654	TRACE_ITER_STOP_ON_FREE		= 0x400000,
655};
656
657/*
658 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
659 * control the output of kernel symbols.
660 */
661#define TRACE_ITER_SYM_MASK \
662	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
663
664extern struct tracer nop_trace;
665
666#ifdef CONFIG_BRANCH_TRACER
667extern int enable_branch_tracing(struct trace_array *tr);
668extern void disable_branch_tracing(void);
669static inline int trace_branch_enable(struct trace_array *tr)
670{
671	if (trace_flags & TRACE_ITER_BRANCH)
672		return enable_branch_tracing(tr);
673	return 0;
674}
675static inline void trace_branch_disable(void)
676{
677	/* due to races, always disable */
678	disable_branch_tracing();
679}
680#else
681static inline int trace_branch_enable(struct trace_array *tr)
682{
683	return 0;
684}
685static inline void trace_branch_disable(void)
686{
687}
688#endif /* CONFIG_BRANCH_TRACER */
689
690/* set ring buffers to default size if not already done so */
691int tracing_update_buffers(void);
692
693/* trace event type bit fields, not numeric */
694enum {
695	TRACE_EVENT_TYPE_PRINTF		= 1,
696	TRACE_EVENT_TYPE_RAW		= 2,
697};
698
699struct ftrace_event_field {
700	struct list_head	link;
701	char			*name;
702	char			*type;
703	int			filter_type;
704	int			offset;
705	int			size;
706	int			is_signed;
707};
708
709struct event_filter {
710	int			n_preds;	/* Number assigned */
711	int			a_preds;	/* allocated */
712	struct filter_pred	*preds;
713	struct filter_pred	*root;
714	char			*filter_string;
715};
716
717struct event_subsystem {
718	struct list_head	list;
719	const char		*name;
720	struct dentry		*entry;
721	struct event_filter	*filter;
722	int			nr_events;
723	int			ref_count;
724};
725
726#define FILTER_PRED_INVALID	((unsigned short)-1)
727#define FILTER_PRED_IS_RIGHT	(1 << 15)
728#define FILTER_PRED_FOLD	(1 << 15)
729
730/*
731 * The max number of preds is what fits in an unsigned short minus
732 * the two MSBs used as flags. One bit is shared by the IS_RIGHT
733 * and FOLD flags; the other is reserved.
734 *
735 * 2^14 preds is way more than enough.
736 */
737#define MAX_FILTER_PRED		16384
738
739struct filter_pred;
740struct regex;
741
742typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
743
744typedef int (*regex_match_func)(char *str, struct regex *r, int len);
745
746enum regex_type {
747	MATCH_FULL = 0,
748	MATCH_FRONT_ONLY,
749	MATCH_MIDDLE_ONLY,
750	MATCH_END_ONLY,
751};
752
753struct regex {
754	char			pattern[MAX_FILTER_STR_VAL];
755	int			len;
756	int			field_len;
757	regex_match_func	match;
758};
759
760struct filter_pred {
761	filter_pred_fn_t 	fn;
762	u64 			val;
763	struct regex		regex;
764	/*
765	 * Leaf nodes use field_name; ops is used by AND and OR
766	 * nodes. The field_name is always freed when freeing a pred.
767	 * We can overload field_name for ops and have it freed
768	 * as well.
769	 */
770	union {
771		char		*field_name;
772		unsigned short	*ops;
773	};
774	int 			offset;
775	int 			not;
776	int 			op;
777	unsigned short		index;
778	unsigned short		parent;
779	unsigned short		left;
780	unsigned short		right;
781};
782
783extern struct list_head ftrace_common_fields;
784
785extern enum regex_type
786filter_parse_regex(char *buff, int len, char **search, int *not);
787extern void print_event_filter(struct ftrace_event_call *call,
788			       struct trace_seq *s);
789extern int apply_event_filter(struct ftrace_event_call *call,
790			      char *filter_string);
791extern int apply_subsystem_event_filter(struct event_subsystem *system,
792					char *filter_string);
793extern void print_subsystem_event_filter(struct event_subsystem *system,
794					 struct trace_seq *s);
795extern int filter_assign_type(const char *type);
796
797struct list_head *
798trace_get_fields(struct ftrace_event_call *event_call);
799
800static inline int
801filter_check_discard(struct ftrace_event_call *call, void *rec,
802		     struct ring_buffer *buffer,
803		     struct ring_buffer_event *event)
804{
805	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
806	    !filter_match_preds(call->filter, rec)) {
807		ring_buffer_discard_commit(buffer, event);
808		return 1;
809	}
810
811	return 0;
812}
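/*
 * Illustrative sketch: event writers pair the check above with the commit,
 * dropping the reserved entry when the event's filter rejects it:
 *
 *	if (!filter_check_discard(call, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, flags, pc);
 */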
813
814extern void trace_event_enable_cmd_record(bool enable);
815
816extern struct mutex event_mutex;
817extern struct list_head ftrace_events;
818
819extern const char *__start___trace_bprintk_fmt[];
820extern const char *__stop___trace_bprintk_fmt[];
821
822#undef FTRACE_ENTRY
823#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
824	extern struct ftrace_event_call					\
825	__attribute__((__aligned__(4))) event_##call;
826#undef FTRACE_ENTRY_DUP
827#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
828	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), filter)
829#include "trace_entries.h"
830
831#endif /* _LINUX_KERNEL_TRACE_H */