    1// SPDX-License-Identifier: GPL-2.0
    2/*
    3 * ring buffer based function tracer
    4 *
    5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
    6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
    7 *
    8 * Originally taken from the RT patch by:
    9 *    Arnaldo Carvalho de Melo <acme@redhat.com>
   10 *
   11 * Based on code from the latency_tracer, that is:
   12 *  Copyright (C) 2004-2006 Ingo Molnar
   13 *  Copyright (C) 2004 Nadia Yvette Chambers
   14 */
   15#include <linux/ring_buffer.h>
   16#include <linux/utsname.h>
   17#include <linux/stacktrace.h>
   18#include <linux/writeback.h>
   19#include <linux/kallsyms.h>
   20#include <linux/security.h>
   21#include <linux/seq_file.h>
   22#include <linux/irqflags.h>
   23#include <linux/debugfs.h>
   24#include <linux/tracefs.h>
   25#include <linux/pagemap.h>
   26#include <linux/hardirq.h>
   27#include <linux/linkage.h>
   28#include <linux/uaccess.h>
   29#include <linux/cleanup.h>
   30#include <linux/vmalloc.h>
   31#include <linux/ftrace.h>
   32#include <linux/module.h>
   33#include <linux/percpu.h>
   34#include <linux/splice.h>
   35#include <linux/kdebug.h>
   36#include <linux/string.h>
   37#include <linux/mount.h>
   38#include <linux/rwsem.h>
   39#include <linux/slab.h>
   40#include <linux/ctype.h>
   41#include <linux/init.h>
   42#include <linux/panic_notifier.h>
   43#include <linux/poll.h>
   44#include <linux/nmi.h>
   45#include <linux/fs.h>
   46#include <linux/trace.h>
   47#include <linux/sched/clock.h>
   48#include <linux/sched/rt.h>
   49#include <linux/fsnotify.h>
   50#include <linux/irq_work.h>
   51#include <linux/workqueue.h>
   52
   53#include <asm/setup.h> /* COMMAND_LINE_SIZE */
   54
   55#include "trace.h"
   56#include "trace_output.h"
   57
   58#ifdef CONFIG_FTRACE_STARTUP_TEST
   59/*
   60 * We need to change this state when a selftest is running.
   61 * A selftest will lurk into the ring-buffer to count the
   62 * entries inserted during the selftest although some concurrent
   63 * insertions into the ring-buffer such as trace_printk could occur
   64 * at the same time, giving false positive or negative results.
   65 */
   66static bool __read_mostly tracing_selftest_running;
   67
   68/*
   69 * If boot-time tracing including tracers/events via kernel cmdline
   70 * is running, we do not want to run SELFTEST.
   71 */
   72bool __read_mostly tracing_selftest_disabled;
   73
   74void __init disable_tracing_selftest(const char *reason)
   75{
   76	if (!tracing_selftest_disabled) {
   77		tracing_selftest_disabled = true;
   78		pr_info("Ftrace startup test is disabled due to %s\n", reason);
   79	}
   80}
   81#else
   82#define tracing_selftest_running	0
   83#define tracing_selftest_disabled	0
   84#endif
   85
   86/* Pipe tracepoints to printk */
   87static struct trace_iterator *tracepoint_print_iter;
   88int tracepoint_printk;
   89static bool tracepoint_printk_stop_on_boot __initdata;
   90static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
   91
   92/* For tracers that don't implement custom flags */
   93static struct tracer_opt dummy_tracer_opt[] = {
   94	{ }
   95};
   96
   97static int
   98dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
   99{
  100	return 0;
  101}
  102
  103/*
  104 * To prevent the comm cache from being overwritten when no
  105 * tracing is active, only save the comm when a trace event
  106 * occurred.
  107 */
  108DEFINE_PER_CPU(bool, trace_taskinfo_save);
  109
  110/*
  111 * Kill all tracing for good (never come back).
  112 * It is initialized to 1 but will turn to zero if the initialization
  113 * of the tracer is successful. But that is the only place that sets
  114 * this back to zero.
  115 */
  116static int tracing_disabled = 1;
  117
  118cpumask_var_t __read_mostly	tracing_buffer_mask;
  119
  120/*
  121 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
  122 *
  123 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
  124 * is set, then ftrace_dump is called. This will output the contents
  125 * of the ftrace buffers to the console.  This is very useful for
  126 * capturing traces that lead to crashes and outputting them to a
  127 * serial console.
  128 *
  129 * It is off by default, but you can enable it either by specifying
  130 * "ftrace_dump_on_oops" on the kernel command line, or by setting
  131 * /proc/sys/kernel/ftrace_dump_on_oops.
  132 * Set it to 1 to dump the buffers of all CPUs,
  133 * set it to 2 to dump only the buffer of the CPU that triggered the oops,
  134 * or set it to an instance name to dump that specific trace instance.
  135 * Dumping multiple instances is also supported; instances are separated
  136 * by commas.
  137 */
  138/* Defaults to the string "0", i.e. disabled */
  139char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";
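/*
 * Illustrative examples (not part of this file), following the comment
 * above; the instance names "foo" and "bar" are made up:
 *
 *     ftrace_dump_on_oops            dump the buffers of all CPUs on an oops
 *     ftrace_dump_on_oops=2          dump only the CPU that triggered the oops
 *     ftrace_dump_on_oops=foo,bar    dump the named trace instances
 *
 * or, at run time:
 *
 *     echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */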
  140
  141/* When set, tracing will stop when a WARN*() is hit */
  142int __disable_trace_on_warning;
  143
  144#ifdef CONFIG_TRACE_EVAL_MAP_FILE
  145/* Map of enums to their values, for "eval_map" file */
  146struct trace_eval_map_head {
  147	struct module			*mod;
  148	unsigned long			length;
  149};
  150
  151union trace_eval_map_item;
  152
  153struct trace_eval_map_tail {
  154	/*
  155	 * "end" is first and points to NULL as it must be different
  156	 * from "mod" or "eval_string"
  157	 */
  158	union trace_eval_map_item	*next;
  159	const char			*end;	/* points to NULL */
  160};
  161
  162static DEFINE_MUTEX(trace_eval_mutex);
  163
  164/*
  165 * The trace_eval_maps are saved in an array with two extra elements,
  166 * one at the beginning, and one at the end. The beginning item contains
  167 * the count of the saved maps (head.length), and the module they
  168 * belong to if not built in (head.mod). The ending item contains a
  169 * pointer to the next array of saved eval_map items.
  170 */
  171union trace_eval_map_item {
  172	struct trace_eval_map		map;
  173	struct trace_eval_map_head	head;
  174	struct trace_eval_map_tail	tail;
  175};
  176
  177static union trace_eval_map_item *trace_eval_maps;
  178#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
  179
  180int tracing_set_tracer(struct trace_array *tr, const char *buf);
  181static void ftrace_trace_userstack(struct trace_array *tr,
  182				   struct trace_buffer *buffer,
  183				   unsigned int trace_ctx);
  184
  185static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
  186static char *default_bootup_tracer;
  187
  188static bool allocate_snapshot;
  189static bool snapshot_at_boot;
  190
  191static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
  192static int boot_instance_index;
  193
  194static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
  195static int boot_snapshot_index;
  196
  197static int __init set_cmdline_ftrace(char *str)
  198{
  199	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
  200	default_bootup_tracer = bootup_tracer_buf;
  201	/* We are using ftrace early, expand it */
  202	trace_set_ring_buffer_expanded(NULL);
  203	return 1;
  204}
  205__setup("ftrace=", set_cmdline_ftrace);
  206
  207int ftrace_dump_on_oops_enabled(void)
  208{
  209	if (!strcmp("0", ftrace_dump_on_oops))
  210		return 0;
  211	else
  212		return 1;
  213}
  214
  215static int __init set_ftrace_dump_on_oops(char *str)
  216{
  217	if (!*str) {
  218		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
  219		return 1;
  220	}
  221
  222	if (*str == ',') {
  223		strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE);
  224		strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1);
  225		return 1;
  226	}
  227
  228	if (*str++ == '=') {
  229		strscpy(ftrace_dump_on_oops, str, MAX_TRACER_SIZE);
  230		return 1;
  231	}
  232
  233	return 0;
  234}
  235__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
  236
  237static int __init stop_trace_on_warning(char *str)
  238{
  239	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
  240		__disable_trace_on_warning = 1;
  241	return 1;
  242}
  243__setup("traceoff_on_warning", stop_trace_on_warning);
  244
  245static int __init boot_alloc_snapshot(char *str)
  246{
  247	char *slot = boot_snapshot_info + boot_snapshot_index;
  248	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
  249	int ret;
  250
  251	if (str[0] == '=') {
  252		str++;
  253		if (strlen(str) >= left)
  254			return -1;
  255
  256		ret = snprintf(slot, left, "%s\t", str);
  257		boot_snapshot_index += ret;
  258	} else {
  259		allocate_snapshot = true;
  260		/* We also need the main ring buffer expanded */
  261		trace_set_ring_buffer_expanded(NULL);
  262	}
  263	return 1;
  264}
  265__setup("alloc_snapshot", boot_alloc_snapshot);
  266
  267
  268static int __init boot_snapshot(char *str)
  269{
  270	snapshot_at_boot = true;
  271	boot_alloc_snapshot(str);
  272	return 1;
  273}
  274__setup("ftrace_boot_snapshot", boot_snapshot);
  275
  276
  277static int __init boot_instance(char *str)
  278{
  279	char *slot = boot_instance_info + boot_instance_index;
  280	int left = sizeof(boot_instance_info) - boot_instance_index;
  281	int ret;
  282
  283	if (strlen(str) >= left)
  284		return -1;
  285
  286	ret = snprintf(slot, left, "%s\t", str);
  287	boot_instance_index += ret;
  288
  289	return 1;
  290}
  291__setup("trace_instance=", boot_instance);
  292
  293
  294static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
  295
  296static int __init set_trace_boot_options(char *str)
  297{
  298	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
  299	return 1;
  300}
  301__setup("trace_options=", set_trace_boot_options);
  302
  303static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
  304static char *trace_boot_clock __initdata;
  305
  306static int __init set_trace_boot_clock(char *str)
  307{
  308	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
  309	trace_boot_clock = trace_boot_clock_buf;
  310	return 1;
  311}
  312__setup("trace_clock=", set_trace_boot_clock);
  313
  314static int __init set_tracepoint_printk(char *str)
  315{
  316	/* Ignore the "tp_printk_stop_on_boot" param */
  317	if (*str == '_')
  318		return 0;
  319
  320	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
  321		tracepoint_printk = 1;
  322	return 1;
  323}
  324__setup("tp_printk", set_tracepoint_printk);
  325
  326static int __init set_tracepoint_printk_stop(char *str)
  327{
  328	tracepoint_printk_stop_on_boot = true;
  329	return 1;
  330}
  331__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
  332
  333unsigned long long ns2usecs(u64 nsec)
  334{
  335	nsec += 500;
  336	do_div(nsec, 1000);
  337	return nsec;
  338}
  339
  340static void
  341trace_process_export(struct trace_export *export,
  342	       struct ring_buffer_event *event, int flag)
  343{
  344	struct trace_entry *entry;
  345	unsigned int size = 0;
  346
  347	if (export->flags & flag) {
  348		entry = ring_buffer_event_data(event);
  349		size = ring_buffer_event_length(event);
  350		export->write(export, entry, size);
  351	}
  352}
  353
  354static DEFINE_MUTEX(ftrace_export_lock);
  355
  356static struct trace_export __rcu *ftrace_exports_list __read_mostly;
  357
  358static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
  359static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
  360static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
  361
  362static inline void ftrace_exports_enable(struct trace_export *export)
  363{
  364	if (export->flags & TRACE_EXPORT_FUNCTION)
  365		static_branch_inc(&trace_function_exports_enabled);
  366
  367	if (export->flags & TRACE_EXPORT_EVENT)
  368		static_branch_inc(&trace_event_exports_enabled);
  369
  370	if (export->flags & TRACE_EXPORT_MARKER)
  371		static_branch_inc(&trace_marker_exports_enabled);
  372}
  373
  374static inline void ftrace_exports_disable(struct trace_export *export)
  375{
  376	if (export->flags & TRACE_EXPORT_FUNCTION)
  377		static_branch_dec(&trace_function_exports_enabled);
  378
  379	if (export->flags & TRACE_EXPORT_EVENT)
  380		static_branch_dec(&trace_event_exports_enabled);
  381
  382	if (export->flags & TRACE_EXPORT_MARKER)
  383		static_branch_dec(&trace_marker_exports_enabled);
  384}
  385
  386static void ftrace_exports(struct ring_buffer_event *event, int flag)
  387{
  388	struct trace_export *export;
  389
  390	preempt_disable_notrace();
  391
  392	export = rcu_dereference_raw_check(ftrace_exports_list);
  393	while (export) {
  394		trace_process_export(export, event, flag);
  395		export = rcu_dereference_raw_check(export->next);
  396	}
  397
  398	preempt_enable_notrace();
  399}
  400
  401static inline void
  402add_trace_export(struct trace_export **list, struct trace_export *export)
  403{
  404	rcu_assign_pointer(export->next, *list);
  405	/*
  406	 * We are adding export to the list, but another
  407	 * CPU might be walking that list. We need to make sure
  408	 * the export->next pointer is valid before another CPU sees
  409	 * the export pointer included in the list.
  410	 */
  411	rcu_assign_pointer(*list, export);
  412}
  413
  414static inline int
  415rm_trace_export(struct trace_export **list, struct trace_export *export)
  416{
  417	struct trace_export **p;
  418
  419	for (p = list; *p != NULL; p = &(*p)->next)
  420		if (*p == export)
  421			break;
  422
  423	if (*p != export)
  424		return -1;
  425
  426	rcu_assign_pointer(*p, (*p)->next);
  427
  428	return 0;
  429}
  430
  431static inline void
  432add_ftrace_export(struct trace_export **list, struct trace_export *export)
  433{
  434	ftrace_exports_enable(export);
  435
  436	add_trace_export(list, export);
  437}
  438
  439static inline int
  440rm_ftrace_export(struct trace_export **list, struct trace_export *export)
  441{
  442	int ret;
  443
  444	ret = rm_trace_export(list, export);
  445	ftrace_exports_disable(export);
  446
  447	return ret;
  448}
  449
  450int register_ftrace_export(struct trace_export *export)
  451{
  452	if (WARN_ON_ONCE(!export->write))
  453		return -1;
  454
  455	mutex_lock(&ftrace_export_lock);
  456
  457	add_ftrace_export(&ftrace_exports_list, export);
  458
  459	mutex_unlock(&ftrace_export_lock);
  460
  461	return 0;
  462}
  463EXPORT_SYMBOL_GPL(register_ftrace_export);
  464
  465int unregister_ftrace_export(struct trace_export *export)
  466{
  467	int ret;
  468
  469	mutex_lock(&ftrace_export_lock);
  470
  471	ret = rm_ftrace_export(&ftrace_exports_list, export);
  472
  473	mutex_unlock(&ftrace_export_lock);
  474
  475	return ret;
  476}
  477EXPORT_SYMBOL_GPL(unregister_ftrace_export);
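/*
 * Illustrative sketch (not part of this file): a minimal user of the export
 * interface registered above.  The callback and variable names are made up;
 * the struct trace_export layout and the TRACE_EXPORT_* flags are the ones
 * used earlier in this file (see include/linux/trace.h).
 */
#if 0	/* example only */
static void my_export_write(struct trace_export *export, const void *entry,
			    unsigned int size)
{
	/* Forward the raw trace entry to some out-of-band sink. */
}

static struct trace_export my_export = {
	.write	= my_export_write,
	.flags	= TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
};

/* register_ftrace_export(&my_export); ... unregister_ftrace_export(&my_export); */
#endif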
  478
  479/* trace_flags holds trace_options default values */
  480#define TRACE_DEFAULT_FLAGS						\
  481	(FUNCTION_DEFAULT_FLAGS |					\
  482	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
  483	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
  484	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
  485	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
  486	 TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK)
  487
  488/* trace_options that are only supported by global_trace */
  489#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
  490	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
  491
  492/* trace_flags that are default zero for instances */
  493#define ZEROED_TRACE_FLAGS \
  494	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK)
  495
  496/*
  497 * The global_trace is the descriptor that holds the top-level tracing
  498 * buffers for the live tracing.
  499 */
  500static struct trace_array global_trace = {
  501	.trace_flags = TRACE_DEFAULT_FLAGS,
  502};
  503
  504static struct trace_array *printk_trace = &global_trace;
  505
  506static __always_inline bool printk_binsafe(struct trace_array *tr)
  507{
  508	/*
  509	 * The binary format of traceprintk can cause a crash if used
  510	 * by a buffer from another boot. Force the use of the
  511	 * non binary version of trace_printk if the trace_printk
  512	 * buffer is a boot mapped ring buffer.
  513	 */
  514	return !(tr->flags & TRACE_ARRAY_FL_BOOT);
  515}
  516
  517static void update_printk_trace(struct trace_array *tr)
  518{
  519	if (printk_trace == tr)
  520		return;
  521
  522	printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK;
  523	printk_trace = tr;
  524	tr->trace_flags |= TRACE_ITER_TRACE_PRINTK;
  525}
  526
  527void trace_set_ring_buffer_expanded(struct trace_array *tr)
  528{
  529	if (!tr)
  530		tr = &global_trace;
  531	tr->ring_buffer_expanded = true;
  532}
  533
  534LIST_HEAD(ftrace_trace_arrays);
  535
  536int trace_array_get(struct trace_array *this_tr)
  537{
  538	struct trace_array *tr;
  539
  540	guard(mutex)(&trace_types_lock);
  541	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
  542		if (tr == this_tr) {
  543			tr->ref++;
  544			return 0;
  545		}
  546	}
  547
  548	return -ENODEV;
  549}
  550
  551static void __trace_array_put(struct trace_array *this_tr)
  552{
  553	WARN_ON(!this_tr->ref);
  554	this_tr->ref--;
  555}
  556
  557/**
  558 * trace_array_put - Decrement the reference counter for this trace array.
  559 * @this_tr : pointer to the trace array
  560 *
  561 * NOTE: Use this when we no longer need the trace array returned by
  562 * trace_array_get_by_name(). This ensures the trace array can be later
  563 * destroyed.
  564 *
  565 */
  566void trace_array_put(struct trace_array *this_tr)
  567{
  568	if (!this_tr)
  569		return;
  570
  571	mutex_lock(&trace_types_lock);
  572	__trace_array_put(this_tr);
  573	mutex_unlock(&trace_types_lock);
  574}
  575EXPORT_SYMBOL_GPL(trace_array_put);
  576
  577int tracing_check_open_get_tr(struct trace_array *tr)
  578{
  579	int ret;
  580
  581	ret = security_locked_down(LOCKDOWN_TRACEFS);
  582	if (ret)
  583		return ret;
  584
  585	if (tracing_disabled)
  586		return -ENODEV;
  587
  588	if (tr && trace_array_get(tr) < 0)
  589		return -ENODEV;
  590
  591	return 0;
  592}
  593
  594/**
  595 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
  596 * @filtered_pids: The list of pids to check
  597 * @search_pid: The PID to find in @filtered_pids
  598 *
  599 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
  600 */
  601bool
  602trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
  603{
  604	return trace_pid_list_is_set(filtered_pids, search_pid);
  605}
  606
  607/**
  608 * trace_ignore_this_task - should a task be ignored for tracing
  609 * @filtered_pids: The list of pids to check
  610 * @filtered_no_pids: The list of pids not to be traced
  611 * @task: The task that should be ignored if not filtered
  612 *
  613 * Checks if @task should be traced or not from @filtered_pids.
  614 * Returns true if @task should *NOT* be traced.
  615 * Returns false if @task should be traced.
  616 */
  617bool
  618trace_ignore_this_task(struct trace_pid_list *filtered_pids,
  619		       struct trace_pid_list *filtered_no_pids,
  620		       struct task_struct *task)
  621{
  622	/*
  623	 * If filtered_no_pids is not empty, and the task's pid is listed
  624	 * in filtered_no_pids, then return true.
  625	 * Otherwise, if filtered_pids is empty, that means we can
  626	 * trace all tasks. If it has content, then only trace pids
  627	 * within filtered_pids.
  628	 */
  629
  630	return (filtered_pids &&
  631		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
  632		(filtered_no_pids &&
  633		 trace_find_filtered_pid(filtered_no_pids, task->pid));
  634}
  635
  636/**
  637 * trace_filter_add_remove_task - Add or remove a task from a pid_list
  638 * @pid_list: The list to modify
  639 * @self: The current task for fork or NULL for exit
  640 * @task: The task to add or remove
  641 *
  642 * If adding a task, if @self is defined, the task is only added if @self
  643 * is also included in @pid_list. This happens on fork and tasks should
  644 * only be added when the parent is listed. If @self is NULL, then the
  645 * @task pid will be removed from the list, which would happen on exit
  646 * of a task.
  647 */
  648void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
  649				  struct task_struct *self,
  650				  struct task_struct *task)
  651{
  652	if (!pid_list)
  653		return;
  654
  655	/* For forks, we only add if the forking task is listed */
  656	if (self) {
  657		if (!trace_find_filtered_pid(pid_list, self->pid))
  658			return;
  659	}
  660
  661	/* "self" is set for forks, and NULL for exits */
  662	if (self)
  663		trace_pid_list_set(pid_list, task->pid);
  664	else
  665		trace_pid_list_clear(pid_list, task->pid);
  666}
  667
  668/**
  669 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
  670 * @pid_list: The pid list to show
  671 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
  672 * @pos: The position of the file
  673 *
  674 * This is used by the seq_file "next" operation to iterate the pids
  675 * listed in a trace_pid_list structure.
  676 *
  677 * Returns the pid+1 as we want to display pid of zero, but NULL would
  678 * stop the iteration.
  679 */
  680void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
  681{
  682	long pid = (unsigned long)v;
  683	unsigned int next;
  684
  685	(*pos)++;
  686
  687	/* pid already is +1 of the actual previous bit */
  688	if (trace_pid_list_next(pid_list, pid, &next) < 0)
  689		return NULL;
  690
  691	pid = next;
  692
  693	/* Return pid + 1 to allow zero to be represented */
  694	return (void *)(pid + 1);
  695}
  696
  697/**
  698 * trace_pid_start - Used for seq_file to start reading pid lists
  699 * @pid_list: The pid list to show
  700 * @pos: The position of the file
  701 *
  702 * This is used by seq_file "start" operation to start the iteration
  703 * of listing pids.
  704 *
  705 * Returns the pid+1 as we want to display pid of zero, but NULL would
  706 * stop the iteration.
  707 */
  708void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
  709{
  710	unsigned long pid;
  711	unsigned int first;
  712	loff_t l = 0;
  713
  714	if (trace_pid_list_first(pid_list, &first) < 0)
  715		return NULL;
  716
  717	pid = first;
  718
  719	/* Return pid + 1 so that zero can be the exit value */
  720	for (pid++; pid && l < *pos;
  721	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
  722		;
  723	return (void *)pid;
  724}
  725
  726/**
  727 * trace_pid_show - show the current pid in seq_file processing
  728 * @m: The seq_file structure to write into
  729 * @v: A void pointer of the pid (+1) value to display
  730 *
  731 * Can be directly used by seq_file operations to display the current
  732 * pid value.
  733 */
  734int trace_pid_show(struct seq_file *m, void *v)
  735{
  736	unsigned long pid = (unsigned long)v - 1;
  737
  738	seq_printf(m, "%lu\n", pid);
  739	return 0;
  740}
  741
  742/* 128 should be much more than enough */
  743#define PID_BUF_SIZE		127
  744
  745int trace_pid_write(struct trace_pid_list *filtered_pids,
  746		    struct trace_pid_list **new_pid_list,
  747		    const char __user *ubuf, size_t cnt)
  748{
  749	struct trace_pid_list *pid_list;
  750	struct trace_parser parser;
  751	unsigned long val;
  752	int nr_pids = 0;
  753	ssize_t read = 0;
  754	ssize_t ret;
  755	loff_t pos;
  756	pid_t pid;
  757
  758	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
  759		return -ENOMEM;
  760
  761	/*
  762	 * Always recreate a new array. The write is an all or nothing
  763	 * operation. Always create a new array when adding new pids by
  764	 * the user. If the operation fails, then the current list is
  765	 * not modified.
  766	 */
  767	pid_list = trace_pid_list_alloc();
  768	if (!pid_list) {
  769		trace_parser_put(&parser);
  770		return -ENOMEM;
  771	}
  772
  773	if (filtered_pids) {
  774		/* copy the current bits to the new max */
  775		ret = trace_pid_list_first(filtered_pids, &pid);
  776		while (!ret) {
  777			trace_pid_list_set(pid_list, pid);
  778			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
  779			nr_pids++;
  780		}
  781	}
  782
  783	ret = 0;
  784	while (cnt > 0) {
  785
  786		pos = 0;
  787
  788		ret = trace_get_user(&parser, ubuf, cnt, &pos);
  789		if (ret < 0)
  790			break;
  791
  792		read += ret;
  793		ubuf += ret;
  794		cnt -= ret;
  795
  796		if (!trace_parser_loaded(&parser))
  797			break;
  798
  799		ret = -EINVAL;
  800		if (kstrtoul(parser.buffer, 0, &val))
  801			break;
  802
  803		pid = (pid_t)val;
  804
  805		if (trace_pid_list_set(pid_list, pid) < 0) {
  806			ret = -1;
  807			break;
  808		}
  809		nr_pids++;
  810
  811		trace_parser_clear(&parser);
  812		ret = 0;
  813	}
  814	trace_parser_put(&parser);
  815
  816	if (ret < 0) {
  817		trace_pid_list_free(pid_list);
  818		return ret;
  819	}
  820
  821	if (!nr_pids) {
  822		/* Cleared the list of pids */
  823		trace_pid_list_free(pid_list);
  824		pid_list = NULL;
  825	}
  826
  827	*new_pid_list = pid_list;
  828
  829	return read;
  830}
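/*
 * Illustrative usage (not part of this file): trace_pid_write() parses the
 * writes behind the pid-filter files in tracefs, e.g. set_event_pid.  A
 * write replaces the whole list in one go, and an empty write clears it:
 *
 *     echo 123 456 > /sys/kernel/tracing/set_event_pid
 *     echo > /sys/kernel/tracing/set_event_pid
 *
 * The pid values 123 and 456 are made up.
 */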
  831
  832static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
  833{
  834	u64 ts;
  835
  836	/* Early boot up does not have a buffer yet */
  837	if (!buf->buffer)
  838		return trace_clock_local();
  839
  840	ts = ring_buffer_time_stamp(buf->buffer);
  841	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
  842
  843	return ts;
  844}
  845
  846u64 ftrace_now(int cpu)
  847{
  848	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
  849}
  850
  851/**
  852 * tracing_is_enabled - Show if global_trace has been enabled
  853 *
  854 * Shows if the global trace has been enabled or not. It uses the
  855 * mirror flag "buffer_disabled" to be used in fast paths such as for
  856 * the irqsoff tracer. But it may be inaccurate due to races. If you
  857 * need to know the accurate state, use tracing_is_on() which is a little
  858 * slower, but accurate.
  859 */
  860int tracing_is_enabled(void)
  861{
  862	/*
  863	 * For quick access (irqsoff uses this in fast path), just
  864	 * return the mirror variable of the state of the ring buffer.
  865	 * It's a little racy, but we don't really care.
  866	 */
  867	smp_rmb();
  868	return !global_trace.buffer_disabled;
  869}
  870
  871/*
  872 * trace_buf_size is the size in bytes that is allocated
  873 * for a buffer. Note, the number of bytes is always rounded
  874 * to page size.
  875 *
  876 * This number is purposely set to a low number of 16384.
  877 * If the dump on oops happens, it is much appreciated
  878 * not to have to wait for all that output. In any case, this is
  879 * configurable at both boot time and run time.
  880 */
  881#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
  882
  883static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
  884
  885/* trace_types holds a link list of available tracers. */
  886static struct tracer		*trace_types __read_mostly;
  887
  888/*
  889 * trace_types_lock is used to protect the trace_types list.
  890 */
  891DEFINE_MUTEX(trace_types_lock);
  892
  893/*
  894 * serialize access to the ring buffer
  895 *
  896 * The ring buffer serializes readers, but that is only low-level protection.
  897 * The validity of the events (returned by ring_buffer_peek() etc.)
  898 * is not protected by the ring buffer.
  899 *
  900 * The content of events may become garbage if we allow other processes to
  901 * consume these events concurrently:
  902 *   A) the page of the consumed events may become a normal page
  903 *      (not a reader page) in the ring buffer, and this page will be
  904 *      rewritten by the event producer.
  905 *   B) the page of the consumed events may become a page for splice_read,
  906 *      and this page will be returned to the system.
  907 *
  908 * These primitives allow multiple processes to access different cpu ring
  909 * buffers concurrently.
  910 *
  911 * These primitives don't distinguish read-only and read-consume access.
  912 * Multiple read-only accesses are also serialized.
  913 */
  914
  915#ifdef CONFIG_SMP
  916static DECLARE_RWSEM(all_cpu_access_lock);
  917static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
  918
  919static inline void trace_access_lock(int cpu)
  920{
  921	if (cpu == RING_BUFFER_ALL_CPUS) {
  922		/* gain it for accessing the whole ring buffer. */
  923		down_write(&all_cpu_access_lock);
  924	} else {
  925		/* gain it for accessing a cpu ring buffer. */
  926
  927		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
  928		down_read(&all_cpu_access_lock);
  929
  930		/* Secondly block other access to this @cpu ring buffer. */
  931		mutex_lock(&per_cpu(cpu_access_lock, cpu));
  932	}
  933}
  934
  935static inline void trace_access_unlock(int cpu)
  936{
  937	if (cpu == RING_BUFFER_ALL_CPUS) {
  938		up_write(&all_cpu_access_lock);
  939	} else {
  940		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
  941		up_read(&all_cpu_access_lock);
  942	}
  943}
  944
  945static inline void trace_access_lock_init(void)
  946{
  947	int cpu;
  948
  949	for_each_possible_cpu(cpu)
  950		mutex_init(&per_cpu(cpu_access_lock, cpu));
  951}
  952
  953#else
  954
  955static DEFINE_MUTEX(access_lock);
  956
  957static inline void trace_access_lock(int cpu)
  958{
  959	(void)cpu;
  960	mutex_lock(&access_lock);
  961}
  962
  963static inline void trace_access_unlock(int cpu)
  964{
  965	(void)cpu;
  966	mutex_unlock(&access_lock);
  967}
  968
  969static inline void trace_access_lock_init(void)
  970{
  971}
  972
  973#endif
  974
  975#ifdef CONFIG_STACKTRACE
  976static void __ftrace_trace_stack(struct trace_array *tr,
  977				 struct trace_buffer *buffer,
  978				 unsigned int trace_ctx,
  979				 int skip, struct pt_regs *regs);
  980static inline void ftrace_trace_stack(struct trace_array *tr,
  981				      struct trace_buffer *buffer,
  982				      unsigned int trace_ctx,
  983				      int skip, struct pt_regs *regs);
  984
  985#else
  986static inline void __ftrace_trace_stack(struct trace_array *tr,
  987					struct trace_buffer *buffer,
  988					unsigned int trace_ctx,
  989					int skip, struct pt_regs *regs)
  990{
  991}
  992static inline void ftrace_trace_stack(struct trace_array *tr,
  993				      struct trace_buffer *buffer,
  994				      unsigned long trace_ctx,
  995				      int skip, struct pt_regs *regs)
  996{
  997}
  998
  999#endif
 1000
 1001static __always_inline void
 1002trace_event_setup(struct ring_buffer_event *event,
 1003		  int type, unsigned int trace_ctx)
 1004{
 1005	struct trace_entry *ent = ring_buffer_event_data(event);
 1006
 1007	tracing_generic_entry_update(ent, type, trace_ctx);
 1008}
 1009
 1010static __always_inline struct ring_buffer_event *
 1011__trace_buffer_lock_reserve(struct trace_buffer *buffer,
 1012			  int type,
 1013			  unsigned long len,
 1014			  unsigned int trace_ctx)
 1015{
 1016	struct ring_buffer_event *event;
 1017
 1018	event = ring_buffer_lock_reserve(buffer, len);
 1019	if (event != NULL)
 1020		trace_event_setup(event, type, trace_ctx);
 1021
 1022	return event;
 1023}
 1024
 1025void tracer_tracing_on(struct trace_array *tr)
 1026{
 1027	if (tr->array_buffer.buffer)
 1028		ring_buffer_record_on(tr->array_buffer.buffer);
 1029	/*
 1030	 * This flag is looked at when buffers haven't been allocated
 1031	 * yet, or by some tracers (like irqsoff), that just want to
 1032	 * know if the ring buffer has been disabled, but it can handle
 1033	 * races of where it gets disabled but we still do a record.
 1034	 * As the check is in the fast path of the tracers, it is more
 1035	 * important to be fast than accurate.
 1036	 */
 1037	tr->buffer_disabled = 0;
 1038	/* Make the flag seen by readers */
 1039	smp_wmb();
 1040}
 1041
 1042/**
 1043 * tracing_on - enable tracing buffers
 1044 *
 1045 * This function enables tracing buffers that may have been
 1046 * disabled with tracing_off.
 1047 */
 1048void tracing_on(void)
 1049{
 1050	tracer_tracing_on(&global_trace);
 1051}
 1052EXPORT_SYMBOL_GPL(tracing_on);
 1053
 1054
 1055static __always_inline void
 1056__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
 1057{
 1058	__this_cpu_write(trace_taskinfo_save, true);
 1059
 1060	/* If this is the temp buffer, we need to commit fully */
 1061	if (this_cpu_read(trace_buffered_event) == event) {
 1062		/* Length is in event->array[0] */
 1063		ring_buffer_write(buffer, event->array[0], &event->array[1]);
 1064		/* Release the temp buffer */
 1065		this_cpu_dec(trace_buffered_event_cnt);
 1066		/* ring_buffer_unlock_commit() enables preemption */
 1067		preempt_enable_notrace();
 1068	} else
 1069		ring_buffer_unlock_commit(buffer);
 1070}
 1071
 1072int __trace_array_puts(struct trace_array *tr, unsigned long ip,
 1073		       const char *str, int size)
 1074{
 1075	struct ring_buffer_event *event;
 1076	struct trace_buffer *buffer;
 1077	struct print_entry *entry;
 1078	unsigned int trace_ctx;
 1079	int alloc;
 1080
 1081	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
 1082		return 0;
 1083
 1084	if (unlikely(tracing_selftest_running && tr == &global_trace))
 1085		return 0;
 1086
 1087	if (unlikely(tracing_disabled))
 1088		return 0;
 1089
 1090	alloc = sizeof(*entry) + size + 2; /* possible \n added */
 1091
 1092	trace_ctx = tracing_gen_ctx();
 1093	buffer = tr->array_buffer.buffer;
 1094	ring_buffer_nest_start(buffer);
 1095	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
 1096					    trace_ctx);
 1097	if (!event) {
 1098		size = 0;
 1099		goto out;
 1100	}
 1101
 1102	entry = ring_buffer_event_data(event);
 1103	entry->ip = ip;
 1104
 1105	memcpy(&entry->buf, str, size);
 1106
 1107	/* Add a newline if necessary */
 1108	if (entry->buf[size - 1] != '\n') {
 1109		entry->buf[size] = '\n';
 1110		entry->buf[size + 1] = '\0';
 1111	} else
 1112		entry->buf[size] = '\0';
 1113
 1114	__buffer_unlock_commit(buffer, event);
 1115	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
 1116 out:
 1117	ring_buffer_nest_end(buffer);
 1118	return size;
 1119}
 1120EXPORT_SYMBOL_GPL(__trace_array_puts);
 1121
 1122/**
 1123 * __trace_puts - write a constant string into the trace buffer.
 1124 * @ip:	   The address of the caller
 1125 * @str:   The constant string to write
 1126 * @size:  The size of the string.
 1127 */
 1128int __trace_puts(unsigned long ip, const char *str, int size)
 1129{
 1130	return __trace_array_puts(printk_trace, ip, str, size);
 1131}
 1132EXPORT_SYMBOL_GPL(__trace_puts);
 1133
 1134/**
 1135 * __trace_bputs - write the pointer to a constant string into trace buffer
 1136 * @ip:	   The address of the caller
 1137 * @str:   The constant string to write to the buffer to
 1138 */
 1139int __trace_bputs(unsigned long ip, const char *str)
 1140{
 1141	struct trace_array *tr = READ_ONCE(printk_trace);
 1142	struct ring_buffer_event *event;
 1143	struct trace_buffer *buffer;
 1144	struct bputs_entry *entry;
 1145	unsigned int trace_ctx;
 1146	int size = sizeof(struct bputs_entry);
 1147	int ret = 0;
 1148
 1149	if (!printk_binsafe(tr))
 1150		return __trace_puts(ip, str, strlen(str));
 1151
 1152	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
 1153		return 0;
 1154
 1155	if (unlikely(tracing_selftest_running || tracing_disabled))
 1156		return 0;
 1157
 1158	trace_ctx = tracing_gen_ctx();
 1159	buffer = tr->array_buffer.buffer;
 1160
 1161	ring_buffer_nest_start(buffer);
 1162	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
 1163					    trace_ctx);
 1164	if (!event)
 1165		goto out;
 1166
 1167	entry = ring_buffer_event_data(event);
 1168	entry->ip			= ip;
 1169	entry->str			= str;
 1170
 1171	__buffer_unlock_commit(buffer, event);
 1172	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
 1173
 1174	ret = 1;
 1175 out:
 1176	ring_buffer_nest_end(buffer);
 1177	return ret;
 1178}
 1179EXPORT_SYMBOL_GPL(__trace_bputs);
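/*
 * Illustrative usage (not part of this file): both helpers above are normally
 * reached through the trace_puts() macro, which selects __trace_bputs() for
 * compile-time constant strings and __trace_puts() otherwise, e.g.:
 *
 *     trace_puts("hit the suspect path\n");
 *
 * The message text is made up.
 */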
 1180
 1181#ifdef CONFIG_TRACER_SNAPSHOT
 1182static void tracing_snapshot_instance_cond(struct trace_array *tr,
 1183					   void *cond_data)
 1184{
 1185	struct tracer *tracer = tr->current_trace;
 1186	unsigned long flags;
 1187
 1188	if (in_nmi()) {
 1189		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
 1190		trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
 1191		return;
 1192	}
 1193
 1194	if (!tr->allocated_snapshot) {
 1195		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
 1196		trace_array_puts(tr, "*** stopping trace here!   ***\n");
 1197		tracer_tracing_off(tr);
 1198		return;
 1199	}
 1200
 1201	/* Note, snapshot can not be used when the tracer uses it */
 1202	if (tracer->use_max_tr) {
 1203		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
 1204		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
 1205		return;
 1206	}
 1207
 1208	if (tr->mapped) {
 1209		trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n");
 1210		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
 1211		return;
 1212	}
 1213
 1214	local_irq_save(flags);
 1215	update_max_tr(tr, current, smp_processor_id(), cond_data);
 1216	local_irq_restore(flags);
 1217}
 1218
 1219void tracing_snapshot_instance(struct trace_array *tr)
 1220{
 1221	tracing_snapshot_instance_cond(tr, NULL);
 1222}
 1223
 1224/**
 1225 * tracing_snapshot - take a snapshot of the current buffer.
 1226 *
 1227 * This causes a swap between the snapshot buffer and the current live
 1228 * tracing buffer. You can use this to take snapshots of the live
 1229 * trace when some condition is triggered, but continue to trace.
 1230 *
 1231 * Note, make sure to allocate the snapshot with either
 1232 * a tracing_snapshot_alloc(), or by doing it manually
 1233 * with: echo 1 > /sys/kernel/tracing/snapshot
 1234 *
 1235 * If the snapshot buffer is not allocated, it will stop tracing.
 1236 * Basically making a permanent snapshot.
 1237 */
 1238void tracing_snapshot(void)
 1239{
 1240	struct trace_array *tr = &global_trace;
 1241
 1242	tracing_snapshot_instance(tr);
 1243}
 1244EXPORT_SYMBOL_GPL(tracing_snapshot);
 1245
 1246/**
 1247 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 1248 * @tr:		The tracing instance to snapshot
 1249 * @cond_data:	The data to be tested conditionally, and possibly saved
 1250 *
 1251 * This is the same as tracing_snapshot() except that the snapshot is
 1252 * conditional - the snapshot will only happen if the
 1253 * cond_snapshot.update() implementation receiving the cond_data
 1254 * returns true, which means that the trace array's cond_snapshot
 1255 * update() operation used the cond_data to determine whether the
 1256 * snapshot should be taken, and if it was, presumably saved it along
 1257 * with the snapshot.
 1258 */
 1259void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
 1260{
 1261	tracing_snapshot_instance_cond(tr, cond_data);
 1262}
 1263EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
 1264
 1265/**
 1266 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 1267 * @tr:		The tracing instance
 1268 *
 1269 * When the user enables a conditional snapshot using
 1270 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 1271 * with the snapshot.  This accessor is used to retrieve it.
 1272 *
 1273 * Should not be called from cond_snapshot.update(), since it takes
 1274 * the tr->max_lock lock, which the code calling
 1275 * cond_snapshot.update() has already done.
 1276 *
 1277 * Returns the cond_data associated with the trace array's snapshot.
 1278 */
 1279void *tracing_cond_snapshot_data(struct trace_array *tr)
 1280{
 1281	void *cond_data = NULL;
 1282
 1283	local_irq_disable();
 1284	arch_spin_lock(&tr->max_lock);
 1285
 1286	if (tr->cond_snapshot)
 1287		cond_data = tr->cond_snapshot->cond_data;
 1288
 1289	arch_spin_unlock(&tr->max_lock);
 1290	local_irq_enable();
 1291
 1292	return cond_data;
 1293}
 1294EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
 1295
 1296static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
 1297					struct array_buffer *size_buf, int cpu_id);
 1298static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
 1299
 1300int tracing_alloc_snapshot_instance(struct trace_array *tr)
 1301{
 1302	int order;
 1303	int ret;
 1304
 1305	if (!tr->allocated_snapshot) {
 1306
 1307		/* Make the snapshot buffer have the same order as main buffer */
 1308		order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
 1309		ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
 1310		if (ret < 0)
 1311			return ret;
 1312
 1313		/* allocate spare buffer */
 1314		ret = resize_buffer_duplicate_size(&tr->max_buffer,
 1315				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
 1316		if (ret < 0)
 1317			return ret;
 1318
 1319		tr->allocated_snapshot = true;
 1320	}
 1321
 1322	return 0;
 1323}
 1324
 1325static void free_snapshot(struct trace_array *tr)
 1326{
 1327	/*
 1328	 * We don't free the ring buffer. Instead, we resize it because
 1329	 * the max_tr ring buffer has some state (e.g. ring->clock) and
 1330	 * we want to preserve it.
 1331	 */
 1332	ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
 1333	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
 1334	set_buffer_entries(&tr->max_buffer, 1);
 1335	tracing_reset_online_cpus(&tr->max_buffer);
 1336	tr->allocated_snapshot = false;
 1337}
 1338
 1339static int tracing_arm_snapshot_locked(struct trace_array *tr)
 1340{
 1341	int ret;
 1342
 1343	lockdep_assert_held(&trace_types_lock);
 1344
 1345	spin_lock(&tr->snapshot_trigger_lock);
 1346	if (tr->snapshot == UINT_MAX || tr->mapped) {
 1347		spin_unlock(&tr->snapshot_trigger_lock);
 1348		return -EBUSY;
 1349	}
 1350
 1351	tr->snapshot++;
 1352	spin_unlock(&tr->snapshot_trigger_lock);
 1353
 1354	ret = tracing_alloc_snapshot_instance(tr);
 1355	if (ret) {
 1356		spin_lock(&tr->snapshot_trigger_lock);
 1357		tr->snapshot--;
 1358		spin_unlock(&tr->snapshot_trigger_lock);
 1359	}
 1360
 1361	return ret;
 1362}
 1363
 1364int tracing_arm_snapshot(struct trace_array *tr)
 1365{
 1366	int ret;
 1367
 1368	mutex_lock(&trace_types_lock);
 1369	ret = tracing_arm_snapshot_locked(tr);
 1370	mutex_unlock(&trace_types_lock);
 1371
 1372	return ret;
 1373}
 1374
 1375void tracing_disarm_snapshot(struct trace_array *tr)
 1376{
 1377	spin_lock(&tr->snapshot_trigger_lock);
 1378	if (!WARN_ON(!tr->snapshot))
 1379		tr->snapshot--;
 1380	spin_unlock(&tr->snapshot_trigger_lock);
 1381}
 1382
 1383/**
 1384 * tracing_alloc_snapshot - allocate snapshot buffer.
 1385 *
 1386 * This only allocates the snapshot buffer if it isn't already
 1387 * allocated - it doesn't also take a snapshot.
 1388 *
 1389 * This is meant to be used in cases where the snapshot buffer needs
 1390 * to be set up for events that can't sleep but need to be able to
 1391 * trigger a snapshot.
 1392 */
 1393int tracing_alloc_snapshot(void)
 1394{
 1395	struct trace_array *tr = &global_trace;
 1396	int ret;
 1397
 1398	ret = tracing_alloc_snapshot_instance(tr);
 1399	WARN_ON(ret < 0);
 1400
 1401	return ret;
 1402}
 1403EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 1404
 1405/**
 1406 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 1407 *
 1408 * This is similar to tracing_snapshot(), but it will allocate the
 1409 * snapshot buffer if it isn't already allocated. Use this only
 1410 * where it is safe to sleep, as the allocation may sleep.
 1411 *
 1412 * This causes a swap between the snapshot buffer and the current live
 1413 * tracing buffer. You can use this to take snapshots of the live
 1414 * trace when some condition is triggered, but continue to trace.
 1415 */
 1416void tracing_snapshot_alloc(void)
 1417{
 1418	int ret;
 1419
 1420	ret = tracing_alloc_snapshot();
 1421	if (ret < 0)
 1422		return;
 1423
 1424	tracing_snapshot();
 1425}
 1426EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
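/*
 * Illustrative sketch (not part of this file): a typical in-kernel user
 * allocates the snapshot buffer once from a context that may sleep and then
 * triggers snapshots wherever the interesting condition is detected (but not
 * from NMI, see tracing_snapshot_instance_cond() above).  The function and
 * condition names are made up.
 */
#if 0	/* example only */
static int __init my_debug_init(void)
{
	return tracing_alloc_snapshot();	/* allocation may sleep */
}

static void my_hot_path(void)
{
	if (something_looks_wrong())		/* made-up predicate */
		tracing_snapshot();		/* usable where sleeping is not allowed */
}
#endif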
 1427
 1428/**
 1429 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 1430 * @tr:		The tracing instance
 1431 * @cond_data:	User data to associate with the snapshot
 1432 * @update:	Implementation of the cond_snapshot update function
 1433 *
 1434 * Check whether the conditional snapshot for the given instance has
 1435 * already been enabled, or if the current tracer is already using a
 1436 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 1437 * save the cond_data and update function inside.
 1438 *
 1439 * Returns 0 if successful, error otherwise.
 1440 */
 1441int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
 1442				 cond_update_fn_t update)
 1443{
 1444	struct cond_snapshot *cond_snapshot __free(kfree) =
 1445		kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
 1446	int ret;
 1447
 1448	if (!cond_snapshot)
 1449		return -ENOMEM;
 1450
 1451	cond_snapshot->cond_data = cond_data;
 1452	cond_snapshot->update = update;
 1453
 1454	guard(mutex)(&trace_types_lock);
 1455
 1456	if (tr->current_trace->use_max_tr)
 1457		return -EBUSY;
 1458
 1459	/*
 1460	 * The cond_snapshot can only change to NULL without the
 1461	 * trace_types_lock. We don't care if we race with it going
 1462	 * to NULL, but we want to make sure that it's not set to
 1463	 * something other than NULL when we get here, which we can
 1464	 * do safely with only holding the trace_types_lock and not
 1465	 * having to take the max_lock.
 1466	 */
 1467	if (tr->cond_snapshot)
 1468		return -EBUSY;
 1469
 1470	ret = tracing_arm_snapshot_locked(tr);
 1471	if (ret)
 1472		return ret;
 1473
 1474	local_irq_disable();
 1475	arch_spin_lock(&tr->max_lock);
 1476	tr->cond_snapshot = no_free_ptr(cond_snapshot);
 1477	arch_spin_unlock(&tr->max_lock);
 1478	local_irq_enable();
 1479
 1480	return 0;
 1481}
 1482EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
 1483
 1484/**
 1485 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 1486 * @tr:		The tracing instance
 1487 *
 1488 * Check whether the conditional snapshot for the given instance is
 1489 * enabled; if so, free the cond_snapshot associated with it,
 1490 * otherwise return -EINVAL.
 1491 *
 1492 * Returns 0 if successful, error otherwise.
 1493 */
 1494int tracing_snapshot_cond_disable(struct trace_array *tr)
 1495{
 1496	int ret = 0;
 1497
 1498	local_irq_disable();
 1499	arch_spin_lock(&tr->max_lock);
 1500
 1501	if (!tr->cond_snapshot)
 1502		ret = -EINVAL;
 1503	else {
 1504		kfree(tr->cond_snapshot);
 1505		tr->cond_snapshot = NULL;
 1506	}
 1507
 1508	arch_spin_unlock(&tr->max_lock);
 1509	local_irq_enable();
 1510
 1511	tracing_disarm_snapshot(tr);
 1512
 1513	return ret;
 1514}
 1515EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
 1516#else
 1517void tracing_snapshot(void)
 1518{
 1519	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
 1520}
 1521EXPORT_SYMBOL_GPL(tracing_snapshot);
 1522void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
 1523{
 1524	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
 1525}
 1526EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
 1527int tracing_alloc_snapshot(void)
 1528{
 1529	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
 1530	return -ENODEV;
 1531}
 1532EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 1533void tracing_snapshot_alloc(void)
 1534{
 1535	/* Give warning */
 1536	tracing_snapshot();
 1537}
 1538EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 1539void *tracing_cond_snapshot_data(struct trace_array *tr)
 1540{
 1541	return NULL;
 1542}
 1543EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
 1544int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
 1545{
 1546	return -ENODEV;
 1547}
 1548EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
 1549int tracing_snapshot_cond_disable(struct trace_array *tr)
 1550{
 1551	return false;
 1552}
 1553EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
 1554#define free_snapshot(tr)	do { } while (0)
 1555#define tracing_arm_snapshot_locked(tr) ({ -EBUSY; })
 1556#endif /* CONFIG_TRACER_SNAPSHOT */
 1557
 1558void tracer_tracing_off(struct trace_array *tr)
 1559{
 1560	if (tr->array_buffer.buffer)
 1561		ring_buffer_record_off(tr->array_buffer.buffer);
 1562	/*
 1563	 * This flag is looked at when buffers haven't been allocated
 1564	 * yet, or by some tracers (like irqsoff), that just want to
 1565	 * know if the ring buffer has been disabled, but it can handle
 1566	 * races of where it gets disabled but we still do a record.
 1567	 * As the check is in the fast path of the tracers, it is more
 1568	 * important to be fast than accurate.
 1569	 */
 1570	tr->buffer_disabled = 1;
 1571	/* Make the flag seen by readers */
 1572	smp_wmb();
 1573}
 1574
 1575/**
 1576 * tracing_off - turn off tracing buffers
 1577 *
 1578 * This function stops the tracing buffers from recording data.
 1579 * It does not disable any overhead the tracers themselves may
 1580 * be causing. This function simply causes all recording to
 1581 * the ring buffers to fail.
 1582 */
 1583void tracing_off(void)
 1584{
 1585	tracer_tracing_off(&global_trace);
 1586}
 1587EXPORT_SYMBOL_GPL(tracing_off);
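/*
 * Illustrative usage (not part of this file): a common debugging pattern is
 * to stop recording as soon as a suspicious condition is seen, so that the
 * trace leading up to it is preserved in the ring buffer:
 *
 *	if (bad_state())		// bad_state() is a made-up predicate
 *		tracing_off();
 *
 * Recording can be turned back on with tracing_on(), or from user space via
 * the tracing_on file in tracefs.
 */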
 1588
 1589void disable_trace_on_warning(void)
 1590{
 1591	if (__disable_trace_on_warning) {
 1592		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
 1593			"Disabling tracing due to warning\n");
 1594		tracing_off();
 1595	}
 1596}
 1597
 1598/**
 1599 * tracer_tracing_is_on - show real state of ring buffer enabled
 1600 * @tr : the trace array to know if ring buffer is enabled
 1601 *
 1602 * Shows real state of the ring buffer if it is enabled or not.
 1603 */
 1604bool tracer_tracing_is_on(struct trace_array *tr)
 1605{
 1606	if (tr->array_buffer.buffer)
 1607		return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
 1608	return !tr->buffer_disabled;
 1609}
 1610
 1611/**
 1612 * tracing_is_on - show state of ring buffers enabled
 1613 */
 1614int tracing_is_on(void)
 1615{
 1616	return tracer_tracing_is_on(&global_trace);
 1617}
 1618EXPORT_SYMBOL_GPL(tracing_is_on);
 1619
 1620static int __init set_buf_size(char *str)
 1621{
 1622	unsigned long buf_size;
 1623
 1624	if (!str)
 1625		return 0;
 1626	buf_size = memparse(str, &str);
 1627	/*
 1628	 * nr_entries can not be zero and the startup
 1629	 * tests require some buffer space. Therefore
 1630	 * ensure we have at least 4096 bytes of buffer.
 1631	 */
 1632	trace_buf_size = max(4096UL, buf_size);
 1633	return 1;
 1634}
 1635__setup("trace_buf_size=", set_buf_size);
 1636
 1637static int __init set_tracing_thresh(char *str)
 1638{
 1639	unsigned long threshold;
 1640	int ret;
 1641
 1642	if (!str)
 1643		return 0;
 1644	ret = kstrtoul(str, 0, &threshold);
 1645	if (ret < 0)
 1646		return 0;
 1647	tracing_thresh = threshold * 1000;
 1648	return 1;
 1649}
 1650__setup("tracing_thresh=", set_tracing_thresh);
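/*
 * Illustrative boot-parameter examples (not part of this file); the values
 * are made up:
 *
 *     trace_buf_size=1M      per-cpu buffer size in bytes; memparse()
 *                            suffixes such as K, M and G are accepted
 *     tracing_thresh=100     latency threshold in microseconds (stored
 *                            internally as value * 1000 nanoseconds)
 */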
 1651
 1652unsigned long nsecs_to_usecs(unsigned long nsecs)
 1653{
 1654	return nsecs / 1000;
 1655}
 1656
 1657/*
 1658 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 1659 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 1660 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 1661 * of strings in the order that the evals (enum) were defined.
 1662 */
 1663#undef C
 1664#define C(a, b) b
 1665
 1666/* These must match the bit positions in trace_iterator_flags */
 1667static const char *trace_options[] = {
 1668	TRACE_FLAGS
 1669	NULL
 1670};
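/*
 * Illustrative expansion (not part of this file): TRACE_FLAGS is defined in
 * trace.h as a list of C(eval, string) pairs, e.g. C(PRINT_PARENT,
 * "print-parent"), C(SYM_OFFSET, "sym-offset"), ...  With C(a, b) redefined
 * as b above, trace_options[] therefore expands to
 *
 *     { "print-parent", "sym-offset", ..., NULL };
 *
 * The two option names shown are real trace options, used only as examples.
 */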
 1671
 1672static struct {
 1673	u64 (*func)(void);
 1674	const char *name;
 1675	int in_ns;		/* is this clock in nanoseconds? */
 1676} trace_clocks[] = {
 1677	{ trace_clock_local,		"local",	1 },
 1678	{ trace_clock_global,		"global",	1 },
 1679	{ trace_clock_counter,		"counter",	0 },
 1680	{ trace_clock_jiffies,		"uptime",	0 },
 1681	{ trace_clock,			"perf",		1 },
 1682	{ ktime_get_mono_fast_ns,	"mono",		1 },
 1683	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
 1684	{ ktime_get_boot_fast_ns,	"boot",		1 },
 1685	{ ktime_get_tai_fast_ns,	"tai",		1 },
 1686	ARCH_TRACE_CLOCKS
 1687};
 1688
 1689bool trace_clock_in_ns(struct trace_array *tr)
 1690{
 1691	if (trace_clocks[tr->clock_id].in_ns)
 1692		return true;
 1693
 1694	return false;
 1695}
 1696
 1697/*
 1698 * trace_parser_get_init - gets the buffer for trace parser
 1699 */
 1700int trace_parser_get_init(struct trace_parser *parser, int size)
 1701{
 1702	memset(parser, 0, sizeof(*parser));
 1703
 1704	parser->buffer = kmalloc(size, GFP_KERNEL);
 1705	if (!parser->buffer)
 1706		return 1;
 1707
 1708	parser->size = size;
 1709	return 0;
 1710}
 1711
 1712/*
 1713 * trace_parser_put - frees the buffer for trace parser
 1714 */
 1715void trace_parser_put(struct trace_parser *parser)
 1716{
 1717	kfree(parser->buffer);
 1718	parser->buffer = NULL;
 1719}
 1720
 1721/*
 1722 * trace_get_user - reads the user input string separated by space
 1723 * (matched by isspace(ch))
 1724 *
 1725 * For each string found the 'struct trace_parser' is updated,
 1726 * and the function returns.
 1727 *
 1728 * Returns number of bytes read.
 1729 *
 1730 * See kernel/trace/trace.h for 'struct trace_parser' details.
 1731 */
 1732int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 1733	size_t cnt, loff_t *ppos)
 1734{
 1735	char ch;
 1736	size_t read = 0;
 1737	ssize_t ret;
 1738
 1739	if (!*ppos)
 1740		trace_parser_clear(parser);
 1741
 1742	ret = get_user(ch, ubuf++);
 1743	if (ret)
 1744		goto out;
 1745
 1746	read++;
 1747	cnt--;
 1748
 1749	/*
 1750	 * The parser is not finished with the last write,
 1751	 * continue reading the user input without skipping spaces.
 1752	 */
 1753	if (!parser->cont) {
 1754		/* skip white space */
 1755		while (cnt && isspace(ch)) {
 1756			ret = get_user(ch, ubuf++);
 1757			if (ret)
 1758				goto out;
 1759			read++;
 1760			cnt--;
 1761		}
 1762
 1763		parser->idx = 0;
 1764
 1765		/* only spaces were written */
 1766		if (isspace(ch) || !ch) {
 1767			*ppos += read;
 1768			ret = read;
 1769			goto out;
 1770		}
 1771	}
 1772
 1773	/* read the non-space input */
 1774	while (cnt && !isspace(ch) && ch) {
 1775		if (parser->idx < parser->size - 1)
 1776			parser->buffer[parser->idx++] = ch;
 1777		else {
 1778			ret = -EINVAL;
 1779			goto out;
 1780		}
 1781		ret = get_user(ch, ubuf++);
 1782		if (ret)
 1783			goto out;
 1784		read++;
 1785		cnt--;
 1786	}
 1787
 1788	/* We either got finished input or we have to wait for another call. */
 1789	if (isspace(ch) || !ch) {
 1790		parser->buffer[parser->idx] = 0;
 1791		parser->cont = false;
 1792	} else if (parser->idx < parser->size - 1) {
 1793		parser->cont = true;
 1794		parser->buffer[parser->idx++] = ch;
 1795		/* Make sure the parsed string always terminates with '\0'. */
 1796		parser->buffer[parser->idx] = 0;
 1797	} else {
 1798		ret = -EINVAL;
 1799		goto out;
 1800	}
 1801
 1802	*ppos += read;
 1803	ret = read;
 1804
 1805out:
 1806	return ret;
 1807}
 1808
 1809/* TODO add a seq_buf_to_buffer() */
 1810static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 1811{
 1812	int len;
 1813
 1814	if (trace_seq_used(s) <= s->readpos)
 1815		return -EBUSY;
 1816
 1817	len = trace_seq_used(s) - s->readpos;
 1818	if (cnt > len)
 1819		cnt = len;
 1820	memcpy(buf, s->buffer + s->readpos, cnt);
 1821
 1822	s->readpos += cnt;
 1823	return cnt;
 1824}
 1825
 1826unsigned long __read_mostly	tracing_thresh;
 1827
 1828#ifdef CONFIG_TRACER_MAX_TRACE
 1829static const struct file_operations tracing_max_lat_fops;
 1830
 1831#ifdef LATENCY_FS_NOTIFY
 1832
 1833static struct workqueue_struct *fsnotify_wq;
 1834
 1835static void latency_fsnotify_workfn(struct work_struct *work)
 1836{
 1837	struct trace_array *tr = container_of(work, struct trace_array,
 1838					      fsnotify_work);
 1839	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
 1840}
 1841
 1842static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
 1843{
 1844	struct trace_array *tr = container_of(iwork, struct trace_array,
 1845					      fsnotify_irqwork);
 1846	queue_work(fsnotify_wq, &tr->fsnotify_work);
 1847}
 1848
 1849static void trace_create_maxlat_file(struct trace_array *tr,
 1850				     struct dentry *d_tracer)
 1851{
 1852	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
 1853	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
 1854	tr->d_max_latency = trace_create_file("tracing_max_latency",
 1855					      TRACE_MODE_WRITE,
 1856					      d_tracer, tr,
 1857					      &tracing_max_lat_fops);
 1858}
 1859
 1860__init static int latency_fsnotify_init(void)
 1861{
 1862	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
 1863				      WQ_UNBOUND | WQ_HIGHPRI, 0);
 1864	if (!fsnotify_wq) {
 1865		pr_err("Unable to allocate tr_max_lat_wq\n");
 1866		return -ENOMEM;
 1867	}
 1868	return 0;
 1869}
 1870
 1871late_initcall_sync(latency_fsnotify_init);
 1872
 1873void latency_fsnotify(struct trace_array *tr)
 1874{
 1875	if (!fsnotify_wq)
 1876		return;
 1877	/*
 1878	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
 1879	 * possible that we are called from __schedule() or do_idle(), which
 1880	 * could cause a deadlock.
 1881	 */
 1882	irq_work_queue(&tr->fsnotify_irqwork);
 1883}
 1884
 1885#else /* !LATENCY_FS_NOTIFY */
 1886
 1887#define trace_create_maxlat_file(tr, d_tracer)				\
 1888	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
 1889			  d_tracer, tr, &tracing_max_lat_fops)
 1890
 1891#endif
 1892
 1893/*
 1894 * Copy the new maximum trace into the separate maximum-trace
 1895 * structure. (this way the maximum trace is permanently saved,
 1896 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 1897 */
 1898static void
 1899__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 1900{
 1901	struct array_buffer *trace_buf = &tr->array_buffer;
 1902	struct array_buffer *max_buf = &tr->max_buffer;
 1903	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
 1904	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
 1905
 1906	max_buf->cpu = cpu;
 1907	max_buf->time_start = data->preempt_timestamp;
 1908
 1909	max_data->saved_latency = tr->max_latency;
 1910	max_data->critical_start = data->critical_start;
 1911	max_data->critical_end = data->critical_end;
 1912
 1913	strscpy(max_data->comm, tsk->comm);
 1914	max_data->pid = tsk->pid;
 1915	/*
 1916	 * If tsk == current, then use current_uid(), as that does not use
 1917	 * RCU. The irq tracer can be called out of RCU scope.
 1918	 */
 1919	if (tsk == current)
 1920		max_data->uid = current_uid();
 1921	else
 1922		max_data->uid = task_uid(tsk);
 1923
 1924	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
 1925	max_data->policy = tsk->policy;
 1926	max_data->rt_priority = tsk->rt_priority;
 1927
 1928	/* record this tasks comm */
 1929	tracing_record_cmdline(tsk);
 1930	latency_fsnotify(tr);
 1931}
 1932
 1933/**
 1934 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 1935 * @tr: tracer
 1936 * @tsk: the task with the latency
 1937 * @cpu: The cpu that initiated the trace.
 1938 * @cond_data: User data associated with a conditional snapshot
 1939 *
 1940 * Flip the buffers between the @tr and the max_tr and record information
 1941 * about which task was the cause of this latency.
 1942 */
 1943void
 1944update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
 1945	      void *cond_data)
 1946{
 1947	if (tr->stop_count)
 1948		return;
 1949
 1950	WARN_ON_ONCE(!irqs_disabled());
 1951
 1952	if (!tr->allocated_snapshot) {
 1953		/* Only the nop tracer should hit this when disabling */
 1954		WARN_ON_ONCE(tr->current_trace != &nop_trace);
 1955		return;
 1956	}
 1957
 1958	arch_spin_lock(&tr->max_lock);
 1959
 1960	/* Inherit the recordable setting from array_buffer */
 1961	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
 1962		ring_buffer_record_on(tr->max_buffer.buffer);
 1963	else
 1964		ring_buffer_record_off(tr->max_buffer.buffer);
 1965
 1966#ifdef CONFIG_TRACER_SNAPSHOT
 1967	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
 1968		arch_spin_unlock(&tr->max_lock);
 1969		return;
 1970	}
 1971#endif
 1972	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
 1973
 1974	__update_max_tr(tr, tsk, cpu);
 1975
 1976	arch_spin_unlock(&tr->max_lock);
 1977
 1978	/* Any waiters on the old snapshot buffer need to wake up */
 1979	ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
 1980}
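/*
 * A sketch of how a latency tracer typically consumes update_max_tr();
 * example_report_latency() is hypothetical, and like the real callers in
 * trace_irqsoff.c it is expected to run with interrupts disabled.
 */
static void example_report_latency(struct trace_array *tr, int cpu,
				   unsigned long delta)
{
	if (delta <= tr->max_latency)
		return;

	tr->max_latency = delta;
	/* Swap in the snapshot buffer and record current as the culprit. */
	update_max_tr(tr, current, cpu, NULL);
}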
 1981
 1982/**
 1983 * update_max_tr_single - only copy one trace over, and reset the rest
 1984 * @tr: tracer
 1985 * @tsk: task with the latency
 1986 * @cpu: the cpu of the buffer to copy.
 1987 *
 1988 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 1989 */
 1990void
 1991update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 1992{
 1993	int ret;
 1994
 1995	if (tr->stop_count)
 1996		return;
 1997
 1998	WARN_ON_ONCE(!irqs_disabled());
 1999	if (!tr->allocated_snapshot) {
 2000		/* Only the nop tracer should hit this when disabling */
 2001		WARN_ON_ONCE(tr->current_trace != &nop_trace);
 2002		return;
 2003	}
 2004
 2005	arch_spin_lock(&tr->max_lock);
 2006
 2007	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
 2008
 2009	if (ret == -EBUSY) {
 2010		/*
 2011		 * We failed to swap the buffer due to a commit taking
 2012		 * place on this CPU. We fail to record, but we reset
 2013		 * the max trace buffer (no one writes directly to it)
 2014		 * and flag that it failed.
 2015		 * Another possible reason is that a resize is in progress.
 2016		 */
 2017		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
 2018			"Failed to swap buffers due to commit or resize in progress\n");
 2019	}
 2020
 2021	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 2022
 2023	__update_max_tr(tr, tsk, cpu);
 2024	arch_spin_unlock(&tr->max_lock);
 2025}
 2026
 2027#endif /* CONFIG_TRACER_MAX_TRACE */
 2028
 2029struct pipe_wait {
 2030	struct trace_iterator		*iter;
 2031	int				wait_index;
 2032};
 2033
 2034static bool wait_pipe_cond(void *data)
 2035{
 2036	struct pipe_wait *pwait = data;
 2037	struct trace_iterator *iter = pwait->iter;
 2038
 2039	if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index)
 2040		return true;
 2041
 2042	return iter->closed;
 2043}
 2044
 2045static int wait_on_pipe(struct trace_iterator *iter, int full)
 2046{
 2047	struct pipe_wait pwait;
 2048	int ret;
 2049
 2050	/* Iterators are static, they should be filled or empty */
 2051	if (trace_buffer_iter(iter, iter->cpu_file))
 2052		return 0;
 2053
 2054	pwait.wait_index = atomic_read_acquire(&iter->wait_index);
 2055	pwait.iter = iter;
 2056
 2057	ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full,
 2058			       wait_pipe_cond, &pwait);
 2059
 2060#ifdef CONFIG_TRACER_MAX_TRACE
 2061	/*
 2062	 * Make sure this is still the snapshot buffer, as if a snapshot were
 2063	 * to happen, this would now be the main buffer.
 2064	 */
 2065	if (iter->snapshot)
 2066		iter->array_buffer = &iter->tr->max_buffer;
 2067#endif
 2068	return ret;
 2069}
 2070
 2071#ifdef CONFIG_FTRACE_STARTUP_TEST
 2072static bool selftests_can_run;
 2073
 2074struct trace_selftests {
 2075	struct list_head		list;
 2076	struct tracer			*type;
 2077};
 2078
 2079static LIST_HEAD(postponed_selftests);
 2080
 2081static int save_selftest(struct tracer *type)
 2082{
 2083	struct trace_selftests *selftest;
 2084
 2085	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
 2086	if (!selftest)
 2087		return -ENOMEM;
 2088
 2089	selftest->type = type;
 2090	list_add(&selftest->list, &postponed_selftests);
 2091	return 0;
 2092}
 2093
 2094static int run_tracer_selftest(struct tracer *type)
 2095{
 2096	struct trace_array *tr = &global_trace;
 2097	struct tracer *saved_tracer = tr->current_trace;
 2098	int ret;
 2099
 2100	if (!type->selftest || tracing_selftest_disabled)
 2101		return 0;
 2102
 2103	/*
 2104	 * If a tracer registers early in boot up (before scheduling is
 2105	 * initialized and such), then do not run its selftests yet.
 2106	 * Instead, run it a little later in the boot process.
 2107	 */
 2108	if (!selftests_can_run)
 2109		return save_selftest(type);
 2110
 2111	if (!tracing_is_on()) {
 2112		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
 2113			type->name);
 2114		return 0;
 2115	}
 2116
 2117	/*
 2118	 * Run a selftest on this tracer.
 2119	 * Here we reset the trace buffer, and set the current
 2120	 * tracer to be this tracer. The tracer can then run some
 2121	 * internal tracing to verify that everything is in order.
 2122	 * If we fail, we do not register this tracer.
 2123	 */
 2124	tracing_reset_online_cpus(&tr->array_buffer);
 2125
 2126	tr->current_trace = type;
 2127
 2128#ifdef CONFIG_TRACER_MAX_TRACE
 2129	if (type->use_max_tr) {
 2130		/* If we expanded the buffers, make sure the max is expanded too */
 2131		if (tr->ring_buffer_expanded)
 2132			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
 2133					   RING_BUFFER_ALL_CPUS);
 2134		tr->allocated_snapshot = true;
 2135	}
 2136#endif
 2137
 2138	/* the test is responsible for initializing and enabling */
 2139	pr_info("Testing tracer %s: ", type->name);
 2140	ret = type->selftest(type, tr);
 2141	/* the test is responsible for resetting too */
 2142	tr->current_trace = saved_tracer;
 2143	if (ret) {
 2144		printk(KERN_CONT "FAILED!\n");
 2145		/* Add the warning after printing 'FAILED' */
 2146		WARN_ON(1);
 2147		return -1;
 2148	}
 2149	/* Only reset on passing, to avoid touching corrupted buffers */
 2150	tracing_reset_online_cpus(&tr->array_buffer);
 2151
 2152#ifdef CONFIG_TRACER_MAX_TRACE
 2153	if (type->use_max_tr) {
 2154		tr->allocated_snapshot = false;
 2155
 2156		/* Shrink the max buffer again */
 2157		if (tr->ring_buffer_expanded)
 2158			ring_buffer_resize(tr->max_buffer.buffer, 1,
 2159					   RING_BUFFER_ALL_CPUS);
 2160	}
 2161#endif
 2162
 2163	printk(KERN_CONT "PASSED\n");
 2164	return 0;
 2165}
 2166
 2167static int do_run_tracer_selftest(struct tracer *type)
 2168{
 2169	int ret;
 2170
 2171	/*
 2172	 * Tests can take a long time, especially if they are run one after the
 2173	 * other, as does happen during bootup when all the tracers are
 2174	 * registered. This could cause the soft lockup watchdog to trigger.
 2175	 */
 2176	cond_resched();
 2177
 2178	tracing_selftest_running = true;
 2179	ret = run_tracer_selftest(type);
 2180	tracing_selftest_running = false;
 2181
 2182	return ret;
 2183}
 2184
 2185static __init int init_trace_selftests(void)
 2186{
 2187	struct trace_selftests *p, *n;
 2188	struct tracer *t, **last;
 2189	int ret;
 2190
 2191	selftests_can_run = true;
 2192
 2193	guard(mutex)(&trace_types_lock);
 2194
 2195	if (list_empty(&postponed_selftests))
 2196		return 0;
 2197
 2198	pr_info("Running postponed tracer tests:\n");
 2199
 2200	tracing_selftest_running = true;
 2201	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
 2202		/* This loop can take minutes when sanitizers are enabled, so
 2203		 * let's make sure we allow RCU processing.
 2204		 */
 2205		cond_resched();
 2206		ret = run_tracer_selftest(p->type);
 2207		/* If the test fails, then warn and remove from available_tracers */
 2208		if (ret < 0) {
 2209			WARN(1, "tracer: %s failed selftest, disabling\n",
 2210			     p->type->name);
 2211			last = &trace_types;
 2212			for (t = trace_types; t; t = t->next) {
 2213				if (t == p->type) {
 2214					*last = t->next;
 2215					break;
 2216				}
 2217				last = &t->next;
 2218			}
 2219		}
 2220		list_del(&p->list);
 2221		kfree(p);
 2222	}
 2223	tracing_selftest_running = false;
 2224
 2225	return 0;
 2226}
 2227core_initcall(init_trace_selftests);
 2228#else
 2229static inline int do_run_tracer_selftest(struct tracer *type)
 2230{
 2231	return 0;
 2232}
 2233#endif /* CONFIG_FTRACE_STARTUP_TEST */
 2234
 2235static void add_tracer_options(struct trace_array *tr, struct tracer *t);
 2236
 2237static void __init apply_trace_boot_options(void);
 2238
 2239/**
 2240 * register_tracer - register a tracer with the ftrace system.
 2241 * @type: the plugin for the tracer
 2242 *
 2243 * Register a new plugin tracer.
 2244 */
 2245int __init register_tracer(struct tracer *type)
 2246{
 2247	struct tracer *t;
 2248	int ret = 0;
 2249
 2250	if (!type->name) {
 2251		pr_info("Tracer must have a name\n");
 2252		return -1;
 2253	}
 2254
 2255	if (strlen(type->name) >= MAX_TRACER_SIZE) {
 2256		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
 2257		return -1;
 2258	}
 2259
 2260	if (security_locked_down(LOCKDOWN_TRACEFS)) {
 2261		pr_warn("Can not register tracer %s due to lockdown\n",
 2262			   type->name);
 2263		return -EPERM;
 2264	}
 2265
 2266	mutex_lock(&trace_types_lock);
 2267
 2268	for (t = trace_types; t; t = t->next) {
 2269		if (strcmp(type->name, t->name) == 0) {
 2270			/* already found */
 2271			pr_info("Tracer %s already registered\n",
 2272				type->name);
 2273			ret = -1;
 2274			goto out;
 2275		}
 2276	}
 2277
 2278	if (!type->set_flag)
 2279		type->set_flag = &dummy_set_flag;
 2280	if (!type->flags) {
 2281		/* allocate a dummy tracer_flags */
 2282		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
 2283		if (!type->flags) {
 2284			ret = -ENOMEM;
 2285			goto out;
 2286		}
 2287		type->flags->val = 0;
 2288		type->flags->opts = dummy_tracer_opt;
 2289	} else
 2290		if (!type->flags->opts)
 2291			type->flags->opts = dummy_tracer_opt;
 2292
 2293	/* store the tracer for __set_tracer_option */
 2294	type->flags->trace = type;
 2295
 2296	ret = do_run_tracer_selftest(type);
 2297	if (ret < 0)
 2298		goto out;
 2299
 2300	type->next = trace_types;
 2301	trace_types = type;
 2302	add_tracer_options(&global_trace, type);
 2303
 2304 out:
 2305	mutex_unlock(&trace_types_lock);
 2306
 2307	if (ret || !default_bootup_tracer)
 2308		goto out_unlock;
 2309
 2310	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
 2311		goto out_unlock;
 2312
 2313	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
 2314	/* Do we want this tracer to start on bootup? */
 2315	tracing_set_tracer(&global_trace, type->name);
 2316	default_bootup_tracer = NULL;
 2317
 2318	apply_trace_boot_options();
 2319
 2320	/* Disable other selftests, since running this tracer will break them. */
 2321	disable_tracing_selftest("running a tracer");
 2322
 2323 out_unlock:
 2324	return ret;
 2325}
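/*
 * A bare-bones registration sketch for a hypothetical "example" tracer,
 * only to illustrate the contract register_tracer() checks above: a unique
 * name shorter than MAX_TRACER_SIZE plus optional flags and selftest
 * callbacks. Field names follow struct tracer in kernel/trace/trace.h.
 */
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(example_tracer_register);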
 2326
 2327static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
 2328{
 2329	struct trace_buffer *buffer = buf->buffer;
 2330
 2331	if (!buffer)
 2332		return;
 2333
 2334	ring_buffer_record_disable(buffer);
 2335
 2336	/* Make sure all commits have finished */
 2337	synchronize_rcu();
 2338	ring_buffer_reset_cpu(buffer, cpu);
 2339
 2340	ring_buffer_record_enable(buffer);
 2341}
 2342
 2343void tracing_reset_online_cpus(struct array_buffer *buf)
 2344{
 2345	struct trace_buffer *buffer = buf->buffer;
 2346
 2347	if (!buffer)
 2348		return;
 2349
 2350	ring_buffer_record_disable(buffer);
 2351
 2352	/* Make sure all commits have finished */
 2353	synchronize_rcu();
 2354
 2355	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
 2356
 2357	ring_buffer_reset_online_cpus(buffer);
 2358
 2359	ring_buffer_record_enable(buffer);
 2360}
 2361
 2362static void tracing_reset_all_cpus(struct array_buffer *buf)
 2363{
 2364	struct trace_buffer *buffer = buf->buffer;
 2365
 2366	if (!buffer)
 2367		return;
 2368
 2369	ring_buffer_record_disable(buffer);
 2370
 2371	/* Make sure all commits have finished */
 2372	synchronize_rcu();
 2373
 2374	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
 2375
 2376	ring_buffer_reset(buffer);
 2377
 2378	ring_buffer_record_enable(buffer);
 2379}
 2380
 2381/* Must have trace_types_lock held */
 2382void tracing_reset_all_online_cpus_unlocked(void)
 2383{
 2384	struct trace_array *tr;
 2385
 2386	lockdep_assert_held(&trace_types_lock);
 2387
 2388	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 2389		if (!tr->clear_trace)
 2390			continue;
 2391		tr->clear_trace = false;
 2392		tracing_reset_online_cpus(&tr->array_buffer);
 2393#ifdef CONFIG_TRACER_MAX_TRACE
 2394		tracing_reset_online_cpus(&tr->max_buffer);
 2395#endif
 2396	}
 2397}
 2398
 2399void tracing_reset_all_online_cpus(void)
 2400{
 2401	mutex_lock(&trace_types_lock);
 2402	tracing_reset_all_online_cpus_unlocked();
 2403	mutex_unlock(&trace_types_lock);
 2404}
 2405
 2406int is_tracing_stopped(void)
 2407{
 2408	return global_trace.stop_count;
 2409}
 2410
 2411static void tracing_start_tr(struct trace_array *tr)
 2412{
 2413	struct trace_buffer *buffer;
 2414	unsigned long flags;
 2415
 2416	if (tracing_disabled)
 2417		return;
 2418
 2419	raw_spin_lock_irqsave(&tr->start_lock, flags);
 2420	if (--tr->stop_count) {
 2421		if (WARN_ON_ONCE(tr->stop_count < 0)) {
 2422			/* Someone screwed up their debugging */
 2423			tr->stop_count = 0;
 2424		}
 2425		goto out;
 2426	}
 2427
 2428	/* Prevent the buffers from switching */
 2429	arch_spin_lock(&tr->max_lock);
 2430
 2431	buffer = tr->array_buffer.buffer;
 2432	if (buffer)
 2433		ring_buffer_record_enable(buffer);
 2434
 2435#ifdef CONFIG_TRACER_MAX_TRACE
 2436	buffer = tr->max_buffer.buffer;
 2437	if (buffer)
 2438		ring_buffer_record_enable(buffer);
 2439#endif
 2440
 2441	arch_spin_unlock(&tr->max_lock);
 2442
 2443 out:
 2444	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 2445}
 2446
 2447/**
 2448 * tracing_start - quick start of the tracer
 2449 *
 2450 * If tracing is enabled but was stopped by tracing_stop,
 2451 * this will start the tracer back up.
 2452 */
 2453void tracing_start(void)
 2454
 2455{
 2456	return tracing_start_tr(&global_trace);
 2457}
 2458
 2459static void tracing_stop_tr(struct trace_array *tr)
 2460{
 2461	struct trace_buffer *buffer;
 2462	unsigned long flags;
 2463
 2464	raw_spin_lock_irqsave(&tr->start_lock, flags);
 2465	if (tr->stop_count++)
 2466		goto out;
 2467
 2468	/* Prevent the buffers from switching */
 2469	arch_spin_lock(&tr->max_lock);
 2470
 2471	buffer = tr->array_buffer.buffer;
 2472	if (buffer)
 2473		ring_buffer_record_disable(buffer);
 2474
 2475#ifdef CONFIG_TRACER_MAX_TRACE
 2476	buffer = tr->max_buffer.buffer;
 2477	if (buffer)
 2478		ring_buffer_record_disable(buffer);
 2479#endif
 2480
 2481	arch_spin_unlock(&tr->max_lock);
 2482
 2483 out:
 2484	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 2485}
 2486
 2487/**
 2488 * tracing_stop - quick stop of the tracer
 2489 *
 2490 * Lightweight way to stop tracing. Use in conjunction with
 2491 * tracing_start.
 2492 */
 2493void tracing_stop(void)
 2494{
 2495	return tracing_stop_tr(&global_trace);
 2496}
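/*
 * A sketch of the intended pairing: tracing_stop()/tracing_start() bracket
 * a window in which the global buffers are quiescent and safe to inspect.
 * The stop count nests, so callers may stack such windows.
 */
static void example_freeze_window(void)
{
	tracing_stop();
	/* ... examine or copy out the now-quiescent buffers ... */
	tracing_start();
}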
 2497
 2498/*
 2499 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 2500 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 2501 * simplifies those functions and keeps them in sync.
 2502 */
 2503enum print_line_t trace_handle_return(struct trace_seq *s)
 2504{
 2505	return trace_seq_has_overflowed(s) ?
 2506		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
 2507}
 2508EXPORT_SYMBOL_GPL(trace_handle_return);
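/*
 * A minimal sketch of an event output callback built on trace_handle_return();
 * the "example" strings are placeholders, but the callback signature matches
 * struct trace_event_functions::trace.
 */
static enum print_line_t
example_trace_output(struct trace_iterator *iter, int flags,
		     struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_puts(s, "example event\n");

	/* Collapses the overflow check that every output callback must do. */
	return trace_handle_return(s);
}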
 2509
 2510static unsigned short migration_disable_value(void)
 2511{
 2512#if defined(CONFIG_SMP)
 2513	return current->migration_disabled;
 2514#else
 2515	return 0;
 2516#endif
 2517}
 2518
 2519unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
 2520{
 2521	unsigned int trace_flags = irqs_status;
 2522	unsigned int pc;
 2523
 2524	pc = preempt_count();
 2525
 2526	if (pc & NMI_MASK)
 2527		trace_flags |= TRACE_FLAG_NMI;
 2528	if (pc & HARDIRQ_MASK)
 2529		trace_flags |= TRACE_FLAG_HARDIRQ;
 2530	if (in_serving_softirq())
 2531		trace_flags |= TRACE_FLAG_SOFTIRQ;
 2532	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
 2533		trace_flags |= TRACE_FLAG_BH_OFF;
 2534
 2535	if (tif_need_resched())
 2536		trace_flags |= TRACE_FLAG_NEED_RESCHED;
 2537	if (test_preempt_need_resched())
 2538		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
 2539	if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY))
 2540		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
 2541	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
 2542		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
 2543}
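/*
 * A decoding sketch for the context word packed above, assuming the layout
 * produced by tracing_gen_ctx_irq_test(): bits 0-3 hold the (capped)
 * preemption depth, bits 4-7 the (capped) migration-disable depth, and
 * bits 16 and up the TRACE_FLAG_* bits. The struct and names are only
 * illustrative.
 */
struct example_ctx {
	unsigned int	preempt_depth;
	unsigned int	migration_disabled;
	unsigned int	trace_flags;
};

static struct example_ctx example_unpack_ctx(unsigned int trace_ctx)
{
	struct example_ctx ctx = {
		.preempt_depth		= trace_ctx & 0xf,
		.migration_disabled	= (trace_ctx >> 4) & 0xf,
		.trace_flags		= trace_ctx >> 16,
	};

	return ctx;
}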
 2544
 2545struct ring_buffer_event *
 2546trace_buffer_lock_reserve(struct trace_buffer *buffer,
 2547			  int type,
 2548			  unsigned long len,
 2549			  unsigned int trace_ctx)
 2550{
 2551	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
 2552}
 2553
 2554DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
 2555DEFINE_PER_CPU(int, trace_buffered_event_cnt);
 2556static int trace_buffered_event_ref;
 2557
 2558/**
 2559 * trace_buffered_event_enable - enable buffering events
 2560 *
 2561 * When events are being filtered, it is quicker to use a temporary
 2562 * buffer to write the event data into if there's a likely chance
 2563 * that it will not be committed. Discarding an event from the ring
 2564 * buffer is not as fast as committing one, and is much slower than
 2565 * copying the data and committing it in one go.
 2566 *
 2567 * When an event is to be filtered, per-CPU buffers are allocated to
 2568 * write the event data into. If the event is filtered and discarded,
 2569 * it is simply dropped; otherwise the entire data is committed in
 2570 * one shot.
 2571 */
 2572void trace_buffered_event_enable(void)
 2573{
 2574	struct ring_buffer_event *event;
 2575	struct page *page;
 2576	int cpu;
 2577
 2578	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
 2579
 2580	if (trace_buffered_event_ref++)
 2581		return;
 2582
 2583	for_each_tracing_cpu(cpu) {
 2584		page = alloc_pages_node(cpu_to_node(cpu),
 2585					GFP_KERNEL | __GFP_NORETRY, 0);
 2586		/* This is just an optimization and can handle failures */
 2587		if (!page) {
 2588			pr_err("Failed to allocate event buffer\n");
 2589			break;
 2590		}
 2591
 2592		event = page_address(page);
 2593		memset(event, 0, sizeof(*event));
 2594
 2595		per_cpu(trace_buffered_event, cpu) = event;
 2596
 2597		preempt_disable();
 2598		if (cpu == smp_processor_id() &&
 2599		    __this_cpu_read(trace_buffered_event) !=
 2600		    per_cpu(trace_buffered_event, cpu))
 2601			WARN_ON_ONCE(1);
 2602		preempt_enable();
 2603	}
 2604}
 2605
 2606static void enable_trace_buffered_event(void *data)
 2607{
 2608	/* Probably not needed, but do it anyway */
 2609	smp_rmb();
 2610	this_cpu_dec(trace_buffered_event_cnt);
 2611}
 2612
 2613static void disable_trace_buffered_event(void *data)
 2614{
 2615	this_cpu_inc(trace_buffered_event_cnt);
 2616}
 2617
 2618/**
 2619 * trace_buffered_event_disable - disable buffering events
 2620 *
 2621 * When a filter is removed, it is faster to not use the buffered
 2622 * events, and to commit directly into the ring buffer. Free up
 2623 * the temp buffers when there are no more users. This requires
 2624 * special synchronization with current events.
 2625 */
 2626void trace_buffered_event_disable(void)
 2627{
 2628	int cpu;
 2629
 2630	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
 2631
 2632	if (WARN_ON_ONCE(!trace_buffered_event_ref))
 2633		return;
 2634
 2635	if (--trace_buffered_event_ref)
 2636		return;
 2637
 2638	/* For each CPU, set the buffer as used. */
 2639	on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
 2640			 NULL, true);
 2641
 2642	/* Wait for all current users to finish */
 2643	synchronize_rcu();
 2644
 2645	for_each_tracing_cpu(cpu) {
 2646		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
 2647		per_cpu(trace_buffered_event, cpu) = NULL;
 2648	}
 2649
 2650	/*
 2651	 * Wait for all CPUs that potentially started checking if they can use
 2652	 * their event buffer only after the previous synchronize_rcu() call and
 2653	 * they still read a valid pointer from trace_buffered_event. It must be
 2654	 * ensured they don't see cleared trace_buffered_event_cnt else they
 2655	 * could wrongly decide to use the pointed-to buffer which is now freed.
 2656	 */
 2657	synchronize_rcu();
 2658
 2659	/* For each CPU, relinquish the buffer */
 2660	on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
 2661			 true);
 2662}
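/*
 * A sketch of the reference-counted bracketing the two functions above
 * expect; the (hypothetical) caller must already hold event_mutex, mirroring
 * how the event filter code pairs enable/disable around filter updates.
 */
static void example_toggle_buffered_events(bool enable)
{
	lockdep_assert_held(&event_mutex);

	if (enable)
		trace_buffered_event_enable();
	else
		trace_buffered_event_disable();
}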
 2663
 2664static struct trace_buffer *temp_buffer;
 2665
 2666struct ring_buffer_event *
 2667trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
 2668			  struct trace_event_file *trace_file,
 2669			  int type, unsigned long len,
 2670			  unsigned int trace_ctx)
 2671{
 2672	struct ring_buffer_event *entry;
 2673	struct trace_array *tr = trace_file->tr;
 2674	int val;
 2675
 2676	*current_rb = tr->array_buffer.buffer;
 2677
 2678	if (!tr->no_filter_buffering_ref &&
 2679	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
 2680		preempt_disable_notrace();
 2681		/*
 2682		 * Filtering is on, so try to use the per cpu buffer first.
 2683		 * This buffer will simulate a ring_buffer_event,
 2684		 * where the type_len is zero and the array[0] will
 2685		 * hold the full length.
 2686		 * (see include/linux/ring_buffer.h for details on
 2687		 *  how the ring_buffer_event is structured).
 2688		 *
 2689		 * Using a temp buffer during filtering and copying it
 2690		 * on a matched filter is quicker than writing directly
 2691		 * into the ring buffer and then discarding it when
 2692		 * it doesn't match. That is because the discard
 2693		 * requires several atomic operations to get right.
 2694		 * Copying on match and doing nothing on a failed match
 2695		 * is still quicker than no copy on match, but having
 2696		 * to discard out of the ring buffer on a failed match.
 2697		 */
 2698		if ((entry = __this_cpu_read(trace_buffered_event))) {
 2699			int max_len = PAGE_SIZE - struct_size(entry, array, 1);
 2700
 2701			val = this_cpu_inc_return(trace_buffered_event_cnt);
 2702
 2703			/*
 2704			 * Preemption is disabled, but interrupts and NMIs
 2705			 * can still come in now. If that happens after
 2706			 * the above increment, then it will have to go
 2707			 * back to the old method of allocating the event
 2708			 * on the ring buffer, and if the filter fails, it
 2709			 * will have to call ring_buffer_discard_commit()
 2710			 * to remove it.
 2711			 *
 2712			 * Need to also check the unlikely case that the
 2713			 * length is bigger than the temp buffer size.
 2714			 * If that happens, then the reserve is pretty much
 2715			 * guaranteed to fail, as the ring buffer currently
 2716			 * only allows events less than a page. But that may
 2717			 * change in the future, so let the ring buffer reserve
 2718			 * handle the failure in that case.
 2719			 */
 2720			if (val == 1 && likely(len <= max_len)) {
 2721				trace_event_setup(entry, type, trace_ctx);
 2722				entry->array[0] = len;
 2723				/* Return with preemption disabled */
 2724				return entry;
 2725			}
 2726			this_cpu_dec(trace_buffered_event_cnt);
 2727		}
 2728		/* __trace_buffer_lock_reserve() disables preemption */
 2729		preempt_enable_notrace();
 2730	}
 2731
 2732	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
 2733					    trace_ctx);
 2734	/*
 2735	 * If tracing is off, but we have triggers enabled
 2736	 * we still need to look at the event data. Use the temp_buffer
 2737	 * to store the trace event for the trigger to use. It's recursion
 2738	 * safe and will not be recorded anywhere.
 2739	 */
 2740	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
 2741		*current_rb = temp_buffer;
 2742		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
 2743						    trace_ctx);
 2744	}
 2745	return entry;
 2746}
 2747EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
 2748
 2749static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
 2750static DEFINE_MUTEX(tracepoint_printk_mutex);
 2751
 2752static void output_printk(struct trace_event_buffer *fbuffer)
 2753{
 2754	struct trace_event_call *event_call;
 2755	struct trace_event_file *file;
 2756	struct trace_event *event;
 2757	unsigned long flags;
 2758	struct trace_iterator *iter = tracepoint_print_iter;
 2759
 2760	/* We should never get here if iter is NULL */
 2761	if (WARN_ON_ONCE(!iter))
 2762		return;
 2763
 2764	event_call = fbuffer->trace_file->event_call;
 2765	if (!event_call || !event_call->event.funcs ||
 2766	    !event_call->event.funcs->trace)
 2767		return;
 2768
 2769	file = fbuffer->trace_file;
 2770	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
 2771	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
 2772	     !filter_match_preds(file->filter, fbuffer->entry)))
 2773		return;
 2774
 2775	event = &fbuffer->trace_file->event_call->event;
 2776
 2777	raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
 2778	trace_seq_init(&iter->seq);
 2779	iter->ent = fbuffer->entry;
 2780	event_call->event.funcs->trace(iter, 0, event);
 2781	trace_seq_putc(&iter->seq, 0);
 2782	printk("%s", iter->seq.buffer);
 2783
 2784	raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
 2785}
 2786
 2787int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
 2788			     void *buffer, size_t *lenp,
 2789			     loff_t *ppos)
 2790{
 2791	int save_tracepoint_printk;
 2792	int ret;
 2793
 2794	guard(mutex)(&tracepoint_printk_mutex);
 2795	save_tracepoint_printk = tracepoint_printk;
 2796
 2797	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 2798
 2799	/*
 2800	 * This will force exiting early, as tracepoint_printk
 2801	 * is always zero when tracepoint_print_iter is not allocated
 2802	 */
 2803	if (!tracepoint_print_iter)
 2804		tracepoint_printk = 0;
 2805
 2806	if (save_tracepoint_printk == tracepoint_printk)
 2807		return ret;
 2808
 2809	if (tracepoint_printk)
 2810		static_key_enable(&tracepoint_printk_key.key);
 2811	else
 2812		static_key_disable(&tracepoint_printk_key.key);
 2813
 2814	return ret;
 2815}
 2816
 2817void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 2818{
 2819	enum event_trigger_type tt = ETT_NONE;
 2820	struct trace_event_file *file = fbuffer->trace_file;
 2821
 2822	if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
 2823			fbuffer->entry, &tt))
 2824		goto discard;
 2825
 2826	if (static_key_false(&tracepoint_printk_key.key))
 2827		output_printk(fbuffer);
 2828
 2829	if (static_branch_unlikely(&trace_event_exports_enabled))
 2830		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
 2831
 2832	trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
 2833			fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
 2834
 2835discard:
 2836	if (tt)
 2837		event_triggers_post_call(file, tt);
 2838
 2839}
 2840EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 2841
 2842/*
 2843 * Skip 3:
 2844 *
 2845 *   trace_buffer_unlock_commit_regs()
 2846 *   trace_event_buffer_commit()
 2847 *   trace_event_raw_event_xxx()
 2848 */
 2849# define STACK_SKIP 3
 2850
 2851void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 2852				     struct trace_buffer *buffer,
 2853				     struct ring_buffer_event *event,
 2854				     unsigned int trace_ctx,
 2855				     struct pt_regs *regs)
 2856{
 2857	__buffer_unlock_commit(buffer, event);
 2858
 2859	/*
 2860	 * If regs is not set, then skip the necessary functions.
 2861	 * Note, we can still get here via blktrace, wakeup tracer
 2862	 * and mmiotrace, but that's ok if they lose a function or
 2863	 * two. They are not that meaningful.
 2864	 */
 2865	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
 2866	ftrace_trace_userstack(tr, buffer, trace_ctx);
 2867}
 2868
 2869/*
 2870 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 2871 */
 2872void
 2873trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 2874				   struct ring_buffer_event *event)
 2875{
 2876	__buffer_unlock_commit(buffer, event);
 2877}
 2878
 2879void
 2880trace_function(struct trace_array *tr, unsigned long ip, unsigned long
 2881	       parent_ip, unsigned int trace_ctx)
 2882{
 2883	struct trace_buffer *buffer = tr->array_buffer.buffer;
 2884	struct ring_buffer_event *event;
 2885	struct ftrace_entry *entry;
 2886
 2887	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 2888					    trace_ctx);
 2889	if (!event)
 2890		return;
 2891	entry	= ring_buffer_event_data(event);
 2892	entry->ip			= ip;
 2893	entry->parent_ip		= parent_ip;
 2894
 2895	if (static_branch_unlikely(&trace_function_exports_enabled))
 2896		ftrace_exports(event, TRACE_EXPORT_FUNCTION);
 2897	__buffer_unlock_commit(buffer, event);
 2898}
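/*
 * A sketch of the usual trace_function() call site: a (hypothetical)
 * function-tracer hook generates the context word once and forwards both
 * instruction pointers. Real callers also guard against recursion and
 * check per-CPU enablement first.
 */
static void example_function_hook(struct trace_array *tr, unsigned long ip,
				  unsigned long parent_ip)
{
	unsigned int trace_ctx = tracing_gen_ctx();

	trace_function(tr, ip, parent_ip, trace_ctx);
}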
 2899
 2900#ifdef CONFIG_STACKTRACE
 2901
 2902/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
 2903#define FTRACE_KSTACK_NESTING	4
 2904
 2905#define FTRACE_KSTACK_ENTRIES	(SZ_4K / FTRACE_KSTACK_NESTING)
 2906
 2907struct ftrace_stack {
 2908	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
 2909};
 2910
 2911
 2912struct ftrace_stacks {
 2913	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
 2914};
 2915
 2916static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
 2917static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 2918
 2919static void __ftrace_trace_stack(struct trace_array *tr,
 2920				 struct trace_buffer *buffer,
 2921				 unsigned int trace_ctx,
 2922				 int skip, struct pt_regs *regs)
 2923{
 2924	struct ring_buffer_event *event;
 2925	unsigned int size, nr_entries;
 2926	struct ftrace_stack *fstack;
 2927	struct stack_entry *entry;
 2928	int stackidx;
 2929
 2930	/*
 2931	 * Add one, for this function and the call to save_stack_trace().
 2932	 * If regs is set, then these functions will not be in the way.
 2933	 */
 2934#ifndef CONFIG_UNWINDER_ORC
 2935	if (!regs)
 2936		skip++;
 2937#endif
 2938
 2939	preempt_disable_notrace();
 2940
 2941	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
 2942
 2943	/* This should never happen. If it does, yell once and skip */
 2944	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
 2945		goto out;
 2946
 2947	/*
 2948	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
 2949	 * interrupt will either see the value pre increment or post
 2950	 * increment. If the interrupt happens pre increment it will have
 2951	 * restored the counter when it returns.  We just need a barrier to
 2952	 * keep gcc from moving things around.
 2953	 */
 2954	barrier();
 2955
 2956	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
 2957	size = ARRAY_SIZE(fstack->calls);
 2958
 2959	if (regs) {
 2960		nr_entries = stack_trace_save_regs(regs, fstack->calls,
 2961						   size, skip);
 2962	} else {
 2963		nr_entries = stack_trace_save(fstack->calls, size, skip);
 2964	}
 2965
 2966#ifdef CONFIG_DYNAMIC_FTRACE
 2967	/* Mark entry of stack trace as trampoline code */
 2968	if (tr->ops && tr->ops->trampoline) {
 2969		unsigned long tramp_start = tr->ops->trampoline;
 2970		unsigned long tramp_end = tramp_start + tr->ops->trampoline_size;
 2971		unsigned long *calls = fstack->calls;
 2972
 2973		for (int i = 0; i < nr_entries; i++) {
 2974			if (calls[i] >= tramp_start && calls[i] < tramp_end)
 2975				calls[i] = FTRACE_TRAMPOLINE_MARKER;
 2976		}
 2977	}
 2978#endif
 2979
 2980	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
 2981				    struct_size(entry, caller, nr_entries),
 2982				    trace_ctx);
 2983	if (!event)
 2984		goto out;
 2985	entry = ring_buffer_event_data(event);
 2986
 2987	entry->size = nr_entries;
 2988	memcpy(&entry->caller, fstack->calls,
 2989	       flex_array_size(entry, caller, nr_entries));
 2990
 2991	__buffer_unlock_commit(buffer, event);
 2992
 2993 out:
 2994	/* Again, don't let gcc optimize things here */
 2995	barrier();
 2996	__this_cpu_dec(ftrace_stack_reserve);
 2997	preempt_enable_notrace();
 2998
 2999}
 3000
 3001static inline void ftrace_trace_stack(struct trace_array *tr,
 3002				      struct trace_buffer *buffer,
 3003				      unsigned int trace_ctx,
 3004				      int skip, struct pt_regs *regs)
 3005{
 3006	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
 3007		return;
 3008
 3009	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs);
 3010}
 3011
 3012void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
 3013		   int skip)
 3014{
 3015	struct trace_buffer *buffer = tr->array_buffer.buffer;
 3016
 3017	if (rcu_is_watching()) {
 3018		__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
 3019		return;
 3020	}
 3021
 3022	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
 3023		return;
 3024
 3025	/*
 3026	 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
 3027	 * but if the above rcu_is_watching() failed, then the NMI
 3028	 * triggered someplace critical, and ct_irq_enter() should
 3029	 * not be called from NMI.
 3030	 */
 3031	if (unlikely(in_nmi()))
 3032		return;
 3033
 3034	ct_irq_enter_irqson();
 3035	__ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL);
 3036	ct_irq_exit_irqson();
 3037}
 3038
 3039/**
 3040 * trace_dump_stack - record a stack back trace in the trace buffer
 3041 * @skip: Number of functions to skip (helper handlers)
 3042 */
 3043void trace_dump_stack(int skip)
 3044{
 3045	if (tracing_disabled || tracing_selftest_running)
 3046		return;
 3047
 3048#ifndef CONFIG_UNWINDER_ORC
 3049	/* Skip 1 to skip this function. */
 3050	skip++;
 3051#endif
 3052	__ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer,
 3053				tracing_gen_ctx(), skip, NULL);
 3054}
 3055EXPORT_SYMBOL_GPL(trace_dump_stack);
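/*
 * A usage sketch: trace_dump_stack() records the caller's backtrace into the
 * trace buffer instead of the console, which is far less intrusive than
 * dump_stack() on hot paths. The wrapper name is hypothetical; passing 1
 * would additionally skip the wrapper itself.
 */
static inline void example_mark_path(void)
{
	trace_dump_stack(0);
}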
 3056
 3057#ifdef CONFIG_USER_STACKTRACE_SUPPORT
 3058static DEFINE_PER_CPU(int, user_stack_count);
 3059
 3060static void
 3061ftrace_trace_userstack(struct trace_array *tr,
 3062		       struct trace_buffer *buffer, unsigned int trace_ctx)
 3063{
 3064	struct ring_buffer_event *event;
 3065	struct userstack_entry *entry;
 3066
 3067	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
 3068		return;
 3069
 3070	/*
 3071	 * NMIs cannot handle page faults, even with fixups.
 3072	 * Saving the user stack can (and often does) fault.
 3073	 */
 3074	if (unlikely(in_nmi()))
 3075		return;
 3076
 3077	/*
 3078	 * prevent recursion, since the user stack tracing may
 3079	 * trigger other kernel events.
 3080	 */
 3081	preempt_disable();
 3082	if (__this_cpu_read(user_stack_count))
 3083		goto out;
 3084
 3085	__this_cpu_inc(user_stack_count);
 3086
 3087	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 3088					    sizeof(*entry), trace_ctx);
 3089	if (!event)
 3090		goto out_drop_count;
 3091	entry	= ring_buffer_event_data(event);
 3092
 3093	entry->tgid		= current->tgid;
 3094	memset(&entry->caller, 0, sizeof(entry->caller));
 3095
 3096	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
 3097	__buffer_unlock_commit(buffer, event);
 3098
 3099 out_drop_count:
 3100	__this_cpu_dec(user_stack_count);
 3101 out:
 3102	preempt_enable();
 3103}
 3104#else /* CONFIG_USER_STACKTRACE_SUPPORT */
 3105static void ftrace_trace_userstack(struct trace_array *tr,
 3106				   struct trace_buffer *buffer,
 3107				   unsigned int trace_ctx)
 3108{
 3109}
 3110#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
 3111
 3112#endif /* CONFIG_STACKTRACE */
 3113
 3114static inline void
 3115func_repeats_set_delta_ts(struct func_repeats_entry *entry,
 3116			  unsigned long long delta)
 3117{
 3118	entry->bottom_delta_ts = delta & U32_MAX;
 3119	entry->top_delta_ts = (delta >> 32);
 3120}
 3121
 3122void trace_last_func_repeats(struct trace_array *tr,
 3123			     struct trace_func_repeats *last_info,
 3124			     unsigned int trace_ctx)
 3125{
 3126	struct trace_buffer *buffer = tr->array_buffer.buffer;
 3127	struct func_repeats_entry *entry;
 3128	struct ring_buffer_event *event;
 3129	u64 delta;
 3130
 3131	event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
 3132					    sizeof(*entry), trace_ctx);
 3133	if (!event)
 3134		return;
 3135
 3136	delta = ring_buffer_event_time_stamp(buffer, event) -
 3137		last_info->ts_last_call;
 3138
 3139	entry = ring_buffer_event_data(event);
 3140	entry->ip = last_info->ip;
 3141	entry->parent_ip = last_info->parent_ip;
 3142	entry->count = last_info->count;
 3143	func_repeats_set_delta_ts(entry, delta);
 3144
 3145	__buffer_unlock_commit(buffer, event);
 3146}
 3147
 3148/* created for use with alloc_percpu */
 3149struct trace_buffer_struct {
 3150	int nesting;
 3151	char buffer[4][TRACE_BUF_SIZE];
 3152};
 3153
 3154static struct trace_buffer_struct __percpu *trace_percpu_buffer;
 3155
 3156/*
 3157 * This allows for lockless recording.  If we're nested too deeply, then
 3158 * this returns NULL.
 3159 */
 3160static char *get_trace_buf(void)
 3161{
 3162	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
 3163
 3164	if (!trace_percpu_buffer || buffer->nesting >= 4)
 3165		return NULL;
 3166
 3167	buffer->nesting++;
 3168
 3169	/* Interrupts must see nesting incremented before we use the buffer */
 3170	barrier();
 3171	return &buffer->buffer[buffer->nesting - 1][0];
 3172}
 3173
 3174static void put_trace_buf(void)
 3175{
 3176	/* Don't let the decrement of nesting leak before this */
 3177	barrier();
 3178	this_cpu_dec(trace_percpu_buffer->nesting);
 3179}
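/*
 * The get/put pair above must be used with preemption disabled and strictly
 * nested, as in this sketch (a trimmed-down shape of trace_vbprintk() and
 * __trace_array_vprintk() below); example_format_message() is hypothetical.
 */
static int example_format_message(const char *fmt, ...)
{
	va_list ap;
	char *tbuffer;
	int len = 0;

	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer)
		goto out;

	va_start(ap, fmt);
	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, ap);
	va_end(ap);

	/* ... copy tbuffer into a ring buffer event here ... */

	put_trace_buf();
out:
	preempt_enable_notrace();
	return len;
}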
 3180
 3181static int alloc_percpu_trace_buffer(void)
 3182{
 3183	struct trace_buffer_struct __percpu *buffers;
 3184
 3185	if (trace_percpu_buffer)
 3186		return 0;
 3187
 3188	buffers = alloc_percpu(struct trace_buffer_struct);
 3189	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
 3190		return -ENOMEM;
 3191
 3192	trace_percpu_buffer = buffers;
 3193	return 0;
 3194}
 3195
 3196static int buffers_allocated;
 3197
 3198void trace_printk_init_buffers(void)
 3199{
 3200	if (buffers_allocated)
 3201		return;
 3202
 3203	if (alloc_percpu_trace_buffer())
 3204		return;
 3205
 3206	/* trace_printk() is for debug use only. Don't use it in production. */
 3207
 3208	pr_warn("\n");
 3209	pr_warn("**********************************************************\n");
 3210	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
 3211	pr_warn("**                                                      **\n");
 3212	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
 3213	pr_warn("**                                                      **\n");
 3214	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
 3215	pr_warn("** unsafe for production use.                           **\n");
 3216	pr_warn("**                                                      **\n");
 3217	pr_warn("** If you see this message and you are not debugging    **\n");
 3218	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
 3219	pr_warn("**                                                      **\n");
 3220	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
 3221	pr_warn("**********************************************************\n");
 3222
 3223	/* Expand the buffers to set size */
 3224	tracing_update_buffers(&global_trace);
 3225
 3226	buffers_allocated = 1;
 3227
 3228	/*
 3229	 * trace_printk_init_buffers() can be called by modules.
 3230	 * If that happens, then we need to start cmdline recording
 3231	 * directly here. If the global_trace.buffer is already
 3232	 * allocated here, then this was called by module code.
 3233	 */
 3234	if (global_trace.array_buffer.buffer)
 3235		tracing_start_cmdline_record();
 3236}
 3237EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
 3238
 3239void trace_printk_start_comm(void)
 3240{
 3241	/* Start tracing comms if trace printk is set */
 3242	if (!buffers_allocated)
 3243		return;
 3244	tracing_start_cmdline_record();
 3245}
 3246
 3247static void trace_printk_start_stop_comm(int enabled)
 3248{
 3249	if (!buffers_allocated)
 3250		return;
 3251
 3252	if (enabled)
 3253		tracing_start_cmdline_record();
 3254	else
 3255		tracing_stop_cmdline_record();
 3256}
 3257
 3258/**
 3259 * trace_vbprintk - write binary msg to tracing buffer
 3260 * @ip:    The address of the caller
 3261 * @fmt:   The string format to write to the buffer
 3262 * @args:  Arguments for @fmt
 3263 */
 3264int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 3265{
 3266	struct ring_buffer_event *event;
 3267	struct trace_buffer *buffer;
 3268	struct trace_array *tr = READ_ONCE(printk_trace);
 3269	struct bprint_entry *entry;
 3270	unsigned int trace_ctx;
 3271	char *tbuffer;
 3272	int len = 0, size;
 3273
 3274	if (!printk_binsafe(tr))
 3275		return trace_vprintk(ip, fmt, args);
 3276
 3277	if (unlikely(tracing_selftest_running || tracing_disabled))
 3278		return 0;
 3279
 3280	/* Don't pollute graph traces with trace_vprintk internals */
 3281	pause_graph_tracing();
 3282
 3283	trace_ctx = tracing_gen_ctx();
 3284	preempt_disable_notrace();
 3285
 3286	tbuffer = get_trace_buf();
 3287	if (!tbuffer) {
 3288		len = 0;
 3289		goto out_nobuffer;
 3290	}
 3291
 3292	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
 3293
 3294	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
 3295		goto out_put;
 3296
 3297	size = sizeof(*entry) + sizeof(u32) * len;
 3298	buffer = tr->array_buffer.buffer;
 3299	ring_buffer_nest_start(buffer);
 3300	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
 3301					    trace_ctx);
 3302	if (!event)
 3303		goto out;
 3304	entry = ring_buffer_event_data(event);
 3305	entry->ip			= ip;
 3306	entry->fmt			= fmt;
 3307
 3308	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 3309	__buffer_unlock_commit(buffer, event);
 3310	ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
 3311
 3312out:
 3313	ring_buffer_nest_end(buffer);
 3314out_put:
 3315	put_trace_buf();
 3316
 3317out_nobuffer:
 3318	preempt_enable_notrace();
 3319	unpause_graph_tracing();
 3320
 3321	return len;
 3322}
 3323EXPORT_SYMBOL_GPL(trace_vbprintk);
 3324
 3325__printf(3, 0)
 3326static int
 3327__trace_array_vprintk(struct trace_buffer *buffer,
 3328		      unsigned long ip, const char *fmt, va_list args)
 3329{
 3330	struct ring_buffer_event *event;
 3331	int len = 0, size;
 3332	struct print_entry *entry;
 3333	unsigned int trace_ctx;
 3334	char *tbuffer;
 3335
 3336	if (tracing_disabled)
 3337		return 0;
 3338
 3339	/* Don't pollute graph traces with trace_vprintk internals */
 3340	pause_graph_tracing();
 3341
 3342	trace_ctx = tracing_gen_ctx();
 3343	preempt_disable_notrace();
 3344
 3345
 3346	tbuffer = get_trace_buf();
 3347	if (!tbuffer) {
 3348		len = 0;
 3349		goto out_nobuffer;
 3350	}
 3351
 3352	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 3353
 3354	size = sizeof(*entry) + len + 1;
 3355	ring_buffer_nest_start(buffer);
 3356	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 3357					    trace_ctx);
 3358	if (!event)
 3359		goto out;
 3360	entry = ring_buffer_event_data(event);
 3361	entry->ip = ip;
 3362
 3363	memcpy(&entry->buf, tbuffer, len + 1);
 3364	__buffer_unlock_commit(buffer, event);
 3365	ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL);
 3366
 3367out:
 3368	ring_buffer_nest_end(buffer);
 3369	put_trace_buf();
 3370
 3371out_nobuffer:
 3372	preempt_enable_notrace();
 3373	unpause_graph_tracing();
 3374
 3375	return len;
 3376}
 3377
 3378__printf(3, 0)
 3379int trace_array_vprintk(struct trace_array *tr,
 3380			unsigned long ip, const char *fmt, va_list args)
 3381{
 3382	if (tracing_selftest_running && tr == &global_trace)
 3383		return 0;
 3384
 3385	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
 3386}
 3387
 3388/**
 3389 * trace_array_printk - Print a message to a specific instance
 3390 * @tr: The instance trace_array descriptor
 3391 * @ip: The instruction pointer that this is called from.
 3392 * @fmt: The format to print (printf format)
 3393 *
 3394 * If a subsystem sets up its own instance, they have the right to
 3395 * printk strings into their tracing instance buffer using this
 3396 * function. Note, this function will not write into the top level
 3397 * buffer (use trace_printk() for that), as writing into the top level
 3398 * buffer should only have events that can be individually disabled.
 3399 * trace_printk() is only used for debugging a kernel, and should not
 3400 * be ever incorporated in normal use.
 3401 *
 3402 * trace_array_printk() can be used, as it will not add noise to the
 3403 * top level tracing buffer.
 3404 *
 3405 * Note, trace_array_init_printk() must be called on @tr before this
 3406 * can be used.
 3407 */
 3408__printf(3, 0)
 3409int trace_array_printk(struct trace_array *tr,
 3410		       unsigned long ip, const char *fmt, ...)
 3411{
 3412	int ret;
 3413	va_list ap;
 3414
 3415	if (!tr)
 3416		return -ENOENT;
 3417
 3418	/* This is only allowed for created instances */
 3419	if (tr == &global_trace)
 3420		return 0;
 3421
 3422	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
 3423		return 0;
 3424
 3425	va_start(ap, fmt);
 3426	ret = trace_array_vprintk(tr, ip, fmt, ap);
 3427	va_end(ap);
 3428	return ret;
 3429}
 3430EXPORT_SYMBOL_GPL(trace_array_printk);
 3431
 3432/**
 3433 * trace_array_init_printk - Initialize buffers for trace_array_printk()
 3434 * @tr: The trace array to initialize the buffers for
 3435 *
 3436 * As trace_array_printk() only writes into instances, they are OK to
 3437 * have in the kernel (unlike trace_printk()). This needs to be called
 3438 * before trace_array_printk() can be used on a trace_array.
 3439 */
 3440int trace_array_init_printk(struct trace_array *tr)
 3441{
 3442	if (!tr)
 3443		return -ENOENT;
 3444
 3445	/* This is only allowed for created instances */
 3446	if (tr == &global_trace)
 3447		return -EINVAL;
 3448
 3449	return alloc_percpu_trace_buffer();
 3450}
 3451EXPORT_SYMBOL_GPL(trace_array_init_printk);
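/*
 * A sketch tying the two interfaces together for an instance buffer. It
 * assumes the caller already created or looked up a private trace_array
 * (for example via trace_array_get_by_name()) and only illustrates the
 * required init-before-printk ordering; example_instance_log() is
 * hypothetical.
 */
static int example_instance_log(struct trace_array *tr, int value)
{
	int ret;

	ret = trace_array_init_printk(tr);
	if (ret)
		return ret;

	return trace_array_printk(tr, _THIS_IP_, "example value: %d\n", value);
}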
 3452
 3453__printf(3, 4)
 3454int trace_array_printk_buf(struct trace_buffer *buffer,
 3455			   unsigned long ip, const char *fmt, ...)
 3456{
 3457	int ret;
 3458	va_list ap;
 3459
 3460	if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK))
 3461		return 0;
 3462
 3463	va_start(ap, fmt);
 3464	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
 3465	va_end(ap);
 3466	return ret;
 3467}
 3468
 3469__printf(2, 0)
 3470int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 3471{
 3472	return trace_array_vprintk(printk_trace, ip, fmt, args);
 3473}
 3474EXPORT_SYMBOL_GPL(trace_vprintk);
 3475
 3476static void trace_iterator_increment(struct trace_iterator *iter)
 3477{
 3478	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
 3479
 3480	iter->idx++;
 3481	if (buf_iter)
 3482		ring_buffer_iter_advance(buf_iter);
 3483}
 3484
 3485static struct trace_entry *
 3486peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 3487		unsigned long *lost_events)
 3488{
 3489	struct ring_buffer_event *event;
 3490	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
 3491
 3492	if (buf_iter) {
 3493		event = ring_buffer_iter_peek(buf_iter, ts);
 3494		if (lost_events)
 3495			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
 3496				(unsigned long)-1 : 0;
 3497	} else {
 3498		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
 3499					 lost_events);
 3500	}
 3501
 3502	if (event) {
 3503		iter->ent_size = ring_buffer_event_length(event);
 3504		return ring_buffer_event_data(event);
 3505	}
 3506	iter->ent_size = 0;
 3507	return NULL;
 3508}
 3509
 3510static struct trace_entry *
 3511__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
 3512		  unsigned long *missing_events, u64 *ent_ts)
 3513{
 3514	struct trace_buffer *buffer = iter->array_buffer->buffer;
 3515	struct trace_entry *ent, *next = NULL;
 3516	unsigned long lost_events = 0, next_lost = 0;
 3517	int cpu_file = iter->cpu_file;
 3518	u64 next_ts = 0, ts;
 3519	int next_cpu = -1;
 3520	int next_size = 0;
 3521	int cpu;
 3522
 3523	/*
 3524	 * If we are in a per_cpu trace file, don't bother iterating over
 3525	 * all CPUs; peek at that one directly.
 3526	 */
 3527	if (cpu_file > RING_BUFFER_ALL_CPUS) {
 3528		if (ring_buffer_empty_cpu(buffer, cpu_file))
 3529			return NULL;
 3530		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
 3531		if (ent_cpu)
 3532			*ent_cpu = cpu_file;
 3533
 3534		return ent;
 3535	}
 3536
 3537	for_each_tracing_cpu(cpu) {
 3538
 3539		if (ring_buffer_empty_cpu(buffer, cpu))
 3540			continue;
 3541
 3542		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
 3543
 3544		/*
 3545		 * Pick the entry with the smallest timestamp:
 3546		 */
 3547		if (ent && (!next || ts < next_ts)) {
 3548			next = ent;
 3549			next_cpu = cpu;
 3550			next_ts = ts;
 3551			next_lost = lost_events;
 3552			next_size = iter->ent_size;
 3553		}
 3554	}
 3555
 3556	iter->ent_size = next_size;
 3557
 3558	if (ent_cpu)
 3559		*ent_cpu = next_cpu;
 3560
 3561	if (ent_ts)
 3562		*ent_ts = next_ts;
 3563
 3564	if (missing_events)
 3565		*missing_events = next_lost;
 3566
 3567	return next;
 3568}
 3569
 3570#define STATIC_FMT_BUF_SIZE	128
 3571static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
 3572
 3573char *trace_iter_expand_format(struct trace_iterator *iter)
 3574{
 3575	char *tmp;
 3576
 3577	/*
 3578	 * iter->tr is NULL when used with tp_printk, which makes
 3579	 * this get called where it is not safe to call krealloc().
 3580	 */
 3581	if (!iter->tr || iter->fmt == static_fmt_buf)
 3582		return NULL;
 3583
 3584	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
 3585		       GFP_KERNEL);
 3586	if (tmp) {
 3587		iter->fmt_size += STATIC_FMT_BUF_SIZE;
 3588		iter->fmt = tmp;
 3589	}
 3590
 3591	return tmp;
 3592}
 3593
 3594/* Returns true if the string is safe to dereference from an event */
 3595static bool trace_safe_str(struct trace_iterator *iter, const char *str)
 3596{
 3597	unsigned long addr = (unsigned long)str;
 3598	struct trace_event *trace_event;
 3599	struct trace_event_call *event;
 3600
 3601	/* OK if part of the event data */
 3602	if ((addr >= (unsigned long)iter->ent) &&
 3603	    (addr < (unsigned long)iter->ent + iter->ent_size))
 3604		return true;
 3605
 3606	/* OK if part of the temp seq buffer */
 3607	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
 3608	    (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
 3609		return true;
 3610
 3611	/* Core rodata can not be freed */
 3612	if (is_kernel_rodata(addr))
 3613		return true;
 3614
 3615	if (trace_is_tracepoint_string(str))
 3616		return true;
 3617
 3618	/*
 3619	 * Now this could be a module event, referencing core module
 3620	 * data, which is OK.
 3621	 */
 3622	if (!iter->ent)
 3623		return false;
 3624
 3625	trace_event = ftrace_find_event(iter->ent->type);
 3626	if (!trace_event)
 3627		return false;
 3628
 3629	event = container_of(trace_event, struct trace_event_call, event);
 3630	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
 3631		return false;
 3632
 3633	/* Would rather have rodata, but this will suffice */
 3634	if (within_module_core(addr, event->module))
 3635		return true;
 3636
 3637	return false;
 3638}
 3639
 3640/**
 3641 * ignore_event - Check dereferenced fields while writing to the seq buffer
 3642 * @iter: The iterator that holds the seq buffer and the event being printed
 3643 *
 3644 * At boot up, test_event_printk() will flag any event that dereferences
 3645 * a string with "%s" that does not exist in the ring buffer. It may still
 3646 * be valid, as the string may point to a static string in the kernel
 3647 * rodata that never gets freed. But if the string pointer is pointing
 3648 * to something that was allocated, there's a chance that it can be freed
 3649 * by the time the user reads the trace. This would cause a bad memory
 3650 * access by the kernel and possibly crash the system.
 3651 *
 3652 * This function will check if the event has any fields flagged as needing
 3653 * to be checked at runtime and perform those checks.
 3654 *
 3655 * If it is found that a field is unsafe, it will write into the @iter->seq
 3656 * a message stating what was found to be unsafe.
 3657 *
 3658 * @return: true if the event is unsafe and should be ignored,
 3659 *          false otherwise.
 3660 */
 3661bool ignore_event(struct trace_iterator *iter)
 3662{
 3663	struct ftrace_event_field *field;
 3664	struct trace_event *trace_event;
 3665	struct trace_event_call *event;
 3666	struct list_head *head;
 3667	struct trace_seq *seq;
 3668	const void *ptr;
 3669
 3670	trace_event = ftrace_find_event(iter->ent->type);
 3671
 3672	seq = &iter->seq;
 3673
 3674	if (!trace_event) {
 3675		trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type);
 3676		return true;
 3677	}
 3678
 3679	event = container_of(trace_event, struct trace_event_call, event);
 3680	if (!(event->flags & TRACE_EVENT_FL_TEST_STR))
 3681		return false;
 3682
 3683	head = trace_get_fields(event);
 3684	if (!head) {
 3685		trace_seq_printf(seq, "FIELDS FOR EVENT '%s' NOT FOUND?\n",
 3686				 trace_event_name(event));
 3687		return true;
 3688	}
 3689
 3690	/* Offsets are from the iter->ent that points to the raw event */
 3691	ptr = iter->ent;
 3692
 3693	list_for_each_entry(field, head, link) {
 3694		const char *str;
 3695		bool good;
 3696
 3697		if (!field->needs_test)
 3698			continue;
 3699
 3700		str = *(const char **)(ptr + field->offset);
 3701
 3702		good = trace_safe_str(iter, str);
 3703
 3704		/*
 3705		 * If you hit this warning, it is likely that the
 3706		 * trace event in question used %s on a string that
 3707		 * was saved at the time of the event, but may not be
 3708		 * around when the trace is read. Use __string(),
 3709		 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
 3710		 * instead. See samples/trace_events/trace-events-sample.h
 3711		 * for reference.
 3712		 */
 3713		if (WARN_ONCE(!good, "event '%s' has unsafe pointer field '%s'",
 3714			      trace_event_name(event), field->name)) {
 3715			trace_seq_printf(seq, "EVENT %s: HAS UNSAFE POINTER FIELD '%s'\n",
 3716					 trace_event_name(event), field->name);
 3717			return true;
 3718		}
 3719	}
 3720	return false;
 3721}
 3722
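/*
 * trace_event_format - rewrite an event's print format for unhashed pointers
 *
 * When the "hash-ptr" trace option is cleared, copy @fmt into iter->fmt,
 * turning each bare "%p" conversion into "%px" so real addresses are printed
 * instead of hashed ones (e.g. "ptr=%p" becomes "ptr=%px", while "%%" passes
 * through untouched). If the option is set, or there is no iterator buffer
 * to expand into, the original @fmt is returned unchanged.
 */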
 3723const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
 3724{
 3725	const char *p, *new_fmt;
 3726	char *q;
 3727
 3728	if (WARN_ON_ONCE(!fmt))
 3729		return fmt;
 3730
 3731	if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
 3732		return fmt;
 3733
 3734	p = fmt;
 3735	new_fmt = q = iter->fmt;
 3736	while (*p) {
 3737		if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
 3738			if (!trace_iter_expand_format(iter))
 3739				return fmt;
 3740
 3741			q += iter->fmt - new_fmt;
 3742			new_fmt = iter->fmt;
 3743		}
 3744
 3745		*q++ = *p++;
 3746
 3747		/* Replace %p with %px */
 3748		if (p[-1] == '%') {
 3749			if (p[0] == '%') {
 3750				*q++ = *p++;
 3751			} else if (p[0] == 'p' && !isalnum(p[1])) {
 3752				*q++ = *p++;
 3753				*q++ = 'x';
 3754			}
 3755		}
 3756	}
 3757	*q = '\0';
 3758
 3759	return new_fmt;
 3760}
 3761
 3762#define STATIC_TEMP_BUF_SIZE	128
 3763static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
 3764
 3765/* Find the next real entry, without updating the iterator itself */
 3766struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 3767					  int *ent_cpu, u64 *ent_ts)
 3768{
 3769	/* __find_next_entry will reset ent_size */
 3770	int ent_size = iter->ent_size;
 3771	struct trace_entry *entry;
 3772
 3773	/*
 3774	 * If called from ftrace_dump(), then the iter->temp buffer
 3775	 * will be the static_temp_buf and not created from kmalloc.
 3776	 * If the entry size is greater than the buffer, we cannot
 3777	 * save it. Just return NULL in that case. This is only
 3778	 * used to add markers when two consecutive events' time
 3779	 * stamps have a large delta. See trace_print_lat_context().
 3780	 */
 3781	if (iter->temp == static_temp_buf &&
 3782	    STATIC_TEMP_BUF_SIZE < ent_size)
 3783		return NULL;
 3784
 3785	/*
 3786	 * The __find_next_entry() may call peek_next_entry(), which may
 3787	 * call ring_buffer_peek() that may make the contents of iter->ent
 3788	 * undefined. Need to copy iter->ent now.
 3789	 */
 3790	if (iter->ent && iter->ent != iter->temp) {
 3791		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
 3792		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
 3793			void *temp;
 3794			temp = kmalloc(iter->ent_size, GFP_KERNEL);
 3795			if (!temp)
 3796				return NULL;
 3797			kfree(iter->temp);
 3798			iter->temp = temp;
 3799			iter->temp_size = iter->ent_size;
 3800		}
 3801		memcpy(iter->temp, iter->ent, iter->ent_size);
 3802		iter->ent = iter->temp;
 3803	}
 3804	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
 3805	/* Put back the original ent_size */
 3806	iter->ent_size = ent_size;
 3807
 3808	return entry;
 3809}
 3810
 3811/* Find the next real entry, and increment the iterator to the next entry */
 3812void *trace_find_next_entry_inc(struct trace_iterator *iter)
 3813{
 3814	iter->ent = __find_next_entry(iter, &iter->cpu,
 3815				      &iter->lost_events, &iter->ts);
 3816
 3817	if (iter->ent)
 3818		trace_iterator_increment(iter);
 3819
 3820	return iter->ent ? iter : NULL;
 3821}
 3822
 3823static void trace_consume(struct trace_iterator *iter)
 3824{
 3825	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
 3826			    &iter->lost_events);
 3827}
 3828
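/*
 * seq_file ->next callback for the trace file: advance the iterator to the
 * entry at position *pos via trace_find_next_entry_inc(). The walk is
 * forward-only; a request for an earlier position returns NULL.
 */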
 3829static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 3830{
 3831	struct trace_iterator *iter = m->private;
 3832	int i = (int)*pos;
 3833	void *ent;
 3834
 3835	WARN_ON_ONCE(iter->leftover);
 3836
 3837	(*pos)++;
 3838
 3839	/* can't go backwards */
 3840	if (iter->idx > i)
 3841		return NULL;
 3842
 3843	if (iter->idx < 0)
 3844		ent = trace_find_next_entry_inc(iter);
 3845	else
 3846		ent = iter;
 3847
 3848	while (ent && iter->idx < i)
 3849		ent = trace_find_next_entry_inc(iter);
 3850
 3851	iter->pos = *pos;
 3852
 3853	return ent;
 3854}
 3855
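/*
 * Reset the ring buffer iterator for @cpu, then skip over any entries that
 * were recorded before the buffer's time_start, remembering how many were
 * skipped so the per-CPU entry accounting stays consistent.
 */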
 3856void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 3857{
 3858	struct ring_buffer_iter *buf_iter;
 3859	unsigned long entries = 0;
 3860	u64 ts;
 3861
 3862	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
 3863
 3864	buf_iter = trace_buffer_iter(iter, cpu);
 3865	if (!buf_iter)
 3866		return;
 3867
 3868	ring_buffer_iter_reset(buf_iter);
 3869
 3870	/*
 3871	 * We could have the case with the max latency tracers
 3872	 * that a reset never took place on a cpu. This is evident
 3873	 * by the timestamp being before the start of the buffer.
 3874	 */
 3875	while (ring_buffer_iter_peek(buf_iter, &ts)) {
 3876		if (ts >= iter->array_buffer->time_start)
 3877			break;
 3878		entries++;
 3879		ring_buffer_iter_advance(buf_iter);
 3880		/* This could be a big loop */
 3881		cond_resched();
 3882	}
 3883
 3884	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
 3885}
 3886
 3887/*
 3888 * The current tracer is copied to avoid a global locking
 3889 * all around.
 3890 */
 3891static void *s_start(struct seq_file *m, loff_t *pos)
 3892{
 3893	struct trace_iterator *iter = m->private;
 3894	struct trace_array *tr = iter->tr;
 3895	int cpu_file = iter->cpu_file;
 3896	void *p = NULL;
 3897	loff_t l = 0;
 3898	int cpu;
 3899
 3900	mutex_lock(&trace_types_lock);
 3901	if (unlikely(tr->current_trace != iter->trace)) {
 3902		/* Close iter->trace before switching to the new current tracer */
 3903		if (iter->trace->close)
 3904			iter->trace->close(iter);
 3905		iter->trace = tr->current_trace;
 3906		/* Reopen the new current tracer */
 3907		if (iter->trace->open)
 3908			iter->trace->open(iter);
 3909	}
 3910	mutex_unlock(&trace_types_lock);
 3911
 3912#ifdef CONFIG_TRACER_MAX_TRACE
 3913	if (iter->snapshot && iter->trace->use_max_tr)
 3914		return ERR_PTR(-EBUSY);
 3915#endif
 3916
 3917	if (*pos != iter->pos) {
 3918		iter->ent = NULL;
 3919		iter->cpu = 0;
 3920		iter->idx = -1;
 3921
 3922		if (cpu_file == RING_BUFFER_ALL_CPUS) {
 3923			for_each_tracing_cpu(cpu)
 3924				tracing_iter_reset(iter, cpu);
 3925		} else
 3926			tracing_iter_reset(iter, cpu_file);
 3927
 3928		iter->leftover = 0;
 3929		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 3930			;
 3931
 3932	} else {
 3933		/*
 3934		 * If we overflowed the seq_file before, then we want
 3935		 * to just reuse the trace_seq buffer again.
 3936		 */
 3937		if (iter->leftover)
 3938			p = iter;
 3939		else {
 3940			l = *pos - 1;
 3941			p = s_next(m, p, &l);
 3942		}
 3943	}
 3944
 3945	trace_event_read_lock();
 3946	trace_access_lock(cpu_file);
 3947	return p;
 3948}
 3949
 3950static void s_stop(struct seq_file *m, void *p)
 3951{
 3952	struct trace_iterator *iter = m->private;
 3953
 3954#ifdef CONFIG_TRACER_MAX_TRACE
 3955	if (iter->snapshot && iter->trace->use_max_tr)
 3956		return;
 3957#endif
 3958
 3959	trace_access_unlock(iter->cpu_file);
 3960	trace_event_read_unlock();
 3961}
 3962
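/*
 * Per-CPU entry accounting: @entries is the number of events still in the
 * buffer (less any counted as skipped at iterator reset), while @total also
 * includes events lost to ring buffer overruns.
 */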
 3963static void
 3964get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
 3965		      unsigned long *entries, int cpu)
 3966{
 3967	unsigned long count;
 3968
 3969	count = ring_buffer_entries_cpu(buf->buffer, cpu);
 3970	/*
 3971	 * If this buffer has skipped entries, then we hold all
 3972	 * entries for the trace and we need to ignore the
 3973	 * ones before the time stamp.
 3974	 */
 3975	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
 3976		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
 3977		/* total is the same as the entries */
 3978		*total = count;
 3979	} else
 3980		*total = count +
 3981			ring_buffer_overrun_cpu(buf->buffer, cpu);
 3982	*entries = count;
 3983}
 3984
 3985static void
 3986get_total_entries(struct array_buffer *buf,
 3987		  unsigned long *total, unsigned long *entries)
 3988{
 3989	unsigned long t, e;
 3990	int cpu;
 3991
 3992	*total = 0;
 3993	*entries = 0;
 3994
 3995	for_each_tracing_cpu(cpu) {
 3996		get_total_entries_cpu(buf, &t, &e, cpu);
 3997		*total += t;
 3998		*entries += e;
 3999	}
 4000}
 4001
 4002unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
 4003{
 4004	unsigned long total, entries;
 4005
 4006	if (!tr)
 4007		tr = &global_trace;
 4008
 4009	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
 4010
 4011	return entries;
 4012}
 4013
 4014unsigned long trace_total_entries(struct trace_array *tr)
 4015{
 4016	unsigned long total, entries;
 4017
 4018	if (!tr)
 4019		tr = &global_trace;
 4020
 4021	get_total_entries(&tr->array_buffer, &total, &entries);
 4022
 4023	return entries;
 4024}
 4025
 4026static void print_lat_help_header(struct seq_file *m)
 4027{
 4028	seq_puts(m, "#                    _------=> CPU#            \n"
 4029		    "#                   / _-----=> irqs-off/BH-disabled\n"
 4030		    "#                  | / _----=> need-resched    \n"
 4031		    "#                  || / _---=> hardirq/softirq \n"
 4032		    "#                  ||| / _--=> preempt-depth   \n"
 4033		    "#                  |||| / _-=> migrate-disable \n"
 4034		    "#                  ||||| /     delay           \n"
 4035		    "#  cmd     pid     |||||| time  |   caller     \n"
 4036		    "#     \\   /        ||||||  \\    |    /       \n");
 4037}
 4038
 4039static void print_event_info(struct array_buffer *buf, struct seq_file *m)
 4040{
 4041	unsigned long total;
 4042	unsigned long entries;
 4043
 4044	get_total_entries(buf, &total, &entries);
 4045	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
 4046		   entries, total, num_online_cpus());
 4047	seq_puts(m, "#\n");
 4048}
 4049
 4050static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
 4051				   unsigned int flags)
 4052{
 4053	bool tgid = flags & TRACE_ITER_RECORD_TGID;
 4054
 4055	print_event_info(buf, m);
 4056
 4057	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
 4058	seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
 4059}
 4060
 4061static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
 4062				       unsigned int flags)
 4063{
 4064	bool tgid = flags & TRACE_ITER_RECORD_TGID;
 4065	static const char space[] = "            ";
 4066	int prec = tgid ? 12 : 2;
 4067
 4068	print_event_info(buf, m);
 4069
 4070	seq_printf(m, "#                            %.*s  _-----=> irqs-off/BH-disabled\n", prec, space);
 4071	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
 4072	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
 4073	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
 4074	seq_printf(m, "#                            %.*s||| / _-=> migrate-disable\n", prec, space);
 4075	seq_printf(m, "#                            %.*s|||| /     delay\n", prec, space);
 4076	seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
 4077	seq_printf(m, "#              | |    %.*s   |   |||||     |         |\n", prec, "       |    ");
 4078}
 4079
 4080void
 4081print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 4082{
 4083	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
 4084	struct array_buffer *buf = iter->array_buffer;
 4085	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
 4086	struct tracer *type = iter->trace;
 4087	unsigned long entries;
 4088	unsigned long total;
 4089	const char *name = type->name;
 4090
 4091	get_total_entries(buf, &total, &entries);
 4092
 4093	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
 4094		   name, init_utsname()->release);
 4095	seq_puts(m, "# -----------------------------------"
 4096		 "---------------------------------\n");
 4097	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
 4098		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
 4099		   nsecs_to_usecs(data->saved_latency),
 4100		   entries,
 4101		   total,
 4102		   buf->cpu,
 4103		   preempt_model_none()      ? "server" :
 4104		   preempt_model_voluntary() ? "desktop" :
 4105		   preempt_model_full()      ? "preempt" :
 4106		   preempt_model_lazy()	     ? "lazy"    :
 4107		   preempt_model_rt()        ? "preempt_rt" :
 4108		   "unknown",
 4109		   /* These are reserved for later use */
 4110		   0, 0, 0, 0);
 4111#ifdef CONFIG_SMP
 4112	seq_printf(m, " #P:%d)\n", num_online_cpus());
 4113#else
 4114	seq_puts(m, ")\n");
 4115#endif
 4116	seq_puts(m, "#    -----------------\n");
 4117	seq_printf(m, "#    | task: %.16s-%d "
 4118		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
 4119		   data->comm, data->pid,
 4120		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
 4121		   data->policy, data->rt_priority);
 4122	seq_puts(m, "#    -----------------\n");
 4123
 4124	if (data->critical_start) {
 4125		seq_puts(m, "#  => started at: ");
 4126		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
 4127		trace_print_seq(m, &iter->seq);
 4128		seq_puts(m, "\n#  => ended at:   ");
 4129		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
 4130		trace_print_seq(m, &iter->seq);
 4131		seq_puts(m, "\n#\n");
 4132	}
 4133
 4134	seq_puts(m, "#\n");
 4135}
 4136
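/*
 * With the "annotate" option set, emit a one-time "CPU n buffer started"
 * marker the first time output is produced from a given CPU, so a reader
 * can see where that CPU's data joins the merged stream.
 */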
 4137static void test_cpu_buff_start(struct trace_iterator *iter)
 4138{
 4139	struct trace_seq *s = &iter->seq;
 4140	struct trace_array *tr = iter->tr;
 4141
 4142	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
 4143		return;
 4144
 4145	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
 4146		return;
 4147
 4148	if (cpumask_available(iter->started) &&
 4149	    cpumask_test_cpu(iter->cpu, iter->started))
 4150		return;
 4151
 4152	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
 4153		return;
 4154
 4155	if (cpumask_available(iter->started))
 4156		cpumask_set_cpu(iter->cpu, iter->started);
 4157
 4158	/* Don't print started cpu buffer for the first entry of the trace */
 4159	if (iter->idx > 1)
 4160		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
 4161				iter->cpu);
 4162}
 4163
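/*
 * Default human-readable output for one trace entry: print the context
 * columns (latency or normal format), then hand the entry to its registered
 * trace_event ->trace() callback. Unknown event types are reported inline.
 */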
 4164static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 4165{
 4166	struct trace_array *tr = iter->tr;
 4167	struct trace_seq *s = &iter->seq;
 4168	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
 4169	struct trace_entry *entry;
 4170	struct trace_event *event;
 4171
 4172	entry = iter->ent;
 4173
 4174	test_cpu_buff_start(iter);
 4175
 4176	event = ftrace_find_event(entry->type);
 4177
 4178	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 4179		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
 4180			trace_print_lat_context(iter);
 4181		else
 4182			trace_print_context(iter);
 4183	}
 4184
 4185	if (trace_seq_has_overflowed(s))
 4186		return TRACE_TYPE_PARTIAL_LINE;
 4187
 4188	if (event) {
 4189		if (tr->trace_flags & TRACE_ITER_FIELDS)
 4190			return print_event_fields(iter, event);
 4191		/*
 4192		 * For TRACE_EVENT() events, the print_fmt is not
 4193		 * safe to use if the array has delta offsets
 4194		 * Force printing via the fields.
 4195		 */
 4196		if ((tr->text_delta || tr->data_delta) &&
 4197		    event->type > __TRACE_LAST_TYPE)
 4198			return print_event_fields(iter, event);
 4199
 4200		return event->funcs->trace(iter, sym_flags, event);
 4201	}
 4202
 4203	trace_seq_printf(s, "Unknown type %d\n", entry->type);
 4204
 4205	return trace_handle_return(s);
 4206}
 4207
 4208static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 4209{
 4210	struct trace_array *tr = iter->tr;
 4211	struct trace_seq *s = &iter->seq;
 4212	struct trace_entry *entry;
 4213	struct trace_event *event;
 4214
 4215	entry = iter->ent;
 4216
 4217	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
 4218		trace_seq_printf(s, "%d %d %llu ",
 4219				 entry->pid, iter->cpu, iter->ts);
 4220
 4221	if (trace_seq_has_overflowed(s))
 4222		return TRACE_TYPE_PARTIAL_LINE;
 4223
 4224	event = ftrace_find_event(entry->type);
 4225	if (event)
 4226		return event->funcs->raw(iter, 0, event);
 4227
 4228	trace_seq_printf(s, "%d ?\n", entry->type);
 4229
 4230	return trace_handle_return(s);
 4231}
 4232
 4233static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 4234{
 4235	struct trace_array *tr = iter->tr;
 4236	struct trace_seq *s = &iter->seq;
 4237	unsigned char newline = '\n';
 4238	struct trace_entry *entry;
 4239	struct trace_event *event;
 4240
 4241	entry = iter->ent;
 4242
 4243	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 4244		SEQ_PUT_HEX_FIELD(s, entry->pid);
 4245		SEQ_PUT_HEX_FIELD(s, iter->cpu);
 4246		SEQ_PUT_HEX_FIELD(s, iter->ts);
 4247		if (trace_seq_has_overflowed(s))
 4248			return TRACE_TYPE_PARTIAL_LINE;
 4249	}
 4250
 4251	event = ftrace_find_event(entry->type);
 4252	if (event) {
 4253		enum print_line_t ret = event->funcs->hex(iter, 0, event);
 4254		if (ret != TRACE_TYPE_HANDLED)
 4255			return ret;
 4256	}
 4257
 4258	SEQ_PUT_FIELD(s, newline);
 4259
 4260	return trace_handle_return(s);
 4261}
 4262
 4263static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 4264{
 4265	struct trace_array *tr = iter->tr;
 4266	struct trace_seq *s = &iter->seq;
 4267	struct trace_entry *entry;
 4268	struct trace_event *event;
 4269
 4270	entry = iter->ent;
 4271
 4272	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 4273		SEQ_PUT_FIELD(s, entry->pid);
 4274		SEQ_PUT_FIELD(s, iter->cpu);
 4275		SEQ_PUT_FIELD(s, iter->ts);
 4276		if (trace_seq_has_overflowed(s))
 4277			return TRACE_TYPE_PARTIAL_LINE;
 4278	}
 4279
 4280	event = ftrace_find_event(entry->type);
 4281	return event ? event->funcs->binary(iter, 0, event) :
 4282		TRACE_TYPE_HANDLED;
 4283}
 4284
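/*
 * Return 1 if there is nothing left to read, checking either the single CPU
 * selected by a per_cpu trace file or every tracing CPU.
 */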
 4285int trace_empty(struct trace_iterator *iter)
 4286{
 4287	struct ring_buffer_iter *buf_iter;
 4288	int cpu;
 4289
 4290	/* If we are looking at one CPU buffer, only check that one */
 4291	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
 4292		cpu = iter->cpu_file;
 4293		buf_iter = trace_buffer_iter(iter, cpu);
 4294		if (buf_iter) {
 4295			if (!ring_buffer_iter_empty(buf_iter))
 4296				return 0;
 4297		} else {
 4298			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
 4299				return 0;
 4300		}
 4301		return 1;
 4302	}
 4303
 4304	for_each_tracing_cpu(cpu) {
 4305		buf_iter = trace_buffer_iter(iter, cpu);
 4306		if (buf_iter) {
 4307			if (!ring_buffer_iter_empty(buf_iter))
 4308				return 0;
 4309		} else {
 4310			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
 4311				return 0;
 4312		}
 4313	}
 4314
 4315	return 1;
 4316}
 4317
 4318/*  Called with trace_event_read_lock() held. */
 4319enum print_line_t print_trace_line(struct trace_iterator *iter)
 4320{
 4321	struct trace_array *tr = iter->tr;
 4322	unsigned long trace_flags = tr->trace_flags;
 4323	enum print_line_t ret;
 4324
 4325	if (iter->lost_events) {
 4326		if (iter->lost_events == (unsigned long)-1)
 4327			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
 4328					 iter->cpu);
 4329		else
 4330			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
 4331					 iter->cpu, iter->lost_events);
 4332		if (trace_seq_has_overflowed(&iter->seq))
 4333			return TRACE_TYPE_PARTIAL_LINE;
 4334	}
 4335
 4336	if (iter->trace && iter->trace->print_line) {
 4337		ret = iter->trace->print_line(iter);
 4338		if (ret != TRACE_TYPE_UNHANDLED)
 4339			return ret;
 4340	}
 4341
 4342	if (iter->ent->type == TRACE_BPUTS &&
 4343			trace_flags & TRACE_ITER_PRINTK &&
 4344			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
 4345		return trace_print_bputs_msg_only(iter);
 4346
 4347	if (iter->ent->type == TRACE_BPRINT &&
 4348			trace_flags & TRACE_ITER_PRINTK &&
 4349			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
 4350		return trace_print_bprintk_msg_only(iter);
 4351
 4352	if (iter->ent->type == TRACE_PRINT &&
 4353			trace_flags & TRACE_ITER_PRINTK &&
 4354			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
 4355		return trace_print_printk_msg_only(iter);
 4356
 4357	if (trace_flags & TRACE_ITER_BIN)
 4358		return print_bin_fmt(iter);
 4359
 4360	if (trace_flags & TRACE_ITER_HEX)
 4361		return print_hex_fmt(iter);
 4362
 4363	if (trace_flags & TRACE_ITER_RAW)
 4364		return print_raw_fmt(iter);
 4365
 4366	return print_trace_fmt(iter);
 4367}
 4368
 4369void trace_latency_header(struct seq_file *m)
 4370{
 4371	struct trace_iterator *iter = m->private;
 4372	struct trace_array *tr = iter->tr;
 4373
 4374	/* print nothing if the buffers are empty */
 4375	if (trace_empty(iter))
 4376		return;
 4377
 4378	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
 4379		print_trace_header(m, iter);
 4380
 4381	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
 4382		print_lat_help_header(m);
 4383}
 4384
 4385void trace_default_header(struct seq_file *m)
 4386{
 4387	struct trace_iterator *iter = m->private;
 4388	struct trace_array *tr = iter->tr;
 4389	unsigned long trace_flags = tr->trace_flags;
 4390
 4391	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
 4392		return;
 4393
 4394	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
 4395		/* print nothing if the buffers are empty */
 4396		if (trace_empty(iter))
 4397			return;
 4398		print_trace_header(m, iter);
 4399		if (!(trace_flags & TRACE_ITER_VERBOSE))
 4400			print_lat_help_header(m);
 4401	} else {
 4402		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
 4403			if (trace_flags & TRACE_ITER_IRQ_INFO)
 4404				print_func_help_header_irq(iter->array_buffer,
 4405							   m, trace_flags);
 4406			else
 4407				print_func_help_header(iter->array_buffer, m,
 4408						       trace_flags);
 4409		}
 4410	}
 4411}
 4412
 4413static void test_ftrace_alive(struct seq_file *m)
 4414{
 4415	if (!ftrace_is_dead())
 4416		return;
 4417	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
 4418		    "#          MAY BE MISSING FUNCTION EVENTS\n");
 4419}
 4420
 4421#ifdef CONFIG_TRACER_MAX_TRACE
 4422static void show_snapshot_main_help(struct seq_file *m)
 4423{
 4424	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
 4425		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
 4426		    "#                      Takes a snapshot of the main buffer.\n"
 4427		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
 4428		    "#                      (Doesn't have to be '2'; works with any number that\n"
 4429		    "#                       is not a '0' or '1')\n");
 4430}
 4431
 4432static void show_snapshot_percpu_help(struct seq_file *m)
 4433{
 4434	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
 4435#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
 4436	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
 4437		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
 4438#else
 4439	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
 4440		    "#                     Must use main snapshot file to allocate.\n");
 4441#endif
 4442	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
 4443		    "#                      (Doesn't have to be '2'; works with any number that\n"
 4444		    "#                       is not a '0' or '1')\n");
 4445}
 4446
 4447static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
 4448{
 4449	if (iter->tr->allocated_snapshot)
 4450		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
 4451	else
 4452		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
 4453
 4454	seq_puts(m, "# Snapshot commands:\n");
 4455	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
 4456		show_snapshot_main_help(m);
 4457	else
 4458		show_snapshot_percpu_help(m);
 4459}
 4460#else
 4461/* Should never be called */
 4462static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
 4463#endif
 4464
 4465static int s_show(struct seq_file *m, void *v)
 4466{
 4467	struct trace_iterator *iter = v;
 4468	int ret;
 4469
 4470	if (iter->ent == NULL) {
 4471		if (iter->tr) {
 4472			seq_printf(m, "# tracer: %s\n", iter->trace->name);
 4473			seq_puts(m, "#\n");
 4474			test_ftrace_alive(m);
 4475		}
 4476		if (iter->snapshot && trace_empty(iter))
 4477			print_snapshot_help(m, iter);
 4478		else if (iter->trace && iter->trace->print_header)
 4479			iter->trace->print_header(m);
 4480		else
 4481			trace_default_header(m);
 4482
 4483	} else if (iter->leftover) {
 4484		/*
 4485		 * If we filled the seq_file buffer earlier, we
 4486		 * want to just show it now.
 4487		 */
 4488		ret = trace_print_seq(m, &iter->seq);
 4489
 4490		/* ret should this time be zero, but you never know */
 4491		iter->leftover = ret;
 4492
 4493	} else {
 4494		ret = print_trace_line(iter);
 4495		if (ret == TRACE_TYPE_PARTIAL_LINE) {
 4496			iter->seq.full = 0;
 4497			trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
 4498		}
 4499		ret = trace_print_seq(m, &iter->seq);
 4500		/*
 4501		 * If we overflow the seq_file buffer, then it will
 4502		 * ask us for this data again at start up.
 4503		 * Use that instead.
 4504		 *  ret is 0 if seq_file write succeeded.
 4505		 *        -1 otherwise.
 4506		 */
 4507		iter->leftover = ret;
 4508	}
 4509
 4510	return 0;
 4511}
 4512
 4513/*
 4514 * Should be used after trace_array_get(), trace_types_lock
 4515 * ensures that i_cdev was already initialized.
 4516 */
 4517static inline int tracing_get_cpu(struct inode *inode)
 4518{
 4519	if (inode->i_cdev) /* See trace_create_cpu_file() */
 4520		return (long)inode->i_cdev - 1;
 4521	return RING_BUFFER_ALL_CPUS;
 4522}
 4523
 4524static const struct seq_operations tracer_seq_ops = {
 4525	.start		= s_start,
 4526	.next		= s_next,
 4527	.stop		= s_stop,
 4528	.show		= s_show,
 4529};
 4530
 4531/*
 4532 * Note, as iter itself can be allocated and freed in different
 4533 * ways, this function is only used to free its content, and not
 4534 * the iterator itself. The only requirement on all the allocations
 4535 * is that they zero all fields (kzalloc), as freeing works with
 4536 * either allocated content or NULL.
 4537 */
 4538static void free_trace_iter_content(struct trace_iterator *iter)
 4539{
 4540	/* The fmt is either NULL, allocated or points to static_fmt_buf */
 4541	if (iter->fmt != static_fmt_buf)
 4542		kfree(iter->fmt);
 4543
 4544	kfree(iter->temp);
 4545	kfree(iter->buffer_iter);
 4546	mutex_destroy(&iter->mutex);
 4547	free_cpumask_var(iter->started);
 4548}
 4549
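/*
 * Build the trace_iterator behind a read of the "trace" file: allocate the
 * per-CPU buffer iterators, select the main or max/snapshot buffer, stop
 * tracing while reading if pause-on-trace is set, and prepare each selected
 * CPU for iteration.
 */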
 4550static struct trace_iterator *
 4551__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 4552{
 4553	struct trace_array *tr = inode->i_private;
 4554	struct trace_iterator *iter;
 4555	int cpu;
 4556
 4557	if (tracing_disabled)
 4558		return ERR_PTR(-ENODEV);
 4559
 4560	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
 4561	if (!iter)
 4562		return ERR_PTR(-ENOMEM);
 4563
 4564	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
 4565				    GFP_KERNEL);
 4566	if (!iter->buffer_iter)
 4567		goto release;
 4568
 4569	/*
 4570	 * trace_find_next_entry() may need to save off iter->ent.
 4571	 * It will place it into the iter->temp buffer. As most
 4572	 * events are less than 128, allocate a buffer of that size.
 4573	 * If one is greater, then trace_find_next_entry() will
 4574	 * allocate a new buffer to adjust for the bigger iter->ent.
 4575	 * It's not critical if it fails to get allocated here.
 4576	 */
 4577	iter->temp = kmalloc(128, GFP_KERNEL);
 4578	if (iter->temp)
 4579		iter->temp_size = 128;
 4580
 4581	/*
 4582	 * trace_event_printf() may need to modify the given format
 4583	 * string to replace %p with %px so that it shows the real address
 4584	 * instead of a hash value. However, that is only needed for event
 4585	 * tracing; other tracers may not need it. Defer the allocation
 4586	 * until it is needed.
 4587	 */
 4588	iter->fmt = NULL;
 4589	iter->fmt_size = 0;
 4590
 4591	mutex_lock(&trace_types_lock);
 4592	iter->trace = tr->current_trace;
 4593
 4594	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 4595		goto fail;
 4596
 4597	iter->tr = tr;
 4598
 4599#ifdef CONFIG_TRACER_MAX_TRACE
 4600	/* Currently only the top directory has a snapshot */
 4601	if (tr->current_trace->print_max || snapshot)
 4602		iter->array_buffer = &tr->max_buffer;
 4603	else
 4604#endif
 4605		iter->array_buffer = &tr->array_buffer;
 4606	iter->snapshot = snapshot;
 4607	iter->pos = -1;
 4608	iter->cpu_file = tracing_get_cpu(inode);
 4609	mutex_init(&iter->mutex);
 4610
 4611	/* Notify the tracer early; before we stop tracing. */
 4612	if (iter->trace->open)
 4613		iter->trace->open(iter);
 4614
 4615	/* Annotate start of buffers if we had overruns */
 4616	if (ring_buffer_overruns(iter->array_buffer->buffer))
 4617		iter->iter_flags |= TRACE_FILE_ANNOTATE;
 4618
 4619	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
 4620	if (trace_clocks[tr->clock_id].in_ns)
 4621		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 4622
 4623	/*
 4624	 * If pause-on-trace is enabled, then stop the trace while
 4625	 * dumping, unless this is the "snapshot" file
 4626	 */
 4627	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
 4628		tracing_stop_tr(tr);
 4629
 4630	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 4631		for_each_tracing_cpu(cpu) {
 4632			iter->buffer_iter[cpu] =
 4633				ring_buffer_read_prepare(iter->array_buffer->buffer,
 4634							 cpu, GFP_KERNEL);
 4635		}
 4636		ring_buffer_read_prepare_sync();
 4637		for_each_tracing_cpu(cpu) {
 4638			ring_buffer_read_start(iter->buffer_iter[cpu]);
 4639			tracing_iter_reset(iter, cpu);
 4640		}
 4641	} else {
 4642		cpu = iter->cpu_file;
 4643		iter->buffer_iter[cpu] =
 4644			ring_buffer_read_prepare(iter->array_buffer->buffer,
 4645						 cpu, GFP_KERNEL);
 4646		ring_buffer_read_prepare_sync();
 4647		ring_buffer_read_start(iter->buffer_iter[cpu]);
 4648		tracing_iter_reset(iter, cpu);
 4649	}
 4650
 4651	mutex_unlock(&trace_types_lock);
 4652
 4653	return iter;
 4654
 4655 fail:
 4656	mutex_unlock(&trace_types_lock);
 4657	free_trace_iter_content(iter);
 4658release:
 4659	seq_release_private(inode, file);
 4660	return ERR_PTR(-ENOMEM);
 4661}
 4662
 4663int tracing_open_generic(struct inode *inode, struct file *filp)
 4664{
 4665	int ret;
 4666
 4667	ret = tracing_check_open_get_tr(NULL);
 4668	if (ret)
 4669		return ret;
 4670
 4671	filp->private_data = inode->i_private;
 4672	return 0;
 4673}
 4674
 4675bool tracing_is_disabled(void)
 4676{
 4677	return (tracing_disabled) ? true : false;
 4678}
 4679
 4680/*
 4681 * Open and update trace_array ref count.
 4682 * Must have the current trace_array passed to it.
 4683 */
 4684int tracing_open_generic_tr(struct inode *inode, struct file *filp)
 4685{
 4686	struct trace_array *tr = inode->i_private;
 4687	int ret;
 4688
 4689	ret = tracing_check_open_get_tr(tr);
 4690	if (ret)
 4691		return ret;
 4692
 4693	filp->private_data = inode->i_private;
 4694
 4695	return 0;
 4696}
 4697
 4698/*
 4699 * The private pointer of the inode is the trace_event_file.
 4700 * Update the tr ref count associated to it.
 4701 */
 4702int tracing_open_file_tr(struct inode *inode, struct file *filp)
 4703{
 4704	struct trace_event_file *file = inode->i_private;
 4705	int ret;
 4706
 4707	ret = tracing_check_open_get_tr(file->tr);
 4708	if (ret)
 4709		return ret;
 4710
 4711	mutex_lock(&event_mutex);
 4712
 4713	/* Fail if the file is marked for removal */
 4714	if (file->flags & EVENT_FILE_FL_FREED) {
 4715		trace_array_put(file->tr);
 4716		ret = -ENODEV;
 4717	} else {
 4718		event_file_get(file);
 4719	}
 4720
 4721	mutex_unlock(&event_mutex);
 4722	if (ret)
 4723		return ret;
 4724
 4725	filp->private_data = inode->i_private;
 4726
 4727	return 0;
 4728}
 4729
 4730int tracing_release_file_tr(struct inode *inode, struct file *filp)
 4731{
 4732	struct trace_event_file *file = inode->i_private;
 4733
 4734	trace_array_put(file->tr);
 4735	event_file_put(file);
 4736
 4737	return 0;
 4738}
 4739
 4740int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
 4741{
 4742	tracing_release_file_tr(inode, filp);
 4743	return single_release(inode, filp);
 4744}
 4745
 4746static int tracing_mark_open(struct inode *inode, struct file *filp)
 4747{
 4748	stream_open(inode, filp);
 4749	return tracing_open_generic_tr(inode, filp);
 4750}
 4751
 4752static int tracing_release(struct inode *inode, struct file *file)
 4753{
 4754	struct trace_array *tr = inode->i_private;
 4755	struct seq_file *m = file->private_data;
 4756	struct trace_iterator *iter;
 4757	int cpu;
 4758
 4759	if (!(file->f_mode & FMODE_READ)) {
 4760		trace_array_put(tr);
 4761		return 0;
 4762	}
 4763
 4764	/* Writes do not use seq_file */
 4765	iter = m->private;
 4766	mutex_lock(&trace_types_lock);
 4767
 4768	for_each_tracing_cpu(cpu) {
 4769		if (iter->buffer_iter[cpu])
 4770			ring_buffer_read_finish(iter->buffer_iter[cpu]);
 4771	}
 4772
 4773	if (iter->trace && iter->trace->close)
 4774		iter->trace->close(iter);
 4775
 4776	if (!iter->snapshot && tr->stop_count)
 4777		/* reenable tracing if it was previously enabled */
 4778		tracing_start_tr(tr);
 4779
 4780	__trace_array_put(tr);
 4781
 4782	mutex_unlock(&trace_types_lock);
 4783
 4784	free_trace_iter_content(iter);
 4785	seq_release_private(inode, file);
 4786
 4787	return 0;
 4788}
 4789
 4790int tracing_release_generic_tr(struct inode *inode, struct file *file)
 4791{
 4792	struct trace_array *tr = inode->i_private;
 4793
 4794	trace_array_put(tr);
 4795	return 0;
 4796}
 4797
 4798static int tracing_single_release_tr(struct inode *inode, struct file *file)
 4799{
 4800	struct trace_array *tr = inode->i_private;
 4801
 4802	trace_array_put(tr);
 4803
 4804	return single_release(inode, file);
 4805}
 4806
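/*
 * open() handler for the "trace" file. Opening for write with O_TRUNC
 * erases the buffer (one CPU or all of them); opening for read sets up the
 * seq_file iterator via __tracing_open().
 */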
 4807static int tracing_open(struct inode *inode, struct file *file)
 4808{
 4809	struct trace_array *tr = inode->i_private;
 4810	struct trace_iterator *iter;
 4811	int ret;
 4812
 4813	ret = tracing_check_open_get_tr(tr);
 4814	if (ret)
 4815		return ret;
 4816
 4817	/* If this file was open for write, then erase contents */
 4818	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 4819		int cpu = tracing_get_cpu(inode);
 4820		struct array_buffer *trace_buf = &tr->array_buffer;
 4821
 4822#ifdef CONFIG_TRACER_MAX_TRACE
 4823		if (tr->current_trace->print_max)
 4824			trace_buf = &tr->max_buffer;
 4825#endif
 4826
 4827		if (cpu == RING_BUFFER_ALL_CPUS)
 4828			tracing_reset_online_cpus(trace_buf);
 4829		else
 4830			tracing_reset_cpu(trace_buf, cpu);
 4831	}
 4832
 4833	if (file->f_mode & FMODE_READ) {
 4834		iter = __tracing_open(inode, file, false);
 4835		if (IS_ERR(iter))
 4836			ret = PTR_ERR(iter);
 4837		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 4838			iter->iter_flags |= TRACE_FILE_LAT_FMT;
 4839	}
 4840
 4841	if (ret < 0)
 4842		trace_array_put(tr);
 4843
 4844	return ret;
 4845}
 4846
 4847/*
 4848 * Some tracers are not suitable for instance buffers.
 4849 * A tracer is always available for the global array (toplevel)
 4850 * or if it explicitly states that it is.
 4851 */
 4852static bool
 4853trace_ok_for_array(struct tracer *t, struct trace_array *tr)
 4854{
 4855#ifdef CONFIG_TRACER_SNAPSHOT
 4856	/* arrays with mapped buffer range do not have snapshots */
 4857	if (tr->range_addr_start && t->use_max_tr)
 4858		return false;
 4859#endif
 4860	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
 4861}
 4862
 4863/* Find the next tracer that this trace array may use */
 4864static struct tracer *
 4865get_tracer_for_array(struct trace_array *tr, struct tracer *t)
 4866{
 4867	while (t && !trace_ok_for_array(t, tr))
 4868		t = t->next;
 4869
 4870	return t;
 4871}
 4872
 4873static void *
 4874t_next(struct seq_file *m, void *v, loff_t *pos)
 4875{
 4876	struct trace_array *tr = m->private;
 4877	struct tracer *t = v;
 4878
 4879	(*pos)++;
 4880
 4881	if (t)
 4882		t = get_tracer_for_array(tr, t->next);
 4883
 4884	return t;
 4885}
 4886
 4887static void *t_start(struct seq_file *m, loff_t *pos)
 4888{
 4889	struct trace_array *tr = m->private;
 4890	struct tracer *t;
 4891	loff_t l = 0;
 4892
 4893	mutex_lock(&trace_types_lock);
 4894
 4895	t = get_tracer_for_array(tr, trace_types);
 4896	for (; t && l < *pos; t = t_next(m, t, &l))
 4897			;
 4898
 4899	return t;
 4900}
 4901
 4902static void t_stop(struct seq_file *m, void *p)
 4903{
 4904	mutex_unlock(&trace_types_lock);
 4905}
 4906
 4907static int t_show(struct seq_file *m, void *v)
 4908{
 4909	struct tracer *t = v;
 4910
 4911	if (!t)
 4912		return 0;
 4913
 4914	seq_puts(m, t->name);
 4915	if (t->next)
 4916		seq_putc(m, ' ');
 4917	else
 4918		seq_putc(m, '\n');
 4919
 4920	return 0;
 4921}
 4922
 4923static const struct seq_operations show_traces_seq_ops = {
 4924	.start		= t_start,
 4925	.next		= t_next,
 4926	.stop		= t_stop,
 4927	.show		= t_show,
 4928};
 4929
 4930static int show_traces_open(struct inode *inode, struct file *file)
 4931{
 4932	struct trace_array *tr = inode->i_private;
 4933	struct seq_file *m;
 4934	int ret;
 4935
 4936	ret = tracing_check_open_get_tr(tr);
 4937	if (ret)
 4938		return ret;
 4939
 4940	ret = seq_open(file, &show_traces_seq_ops);
 4941	if (ret) {
 4942		trace_array_put(tr);
 4943		return ret;
 4944	}
 4945
 4946	m = file->private_data;
 4947	m->private = tr;
 4948
 4949	return 0;
 4950}
 4951
 4952static int tracing_seq_release(struct inode *inode, struct file *file)
 4953{
 4954	struct trace_array *tr = inode->i_private;
 4955
 4956	trace_array_put(tr);
 4957	return seq_release(inode, file);
 4958}
 4959
 4960static ssize_t
 4961tracing_write_stub(struct file *filp, const char __user *ubuf,
 4962		   size_t count, loff_t *ppos)
 4963{
 4964	return count;
 4965}
 4966
 4967loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
 4968{
 4969	int ret;
 4970
 4971	if (file->f_mode & FMODE_READ)
 4972		ret = seq_lseek(file, offset, whence);
 4973	else
 4974		file->f_pos = ret = 0;
 4975
 4976	return ret;
 4977}
 4978
 4979static const struct file_operations tracing_fops = {
 4980	.open		= tracing_open,
 4981	.read		= seq_read,
 4982	.read_iter	= seq_read_iter,
 4983	.splice_read	= copy_splice_read,
 4984	.write		= tracing_write_stub,
 4985	.llseek		= tracing_lseek,
 4986	.release	= tracing_release,
 4987};
 4988
 4989static const struct file_operations show_traces_fops = {
 4990	.open		= show_traces_open,
 4991	.read		= seq_read,
 4992	.llseek		= seq_lseek,
 4993	.release	= tracing_seq_release,
 4994};
 4995
 4996static ssize_t
 4997tracing_cpumask_read(struct file *filp, char __user *ubuf,
 4998		     size_t count, loff_t *ppos)
 4999{
 5000	struct trace_array *tr = file_inode(filp)->i_private;
 5001	char *mask_str;
 5002	int len;
 5003
 5004	len = snprintf(NULL, 0, "%*pb\n",
 5005		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
 5006	mask_str = kmalloc(len, GFP_KERNEL);
 5007	if (!mask_str)
 5008		return -ENOMEM;
 5009
 5010	len = snprintf(mask_str, len, "%*pb\n",
 5011		       cpumask_pr_args(tr->tracing_cpumask));
 5012	if (len >= count) {
 5013		count = -EINVAL;
 5014		goto out_err;
 5015	}
 5016	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
 5017
 5018out_err:
 5019	kfree(mask_str);
 5020
 5021	return count;
 5022}
 5023
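/*
 * Switch the set of CPUs being traced: CPUs leaving the mask have recording
 * disabled and their disabled counter raised, CPUs entering the mask are
 * re-enabled, all done under tr->max_lock with interrupts off.
 */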
 5024int tracing_set_cpumask(struct trace_array *tr,
 5025			cpumask_var_t tracing_cpumask_new)
 5026{
 5027	int cpu;
 5028
 5029	if (!tr)
 5030		return -EINVAL;
 5031
 5032	local_irq_disable();
 5033	arch_spin_lock(&tr->max_lock);
 5034	for_each_tracing_cpu(cpu) {
 5035		/*
 5036		 * Increase/decrease the disabled counter if we are
 5037		 * about to flip a bit in the cpumask:
 5038		 */
 5039		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 5040				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 5041			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
 5042			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
 5043#ifdef CONFIG_TRACER_MAX_TRACE
 5044			ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
 5045#endif
 5046		}
 5047		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 5048				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 5049			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
 5050			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
 5051#ifdef CONFIG_TRACER_MAX_TRACE
 5052			ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
 5053#endif
 5054		}
 5055	}
 5056	arch_spin_unlock(&tr->max_lock);
 5057	local_irq_enable();
 5058
 5059	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
 5060
 5061	return 0;
 5062}
 5063
 5064static ssize_t
 5065tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 5066		      size_t count, loff_t *ppos)
 5067{
 5068	struct trace_array *tr = file_inode(filp)->i_private;
 5069	cpumask_var_t tracing_cpumask_new;
 5070	int err;
 5071
 5072	if (count == 0 || count > KMALLOC_MAX_SIZE)
 5073		return -EINVAL;
 5074
 5075	if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
 5076		return -ENOMEM;
 5077
 5078	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
 5079	if (err)
 5080		goto err_free;
 5081
 5082	err = tracing_set_cpumask(tr, tracing_cpumask_new);
 5083	if (err)
 5084		goto err_free;
 5085
 5086	free_cpumask_var(tracing_cpumask_new);
 5087
 5088	return count;
 5089
 5090err_free:
 5091	free_cpumask_var(tracing_cpumask_new);
 5092
 5093	return err;
 5094}
 5095
 5096static const struct file_operations tracing_cpumask_fops = {
 5097	.open		= tracing_open_generic_tr,
 5098	.read		= tracing_cpumask_read,
 5099	.write		= tracing_cpumask_write,
 5100	.release	= tracing_release_generic_tr,
 5101	.llseek		= generic_file_llseek,
 5102};
 5103
 5104static int tracing_trace_options_show(struct seq_file *m, void *v)
 5105{
 5106	struct tracer_opt *trace_opts;
 5107	struct trace_array *tr = m->private;
 5108	u32 tracer_flags;
 5109	int i;
 5110
 5111	guard(mutex)(&trace_types_lock);
 5112
 5113	tracer_flags = tr->current_trace->flags->val;
 5114	trace_opts = tr->current_trace->flags->opts;
 5115
 5116	for (i = 0; trace_options[i]; i++) {
 5117		if (tr->trace_flags & (1 << i))
 5118			seq_printf(m, "%s\n", trace_options[i]);
 5119		else
 5120			seq_printf(m, "no%s\n", trace_options[i]);
 5121	}
 5122
 5123	for (i = 0; trace_opts[i].name; i++) {
 5124		if (tracer_flags & trace_opts[i].bit)
 5125			seq_printf(m, "%s\n", trace_opts[i].name);
 5126		else
 5127			seq_printf(m, "no%s\n", trace_opts[i].name);
 5128	}
 5129
 5130	return 0;
 5131}
 5132
 5133static int __set_tracer_option(struct trace_array *tr,
 5134			       struct tracer_flags *tracer_flags,
 5135			       struct tracer_opt *opts, int neg)
 5136{
 5137	struct tracer *trace = tracer_flags->trace;
 5138	int ret;
 5139
 5140	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
 5141	if (ret)
 5142		return ret;
 5143
 5144	if (neg)
 5145		tracer_flags->val &= ~opts->bit;
 5146	else
 5147		tracer_flags->val |= opts->bit;
 5148	return 0;
 5149}
 5150
 5151/* Try to assign a tracer specific option */
 5152static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
 5153{
 5154	struct tracer *trace = tr->current_trace;
 5155	struct tracer_flags *tracer_flags = trace->flags;
 5156	struct tracer_opt *opts = NULL;
 5157	int i;
 5158
 5159	for (i = 0; tracer_flags->opts[i].name; i++) {
 5160		opts = &tracer_flags->opts[i];
 5161
 5162		if (strcmp(cmp, opts->name) == 0)
 5163			return __set_tracer_option(tr, trace->flags, opts, neg);
 5164	}
 5165
 5166	return -EINVAL;
 5167}
 5168
 5169/* Some tracers require overwrite to stay enabled */
 5170int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
 5171{
 5172	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
 5173		return -1;
 5174
 5175	return 0;
 5176}
 5177
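/*
 * Set or clear a single TRACE_ITER_* flag on @tr. The current tracer may
 * veto the change, and side effects (cmdline/TGID recording, fork
 * following, buffer overwrite mode, trace_printk routing) are applied here.
 */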
 5178int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 5179{
 5180	if ((mask == TRACE_ITER_RECORD_TGID) ||
 5181	    (mask == TRACE_ITER_RECORD_CMD) ||
 5182	    (mask == TRACE_ITER_TRACE_PRINTK))
 5183		lockdep_assert_held(&event_mutex);
 5184
 5185	/* do nothing if flag is already set */
 5186	if (!!(tr->trace_flags & mask) == !!enabled)
 5187		return 0;
 5188
 5189	/* Give the tracer a chance to approve the change */
 5190	if (tr->current_trace->flag_changed)
 5191		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
 5192			return -EINVAL;
 5193
 5194	if (mask == TRACE_ITER_TRACE_PRINTK) {
 5195		if (enabled) {
 5196			update_printk_trace(tr);
 5197		} else {
 5198			/*
 5199			 * The global_trace cannot clear this.
 5200			 * Its flag only gets cleared if another instance sets it.
 5201			 */
 5202			if (printk_trace == &global_trace)
 5203				return -EINVAL;
 5204			/*
 5205			 * An instance must always have it set.
 5206			 * By default, that's the global_trace instance.
 5207			 */
 5208			if (printk_trace == tr)
 5209				update_printk_trace(&global_trace);
 5210		}
 5211	}
 5212
 5213	if (enabled)
 5214		tr->trace_flags |= mask;
 5215	else
 5216		tr->trace_flags &= ~mask;
 5217
 5218	if (mask == TRACE_ITER_RECORD_CMD)
 5219		trace_event_enable_cmd_record(enabled);
 5220
 5221	if (mask == TRACE_ITER_RECORD_TGID) {
 5222
 5223		if (trace_alloc_tgid_map() < 0) {
 5224			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
 5225			return -ENOMEM;
 5226		}
 5227
 5228		trace_event_enable_tgid_record(enabled);
 5229	}
 5230
 5231	if (mask == TRACE_ITER_EVENT_FORK)
 5232		trace_event_follow_fork(tr, enabled);
 5233
 5234	if (mask == TRACE_ITER_FUNC_FORK)
 5235		ftrace_pid_follow_fork(tr, enabled);
 5236
 5237	if (mask == TRACE_ITER_OVERWRITE) {
 5238		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
 5239#ifdef CONFIG_TRACER_MAX_TRACE
 5240		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
 5241#endif
 5242	}
 5243
 5244	if (mask == TRACE_ITER_PRINTK) {
 5245		trace_printk_start_stop_comm(enabled);
 5246		trace_printk_control(enabled);
 5247	}
 5248
 5249	return 0;
 5250}
 5251
 5252int trace_set_options(struct trace_array *tr, char *option)
 5253{
 5254	char *cmp;
 5255	int neg = 0;
 5256	int ret;
 5257	size_t orig_len = strlen(option);
 5258	int len;
 5259
 5260	cmp = strstrip(option);
 5261
 5262	len = str_has_prefix(cmp, "no");
 5263	if (len)
 5264		neg = 1;
 5265
 5266	cmp += len;
 5267
 5268	mutex_lock(&event_mutex);
 5269	mutex_lock(&trace_types_lock);
 5270
 5271	ret = match_string(trace_options, -1, cmp);
 5272	/* If no option could be set, test the specific tracer options */
 5273	if (ret < 0)
 5274		ret = set_tracer_option(tr, cmp, neg);
 5275	else
 5276		ret = set_tracer_flag(tr, 1 << ret, !neg);
 5277
 5278	mutex_unlock(&trace_types_lock);
 5279	mutex_unlock(&event_mutex);
 5280
 5281	/*
 5282	 * If the first trailing whitespace is replaced with '\0' by strstrip,
 5283	 * turn it back into a space.
 5284	 */
 5285	if (orig_len > strlen(option))
 5286		option[strlen(option)] = ' ';
 5287
 5288	return ret;
 5289}
 5290
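/*
 * Apply the comma-separated options saved from the kernel command line in
 * trace_boot_options_buf to the global trace array, one option at a time.
 */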
 5291static void __init apply_trace_boot_options(void)
 5292{
 5293	char *buf = trace_boot_options_buf;
 5294	char *option;
 5295
 5296	while (true) {
 5297		option = strsep(&buf, ",");
 5298
 5299		if (!option)
 5300			break;
 5301
 5302		if (*option)
 5303			trace_set_options(&global_trace, option);
 5304
 5305		/* Put back the comma to allow this to be called again */
 5306		if (buf)
 5307			*(buf - 1) = ',';
 5308	}
 5309}
 5310
 5311static ssize_t
 5312tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 5313			size_t cnt, loff_t *ppos)
 5314{
 5315	struct seq_file *m = filp->private_data;
 5316	struct trace_array *tr = m->private;
 5317	char buf[64];
 5318	int ret;
 5319
 5320	if (cnt >= sizeof(buf))
 5321		return -EINVAL;
 5322
 5323	if (copy_from_user(buf, ubuf, cnt))
 5324		return -EFAULT;
 5325
 5326	buf[cnt] = 0;
 5327
 5328	ret = trace_set_options(tr, buf);
 5329	if (ret < 0)
 5330		return ret;
 5331
 5332	*ppos += cnt;
 5333
 5334	return cnt;
 5335}
 5336
 5337static int tracing_trace_options_open(struct inode *inode, struct file *file)
 5338{
 5339	struct trace_array *tr = inode->i_private;
 5340	int ret;
 5341
 5342	ret = tracing_check_open_get_tr(tr);
 5343	if (ret)
 5344		return ret;
 5345
 5346	ret = single_open(file, tracing_trace_options_show, inode->i_private);
 5347	if (ret < 0)
 5348		trace_array_put(tr);
 5349
 5350	return ret;
 5351}
 5352
 5353static const struct file_operations tracing_iter_fops = {
 5354	.open		= tracing_trace_options_open,
 5355	.read		= seq_read,
 5356	.llseek		= seq_lseek,
 5357	.release	= tracing_single_release_tr,
 5358	.write		= tracing_trace_options_write,
 5359};
 5360
 5361static const char readme_msg[] =
 5362	"tracing mini-HOWTO:\n\n"
 5363	"By default tracefs removes all OTH file permission bits.\n"
 5364	"When mounting tracefs an optional group id can be specified\n"
 5365	"which adds the group to every directory and file in tracefs:\n\n"
 5366	"\t e.g. mount -t tracefs [-o [gid=<gid>]] nodev /sys/kernel/tracing\n\n"
 5367	"# echo 0 > tracing_on : quick way to disable tracing\n"
 5368	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
 5369	" Important files:\n"
 5370	"  trace\t\t\t- The static contents of the buffer\n"
 5371	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
 5372	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
 5373	"  current_tracer\t- function and latency tracers\n"
 5374	"  available_tracers\t- list of configured tracers for current_tracer\n"
 5375	"  error_log\t- error log for failed commands (that support it)\n"
 5376	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
 5377	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
 5378	"  trace_clock\t\t- change the clock used to order events\n"
 5379	"       local:   Per cpu clock but may not be synced across CPUs\n"
 5380	"      global:   Synced across CPUs but slows tracing down.\n"
 5381	"     counter:   Not a clock, but just an increment\n"
 5382	"      uptime:   Jiffy counter from time of boot\n"
 5383	"        perf:   Same clock that perf events use\n"
 5384#ifdef CONFIG_X86_64
 5385	"     x86-tsc:   TSC cycle counter\n"
 5386#endif
 5387	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
 5388	"       delta:   Delta difference against a buffer-wide timestamp\n"
 5389	"    absolute:   Absolute (standalone) timestamp\n"
 5390	"\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
 5391	"\n  trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
 5392	"  tracing_cpumask\t- Limit which CPUs to trace\n"
 5393	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
 5394	"\t\t\t  Remove sub-buffer with rmdir\n"
 5395	"  trace_options\t\t- Set format or modify how tracing happens\n"
 5396	"\t\t\t  Disable an option by prefixing 'no' to the\n"
 5397	"\t\t\t  option name\n"
 5398	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
 5399#ifdef CONFIG_DYNAMIC_FTRACE
 5400	"\n  available_filter_functions - list of functions that can be filtered on\n"
 5401	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
 5402	"\t\t\t  functions\n"
 5403	"\t     accepts: func_full_name or glob-matching-pattern\n"
 5404	"\t     modules: Can select a group via module\n"
 5405	"\t      Format: :mod:<module-name>\n"
 5406	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
 5407	"\t    triggers: a command to perform when function is hit\n"
 5408	"\t      Format: <function>:<trigger>[:count]\n"
 5409	"\t     trigger: traceon, traceoff\n"
 5410	"\t\t      enable_event:<system>:<event>\n"
 5411	"\t\t      disable_event:<system>:<event>\n"
 5412#ifdef CONFIG_STACKTRACE
 5413	"\t\t      stacktrace\n"
 5414#endif
 5415#ifdef CONFIG_TRACER_SNAPSHOT
 5416	"\t\t      snapshot\n"
 5417#endif
 5418	"\t\t      dump\n"
 5419	"\t\t      cpudump\n"
 5420	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
 5421	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
 5422	"\t     The first one will disable tracing every time do_fault is hit\n"
 5423	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
 5424	"\t       The first time do trap is hit and it disables tracing, the\n"
 5425	"\t       counter will decrement to 2. If tracing is already disabled,\n"
 5426	"\t       the counter will not decrement. It only decrements when the\n"
 5427	"\t       trigger did work\n"
 5428	"\t     To remove trigger without count:\n"
 5429	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
 5430	"\t     To remove trigger with a count:\n"
 5431	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
 5432	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
 5433	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
 5434	"\t    modules: Can select a group via module command :mod:\n"
 5435	"\t    Does not accept triggers\n"
 5436#endif /* CONFIG_DYNAMIC_FTRACE */
 5437#ifdef CONFIG_FUNCTION_TRACER
 5438	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
 5439	"\t\t    (function)\n"
 5440	"  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
 5441	"\t\t    (function)\n"
 5442#endif
 5443#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 5444	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
 5445	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
 5446	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
 5447#endif
 5448#ifdef CONFIG_TRACER_SNAPSHOT
 5449	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
 5450	"\t\t\t  snapshot buffer. Read the contents for more\n"
 5451	"\t\t\t  information\n"
 5452#endif
 5453#ifdef CONFIG_STACK_TRACER
 5454	"  stack_trace\t\t- Shows the max stack trace when active\n"
 5455	"  stack_max_size\t- Shows current max stack size that was traced\n"
 5456	"\t\t\t  Write into this file to reset the max size (trigger a\n"
 5457	"\t\t\t  new trace)\n"
 5458#ifdef CONFIG_DYNAMIC_FTRACE
 5459	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
 5460	"\t\t\t  traces\n"
 5461#endif
 5462#endif /* CONFIG_STACK_TRACER */
 5463#ifdef CONFIG_DYNAMIC_EVENTS
 5464	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
 5465	"\t\t\t  Write into this file to define/undefine new trace events.\n"
 5466#endif
 5467#ifdef CONFIG_KPROBE_EVENTS
 5468	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
 5469	"\t\t\t  Write into this file to define/undefine new trace events.\n"
 5470#endif
 5471#ifdef CONFIG_UPROBE_EVENTS
 5472	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
 5473	"\t\t\t  Write into this file to define/undefine new trace events.\n"
 5474#endif
 5475#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
 5476    defined(CONFIG_FPROBE_EVENTS)
 5477	"\t  accepts: event-definitions (one definition per line)\n"
 5478#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
 5479	"\t   Format: p[:[<group>/][<event>]] <place> [<args>]\n"
 5480	"\t           r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
 5481#endif
 5482#ifdef CONFIG_FPROBE_EVENTS
 5483	"\t           f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
 5484	"\t           t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
 5485#endif
 5486#ifdef CONFIG_HIST_TRIGGERS
 5487	"\t           s:[synthetic/]<event> <field> [<field>]\n"
 5488#endif
 5489	"\t           e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
 5490	"\t           -:[<group>/][<event>]\n"
 5491#ifdef CONFIG_KPROBE_EVENTS
 5492	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
 5493	"\t    place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
 5494#endif
 5495#ifdef CONFIG_UPROBE_EVENTS
 5496	"\t    place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
 5497#endif
 5498	"\t     args: <name>=fetcharg[:type]\n"
 5499	"\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
 5500#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
 5501	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
 5502#ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
 5503	"\t           <argname>[->field[->field|.field...]],\n"
 5504#endif
 5505#else
 5506	"\t           $stack<index>, $stack, $retval, $comm,\n"
 5507#endif
 5508	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
 5509	"\t     kernel return probes support: $retval, $arg<N>, $comm\n"
 5510	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
 5511	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
 5512	"\t           symstr, %pd/%pD, <type>\\[<array-size>\\]\n"
 5513#ifdef CONFIG_HIST_TRIGGERS
 5514	"\t    field: <stype> <name>;\n"
 5515	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
 5516	"\t           [unsigned] char/int/long\n"
 5517#endif
 5518	"\t    efield: For event probes ('e' types), the field is one of the fields\n"
 5519	"\t            of the <attached-group>/<attached-event>.\n"
 5520#endif
 5521	"  events/\t\t- Directory containing all trace event subsystems:\n"
 5522	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
 5523	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
 5524	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
 5525	"\t\t\t  events\n"
 5526	"      filter\t\t- If set, only events passing filter are traced\n"
 5527	"  events/<system>/<event>/\t- Directory containing control files for\n"
 5528	"\t\t\t  <event>:\n"
 5529	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
 5530	"      filter\t\t- If set, only events passing filter are traced\n"
 5531	"      trigger\t\t- If set, a command to perform when event is hit\n"
 5532	"\t    Format: <trigger>[:count][if <filter>]\n"
 5533	"\t   trigger: traceon, traceoff\n"
 5534	"\t            enable_event:<system>:<event>\n"
 5535	"\t            disable_event:<system>:<event>\n"
 5536#ifdef CONFIG_HIST_TRIGGERS
 5537	"\t            enable_hist:<system>:<event>\n"
 5538	"\t            disable_hist:<system>:<event>\n"
 5539#endif
 5540#ifdef CONFIG_STACKTRACE
 5541	"\t\t    stacktrace\n"
 5542#endif
 5543#ifdef CONFIG_TRACER_SNAPSHOT
 5544	"\t\t    snapshot\n"
 5545#endif
 5546#ifdef CONFIG_HIST_TRIGGERS
 5547	"\t\t    hist (see below)\n"
 5548#endif
 5549	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
 5550	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
 5551	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
 5552	"\t                  events/block/block_unplug/trigger\n"
 5553	"\t   The first disables tracing every time block_unplug is hit.\n"
 5554	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
 5555	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
 5556	"\t     is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
 5557	"\t   Like function triggers, the counter is only decremented if it\n"
 5558	"\t    enabled or disabled tracing.\n"
 5559	"\t   To remove a trigger without a count:\n"
 5560	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
 5561	"\t   To remove a trigger with a count:\n"
 5562	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
 5563	"\t   Filters can be ignored when removing a trigger.\n"
 5564#ifdef CONFIG_HIST_TRIGGERS
 5565	"      hist trigger\t- If set, event hits are aggregated into a hash table\n"
 5566	"\t    Format: hist:keys=<field1[,field2,...]>\n"
 5567	"\t            [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
 5568	"\t            [:values=<field1[,field2,...]>]\n"
 5569	"\t            [:sort=<field1[,field2,...]>]\n"
 5570	"\t            [:size=#entries]\n"
 5571	"\t            [:pause][:continue][:clear]\n"
 5572	"\t            [:name=histname1]\n"
 5573	"\t            [:nohitcount]\n"
 5574	"\t            [:<handler>.<action>]\n"
 5575	"\t            [if <filter>]\n\n"
 5576	"\t    Note, special fields can be used as well:\n"
 5577	"\t            common_timestamp - to record current timestamp\n"
 5578	"\t            common_cpu - to record the CPU the event happened on\n"
 5579	"\n"
 5580	"\t    A hist trigger variable can be:\n"
 5581	"\t        - a reference to a field e.g. x=current_timestamp,\n"
 5582	"\t        - a reference to another variable e.g. y=$x,\n"
 5583	"\t        - a numeric literal: e.g. ms_per_sec=1000,\n"
 5584	"\t        - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
 5585	"\n"
 5586	"\t    hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
 5587	"\t    multiplication(*) and division(/) operators. An operand can be either a\n"
 5588	"\t    variable reference, field or numeric literal.\n"
 5589	"\n"
 5590	"\t    When a matching event is hit, an entry is added to a hash\n"
 5591	"\t    table using the key(s) and value(s) named, and the value of a\n"
 5592	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
 5593	"\t    correspond to fields in the event's format description.  Keys\n"
 5594	"\t    can be any field, or the special string 'common_stacktrace'.\n"
 5595	"\t    Compound keys consisting of up to two fields can be specified\n"
 5596	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
 5597	"\t    fields.  Sort keys consisting of up to two fields can be\n"
 5598	"\t    specified using the 'sort' keyword.  The sort direction can\n"
 5599	"\t    be modified by appending '.descending' or '.ascending' to a\n"
 5600	"\t    sort field.  The 'size' parameter can be used to specify more\n"
 5601	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
 5602	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
 5603	"\t    its histogram data will be shared with other triggers of the\n"
 5604	"\t    same name, and trigger hits will update this common data.\n\n"
 5605	"\t    Reading the 'hist' file for the event will dump the hash\n"
 5606	"\t    table in its entirety to stdout.  If there are multiple hist\n"
 5607	"\t    triggers attached to an event, there will be a table for each\n"
 5608	"\t    trigger in the output.  The table displayed for a named\n"
 5609	"\t    trigger will be the same as any other instance having the\n"
 5610	"\t    same name.  The default format used to display a given field\n"
 5611	"\t    can be modified by appending any of the following modifiers\n"
 5612	"\t    to the field name, as applicable:\n\n"
 5613	"\t            .hex        display a number as a hex value\n"
 5614	"\t            .sym        display an address as a symbol\n"
 5615	"\t            .sym-offset display an address as a symbol and offset\n"
 5616	"\t            .execname   display a common_pid as a program name\n"
 5617	"\t            .syscall    display a syscall id as a syscall name\n"
 5618	"\t            .log2       display log2 value rather than raw number\n"
 5619	"\t            .buckets=size  display values in groups of size rather than raw number\n"
 5620	"\t            .usecs      display a common_timestamp in microseconds\n"
 5621	"\t            .percent    display a number as a percentage value\n"
 5622	"\t            .graph      display a bar-graph of a value\n\n"
 5623	"\t    The 'pause' parameter can be used to pause an existing hist\n"
 5624	"\t    trigger or to start a hist trigger but not log any events\n"
 5625	"\t    until told to do so.  'continue' can be used to start or\n"
 5626	"\t    restart a paused hist trigger.\n\n"
 5627	"\t    The 'clear' parameter will clear the contents of a running\n"
 5628	"\t    hist trigger and leave its current paused/active state\n"
 5629	"\t    unchanged.\n\n"
 5630	"\t    The 'nohitcount' (or NOHC) parameter will suppress display of\n"
 5631	"\t    raw hitcount in the histogram.\n\n"
 5632	"\t    The enable_hist and disable_hist triggers can be used to\n"
 5633	"\t    have one event conditionally start and stop another event's\n"
 5634	"\t    already-attached hist trigger.  The syntax is analogous to\n"
 5635	"\t    the enable_event and disable_event triggers.\n\n"
 5636	"\t    Hist trigger handlers and actions are executed whenever\n"
 5637	"\t    a histogram entry is added or updated.  They take the form:\n\n"
 5638	"\t        <handler>.<action>\n\n"
 5639	"\t    The available handlers are:\n\n"
 5640	"\t        onmatch(matching.event)  - invoke on addition or update\n"
 5641	"\t        onmax(var)               - invoke if var exceeds current max\n"
 5642	"\t        onchange(var)            - invoke action if var changes\n\n"
 5643	"\t    The available actions are:\n\n"
 5644	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
 5645	"\t        save(field,...)                      - save current event fields\n"
 5646#ifdef CONFIG_TRACER_SNAPSHOT
 5647	"\t        snapshot()                           - snapshot the trace buffer\n\n"
 5648#endif
 5649#ifdef CONFIG_SYNTH_EVENTS
 5650	"  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
 5651	"\t  Write into this file to define/undefine new synthetic events.\n"
 5652	"\t     example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
 5653#endif
 5654#endif
 5655;
 5656
 5657static ssize_t
 5658tracing_readme_read(struct file *filp, char __user *ubuf,
 5659		       size_t cnt, loff_t *ppos)
 5660{
 5661	return simple_read_from_buffer(ubuf, cnt, ppos,
 5662					readme_msg, strlen(readme_msg));
 5663}
 5664
 5665static const struct file_operations tracing_readme_fops = {
 5666	.open		= tracing_open_generic,
 5667	.read		= tracing_readme_read,
 5668	.llseek		= generic_file_llseek,
 5669};
 5670
 5671#ifdef CONFIG_TRACE_EVAL_MAP_FILE
 5672static union trace_eval_map_item *
 5673update_eval_map(union trace_eval_map_item *ptr)
 5674{
 5675	if (!ptr->map.eval_string) {
 5676		if (ptr->tail.next) {
 5677			ptr = ptr->tail.next;
 5678			/* Set ptr to the next real item (skip head) */
 5679			ptr++;
 5680		} else
 5681			return NULL;
 5682	}
 5683	return ptr;
 5684}
 5685
 5686static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
 5687{
 5688	union trace_eval_map_item *ptr = v;
 5689
 5690	/*
 5691	 * Paranoid! If ptr points to end, we don't want to increment past it.
 5692	 * This really should never happen.
 5693	 */
 5694	(*pos)++;
 5695	ptr = update_eval_map(ptr);
 5696	if (WARN_ON_ONCE(!ptr))
 5697		return NULL;
 5698
 5699	ptr++;
 5700	ptr = update_eval_map(ptr);
 5701
 5702	return ptr;
 5703}
 5704
 5705static void *eval_map_start(struct seq_file *m, loff_t *pos)
 5706{
 5707	union trace_eval_map_item *v;
 5708	loff_t l = 0;
 5709
 5710	mutex_lock(&trace_eval_mutex);
 5711
 5712	v = trace_eval_maps;
 5713	if (v)
 5714		v++;
 5715
 5716	while (v && l < *pos) {
 5717		v = eval_map_next(m, v, &l);
 5718	}
 5719
 5720	return v;
 5721}
 5722
 5723static void eval_map_stop(struct seq_file *m, void *v)
 5724{
 5725	mutex_unlock(&trace_eval_mutex);
 5726}
 5727
 5728static int eval_map_show(struct seq_file *m, void *v)
 5729{
 5730	union trace_eval_map_item *ptr = v;
 5731
 5732	seq_printf(m, "%s %ld (%s)\n",
 5733		   ptr->map.eval_string, ptr->map.eval_value,
 5734		   ptr->map.system);
 5735
 5736	return 0;
 5737}
 5738
 5739static const struct seq_operations tracing_eval_map_seq_ops = {
 5740	.start		= eval_map_start,
 5741	.next		= eval_map_next,
 5742	.stop		= eval_map_stop,
 5743	.show		= eval_map_show,
 5744};
 5745
 5746static int tracing_eval_map_open(struct inode *inode, struct file *filp)
 5747{
 5748	int ret;
 5749
 5750	ret = tracing_check_open_get_tr(NULL);
 5751	if (ret)
 5752		return ret;
 5753
 5754	return seq_open(filp, &tracing_eval_map_seq_ops);
 5755}
 5756
 5757static const struct file_operations tracing_eval_map_fops = {
 5758	.open		= tracing_eval_map_open,
 5759	.read		= seq_read,
 5760	.llseek		= seq_lseek,
 5761	.release	= seq_release,
 5762};
 5763
 5764static inline union trace_eval_map_item *
 5765trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
 5766{
 5767	/* Return tail of array given the head */
 5768	return ptr + ptr->head.length + 1;
 5769}
 5770
 5771static void
 5772trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
 5773			   int len)
 5774{
 5775	struct trace_eval_map **stop;
 5776	struct trace_eval_map **map;
 5777	union trace_eval_map_item *map_array;
 5778	union trace_eval_map_item *ptr;
 5779
 5780	stop = start + len;
 5781
 5782	/*
 5783	 * The trace_eval_maps contains the map plus a head and tail item,
 5784	 * where the head holds the module and length of array, and the
 5785	 * tail holds a pointer to the next list.
 5786	 */
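	/*
	 * Illustrative sketch of that layout (not part of the original
	 * comment), for a module providing three eval maps:
	 *
	 *   [ head: mod, length=3 ][ map 0 ][ map 1 ][ map 2 ][ tail: next ]
	 *
	 * trace_eval_jmp_to_tail() starts at the head and skips over
	 * 'length' map entries to reach the tail item.
	 */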
 5787	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
 5788	if (!map_array) {
 5789		pr_warn("Unable to allocate trace eval mapping\n");
 5790		return;
 5791	}
 5792
 5793	guard(mutex)(&trace_eval_mutex);
 5794
 5795	if (!trace_eval_maps)
 5796		trace_eval_maps = map_array;
 5797	else {
 5798		ptr = trace_eval_maps;
 5799		for (;;) {
 5800			ptr = trace_eval_jmp_to_tail(ptr);
 5801			if (!ptr->tail.next)
 5802				break;
 5803			ptr = ptr->tail.next;
 5804
 5805		}
 5806		ptr->tail.next = map_array;
 5807	}
 5808	map_array->head.mod = mod;
 5809	map_array->head.length = len;
 5810	map_array++;
 5811
 5812	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
 5813		map_array->map = **map;
 5814		map_array++;
 5815	}
 5816	memset(map_array, 0, sizeof(*map_array));
 5817}
 5818
 5819static void trace_create_eval_file(struct dentry *d_tracer)
 5820{
 5821	trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
 5822			  NULL, &tracing_eval_map_fops);
 5823}
 5824
 5825#else /* CONFIG_TRACE_EVAL_MAP_FILE */
 5826static inline void trace_create_eval_file(struct dentry *d_tracer) { }
 5827static inline void trace_insert_eval_map_file(struct module *mod,
 5828			      struct trace_eval_map **start, int len) { }
 5829#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
 5830
 5831static void trace_insert_eval_map(struct module *mod,
 5832				  struct trace_eval_map **start, int len)
 5833{
 5834	struct trace_eval_map **map;
 5835
 5836	if (len <= 0)
 5837		return;
 5838
 5839	map = start;
 5840
 5841	trace_event_eval_update(map, len);
 5842
 5843	trace_insert_eval_map_file(mod, start, len);
 5844}
 5845
 5846static ssize_t
 5847tracing_set_trace_read(struct file *filp, char __user *ubuf,
 5848		       size_t cnt, loff_t *ppos)
 5849{
 5850	struct trace_array *tr = filp->private_data;
 5851	char buf[MAX_TRACER_SIZE+2];
 5852	int r;
 5853
 5854	mutex_lock(&trace_types_lock);
 5855	r = sprintf(buf, "%s\n", tr->current_trace->name);
 5856	mutex_unlock(&trace_types_lock);
 5857
 5858	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 5859}
 5860
 5861int tracer_init(struct tracer *t, struct trace_array *tr)
 5862{
 5863	tracing_reset_online_cpus(&tr->array_buffer);
 5864	return t->init(tr);
 5865}
 5866
 5867static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
 5868{
 5869	int cpu;
 5870
 5871	for_each_tracing_cpu(cpu)
 5872		per_cpu_ptr(buf->data, cpu)->entries = val;
 5873}
 5874
 5875static void update_buffer_entries(struct array_buffer *buf, int cpu)
 5876{
 5877	if (cpu == RING_BUFFER_ALL_CPUS) {
 5878		set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
 5879	} else {
 5880		per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
 5881	}
 5882}
 5883
 5884#ifdef CONFIG_TRACER_MAX_TRACE
 5885/* resize @trace_buf's buffer to the size of @size_buf's entries */
 5886static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
 5887					struct array_buffer *size_buf, int cpu_id)
 5888{
 5889	int cpu, ret = 0;
 5890
 5891	if (cpu_id == RING_BUFFER_ALL_CPUS) {
 5892		for_each_tracing_cpu(cpu) {
 5893			ret = ring_buffer_resize(trace_buf->buffer,
 5894				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
 5895			if (ret < 0)
 5896				break;
 5897			per_cpu_ptr(trace_buf->data, cpu)->entries =
 5898				per_cpu_ptr(size_buf->data, cpu)->entries;
 5899		}
 5900	} else {
 5901		ret = ring_buffer_resize(trace_buf->buffer,
 5902				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
 5903		if (ret == 0)
 5904			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
 5905				per_cpu_ptr(size_buf->data, cpu_id)->entries;
 5906	}
 5907
 5908	return ret;
 5909}
 5910#endif /* CONFIG_TRACER_MAX_TRACE */
 5911
 5912static int __tracing_resize_ring_buffer(struct trace_array *tr,
 5913					unsigned long size, int cpu)
 5914{
 5915	int ret;
 5916
 5917	/*
 5918	 * If kernel or user changes the size of the ring buffer
 5919	 * we use the size that was given, and we can forget about
 5920	 * expanding it later.
 5921	 */
 5922	trace_set_ring_buffer_expanded(tr);
 5923
 5924	/* May be called before buffers are initialized */
 5925	if (!tr->array_buffer.buffer)
 5926		return 0;
 5927
 5928	/* Do not allow tracing while resizing ring buffer */
 5929	tracing_stop_tr(tr);
 5930
 5931	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
 5932	if (ret < 0)
 5933		goto out_start;
 5934
 5935#ifdef CONFIG_TRACER_MAX_TRACE
 5936	if (!tr->allocated_snapshot)
 5937		goto out;
 5938
 5939	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
 5940	if (ret < 0) {
 5941		int r = resize_buffer_duplicate_size(&tr->array_buffer,
 5942						     &tr->array_buffer, cpu);
 5943		if (r < 0) {
 5944			/*
 5945			 * AARGH! We are left with different
 5946			 * size max buffer!!!!
 5947			 * The max buffer is our "snapshot" buffer.
 5948			 * When a tracer needs a snapshot (one of the
 5949			 * latency tracers), it swaps the max buffer
 5950			 * with the saved snapshot. We succeeded in updating
 5951			 * the size of the main buffer, but failed to update
 5952			 * the size of the max buffer. But when we tried
 5953			 * to reset the main buffer to the original size, we
 5954			 * failed there too. This is very unlikely to
 5955			 * happen, but if it does, warn and kill all
 5956			 * tracing.
 5957			 */
 5958			WARN_ON(1);
 5959			tracing_disabled = 1;
 5960		}
 5961		goto out_start;
 5962	}
 5963
 5964	update_buffer_entries(&tr->max_buffer, cpu);
 5965
 5966 out:
 5967#endif /* CONFIG_TRACER_MAX_TRACE */
 5968
 5969	update_buffer_entries(&tr->array_buffer, cpu);
 5970 out_start:
 5971	tracing_start_tr(tr);
 5972	return ret;
 5973}
 5974
 5975ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
 5976				  unsigned long size, int cpu_id)
 5977{
 5978	guard(mutex)(&trace_types_lock);
 5979
 5980	if (cpu_id != RING_BUFFER_ALL_CPUS) {
 5981		/* make sure, this cpu is enabled in the mask */
 5982		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask))
 5983			return -EINVAL;
 5984	}
 5985
 5986	return __tracing_resize_ring_buffer(tr, size, cpu_id);
 5987}
 5988
 5989static void update_last_data(struct trace_array *tr)
 5990{
 5991	if (!tr->text_delta && !tr->data_delta)
 5992		return;
 5993
 5994	/*
 5995	 * Need to clear all CPU buffers as there cannot be events
 5996	 * from the previous boot mixed with events from this boot
 5997	 * as that will cause a confusing trace. Need to clear all
 5998	 * CPU buffers, even for those that may currently be offline.
 5999	 */
 6000	tracing_reset_all_cpus(&tr->array_buffer);
 6001
 6002	/* Using current data now */
 6003	tr->text_delta = 0;
 6004	tr->data_delta = 0;
 6005}
 6006
 6007/**
 6008 * tracing_update_buffers - used by tracing facility to expand ring buffers
 6009 * @tr: The tracing instance
 6010 *
 6011 * To save memory when tracing is never used on a system that has it
 6012 * configured in, the ring buffers are set to a minimum size. But once
 6013 * a user starts to use the tracing facility, they need to grow
 6014 * to their default size.
 6015 *
 6016 * This function is to be called when a tracer is about to be used.
 6017 */
 6018int tracing_update_buffers(struct trace_array *tr)
 6019{
 6020	int ret = 0;
 6021
 6022	mutex_lock(&trace_types_lock);
 6023
 6024	update_last_data(tr);
 6025
 6026	if (!tr->ring_buffer_expanded)
 6027		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
 6028						RING_BUFFER_ALL_CPUS);
 6029	mutex_unlock(&trace_types_lock);
 6030
 6031	return ret;
 6032}
 6033
 6034struct trace_option_dentry;
 6035
 6036static void
 6037create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
 6038
 6039/*
 6040 * Used to clear out the tracer before deletion of an instance.
 6041 * Must have trace_types_lock held.
 6042 */
 6043static void tracing_set_nop(struct trace_array *tr)
 6044{
 6045	if (tr->current_trace == &nop_trace)
 6046		return;
 6047
 6048	tr->current_trace->enabled--;
 6049
 6050	if (tr->current_trace->reset)
 6051		tr->current_trace->reset(tr);
 6052
 6053	tr->current_trace = &nop_trace;
 6054}
 6055
 6056static bool tracer_options_updated;
 6057
 6058static void add_tracer_options(struct trace_array *tr, struct tracer *t)
 6059{
 6060	/* Only enable if the directory has been created already. */
 6061	if (!tr->dir)
 6062		return;
 6063
 6064	/* Only create trace option files after update_tracer_options finishes */
 6065	if (!tracer_options_updated)
 6066		return;
 6067
 6068	create_trace_option_files(tr, t);
 6069}
 6070
 6071int tracing_set_tracer(struct trace_array *tr, const char *buf)
 6072{
 6073	struct tracer *t;
 6074#ifdef CONFIG_TRACER_MAX_TRACE
 6075	bool had_max_tr;
 6076#endif
 6077	int ret;
 6078
 6079	guard(mutex)(&trace_types_lock);
 6080
 6081	update_last_data(tr);
 6082
 6083	if (!tr->ring_buffer_expanded) {
 6084		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
 6085						RING_BUFFER_ALL_CPUS);
 6086		if (ret < 0)
 6087			return ret;
 6088		ret = 0;
 6089	}
 6090
 6091	for (t = trace_types; t; t = t->next) {
 6092		if (strcmp(t->name, buf) == 0)
 6093			break;
 6094	}
 6095	if (!t)
 6096		return -EINVAL;
 6097
 6098	if (t == tr->current_trace)
 6099		return 0;
 6100
 6101#ifdef CONFIG_TRACER_SNAPSHOT
 6102	if (t->use_max_tr) {
 6103		local_irq_disable();
 6104		arch_spin_lock(&tr->max_lock);
 6105		ret = tr->cond_snapshot ? -EBUSY : 0;
 6106		arch_spin_unlock(&tr->max_lock);
 6107		local_irq_enable();
 6108		if (ret)
 6109			return ret;
 6110	}
 6111#endif
 6112	/* Some tracers won't work on the kernel command line */
 6113	if (system_state < SYSTEM_RUNNING && t->noboot) {
 6114		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
 6115			t->name);
 6116		return 0;
 6117	}
 6118
 6119	/* Some tracers are only allowed for the top level buffer */
 6120	if (!trace_ok_for_array(t, tr))
 6121		return -EINVAL;
 6122
 6123	/* If trace pipe files are being read, we can't change the tracer */
 6124	if (tr->trace_ref)
 6125		return -EBUSY;
 6126
 6127	trace_branch_disable();
 6128
 6129	tr->current_trace->enabled--;
 6130
 6131	if (tr->current_trace->reset)
 6132		tr->current_trace->reset(tr);
 6133
 6134#ifdef CONFIG_TRACER_MAX_TRACE
 6135	had_max_tr = tr->current_trace->use_max_tr;
 6136
 6137	/* Current trace needs to be nop_trace before synchronize_rcu */
 6138	tr->current_trace = &nop_trace;
 6139
 6140	if (had_max_tr && !t->use_max_tr) {
 6141		/*
 6142		 * We need to make sure that the update_max_tr sees that
 6143		 * current_trace changed to nop_trace to keep it from
 6144		 * swapping the buffers after we resize it.
 6145		 * The update_max_tr is called with interrupts disabled,
 6146		 * so a synchronize_rcu() is sufficient.
 6147		 */
 6148		synchronize_rcu();
 6149		free_snapshot(tr);
 6150		tracing_disarm_snapshot(tr);
 6151	}
 6152
 6153	if (!had_max_tr && t->use_max_tr) {
 6154		ret = tracing_arm_snapshot_locked(tr);
 6155		if (ret)
 6156			return ret;
 6157	}
 6158#else
 6159	tr->current_trace = &nop_trace;
 6160#endif
 6161
 6162	if (t->init) {
 6163		ret = tracer_init(t, tr);
 6164		if (ret) {
 6165#ifdef CONFIG_TRACER_MAX_TRACE
 6166			if (t->use_max_tr)
 6167				tracing_disarm_snapshot(tr);
 6168#endif
 6169			return ret;
 6170		}
 6171	}
 6172
 6173	tr->current_trace = t;
 6174	tr->current_trace->enabled++;
 6175	trace_branch_enable(tr);
 6176
 6177	return 0;
 6178}
 6179
 6180static ssize_t
 6181tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 6182			size_t cnt, loff_t *ppos)
 6183{
 6184	struct trace_array *tr = filp->private_data;
 6185	char buf[MAX_TRACER_SIZE+1];
 6186	char *name;
 6187	size_t ret;
 6188	int err;
 6189
 6190	ret = cnt;
 6191
 6192	if (cnt > MAX_TRACER_SIZE)
 6193		cnt = MAX_TRACER_SIZE;
 6194
 6195	if (copy_from_user(buf, ubuf, cnt))
 6196		return -EFAULT;
 6197
 6198	buf[cnt] = 0;
 6199
 6200	name = strim(buf);
 6201
 6202	err = tracing_set_tracer(tr, name);
 6203	if (err)
 6204		return err;
 6205
 6206	*ppos += ret;
 6207
 6208	return ret;
 6209}
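
/*
 * Illustrative usage (not from the original source): this write handler is
 * used for the "current_tracer" tracefs file, so selecting a tracer from
 * user space looks like
 *
 *   echo function > /sys/kernel/tracing/current_tracer
 *
 * The path assumes the default tracefs mount point.
 */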
 6210
 6211static ssize_t
 6212tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
 6213		   size_t cnt, loff_t *ppos)
 6214{
 6215	char buf[64];
 6216	int r;
 6217
 6218	r = snprintf(buf, sizeof(buf), "%ld\n",
 6219		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
 6220	if (r > sizeof(buf))
 6221		r = sizeof(buf);
 6222	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 6223}
 6224
 6225static ssize_t
 6226tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
 6227		    size_t cnt, loff_t *ppos)
 6228{
 6229	unsigned long val;
 6230	int ret;
 6231
 6232	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 6233	if (ret)
 6234		return ret;
 6235
 6236	*ptr = val * 1000;
 6237
 6238	return cnt;
 6239}
 6240
 6241static ssize_t
 6242tracing_thresh_read(struct file *filp, char __user *ubuf,
 6243		    size_t cnt, loff_t *ppos)
 6244{
 6245	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
 6246}
 6247
 6248static ssize_t
 6249tracing_thresh_write(struct file *filp, const char __user *ubuf,
 6250		     size_t cnt, loff_t *ppos)
 6251{
 6252	struct trace_array *tr = filp->private_data;
 6253	int ret;
 6254
 6255	guard(mutex)(&trace_types_lock);
 6256	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
 6257	if (ret < 0)
 6258		return ret;
 6259
 6260	if (tr->current_trace->update_thresh) {
 6261		ret = tr->current_trace->update_thresh(tr);
 6262		if (ret < 0)
 6263			return ret;
 6264	}
 6265
 6266	return cnt;
 6267}
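
/*
 * Illustrative usage (not from the original source): tracing_thresh is
 * exposed in microseconds (tracing_nsecs_write() multiplies by 1000), so
 * for example
 *
 *   echo 100 > /sys/kernel/tracing/tracing_thresh
 *
 * makes the latency tracers record only latencies above 100 usecs. The
 * path assumes the default tracefs mount point.
 */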
 6268
 6269#ifdef CONFIG_TRACER_MAX_TRACE
 6270
 6271static ssize_t
 6272tracing_max_lat_read(struct file *filp, char __user *ubuf,
 6273		     size_t cnt, loff_t *ppos)
 6274{
 6275	struct trace_array *tr = filp->private_data;
 6276
 6277	return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
 6278}
 6279
 6280static ssize_t
 6281tracing_max_lat_write(struct file *filp, const char __user *ubuf,
 6282		      size_t cnt, loff_t *ppos)
 6283{
 6284	struct trace_array *tr = filp->private_data;
 6285
 6286	return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
 6287}
 6288
 6289#endif
 6290
 6291static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
 6292{
 6293	if (cpu == RING_BUFFER_ALL_CPUS) {
 6294		if (cpumask_empty(tr->pipe_cpumask)) {
 6295			cpumask_setall(tr->pipe_cpumask);
 6296			return 0;
 6297		}
 6298	} else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
 6299		cpumask_set_cpu(cpu, tr->pipe_cpumask);
 6300		return 0;
 6301	}
 6302	return -EBUSY;
 6303}
 6304
 6305static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
 6306{
 6307	if (cpu == RING_BUFFER_ALL_CPUS) {
 6308		WARN_ON(!cpumask_full(tr->pipe_cpumask));
 6309		cpumask_clear(tr->pipe_cpumask);
 6310	} else {
 6311		WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
 6312		cpumask_clear_cpu(cpu, tr->pipe_cpumask);
 6313	}
 6314}
 6315
 6316static int tracing_open_pipe(struct inode *inode, struct file *filp)
 6317{
 6318	struct trace_array *tr = inode->i_private;
 6319	struct trace_iterator *iter;
 6320	int cpu;
 6321	int ret;
 6322
 6323	ret = tracing_check_open_get_tr(tr);
 6324	if (ret)
 6325		return ret;
 6326
 6327	mutex_lock(&trace_types_lock);
 6328	cpu = tracing_get_cpu(inode);
 6329	ret = open_pipe_on_cpu(tr, cpu);
 6330	if (ret)
 6331		goto fail_pipe_on_cpu;
 6332
 6333	/* create a buffer to store the information to pass to userspace */
 6334	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 6335	if (!iter) {
 6336		ret = -ENOMEM;
 6337		goto fail_alloc_iter;
 6338	}
 6339
 6340	trace_seq_init(&iter->seq);
 6341	iter->trace = tr->current_trace;
 6342
 6343	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
 6344		ret = -ENOMEM;
 6345		goto fail;
 6346	}
 6347
 6348	/* trace pipe does not show start of buffer */
 6349	cpumask_setall(iter->started);
 6350
 6351	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 6352		iter->iter_flags |= TRACE_FILE_LAT_FMT;
 6353
 6354	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
 6355	if (trace_clocks[tr->clock_id].in_ns)
 6356		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 6357
 6358	iter->tr = tr;
 6359	iter->array_buffer = &tr->array_buffer;
 6360	iter->cpu_file = cpu;
 6361	mutex_init(&iter->mutex);
 6362	filp->private_data = iter;
 6363
 6364	if (iter->trace->pipe_open)
 6365		iter->trace->pipe_open(iter);
 6366
 6367	nonseekable_open(inode, filp);
 6368
 6369	tr->trace_ref++;
 6370
 6371	mutex_unlock(&trace_types_lock);
 6372	return ret;
 6373
 6374fail:
 6375	kfree(iter);
 6376fail_alloc_iter:
 6377	close_pipe_on_cpu(tr, cpu);
 6378fail_pipe_on_cpu:
 6379	__trace_array_put(tr);
 6380	mutex_unlock(&trace_types_lock);
 6381	return ret;
 6382}
 6383
 6384static int tracing_release_pipe(struct inode *inode, struct file *file)
 6385{
 6386	struct trace_iterator *iter = file->private_data;
 6387	struct trace_array *tr = inode->i_private;
 6388
 6389	mutex_lock(&trace_types_lock);
 6390
 6391	tr->trace_ref--;
 6392
 6393	if (iter->trace->pipe_close)
 6394		iter->trace->pipe_close(iter);
 6395	close_pipe_on_cpu(tr, iter->cpu_file);
 6396	mutex_unlock(&trace_types_lock);
 6397
 6398	free_trace_iter_content(iter);
 6399	kfree(iter);
 6400
 6401	trace_array_put(tr);
 6402
 6403	return 0;
 6404}
 6405
 6406static __poll_t
 6407trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
 6408{
 6409	struct trace_array *tr = iter->tr;
 6410
 6411	/* Iterators are static; they should be filled or empty */
 6412	if (trace_buffer_iter(iter, iter->cpu_file))
 6413		return EPOLLIN | EPOLLRDNORM;
 6414
 6415	if (tr->trace_flags & TRACE_ITER_BLOCK)
 6416		/*
 6417		 * Always select as readable when in blocking mode
 6418		 */
 6419		return EPOLLIN | EPOLLRDNORM;
 6420	else
 6421		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
 6422					     filp, poll_table, iter->tr->buffer_percent);
 6423}
 6424
 6425static __poll_t
 6426tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 6427{
 6428	struct trace_iterator *iter = filp->private_data;
 6429
 6430	return trace_poll(iter, filp, poll_table);
 6431}
 6432
 6433/* Must be called with iter->mutex held. */
 6434static int tracing_wait_pipe(struct file *filp)
 6435{
 6436	struct trace_iterator *iter = filp->private_data;
 6437	int ret;
 6438
 6439	while (trace_empty(iter)) {
 6440
 6441		if ((filp->f_flags & O_NONBLOCK)) {
 6442			return -EAGAIN;
 6443		}
 6444
 6445		/*
 6446		 * We block until we read something and tracing is disabled.
 6447		 * We still block if tracing is disabled, but we have never
 6448		 * read anything. This allows a user to cat this file, and
 6449		 * then enable tracing. But after we have read something,
 6450		 * we give an EOF when tracing is again disabled.
 6451		 *
 6452		 * iter->pos will be 0 if we haven't read anything.
 6453		 */
 6454		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
 6455			break;
 6456
 6457		mutex_unlock(&iter->mutex);
 6458
 6459		ret = wait_on_pipe(iter, 0);
 6460
 6461		mutex_lock(&iter->mutex);
 6462
 6463		if (ret)
 6464			return ret;
 6465	}
 6466
 6467	return 1;
 6468}
 6469
 6470/*
 6471 * Consumer reader.
 6472 */
 6473static ssize_t
 6474tracing_read_pipe(struct file *filp, char __user *ubuf,
 6475		  size_t cnt, loff_t *ppos)
 6476{
 6477	struct trace_iterator *iter = filp->private_data;
 6478	ssize_t sret;
 6479
 6480	/*
 6481	 * Avoid more than one consumer on a single file descriptor.
 6482	 * This is just a matter of trace coherency; the ring buffer itself
 6483	 * is protected.
 6484	 */
 6485	guard(mutex)(&iter->mutex);
 6486
 6487	/* return any leftover data */
 6488	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
 6489	if (sret != -EBUSY)
 6490		return sret;
 6491
 6492	trace_seq_init(&iter->seq);
 6493
 6494	if (iter->trace->read) {
 6495		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
 6496		if (sret)
 6497			return sret;
 6498	}
 6499
 6500waitagain:
 6501	sret = tracing_wait_pipe(filp);
 6502	if (sret <= 0)
 6503		return sret;
 6504
 6505	/* stop when tracing is finished */
 6506	if (trace_empty(iter))
 6507		return 0;
 6508
 6509	if (cnt >= TRACE_SEQ_BUFFER_SIZE)
 6510		cnt = TRACE_SEQ_BUFFER_SIZE - 1;
 6511
 6512	/* reset all but tr, trace, and overruns */
 6513	trace_iterator_reset(iter);
 6514	cpumask_clear(iter->started);
 6515	trace_seq_init(&iter->seq);
 6516
 6517	trace_event_read_lock();
 6518	trace_access_lock(iter->cpu_file);
 6519	while (trace_find_next_entry_inc(iter) != NULL) {
 6520		enum print_line_t ret;
 6521		int save_len = iter->seq.seq.len;
 6522
 6523		ret = print_trace_line(iter);
 6524		if (ret == TRACE_TYPE_PARTIAL_LINE) {
 6525			/*
 6526			 * If one print_trace_line() fills the entire trace_seq in one shot,
 6527			 * trace_seq_to_user() will return -EBUSY because save_len == 0.
 6528			 * In this case, we need to consume it; otherwise, the loop will peek
 6529			 * at this event next time, resulting in an infinite loop.
 6530			 */
 6531			if (save_len == 0) {
 6532				iter->seq.full = 0;
 6533				trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
 6534				trace_consume(iter);
 6535				break;
 6536			}
 6537
 6538			/* In other cases, don't print partial lines */
 6539			iter->seq.seq.len = save_len;
 6540			break;
 6541		}
 6542		if (ret != TRACE_TYPE_NO_CONSUME)
 6543			trace_consume(iter);
 6544
 6545		if (trace_seq_used(&iter->seq) >= cnt)
 6546			break;
 6547
 6548		/*
 6549		 * Setting the full flag means we reached the trace_seq buffer
 6550		 * size and should have left via the partial-output condition above.
 6551		 * One of the trace_seq_* functions is not being used properly.
 6552		 */
 6553		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
 6554			  iter->ent->type);
 6555	}
 6556	trace_access_unlock(iter->cpu_file);
 6557	trace_event_read_unlock();
 6558
 6559	/* Now copy what we have to the user */
 6560	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
 6561	if (iter->seq.readpos >= trace_seq_used(&iter->seq))
 6562		trace_seq_init(&iter->seq);
 6563
 6564	/*
 6565	 * If there was nothing to send to user, in spite of consuming trace
 6566	 * entries, go back to wait for more entries.
 6567	 */
 6568	if (sret == -EBUSY)
 6569		goto waitagain;
 6570
 6571	return sret;
 6572}
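
/*
 * Illustrative usage (not from the original source): this is the consuming
 * read behind "trace_pipe", e.g.
 *
 *   cat /sys/kernel/tracing/trace_pipe
 *
 * blocks until events are available and removes them from the ring buffer
 * as they are read, unlike the non-consuming "trace" file. The path assumes
 * the default tracefs mount point.
 */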
 6573
 6574static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 6575				     unsigned int idx)
 6576{
 6577	__free_page(spd->pages[idx]);
 6578}
 6579
 6580static size_t
 6581tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 6582{
 6583	size_t count;
 6584	int save_len;
 6585	int ret;
 6586
 6587	/* Seq buffer is page-sized, exactly what we need. */
 6588	for (;;) {
 6589		save_len = iter->seq.seq.len;
 6590		ret = print_trace_line(iter);
 6591
 6592		if (trace_seq_has_overflowed(&iter->seq)) {
 6593			iter->seq.seq.len = save_len;
 6594			break;
 6595		}
 6596
 6597		/*
 6598		 * This should not be hit, because it should only
 6599		 * be set if the iter->seq overflowed. But check it
 6600		 * anyway to be safe.
 6601		 */
 6602		if (ret == TRACE_TYPE_PARTIAL_LINE) {
 6603			iter->seq.seq.len = save_len;
 6604			break;
 6605		}
 6606
 6607		count = trace_seq_used(&iter->seq) - save_len;
 6608		if (rem < count) {
 6609			rem = 0;
 6610			iter->seq.seq.len = save_len;
 6611			break;
 6612		}
 6613
 6614		if (ret != TRACE_TYPE_NO_CONSUME)
 6615			trace_consume(iter);
 6616		rem -= count;
 6617		if (!trace_find_next_entry_inc(iter))	{
 6618			rem = 0;
 6619			iter->ent = NULL;
 6620			break;
 6621		}
 6622	}
 6623
 6624	return rem;
 6625}
 6626
 6627static ssize_t tracing_splice_read_pipe(struct file *filp,
 6628					loff_t *ppos,
 6629					struct pipe_inode_info *pipe,
 6630					size_t len,
 6631					unsigned int flags)
 6632{
 6633	struct page *pages_def[PIPE_DEF_BUFFERS];
 6634	struct partial_page partial_def[PIPE_DEF_BUFFERS];
 6635	struct trace_iterator *iter = filp->private_data;
 6636	struct splice_pipe_desc spd = {
 6637		.pages		= pages_def,
 6638		.partial	= partial_def,
 6639		.nr_pages	= 0, /* This gets updated below. */
 6640		.nr_pages_max	= PIPE_DEF_BUFFERS,
 6641		.ops		= &default_pipe_buf_ops,
 6642		.spd_release	= tracing_spd_release_pipe,
 6643	};
 6644	ssize_t ret;
 6645	size_t rem;
 6646	unsigned int i;
 6647
 6648	if (splice_grow_spd(pipe, &spd))
 6649		return -ENOMEM;
 6650
 6651	mutex_lock(&iter->mutex);
 6652
 6653	if (iter->trace->splice_read) {
 6654		ret = iter->trace->splice_read(iter, filp,
 6655					       ppos, pipe, len, flags);
 6656		if (ret)
 6657			goto out_err;
 6658	}
 6659
 6660	ret = tracing_wait_pipe(filp);
 6661	if (ret <= 0)
 6662		goto out_err;
 6663
 6664	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
 6665		ret = -EFAULT;
 6666		goto out_err;
 6667	}
 6668
 6669	trace_event_read_lock();
 6670	trace_access_lock(iter->cpu_file);
 6671
 6672	/* Fill as many pages as possible. */
 6673	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
 6674		spd.pages[i] = alloc_page(GFP_KERNEL);
 6675		if (!spd.pages[i])
 6676			break;
 6677
 6678		rem = tracing_fill_pipe_page(rem, iter);
 6679
 6680		/* Copy the data into the page, so we can start over. */
 6681		ret = trace_seq_to_buffer(&iter->seq,
 6682					  page_address(spd.pages[i]),
 6683					  trace_seq_used(&iter->seq));
 6684		if (ret < 0) {
 6685			__free_page(spd.pages[i]);
 6686			break;
 6687		}
 6688		spd.partial[i].offset = 0;
 6689		spd.partial[i].len = trace_seq_used(&iter->seq);
 6690
 6691		trace_seq_init(&iter->seq);
 6692	}
 6693
 6694	trace_access_unlock(iter->cpu_file);
 6695	trace_event_read_unlock();
 6696	mutex_unlock(&iter->mutex);
 6697
 6698	spd.nr_pages = i;
 6699
 6700	if (i)
 6701		ret = splice_to_pipe(pipe, &spd);
 6702	else
 6703		ret = 0;
 6704out:
 6705	splice_shrink_spd(&spd);
 6706	return ret;
 6707
 6708out_err:
 6709	mutex_unlock(&iter->mutex);
 6710	goto out;
 6711}
 6712
 6713static ssize_t
 6714tracing_entries_read(struct file *filp, char __user *ubuf,
 6715		     size_t cnt, loff_t *ppos)
 6716{
 6717	struct inode *inode = file_inode(filp);
 6718	struct trace_array *tr = inode->i_private;
 6719	int cpu = tracing_get_cpu(inode);
 6720	char buf[64];
 6721	int r = 0;
 6722	ssize_t ret;
 6723
 6724	mutex_lock(&trace_types_lock);
 6725
 6726	if (cpu == RING_BUFFER_ALL_CPUS) {
 6727		int cpu, buf_size_same;
 6728		unsigned long size;
 6729
 6730		size = 0;
 6731		buf_size_same = 1;
 6732		/* check if all cpu sizes are same */
 6733		for_each_tracing_cpu(cpu) {
 6734			/* fill in the size from first enabled cpu */
 6735			if (size == 0)
 6736				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
 6737			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
 6738				buf_size_same = 0;
 6739				break;
 6740			}
 6741		}
 6742
 6743		if (buf_size_same) {
 6744			if (!tr->ring_buffer_expanded)
 6745				r = sprintf(buf, "%lu (expanded: %lu)\n",
 6746					    size >> 10,
 6747					    trace_buf_size >> 10);
 6748			else
 6749				r = sprintf(buf, "%lu\n", size >> 10);
 6750		} else
 6751			r = sprintf(buf, "X\n");
 6752	} else
 6753		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
 6754
 6755	mutex_unlock(&trace_types_lock);
 6756
 6757	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 6758	return ret;
 6759}
 6760
 6761static ssize_t
 6762tracing_entries_write(struct file *filp, const char __user *ubuf,
 6763		      size_t cnt, loff_t *ppos)
 6764{
 6765	struct inode *inode = file_inode(filp);
 6766	struct trace_array *tr = inode->i_private;
 6767	unsigned long val;
 6768	int ret;
 6769
 6770	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 6771	if (ret)
 6772		return ret;
 6773
 6774	/* must have at least 1 entry */
 6775	if (!val)
 6776		return -EINVAL;
 6777
 6778	/* value is in KB */
 6779	val <<= 10;
 6780	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
 6781	if (ret < 0)
 6782		return ret;
 6783
 6784	*ppos += cnt;
 6785
 6786	return cnt;
 6787}
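
/*
 * Illustrative usage (not from the original source): these handlers back the
 * "buffer_size_kb" files, and the value is interpreted in KB (val <<= 10
 * above), e.g.
 *
 *   echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * resizes each per-CPU buffer to 4 MB, while the per_cpu/cpuN variant
 * resizes a single CPU's buffer. The path assumes the default tracefs
 * mount point.
 */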
 6788
 6789static ssize_t
 6790tracing_total_entries_read(struct file *filp, char __user *ubuf,
 6791				size_t cnt, loff_t *ppos)
 6792{
 6793	struct trace_array *tr = filp->private_data;
 6794	char buf[64];
 6795	int r, cpu;
 6796	unsigned long size = 0, expanded_size = 0;
 6797
 6798	mutex_lock(&trace_types_lock);
 6799	for_each_tracing_cpu(cpu) {
 6800		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
 6801		if (!tr->ring_buffer_expanded)
 6802			expanded_size += trace_buf_size >> 10;
 6803	}
 6804	if (tr->ring_buffer_expanded)
 6805		r = sprintf(buf, "%lu\n", size);
 6806	else
 6807		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
 6808	mutex_unlock(&trace_types_lock);
 6809
 6810	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 6811}
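
/*
 * Illustrative note (not from the original source): this read handler backs
 * "buffer_total_size_kb", which reports the sum of all per-CPU buffer sizes,
 * with the would-be expanded size shown while the buffer is still at its
 * boot-time minimum.
 */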
 6812
 6813static ssize_t
 6814tracing_last_boot_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 6815{
 6816	struct trace_array *tr = filp->private_data;
 6817	struct seq_buf seq;
 6818	char buf[64];
 6819
 6820	seq_buf_init(&seq, buf, 64);
 6821
 6822	seq_buf_printf(&seq, "text delta:\t%ld\n", tr->text_delta);
 6823	seq_buf_printf(&seq, "data delta:\t%ld\n", tr->data_delta);
 6824
 6825	return simple_read_from_buffer(ubuf, cnt, ppos, buf, seq_buf_used(&seq));
 6826}
 6827
 6828static int tracing_buffer_meta_open(struct inode *inode, struct file *filp)
 6829{
 6830	struct trace_array *tr = inode->i_private;
 6831	int cpu = tracing_get_cpu(inode);
 6832	int ret;
 6833
 6834	ret = tracing_check_open_get_tr(tr);
 6835	if (ret)
 6836		return ret;
 6837
 6838	ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu);
 6839	if (ret < 0)
 6840		__trace_array_put(tr);
 6841	return ret;
 6842}
 6843
 6844static ssize_t
 6845tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
 6846			  size_t cnt, loff_t *ppos)
 6847{
 6848	/*
 6849	 * There is no need to read what the user has written; this function
 6850	 * exists just to make sure that there is no error when "echo" is used.
 6851	 */
 6852
 6853	*ppos += cnt;
 6854
 6855	return cnt;
 6856}
 6857
 6858static int
 6859tracing_free_buffer_release(struct inode *inode, struct file *filp)
 6860{
 6861	struct trace_array *tr = inode->i_private;
 6862
 6863	/* disable tracing ? */
 6864	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
 6865		tracer_tracing_off(tr);
 6866	/* resize the ring buffer to 0 */
 6867	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
 6868
 6869	trace_array_put(tr);
 6870
 6871	return 0;
 6872}
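
/*
 * Illustrative usage (not from the original source): the "free_buffer" file
 * accepts any write and shrinks the ring buffer to zero when the file is
 * closed, e.g.
 *
 *   echo > /sys/kernel/tracing/free_buffer
 *
 * If the stop-on-free trace option is set, tracing is turned off first.
 * The path assumes the default tracefs mount point.
 */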
 6873
 6874#define TRACE_MARKER_MAX_SIZE		4096
 6875
 6876static ssize_t
 6877tracing_mark_write(struct file *filp, const char __user *ubuf,
 6878					size_t cnt, loff_t *fpos)
 6879{
 6880	struct trace_array *tr = filp->private_data;
 6881	struct ring_buffer_event *event;
 6882	enum event_trigger_type tt = ETT_NONE;
 6883	struct trace_buffer *buffer;
 6884	struct print_entry *entry;
 6885	int meta_size;
 6886	ssize_t written;
 6887	size_t size;
 6888	int len;
 6889
 6890/* Used in tracing_mark_raw_write() as well */
 6891#define FAULTED_STR "<faulted>"
 6892#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
 6893
 6894	if (tracing_disabled)
 6895		return -EINVAL;
 6896
 6897	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
 6898		return -EINVAL;
 6899
 6900	if ((ssize_t)cnt < 0)
 6901		return -EINVAL;
 6902
 6903	if (cnt > TRACE_MARKER_MAX_SIZE)
 6904		cnt = TRACE_MARKER_MAX_SIZE;
 6905
 6906	meta_size = sizeof(*entry) + 2;  /* add '\0' and possible '\n' */
 6907 again:
 6908	size = cnt + meta_size;
 6909
 6910	/* If less than "<faulted>", then make sure we can still add that */
 6911	if (cnt < FAULTED_SIZE)
 6912		size += FAULTED_SIZE - cnt;
 6913
 6914	buffer = tr->array_buffer.buffer;
 6915	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 6916					    tracing_gen_ctx());
 6917	if (unlikely(!event)) {
 6918		/*
 6919		 * If the size was greater than what was allowed, then
 6920		 * make it smaller and try again.
 6921		 */
 6922		if (size > ring_buffer_max_event_size(buffer)) {
 6923			/* A cnt smaller than FAULTED_SIZE should never make size bigger than max */
 6924			if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
 6925				return -EBADF;
 6926			cnt = ring_buffer_max_event_size(buffer) - meta_size;
 6927			/* The above should only happen once */
 6928			if (WARN_ON_ONCE(cnt + meta_size == size))
 6929				return -EBADF;
 6930			goto again;
 6931		}
 6932
 6933		/* Ring buffer disabled, return as if not open for write */
 6934		return -EBADF;
 6935	}
 6936
 6937	entry = ring_buffer_event_data(event);
 6938	entry->ip = _THIS_IP_;
 6939
 6940	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
 6941	if (len) {
 6942		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
 6943		cnt = FAULTED_SIZE;
 6944		written = -EFAULT;
 6945	} else
 6946		written = cnt;
 6947
 6948	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
 6949		/* do not add \n before testing triggers, but add \0 */
 6950		entry->buf[cnt] = '\0';
 6951		tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
 6952	}
 6953
 6954	if (entry->buf[cnt - 1] != '\n') {
 6955		entry->buf[cnt] = '\n';
 6956		entry->buf[cnt + 1] = '\0';
 6957	} else
 6958		entry->buf[cnt] = '\0';
 6959
 6960	if (static_branch_unlikely(&trace_marker_exports_enabled))
 6961		ftrace_exports(event, TRACE_EXPORT_MARKER);
 6962	__buffer_unlock_commit(buffer, event);
 6963
 6964	if (tt)
 6965		event_triggers_post_call(tr->trace_marker_file, tt);
 6966
 6967	return written;
 6968}
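
/*
 * Illustrative usage (not from the original source): this write handler backs
 * "trace_marker", so user space can annotate the trace with e.g.
 *
 *   echo "hello from user space" > /sys/kernel/tracing/trace_marker
 *
 * Writes longer than TRACE_MARKER_MAX_SIZE are truncated, and a trailing
 * newline is added if one is missing. The path assumes the default tracefs
 * mount point.
 */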
 6969
 6970static ssize_t
 6971tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
 6972					size_t cnt, loff_t *fpos)
 6973{
 6974	struct trace_array *tr = filp->private_data;
 6975	struct ring_buffer_event *event;
 6976	struct trace_buffer *buffer;
 6977	struct raw_data_entry *entry;
 6978	ssize_t written;
 6979	int size;
 6980	int len;
 6981
 6982#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
 6983
 6984	if (tracing_disabled)
 6985		return -EINVAL;
 6986
 6987	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
 6988		return -EINVAL;
 6989
 6990	/* The marker must at least have a tag id */
 6991	if (cnt < sizeof(unsigned int))
 6992		return -EINVAL;
 6993
 6994	size = sizeof(*entry) + cnt;
 6995	if (cnt < FAULT_SIZE_ID)
 6996		size += FAULT_SIZE_ID - cnt;
 6997
 6998	buffer = tr->array_buffer.buffer;
 6999
 7000	if (size > ring_buffer_max_event_size(buffer))
 7001		return -EINVAL;
 7002
 7003	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
 7004					    tracing_gen_ctx());
 7005	if (!event)
 7006		/* Ring buffer disabled, return as if not open for write */
 7007		return -EBADF;
 7008
 7009	entry = ring_buffer_event_data(event);
 7010
 7011	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
 7012	if (len) {
 7013		entry->id = -1;
 7014		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
 7015		written = -EFAULT;
 7016	} else
 7017		written = cnt;
 7018
 7019	__buffer_unlock_commit(buffer, event);
 7020
 7021	return written;
 7022}
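
/*
 * Illustrative note (not from the original source): "trace_marker_raw" takes
 * binary data whose first sizeof(unsigned int) bytes are a tag id, followed
 * by the payload. It is intended for tools that post-process the raw trace
 * data rather than for echo from a shell.
 */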
 7023
 7024static int tracing_clock_show(struct seq_file *m, void *v)
 7025{
 7026	struct trace_array *tr = m->private;
 7027	int i;
 7028
 7029	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
 7030		seq_printf(m,
 7031			"%s%s%s%s", i ? " " : "",
 7032			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
 7033			i == tr->clock_id ? "]" : "");
 7034	seq_putc(m, '\n');
 7035
 7036	return 0;
 7037}
 7038
 7039int tracing_set_clock(struct trace_array *tr, const char *clockstr)
 7040{
 7041	int i;
 7042
 7043	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
 7044		if (strcmp(trace_clocks[i].name, clockstr) == 0)
 7045			break;
 7046	}
 7047	if (i == ARRAY_SIZE(trace_clocks))
 7048		return -EINVAL;
 7049
 7050	mutex_lock(&trace_types_lock);
 7051
 7052	tr->clock_id = i;
 7053
 7054	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
 7055
 7056	/*
 7057	 * New clock may not be consistent with the previous clock.
 7058	 * Reset the buffer so that it doesn't have incomparable timestamps.
 7059	 */
 7060	tracing_reset_online_cpus(&tr->array_buffer);
 7061
 7062#ifdef CONFIG_TRACER_MAX_TRACE
 7063	if (tr->max_buffer.buffer)
 7064		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
 7065	tracing_reset_online_cpus(&tr->max_buffer);
 7066#endif
 7067
 7068	mutex_unlock(&trace_types_lock);
 7069
 7070	return 0;
 7071}
 7072
 7073static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 7074				   size_t cnt, loff_t *fpos)
 7075{
 7076	struct seq_file *m = filp->private_data;
 7077	struct trace_array *tr = m->private;
 7078	char buf[64];
 7079	const char *clockstr;
 7080	int ret;
 7081
 7082	if (cnt >= sizeof(buf))
 7083		return -EINVAL;
 7084
 7085	if (copy_from_user(buf, ubuf, cnt))
 7086		return -EFAULT;
 7087
 7088	buf[cnt] = 0;
 7089
 7090	clockstr = strstrip(buf);
 7091
 7092	ret = tracing_set_clock(tr, clockstr);
 7093	if (ret)
 7094		return ret;
 7095
 7096	*fpos += cnt;
 7097
 7098	return cnt;
 7099}
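
/*
 * Illustrative usage (not from the original source): these handlers back the
 * "trace_clock" file. Reading lists the available clocks with the current one
 * in brackets, and writing selects a clock, e.g.
 *
 *   echo global > /sys/kernel/tracing/trace_clock
 *
 * Note that tracing_set_clock() resets the buffers, since timestamps from
 * different clocks are not comparable. The path assumes the default tracefs
 * mount point.
 */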
 7100
 7101static int tracing_clock_open(struct inode *inode, struct file *file)
 7102{
 7103	struct trace_array *tr = inode->i_private;
 7104	int ret;
 7105
 7106	ret = tracing_check_open_get_tr(tr);
 7107	if (ret)
 7108		return ret;
 7109
 7110	ret = single_open(file, tracing_clock_show, inode->i_private);
 7111	if (ret < 0)
 7112		trace_array_put(tr);
 7113
 7114	return ret;
 7115}
 7116
 7117static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
 7118{
 7119	struct trace_array *tr = m->private;
 7120
 7121	mutex_lock(&trace_types_lock);
 7122
 7123	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
 7124		seq_puts(m, "delta [absolute]\n");
 7125	else
 7126		seq_puts(m, "[delta] absolute\n");
 7127
 7128	mutex_unlock(&trace_types_lock);
 7129
 7130	return 0;
 7131}
 7132
 7133static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
 7134{
 7135	struct trace_array *tr = inode->i_private;
 7136	int ret;
 7137
 7138	ret = tracing_check_open_get_tr(tr);
 7139	if (ret)
 7140		return ret;
 7141
 7142	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
 7143	if (ret < 0)
 7144		trace_array_put(tr);
 7145
 7146	return ret;
 7147}
 7148
 7149u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
 7150{
 7151	if (rbe == this_cpu_read(trace_buffered_event))
 7152		return ring_buffer_time_stamp(buffer);
 7153
 7154	return ring_buffer_event_time_stamp(buffer, rbe);
 7155}
 7156
 7157/*
 7158 * Set or disable using the per CPU trace_buffered_event when possible.
 7159 */
 7160int tracing_set_filter_buffering(struct trace_array *tr, bool set)
 7161{
 7162	guard(mutex)(&trace_types_lock);
 7163
 7164	if (set && tr->no_filter_buffering_ref++)
 7165		return 0;
 7166
 7167	if (!set) {
 7168		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
 7169			return -EINVAL;
 7170
 7171		--tr->no_filter_buffering_ref;
 7172	}
 7173
 7174	return 0;
 7175}
 7176
 7177struct ftrace_buffer_info {
 7178	struct trace_iterator	iter;
 7179	void			*spare;
 7180	unsigned int		spare_cpu;
 7181	unsigned int		spare_size;
 7182	unsigned int		read;
 7183};
 7184
 7185#ifdef CONFIG_TRACER_SNAPSHOT
 7186static int tracing_snapshot_open(struct inode *inode, struct file *file)
 7187{
 7188	struct trace_array *tr = inode->i_private;
 7189	struct trace_iterator *iter;
 7190	struct seq_file *m;
 7191	int ret;
 7192
 7193	ret = tracing_check_open_get_tr(tr);
 7194	if (ret)
 7195		return ret;
 7196
 7197	if (file->f_mode & FMODE_READ) {
 7198		iter = __tracing_open(inode, file, true);
 7199		if (IS_ERR(iter))
 7200			ret = PTR_ERR(iter);
 7201	} else {
 7202		/* Writes still need the seq_file to hold the private data */
 7203		ret = -ENOMEM;
 7204		m = kzalloc(sizeof(*m), GFP_KERNEL);
 7205		if (!m)
 7206			goto out;
 7207		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 7208		if (!iter) {
 7209			kfree(m);
 7210			goto out;
 7211		}
 7212		ret = 0;
 7213
 7214		iter->tr = tr;
 7215		iter->array_buffer = &tr->max_buffer;
 7216		iter->cpu_file = tracing_get_cpu(inode);
 7217		m->private = iter;
 7218		file->private_data = m;
 7219	}
 7220out:
 7221	if (ret < 0)
 7222		trace_array_put(tr);
 7223
 7224	return ret;
 7225}
 7226
 7227static void tracing_swap_cpu_buffer(void *tr)
 7228{
 7229	update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
 7230}
 7231
 7232static ssize_t
 7233tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 7234		       loff_t *ppos)
 7235{
 7236	struct seq_file *m = filp->private_data;
 7237	struct trace_iterator *iter = m->private;
 7238	struct trace_array *tr = iter->tr;
 7239	unsigned long val;
 7240	int ret;
 7241
 7242	ret = tracing_update_buffers(tr);
 7243	if (ret < 0)
 7244		return ret;
 7245
 7246	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 7247	if (ret)
 7248		return ret;
 7249
 7250	guard(mutex)(&trace_types_lock);
 7251
 7252	if (tr->current_trace->use_max_tr)
 7253		return -EBUSY;
 7254
 7255	local_irq_disable();
 7256	arch_spin_lock(&tr->max_lock);
 7257	if (tr->cond_snapshot)
 7258		ret = -EBUSY;
 7259	arch_spin_unlock(&tr->max_lock);
 7260	local_irq_enable();
 7261	if (ret)
 7262		return ret;
 7263
 7264	switch (val) {
 7265	case 0:
 7266		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
 7267			return -EINVAL;
 7268		if (tr->allocated_snapshot)
 7269			free_snapshot(tr);
 7270		break;
 7271	case 1:
 7272/* Only allow per-cpu swap if the ring buffer supports it */
 7273#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
 7274		if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
 7275			return -EINVAL;
 7276#endif
 7277		if (tr->allocated_snapshot)
 7278			ret = resize_buffer_duplicate_size(&tr->max_buffer,
 7279					&tr->array_buffer, iter->cpu_file);
 7280
 7281		ret = tracing_arm_snapshot_locked(tr);
 7282		if (ret)
 7283			return ret;
 7284
 7285		/* Now, we're going to swap */
 7286		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 7287			local_irq_disable();
 7288			update_max_tr(tr, current, smp_processor_id(), NULL);
 7289			local_irq_enable();
 7290		} else {
 7291			smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
 7292						 (void *)tr, 1);
 7293		}
 7294		tracing_disarm_snapshot(tr);
 7295		break;
 7296	default:
 7297		if (tr->allocated_snapshot) {
 7298			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
 7299				tracing_reset_online_cpus(&tr->max_buffer);
 7300			else
 7301				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
 7302		}
 7303		break;
 7304	}
 7305
 7306	if (ret >= 0) {
 7307		*ppos += cnt;
 7308		ret = cnt;
 7309	}
 7310
 7311	return ret;
 7312}
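
/*
 * Illustrative summary (not from the original source) of the values handled
 * above for the "snapshot" file:
 *
 *   echo 0 > snapshot   - free the snapshot buffer (all-CPUs file only)
 *   echo 1 > snapshot   - allocate if needed and swap in a new snapshot
 *   echo 2 > snapshot   - clear the snapshot contents without freeing
 *
 * Any value greater than 1 behaves like 2.
 */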
 7313
 7314static int tracing_snapshot_release(struct inode *inode, struct file *file)
 7315{
 7316	struct seq_file *m = file->private_data;
 7317	int ret;
 7318
 7319	ret = tracing_release(inode, file);
 7320
 7321	if (file->f_mode & FMODE_READ)
 7322		return ret;
 7323
 7324	/* If write only, the seq_file is just a stub */
 7325	if (m)
 7326		kfree(m->private);
 7327	kfree(m);
 7328
 7329	return 0;
 7330}
 7331
 7332static int tracing_buffers_open(struct inode *inode, struct file *filp);
 7333static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
 7334				    size_t count, loff_t *ppos);
 7335static int tracing_buffers_release(struct inode *inode, struct file *file);
 7336static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 7337		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
 7338
 7339static int snapshot_raw_open(struct inode *inode, struct file *filp)
 7340{
 7341	struct ftrace_buffer_info *info;
 7342	int ret;
 7343
 7344	/* The following checks for tracefs lockdown */
 7345	ret = tracing_buffers_open(inode, filp);
 7346	if (ret < 0)
 7347		return ret;
 7348
 7349	info = filp->private_data;
 7350
 7351	if (info->iter.trace->use_max_tr) {
 7352		tracing_buffers_release(inode, filp);
 7353		return -EBUSY;
 7354	}
 7355
 7356	info->iter.snapshot = true;
 7357	info->iter.array_buffer = &info->iter.tr->max_buffer;
 7358
 7359	return ret;
 7360}
 7361
 7362#endif /* CONFIG_TRACER_SNAPSHOT */
 7363
 7364
 7365static const struct file_operations tracing_thresh_fops = {
 7366	.open		= tracing_open_generic,
 7367	.read		= tracing_thresh_read,
 7368	.write		= tracing_thresh_write,
 7369	.llseek		= generic_file_llseek,
 7370};
 7371
 7372#ifdef CONFIG_TRACER_MAX_TRACE
 7373static const struct file_operations tracing_max_lat_fops = {
 7374	.open		= tracing_open_generic_tr,
 7375	.read		= tracing_max_lat_read,
 7376	.write		= tracing_max_lat_write,
 7377	.llseek		= generic_file_llseek,
 7378	.release	= tracing_release_generic_tr,
 7379};
 7380#endif
 7381
 7382static const struct file_operations set_tracer_fops = {
 7383	.open		= tracing_open_generic_tr,
 7384	.read		= tracing_set_trace_read,
 7385	.write		= tracing_set_trace_write,
 7386	.llseek		= generic_file_llseek,
 7387	.release	= tracing_release_generic_tr,
 7388};
 7389
 7390static const struct file_operations tracing_pipe_fops = {
 7391	.open		= tracing_open_pipe,
 7392	.poll		= tracing_poll_pipe,
 7393	.read		= tracing_read_pipe,
 7394	.splice_read	= tracing_splice_read_pipe,
 7395	.release	= tracing_release_pipe,
 7396};
 7397
 7398static const struct file_operations tracing_entries_fops = {
 7399	.open		= tracing_open_generic_tr,
 7400	.read		= tracing_entries_read,
 7401	.write		= tracing_entries_write,
 7402	.llseek		= generic_file_llseek,
 7403	.release	= tracing_release_generic_tr,
 7404};
 7405
 7406static const struct file_operations tracing_buffer_meta_fops = {
 7407	.open		= tracing_buffer_meta_open,
 7408	.read		= seq_read,
 7409	.llseek		= seq_lseek,
 7410	.release	= tracing_seq_release,
 7411};
 7412
 7413static const struct file_operations tracing_total_entries_fops = {
 7414	.open		= tracing_open_generic_tr,
 7415	.read		= tracing_total_entries_read,
 7416	.llseek		= generic_file_llseek,
 7417	.release	= tracing_release_generic_tr,
 7418};
 7419
 7420static const struct file_operations tracing_free_buffer_fops = {
 7421	.open		= tracing_open_generic_tr,
 7422	.write		= tracing_free_buffer_write,
 7423	.release	= tracing_free_buffer_release,
 7424};
 7425
 7426static const struct file_operations tracing_mark_fops = {
 7427	.open		= tracing_mark_open,
 7428	.write		= tracing_mark_write,
 7429	.release	= tracing_release_generic_tr,
 7430};
 7431
 7432static const struct file_operations tracing_mark_raw_fops = {
 7433	.open		= tracing_mark_open,
 7434	.write		= tracing_mark_raw_write,
 7435	.release	= tracing_release_generic_tr,
 7436};
 7437
 7438static const struct file_operations trace_clock_fops = {
 7439	.open		= tracing_clock_open,
 7440	.read		= seq_read,
 7441	.llseek		= seq_lseek,
 7442	.release	= tracing_single_release_tr,
 7443	.write		= tracing_clock_write,
 7444};
 7445
 7446static const struct file_operations trace_time_stamp_mode_fops = {
 7447	.open		= tracing_time_stamp_mode_open,
 7448	.read		= seq_read,
 7449	.llseek		= seq_lseek,
 7450	.release	= tracing_single_release_tr,
 7451};
 7452
 7453static const struct file_operations last_boot_fops = {
 7454	.open		= tracing_open_generic_tr,
 7455	.read		= tracing_last_boot_read,
 7456	.llseek		= generic_file_llseek,
 7457	.release	= tracing_release_generic_tr,
 7458};
 7459
 7460#ifdef CONFIG_TRACER_SNAPSHOT
 7461static const struct file_operations snapshot_fops = {
 7462	.open		= tracing_snapshot_open,
 7463	.read		= seq_read,
 7464	.write		= tracing_snapshot_write,
 7465	.llseek		= tracing_lseek,
 7466	.release	= tracing_snapshot_release,
 7467};
 7468
 7469static const struct file_operations snapshot_raw_fops = {
 7470	.open		= snapshot_raw_open,
 7471	.read		= tracing_buffers_read,
 7472	.release	= tracing_buffers_release,
 7473	.splice_read	= tracing_buffers_splice_read,
 7474};
 7475
 7476#endif /* CONFIG_TRACER_SNAPSHOT */
 7477
 7478/*
 7479 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
 7480 * @filp: The active open file structure
 7481 * @ubuf: The userspace provided buffer containing the value to write
 7482 * @cnt: The maximum number of bytes to read
 7483 * @ppos: The current "file" position
 7484 *
 7485 * This function implements the write interface for a struct trace_min_max_param.
 7486 * The filp->private_data must point to a trace_min_max_param structure that
 7487 * defines where to write the value, the min and the max acceptable values,
 7488 * and a lock to protect the write.
 7489 */
 7490static ssize_t
 7491trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
 7492{
 7493	struct trace_min_max_param *param = filp->private_data;
 7494	u64 val;
 7495	int err;
 7496
 7497	if (!param)
 7498		return -EFAULT;
 7499
 7500	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
 7501	if (err)
 7502		return err;
 7503
 7504	if (param->lock)
 7505		mutex_lock(param->lock);
 7506
 7507	if (param->min && val < *param->min)
 7508		err = -EINVAL;
 7509
 7510	if (param->max && val > *param->max)
 7511		err = -EINVAL;
 7512
 7513	if (!err)
 7514		*param->val = val;
 7515
 7516	if (param->lock)
 7517		mutex_unlock(param->lock);
 7518
 7519	if (err)
 7520		return err;
 7521
 7522	return cnt;
 7523}
 7524
 7525/*
 7526 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
 7527 * @filp: The active open file structure
 7528 * @ubuf: The userspace provided buffer to read value into
 7529 * @cnt: The maximum number of bytes to read
 7530 * @ppos: The current "file" position
 7531 *
 7532 * This function implements the read interface for a struct trace_min_max_param.
 7533 * The filp->private_data must point to a trace_min_max_param struct with valid
 7534 * data.
 7535 */
 7536static ssize_t
 7537trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 7538{
 7539	struct trace_min_max_param *param = filp->private_data;
 7540	char buf[U64_STR_SIZE];
 7541	int len;
 7542	u64 val;
 7543
 7544	if (!param)
 7545		return -EFAULT;
 7546
 7547	val = *param->val;
 7548
 7549	if (cnt > sizeof(buf))
 7550		cnt = sizeof(buf);
 7551
 7552	len = snprintf(buf, sizeof(buf), "%llu\n", val);
 7553
 7554	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
 7555}
 7556
 7557const struct file_operations trace_min_max_fops = {
 7558	.open		= tracing_open_generic,
 7559	.read		= trace_min_max_read,
 7560	.write		= trace_min_max_write,
 7561};
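/*
 * A minimal sketch of how trace_min_max_fops is typically wired up; the
 * names "my_val", "my_min", "my_max", "my_lock" and "my_knob" are purely
 * illustrative:
 *
 *	static u64 my_val, my_min = 1, my_max = 100;
 *	static DEFINE_MUTEX(my_lock);
 *
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 *
 * A NULL ->min or ->max disables that bound, and a NULL ->lock skips the
 * locking around the update.
 */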
 7562
 7563#define TRACING_LOG_ERRS_MAX	8
 7564#define TRACING_LOG_LOC_MAX	128
 7565
 7566#define CMD_PREFIX "  Command: "
 7567
 7568struct err_info {
 7569	const char	**errs;	/* ptr to loc-specific array of err strings */
 7570	u8		type;	/* index into errs -> specific err string */
 7571	u16		pos;	/* caret position */
 7572	u64		ts;
 7573};
 7574
 7575struct tracing_log_err {
 7576	struct list_head	list;
 7577	struct err_info		info;
 7578	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
 7579	char			*cmd;                     /* what caused err */
 7580};
 7581
 7582static DEFINE_MUTEX(tracing_err_log_lock);
 7583
 7584static struct tracing_log_err *alloc_tracing_log_err(int len)
 7585{
 7586	struct tracing_log_err *err;
 7587
 7588	err = kzalloc(sizeof(*err), GFP_KERNEL);
 7589	if (!err)
 7590		return ERR_PTR(-ENOMEM);
 7591
 7592	err->cmd = kzalloc(len, GFP_KERNEL);
 7593	if (!err->cmd) {
 7594		kfree(err);
 7595		return ERR_PTR(-ENOMEM);
 7596	}
 7597
 7598	return err;
 7599}
 7600
 7601static void free_tracing_log_err(struct tracing_log_err *err)
 7602{
 7603	kfree(err->cmd);
 7604	kfree(err);
 7605}
 7606
 7607static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
 7608						   int len)
 7609{
 7610	struct tracing_log_err *err;
 7611	char *cmd;
 7612
 7613	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
 7614		err = alloc_tracing_log_err(len);
 7615		if (PTR_ERR(err) != -ENOMEM)
 7616			tr->n_err_log_entries++;
 7617
 7618		return err;
 7619	}
 7620	cmd = kzalloc(len, GFP_KERNEL);
 7621	if (!cmd)
 7622		return ERR_PTR(-ENOMEM);
 7623	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
 7624	kfree(err->cmd);
 7625	err->cmd = cmd;
 7626	list_del(&err->list);
 7627
 7628	return err;
 7629}
 7630
 7631/**
 7632 * err_pos - find the position of a string within a command for error careting
 7633 * @cmd: The tracing command that caused the error
 7634 * @str: The string to position the caret at within @cmd
 7635 *
 7636 * Finds the position of the first occurrence of @str within @cmd.  The
 7637 * return value can be passed to tracing_log_err() for caret placement
 7638 * within @cmd.
 7639 *
 7640 * Returns the index within @cmd of the first occurrence of @str or 0
 7641 * if @str was not found.
 7642 */
 7643unsigned int err_pos(char *cmd, const char *str)
 7644{
 7645	char *found;
 7646
 7647	if (WARN_ON(!strlen(cmd)))
 7648		return 0;
 7649
 7650	found = strstr(cmd, str);
 7651	if (found)
 7652		return found - cmd;
 7653
 7654	return 0;
 7655}
 7656
 7657/**
 7658 * tracing_log_err - write an error to the tracing error log
 7659 * @tr: The associated trace array for the error (NULL for top level array)
 7660 * @loc: A string describing where the error occurred
 7661 * @cmd: The tracing command that caused the error
 7662 * @errs: The array of loc-specific static error strings
 7663 * @type: The index into errs[], which produces the specific static err string
 7664 * @pos: The position the caret should be placed in the cmd
 7665 *
 7666 * Writes an error into tracing/error_log of the form:
 7667 *
 7668 * <loc>: error: <text>
 7669 *   Command: <cmd>
 7670 *              ^
 7671 *
 7672 * tracing/error_log is a small log file containing the last
 7673 * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
 7674 * unless there has been a tracing error, and the error log can be
 7675 * cleared and have its memory freed by writing the empty string in
 7676 * truncation mode to it i.e. echo > tracing/error_log.
 7677 *
 7678 * NOTE: the @errs array along with the @type param are used to
 7679 * produce a static error string - this string is not copied and saved
 7680 * when the error is logged - only a pointer to it is saved.  See
 7681 * existing callers for examples of how static strings are typically
 7682 * defined for use with tracing_log_err().
 7683 */
 7684void tracing_log_err(struct trace_array *tr,
 7685		     const char *loc, const char *cmd,
 7686		     const char **errs, u8 type, u16 pos)
 7687{
 7688	struct tracing_log_err *err;
 7689	int len = 0;
 7690
 7691	if (!tr)
 7692		tr = &global_trace;
 7693
 7694	len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
 7695
 7696	guard(mutex)(&tracing_err_log_lock);
 7697
 7698	err = get_tracing_log_err(tr, len);
 7699	if (PTR_ERR(err) == -ENOMEM)
 7700		return;
 7701
 7702	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
 7703	snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
 7704
 7705	err->info.errs = errs;
 7706	err->info.type = type;
 7707	err->info.pos = pos;
 7708	err->info.ts = local_clock();
 7709
 7710	list_add_tail(&err->list, &tr->err_log);
 7711}
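/*
 * A hedged example of a typical call; the error string array, the index
 * and the command contents below are illustrative only:
 *
 *	static const char *my_errs[] = { "field not found" };
 *
 *	tracing_log_err(tr, "hist:sched:sched_switch", cmd, my_errs,
 *			0, err_pos(cmd, "bad_field"));
 *
 * would appear in tracing/error_log roughly as:
 *
 *	[  123.456789] hist:sched:sched_switch: error: field not found
 *	  Command: <contents of cmd>
 *	                   ^
 */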
 7712
 7713static void clear_tracing_err_log(struct trace_array *tr)
 7714{
 7715	struct tracing_log_err *err, *next;
 7716
 7717	mutex_lock(&tracing_err_log_lock);
 7718	list_for_each_entry_safe(err, next, &tr->err_log, list) {
 7719		list_del(&err->list);
 7720		free_tracing_log_err(err);
 7721	}
 7722
 7723	tr->n_err_log_entries = 0;
 7724	mutex_unlock(&tracing_err_log_lock);
 7725}
 7726
 7727static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
 7728{
 7729	struct trace_array *tr = m->private;
 7730
 7731	mutex_lock(&tracing_err_log_lock);
 7732
 7733	return seq_list_start(&tr->err_log, *pos);
 7734}
 7735
 7736static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
 7737{
 7738	struct trace_array *tr = m->private;
 7739
 7740	return seq_list_next(v, &tr->err_log, pos);
 7741}
 7742
 7743static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
 7744{
 7745	mutex_unlock(&tracing_err_log_lock);
 7746}
 7747
 7748static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
 7749{
 7750	u16 i;
 7751
 7752	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
 7753		seq_putc(m, ' ');
 7754	for (i = 0; i < pos; i++)
 7755		seq_putc(m, ' ');
 7756	seq_puts(m, "^\n");
 7757}
 7758
 7759static int tracing_err_log_seq_show(struct seq_file *m, void *v)
 7760{
 7761	struct tracing_log_err *err = v;
 7762
 7763	if (err) {
 7764		const char *err_text = err->info.errs[err->info.type];
 7765		u64 sec = err->info.ts;
 7766		u32 nsec;
 7767
 7768		nsec = do_div(sec, NSEC_PER_SEC);
 7769		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
 7770			   err->loc, err_text);
 7771		seq_printf(m, "%s", err->cmd);
 7772		tracing_err_log_show_pos(m, err->info.pos);
 7773	}
 7774
 7775	return 0;
 7776}
 7777
 7778static const struct seq_operations tracing_err_log_seq_ops = {
 7779	.start  = tracing_err_log_seq_start,
 7780	.next   = tracing_err_log_seq_next,
 7781	.stop   = tracing_err_log_seq_stop,
 7782	.show   = tracing_err_log_seq_show
 7783};
 7784
 7785static int tracing_err_log_open(struct inode *inode, struct file *file)
 7786{
 7787	struct trace_array *tr = inode->i_private;
 7788	int ret = 0;
 7789
 7790	ret = tracing_check_open_get_tr(tr);
 7791	if (ret)
 7792		return ret;
 7793
 7794	/* If this file was opened for write, then erase contents */
 7795	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
 7796		clear_tracing_err_log(tr);
 7797
 7798	if (file->f_mode & FMODE_READ) {
 7799		ret = seq_open(file, &tracing_err_log_seq_ops);
 7800		if (!ret) {
 7801			struct seq_file *m = file->private_data;
 7802			m->private = tr;
 7803		} else {
 7804			trace_array_put(tr);
 7805		}
 7806	}
 7807	return ret;
 7808}
 7809
 7810static ssize_t tracing_err_log_write(struct file *file,
 7811				     const char __user *buffer,
 7812				     size_t count, loff_t *ppos)
 7813{
 7814	return count;
 7815}
 7816
 7817static int tracing_err_log_release(struct inode *inode, struct file *file)
 7818{
 7819	struct trace_array *tr = inode->i_private;
 7820
 7821	trace_array_put(tr);
 7822
 7823	if (file->f_mode & FMODE_READ)
 7824		seq_release(inode, file);
 7825
 7826	return 0;
 7827}
 7828
 7829static const struct file_operations tracing_err_log_fops = {
 7830	.open           = tracing_err_log_open,
 7831	.write		= tracing_err_log_write,
 7832	.read           = seq_read,
 7833	.llseek         = tracing_lseek,
 7834	.release        = tracing_err_log_release,
 7835};
 7836
 7837static int tracing_buffers_open(struct inode *inode, struct file *filp)
 7838{
 7839	struct trace_array *tr = inode->i_private;
 7840	struct ftrace_buffer_info *info;
 7841	int ret;
 7842
 7843	ret = tracing_check_open_get_tr(tr);
 7844	if (ret)
 7845		return ret;
 7846
 7847	info = kvzalloc(sizeof(*info), GFP_KERNEL);
 7848	if (!info) {
 7849		trace_array_put(tr);
 7850		return -ENOMEM;
 7851	}
 7852
 7853	mutex_lock(&trace_types_lock);
 7854
 7855	info->iter.tr		= tr;
 7856	info->iter.cpu_file	= tracing_get_cpu(inode);
 7857	info->iter.trace	= tr->current_trace;
 7858	info->iter.array_buffer = &tr->array_buffer;
 7859	info->spare		= NULL;
 7860	/* Force reading ring buffer for first read */
 7861	info->read		= (unsigned int)-1;
 7862
 7863	filp->private_data = info;
 7864
 7865	tr->trace_ref++;
 7866
 7867	mutex_unlock(&trace_types_lock);
 7868
 7869	ret = nonseekable_open(inode, filp);
 7870	if (ret < 0)
 7871		trace_array_put(tr);
 7872
 7873	return ret;
 7874}
 7875
 7876static __poll_t
 7877tracing_buffers_poll(struct file *filp, poll_table *poll_table)
 7878{
 7879	struct ftrace_buffer_info *info = filp->private_data;
 7880	struct trace_iterator *iter = &info->iter;
 7881
 7882	return trace_poll(iter, filp, poll_table);
 7883}
 7884
 7885static ssize_t
 7886tracing_buffers_read(struct file *filp, char __user *ubuf,
 7887		     size_t count, loff_t *ppos)
 7888{
 7889	struct ftrace_buffer_info *info = filp->private_data;
 7890	struct trace_iterator *iter = &info->iter;
 7891	void *trace_data;
 7892	int page_size;
 7893	ssize_t ret = 0;
 7894	ssize_t size;
 7895
 7896	if (!count)
 7897		return 0;
 7898
 7899#ifdef CONFIG_TRACER_MAX_TRACE
 7900	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
 7901		return -EBUSY;
 7902#endif
 7903
 7904	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
 7905
 7906	/* Make sure the spare matches the current sub buffer size */
 7907	if (info->spare) {
 7908		if (page_size != info->spare_size) {
 7909			ring_buffer_free_read_page(iter->array_buffer->buffer,
 7910						   info->spare_cpu, info->spare);
 7911			info->spare = NULL;
 7912		}
 7913	}
 7914
 7915	if (!info->spare) {
 7916		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
 7917							  iter->cpu_file);
 7918		if (IS_ERR(info->spare)) {
 7919			ret = PTR_ERR(info->spare);
 7920			info->spare = NULL;
 7921		} else {
 7922			info->spare_cpu = iter->cpu_file;
 7923			info->spare_size = page_size;
 7924		}
 7925	}
 7926	if (!info->spare)
 7927		return ret;
 7928
 7929	/* Do we have previous read data to read? */
 7930	if (info->read < page_size)
 7931		goto read;
 7932
 7933 again:
 7934	trace_access_lock(iter->cpu_file);
 7935	ret = ring_buffer_read_page(iter->array_buffer->buffer,
 7936				    info->spare,
 7937				    count,
 7938				    iter->cpu_file, 0);
 7939	trace_access_unlock(iter->cpu_file);
 7940
 7941	if (ret < 0) {
 7942		if (trace_empty(iter) && !iter->closed) {
 7943			if ((filp->f_flags & O_NONBLOCK))
 7944				return -EAGAIN;
 7945
 7946			ret = wait_on_pipe(iter, 0);
 7947			if (ret)
 7948				return ret;
 7949
 7950			goto again;
 7951		}
 7952		return 0;
 7953	}
 7954
 7955	info->read = 0;
 7956 read:
 7957	size = page_size - info->read;
 7958	if (size > count)
 7959		size = count;
 7960	trace_data = ring_buffer_read_page_data(info->spare);
 7961	ret = copy_to_user(ubuf, trace_data + info->read, size);
 7962	if (ret == size)
 7963		return -EFAULT;
 7964
 7965	size -= ret;
 7966
 7967	*ppos += size;
 7968	info->read += size;
 7969
 7970	return size;
 7971}
 7972
 7973static int tracing_buffers_flush(struct file *file, fl_owner_t id)
 7974{
 7975	struct ftrace_buffer_info *info = file->private_data;
 7976	struct trace_iterator *iter = &info->iter;
 7977
 7978	iter->closed = true;
 7979	/* Make sure the waiters see the new wait_index */
 7980	(void)atomic_fetch_inc_release(&iter->wait_index);
 7981
 7982	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
 7983
 7984	return 0;
 7985}
 7986
 7987static int tracing_buffers_release(struct inode *inode, struct file *file)
 7988{
 7989	struct ftrace_buffer_info *info = file->private_data;
 7990	struct trace_iterator *iter = &info->iter;
 7991
 7992	mutex_lock(&trace_types_lock);
 7993
 7994	iter->tr->trace_ref--;
 7995
 7996	__trace_array_put(iter->tr);
 7997
 7998	if (info->spare)
 7999		ring_buffer_free_read_page(iter->array_buffer->buffer,
 8000					   info->spare_cpu, info->spare);
 8001	kvfree(info);
 8002
 8003	mutex_unlock(&trace_types_lock);
 8004
 8005	return 0;
 8006}
 8007
 8008struct buffer_ref {
 8009	struct trace_buffer	*buffer;
 8010	void			*page;
 8011	int			cpu;
 8012	refcount_t		refcount;
 8013};
 8014
 8015static void buffer_ref_release(struct buffer_ref *ref)
 8016{
 8017	if (!refcount_dec_and_test(&ref->refcount))
 8018		return;
 8019	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
 8020	kfree(ref);
 8021}
 8022
 8023static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
 8024				    struct pipe_buffer *buf)
 8025{
 8026	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 8027
 8028	buffer_ref_release(ref);
 8029	buf->private = 0;
 8030}
 8031
 8032static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 8033				struct pipe_buffer *buf)
 8034{
 8035	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 8036
 8037	if (refcount_read(&ref->refcount) > INT_MAX/2)
 8038		return false;
 8039
 8040	refcount_inc(&ref->refcount);
 8041	return true;
 8042}
 8043
 8044/* Pipe buffer operations for a buffer. */
 8045static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 8046	.release		= buffer_pipe_buf_release,
 8047	.get			= buffer_pipe_buf_get,
 8048};
 8049
 8050/*
 8051 * Callback from splice_to_pipe(), if we need to release some pages
 8052 * at the end of the spd in case we errored out while filling the pipe.
 8053 */
 8054static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 8055{
 8056	struct buffer_ref *ref =
 8057		(struct buffer_ref *)spd->partial[i].private;
 8058
 8059	buffer_ref_release(ref);
 8060	spd->partial[i].private = 0;
 8061}
 8062
 8063static ssize_t
 8064tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 8065			    struct pipe_inode_info *pipe, size_t len,
 8066			    unsigned int flags)
 8067{
 8068	struct ftrace_buffer_info *info = file->private_data;
 8069	struct trace_iterator *iter = &info->iter;
 8070	struct partial_page partial_def[PIPE_DEF_BUFFERS];
 8071	struct page *pages_def[PIPE_DEF_BUFFERS];
 8072	struct splice_pipe_desc spd = {
 8073		.pages		= pages_def,
 8074		.partial	= partial_def,
 8075		.nr_pages_max	= PIPE_DEF_BUFFERS,
 8076		.ops		= &buffer_pipe_buf_ops,
 8077		.spd_release	= buffer_spd_release,
 8078	};
 8079	struct buffer_ref *ref;
 8080	bool woken = false;
 8081	int page_size;
 8082	int entries, i;
 8083	ssize_t ret = 0;
 8084
 8085#ifdef CONFIG_TRACER_MAX_TRACE
 8086	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
 8087		return -EBUSY;
 8088#endif
 8089
 8090	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
 8091	if (*ppos & (page_size - 1))
 8092		return -EINVAL;
 8093
 8094	if (len & (page_size - 1)) {
 8095		if (len < page_size)
 8096			return -EINVAL;
 8097		len &= (~(page_size - 1));
 8098	}
 8099
 8100	if (splice_grow_spd(pipe, &spd))
 8101		return -ENOMEM;
 8102
 8103 again:
 8104	trace_access_lock(iter->cpu_file);
 8105	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
 8106
 8107	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
 8108		struct page *page;
 8109		int r;
 8110
 8111		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
 8112		if (!ref) {
 8113			ret = -ENOMEM;
 8114			break;
 8115		}
 8116
 8117		refcount_set(&ref->refcount, 1);
 8118		ref->buffer = iter->array_buffer->buffer;
 8119		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
 8120		if (IS_ERR(ref->page)) {
 8121			ret = PTR_ERR(ref->page);
 8122			ref->page = NULL;
 8123			kfree(ref);
 8124			break;
 8125		}
 8126		ref->cpu = iter->cpu_file;
 8127
 8128		r = ring_buffer_read_page(ref->buffer, ref->page,
 8129					  len, iter->cpu_file, 1);
 8130		if (r < 0) {
 8131			ring_buffer_free_read_page(ref->buffer, ref->cpu,
 8132						   ref->page);
 8133			kfree(ref);
 8134			break;
 8135		}
 8136
 8137		page = virt_to_page(ring_buffer_read_page_data(ref->page));
 8138
 8139		spd.pages[i] = page;
 8140		spd.partial[i].len = page_size;
 8141		spd.partial[i].offset = 0;
 8142		spd.partial[i].private = (unsigned long)ref;
 8143		spd.nr_pages++;
 8144		*ppos += page_size;
 8145
 8146		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
 8147	}
 8148
 8149	trace_access_unlock(iter->cpu_file);
 8150	spd.nr_pages = i;
 8151
 8152	/* did we read anything? */
 8153	if (!spd.nr_pages) {
 8154
 8155		if (ret)
 8156			goto out;
 8157
 8158		if (woken)
 8159			goto out;
 8160
 8161		ret = -EAGAIN;
 8162		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
 8163			goto out;
 8164
 8165		ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
 8166		if (ret)
 8167			goto out;
 8168
 8169		/* No need to wait after waking up when tracing is off */
 8170		if (!tracer_tracing_is_on(iter->tr))
 8171			goto out;
 8172
 8173		/* Iterate one more time to collect any new data then exit */
 8174		woken = true;
 8175
 8176		goto again;
 8177	}
 8178
 8179	ret = splice_to_pipe(pipe, &spd);
 8180out:
 8181	splice_shrink_spd(&spd);
 8182
 8183	return ret;
 8184}
 8185
 8186static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 8187{
 8188	struct ftrace_buffer_info *info = file->private_data;
 8189	struct trace_iterator *iter = &info->iter;
 8190	int err;
 8191
 8192	if (cmd == TRACE_MMAP_IOCTL_GET_READER) {
 8193		if (!(file->f_flags & O_NONBLOCK)) {
 8194			err = ring_buffer_wait(iter->array_buffer->buffer,
 8195					       iter->cpu_file,
 8196					       iter->tr->buffer_percent,
 8197					       NULL, NULL);
 8198			if (err)
 8199				return err;
 8200		}
 8201
 8202		return ring_buffer_map_get_reader(iter->array_buffer->buffer,
 8203						  iter->cpu_file);
 8204	} else if (cmd) {
 8205		return -ENOTTY;
 8206	}
 8207
 8208	/*
 8209	 * An ioctl call with cmd 0 to the ring buffer file will wake up all
 8210	 * waiters
 8211	 */
 8212	mutex_lock(&trace_types_lock);
 8213
 8214	/* Make sure the waiters see the new wait_index */
 8215	(void)atomic_fetch_inc_release(&iter->wait_index);
 8216
 8217	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
 8218
 8219	mutex_unlock(&trace_types_lock);
 8220	return 0;
 8221}
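/*
 * Seen from user space, the two cases handled above look roughly like
 * (illustrative):
 *
 *	ioctl(fd, TRACE_MMAP_IOCTL_GET_READER);	// advance the reader page
 *	ioctl(fd, 0);				// just wake up blocked readers
 *
 * where fd is an open per-CPU trace_pipe_raw file descriptor.
 */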
 8222
 8223#ifdef CONFIG_TRACER_MAX_TRACE
 8224static int get_snapshot_map(struct trace_array *tr)
 8225{
 8226	int err = 0;
 8227
 8228	/*
 8229	 * Called with mmap_lock held. lockdep would be unhappy if we were to
 8230	 * take trace_types_lock here. Instead use the specific
 8231	 * snapshot_trigger_lock.
 8232	 */
 8233	spin_lock(&tr->snapshot_trigger_lock);
 8234
 8235	if (tr->snapshot || tr->mapped == UINT_MAX)
 8236		err = -EBUSY;
 8237	else
 8238		tr->mapped++;
 8239
 8240	spin_unlock(&tr->snapshot_trigger_lock);
 8241
 8242	/* Wait for update_max_tr() to observe iter->tr->mapped */
 8243	if (tr->mapped == 1)
 8244		synchronize_rcu();
 8245
 8246	return err;
 8247
 8248}
 8249static void put_snapshot_map(struct trace_array *tr)
 8250{
 8251	spin_lock(&tr->snapshot_trigger_lock);
 8252	if (!WARN_ON(!tr->mapped))
 8253		tr->mapped--;
 8254	spin_unlock(&tr->snapshot_trigger_lock);
 8255}
 8256#else
 8257static inline int get_snapshot_map(struct trace_array *tr) { return 0; }
 8258static inline void put_snapshot_map(struct trace_array *tr) { }
 8259#endif
 8260
 8261static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
 8262{
 8263	struct ftrace_buffer_info *info = vma->vm_file->private_data;
 8264	struct trace_iterator *iter = &info->iter;
 8265
 8266	WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file));
 8267	put_snapshot_map(iter->tr);
 8268}
 8269
 8270static const struct vm_operations_struct tracing_buffers_vmops = {
 8271	.close		= tracing_buffers_mmap_close,
 8272};
 8273
 8274static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
 8275{
 8276	struct ftrace_buffer_info *info = filp->private_data;
 8277	struct trace_iterator *iter = &info->iter;
 8278	int ret = 0;
 8279
 8280	/* Currently the boot mapped buffer is not supported for mmap */
 8281	if (iter->tr->flags & TRACE_ARRAY_FL_BOOT)
 8282		return -ENODEV;
 8283
 8284	ret = get_snapshot_map(iter->tr);
 8285	if (ret)
 8286		return ret;
 8287
 8288	ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma);
 8289	if (ret)
 8290		put_snapshot_map(iter->tr);
 8291
 8292	vma->vm_ops = &tracing_buffers_vmops;
 8293
 8294	return ret;
 8295}
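/*
 * From user space the per-CPU ring buffer can be mapped read-only with
 * something like (illustrative):
 *
 *	fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *	meta = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *
 * The first page of the mapping is the reader meta page, and
 * TRACE_MMAP_IOCTL_GET_READER (see tracing_buffers_ioctl() above) is used
 * to advance the reader.
 */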
 8296
 8297static const struct file_operations tracing_buffers_fops = {
 8298	.open		= tracing_buffers_open,
 8299	.read		= tracing_buffers_read,
 8300	.poll		= tracing_buffers_poll,
 8301	.release	= tracing_buffers_release,
 8302	.flush		= tracing_buffers_flush,
 8303	.splice_read	= tracing_buffers_splice_read,
 8304	.unlocked_ioctl = tracing_buffers_ioctl,
 8305	.mmap		= tracing_buffers_mmap,
 8306};
 8307
 8308static ssize_t
 8309tracing_stats_read(struct file *filp, char __user *ubuf,
 8310		   size_t count, loff_t *ppos)
 8311{
 8312	struct inode *inode = file_inode(filp);
 8313	struct trace_array *tr = inode->i_private;
 8314	struct array_buffer *trace_buf = &tr->array_buffer;
 8315	int cpu = tracing_get_cpu(inode);
 8316	struct trace_seq *s;
 8317	unsigned long cnt;
 8318	unsigned long long t;
 8319	unsigned long usec_rem;
 8320
 8321	s = kmalloc(sizeof(*s), GFP_KERNEL);
 8322	if (!s)
 8323		return -ENOMEM;
 8324
 8325	trace_seq_init(s);
 8326
 8327	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
 8328	trace_seq_printf(s, "entries: %ld\n", cnt);
 8329
 8330	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
 8331	trace_seq_printf(s, "overrun: %ld\n", cnt);
 8332
 8333	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
 8334	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
 8335
 8336	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
 8337	trace_seq_printf(s, "bytes: %ld\n", cnt);
 8338
 8339	if (trace_clocks[tr->clock_id].in_ns) {
 8340		/* local or global for trace_clock */
 8341		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
 8342		usec_rem = do_div(t, USEC_PER_SEC);
 8343		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
 8344								t, usec_rem);
 8345
 8346		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
 8347		usec_rem = do_div(t, USEC_PER_SEC);
 8348		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
 8349	} else {
 8350		/* counter or tsc mode for trace_clock */
 8351		trace_seq_printf(s, "oldest event ts: %llu\n",
 8352				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
 8353
 8354		trace_seq_printf(s, "now ts: %llu\n",
 8355				ring_buffer_time_stamp(trace_buf->buffer));
 8356	}
 8357
 8358	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
 8359	trace_seq_printf(s, "dropped events: %ld\n", cnt);
 8360
 8361	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
 8362	trace_seq_printf(s, "read events: %ld\n", cnt);
 8363
 8364	count = simple_read_from_buffer(ubuf, count, ppos,
 8365					s->buffer, trace_seq_used(s));
 8366
 8367	kfree(s);
 8368
 8369	return count;
 8370}
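/*
 * Example of what reading per_cpu/cpuN/stats produces with a nanosecond
 * trace clock; the numbers are made up for illustration:
 *
 *	entries: 4215
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 215320
 *	oldest event ts:  1832.123456
 *	now ts:  1845.654321
 *	dropped events: 0
 *	read events: 1024
 */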
 8371
 8372static const struct file_operations tracing_stats_fops = {
 8373	.open		= tracing_open_generic_tr,
 8374	.read		= tracing_stats_read,
 8375	.llseek		= generic_file_llseek,
 8376	.release	= tracing_release_generic_tr,
 8377};
 8378
 8379#ifdef CONFIG_DYNAMIC_FTRACE
 8380
 8381static ssize_t
 8382tracing_read_dyn_info(struct file *filp, char __user *ubuf,
 8383		  size_t cnt, loff_t *ppos)
 8384{
 8385	ssize_t ret;
 8386	char *buf;
 8387	int r;
 8388
 8389	/* 512 should be plenty to hold the amount needed */
 8390#define DYN_INFO_BUF_SIZE	512
 8391
 8392	buf = kmalloc(DYN_INFO_BUF_SIZE, GFP_KERNEL);
 8393	if (!buf)
 8394		return -ENOMEM;
 8395
 8396	r = scnprintf(buf, DYN_INFO_BUF_SIZE,
 8397		      "%ld pages:%ld groups: %ld\n"
 8398		      "ftrace boot update time = %llu (ns)\n"
 8399		      "ftrace module total update time = %llu (ns)\n",
 8400		      ftrace_update_tot_cnt,
 8401		      ftrace_number_of_pages,
 8402		      ftrace_number_of_groups,
 8403		      ftrace_update_time,
 8404		      ftrace_total_mod_time);
 8405
 8406	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 8407	kfree(buf);
 8408	return ret;
 8409}
 8410
 8411static const struct file_operations tracing_dyn_info_fops = {
 8412	.open		= tracing_open_generic,
 8413	.read		= tracing_read_dyn_info,
 8414	.llseek		= generic_file_llseek,
 8415};
 8416#endif /* CONFIG_DYNAMIC_FTRACE */
 8417
 8418#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
 8419static void
 8420ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
 8421		struct trace_array *tr, struct ftrace_probe_ops *ops,
 8422		void *data)
 8423{
 8424	tracing_snapshot_instance(tr);
 8425}
 8426
 8427static void
 8428ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
 8429		      struct trace_array *tr, struct ftrace_probe_ops *ops,
 8430		      void *data)
 8431{
 8432	struct ftrace_func_mapper *mapper = data;
 8433	long *count = NULL;
 8434
 8435	if (mapper)
 8436		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 8437
 8438	if (count) {
 8439
 8440		if (*count <= 0)
 8441			return;
 8442
 8443		(*count)--;
 8444	}
 8445
 8446	tracing_snapshot_instance(tr);
 8447}
 8448
 8449static int
 8450ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
 8451		      struct ftrace_probe_ops *ops, void *data)
 8452{
 8453	struct ftrace_func_mapper *mapper = data;
 8454	long *count = NULL;
 8455
 8456	seq_printf(m, "%ps:", (void *)ip);
 8457
 8458	seq_puts(m, "snapshot");
 8459
 8460	if (mapper)
 8461		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 8462
 8463	if (count)
 8464		seq_printf(m, ":count=%ld\n", *count);
 8465	else
 8466		seq_puts(m, ":unlimited\n");
 8467
 8468	return 0;
 8469}
 8470
 8471static int
 8472ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
 8473		     unsigned long ip, void *init_data, void **data)
 8474{
 8475	struct ftrace_func_mapper *mapper = *data;
 8476
 8477	if (!mapper) {
 8478		mapper = allocate_ftrace_func_mapper();
 8479		if (!mapper)
 8480			return -ENOMEM;
 8481		*data = mapper;
 8482	}
 8483
 8484	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
 8485}
 8486
 8487static void
 8488ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
 8489		     unsigned long ip, void *data)
 8490{
 8491	struct ftrace_func_mapper *mapper = data;
 8492
 8493	if (!ip) {
 8494		if (!mapper)
 8495			return;
 8496		free_ftrace_func_mapper(mapper, NULL);
 8497		return;
 8498	}
 8499
 8500	ftrace_func_mapper_remove_ip(mapper, ip);
 8501}
 8502
 8503static struct ftrace_probe_ops snapshot_probe_ops = {
 8504	.func			= ftrace_snapshot,
 8505	.print			= ftrace_snapshot_print,
 8506};
 8507
 8508static struct ftrace_probe_ops snapshot_count_probe_ops = {
 8509	.func			= ftrace_count_snapshot,
 8510	.print			= ftrace_snapshot_print,
 8511	.init			= ftrace_snapshot_init,
 8512	.free			= ftrace_snapshot_free,
 8513};
 8514
 8515static int
 8516ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
 8517			       char *glob, char *cmd, char *param, int enable)
 8518{
 8519	struct ftrace_probe_ops *ops;
 8520	void *count = (void *)-1;
 8521	char *number;
 8522	int ret;
 8523
 8524	if (!tr)
 8525		return -ENODEV;
 8526
 8527	/* hash funcs only work with set_ftrace_filter */
 8528	if (!enable)
 8529		return -EINVAL;
 8530
 8531	ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
 8532
 8533	if (glob[0] == '!') {
 8534		ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
 8535		if (!ret)
 8536			tracing_disarm_snapshot(tr);
 8537
 8538		return ret;
 8539	}
 8540
 8541	if (!param)
 8542		goto out_reg;
 8543
 8544	number = strsep(&param, ":");
 8545
 8546	if (!strlen(number))
 8547		goto out_reg;
 8548
 8549	/*
 8550	 * We use the callback data field (which is a pointer)
 8551	 * as our counter.
 8552	 */
 8553	ret = kstrtoul(number, 0, (unsigned long *)&count);
 8554	if (ret)
 8555		return ret;
 8556
 8557 out_reg:
 8558	ret = tracing_arm_snapshot(tr);
 8559	if (ret < 0)
 8560		goto out;
 8561
 8562	ret = register_ftrace_function_probe(glob, tr, ops, count);
 8563	if (ret < 0)
 8564		tracing_disarm_snapshot(tr);
 8565 out:
 8566	return ret < 0 ? ret : 0;
 8567}
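/*
 * This callback backs the "snapshot" command of set_ftrace_filter; usage
 * is roughly (the function name is illustrative):
 *
 *	echo 'schedule:snapshot' > set_ftrace_filter	# snapshot on every hit
 *	echo 'schedule:snapshot:5' > set_ftrace_filter	# only the first 5 hits
 *	echo '!schedule:snapshot' > set_ftrace_filter	# remove the probe
 */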
 8568
 8569static struct ftrace_func_command ftrace_snapshot_cmd = {
 8570	.name			= "snapshot",
 8571	.func			= ftrace_trace_snapshot_callback,
 8572};
 8573
 8574static __init int register_snapshot_cmd(void)
 8575{
 8576	return register_ftrace_command(&ftrace_snapshot_cmd);
 8577}
 8578#else
 8579static inline __init int register_snapshot_cmd(void) { return 0; }
 8580#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
 8581
 8582static struct dentry *tracing_get_dentry(struct trace_array *tr)
 8583{
 8584	if (WARN_ON(!tr->dir))
 8585		return ERR_PTR(-ENODEV);
 8586
 8587	/* Top directory uses NULL as the parent */
 8588	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 8589		return NULL;
 8590
 8591	/* All sub buffers have a descriptor */
 8592	return tr->dir;
 8593}
 8594
 8595static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 8596{
 8597	struct dentry *d_tracer;
 8598
 8599	if (tr->percpu_dir)
 8600		return tr->percpu_dir;
 8601
 8602	d_tracer = tracing_get_dentry(tr);
 8603	if (IS_ERR(d_tracer))
 8604		return NULL;
 8605
 8606	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
 8607
 8608	MEM_FAIL(!tr->percpu_dir,
 8609		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
 8610
 8611	return tr->percpu_dir;
 8612}
 8613
 8614static struct dentry *
 8615trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
 8616		      void *data, long cpu, const struct file_operations *fops)
 8617{
 8618	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
 8619
 8620	if (ret) /* See tracing_get_cpu() */
 8621		d_inode(ret)->i_cdev = (void *)(cpu + 1);
 8622	return ret;
 8623}
 8624
 8625static void
 8626tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
 8627{
 8628	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
 8629	struct dentry *d_cpu;
 8630	char cpu_dir[30]; /* 30 characters should be more than enough */
 8631
 8632	if (!d_percpu)
 8633		return;
 8634
 8635	snprintf(cpu_dir, 30, "cpu%ld", cpu);
 8636	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
 8637	if (!d_cpu) {
 8638		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
 8639		return;
 8640	}
 8641
 8642	/* per cpu trace_pipe */
 8643	trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
 8644				tr, cpu, &tracing_pipe_fops);
 8645
 8646	/* per cpu trace */
 8647	trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
 8648				tr, cpu, &tracing_fops);
 8649
 8650	trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
 8651				tr, cpu, &tracing_buffers_fops);
 8652
 8653	trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
 8654				tr, cpu, &tracing_stats_fops);
 8655
 8656	trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
 8657				tr, cpu, &tracing_entries_fops);
 8658
 8659	if (tr->range_addr_start)
 8660		trace_create_cpu_file("buffer_meta", TRACE_MODE_READ, d_cpu,
 8661				      tr, cpu, &tracing_buffer_meta_fops);
 8662#ifdef CONFIG_TRACER_SNAPSHOT
 8663	if (!tr->range_addr_start) {
 8664		trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
 8665				      tr, cpu, &snapshot_fops);
 8666
 8667		trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
 8668				      tr, cpu, &snapshot_raw_fops);
 8669	}
 8670#endif
 8671}
 8672
 8673#ifdef CONFIG_FTRACE_SELFTEST
 8674/* Let selftest have access to static functions in this file */
 8675#include "trace_selftest.c"
 8676#endif
 8677
 8678static ssize_t
 8679trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
 8680			loff_t *ppos)
 8681{
 8682	struct trace_option_dentry *topt = filp->private_data;
 8683	char *buf;
 8684
 8685	if (topt->flags->val & topt->opt->bit)
 8686		buf = "1\n";
 8687	else
 8688		buf = "0\n";
 8689
 8690	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
 8691}
 8692
 8693static ssize_t
 8694trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
 8695			 loff_t *ppos)
 8696{
 8697	struct trace_option_dentry *topt = filp->private_data;
 8698	unsigned long val;
 8699	int ret;
 8700
 8701	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 8702	if (ret)
 8703		return ret;
 8704
 8705	if (val != 0 && val != 1)
 8706		return -EINVAL;
 8707
 8708	if (!!(topt->flags->val & topt->opt->bit) != val) {
 8709		mutex_lock(&trace_types_lock);
 8710		ret = __set_tracer_option(topt->tr, topt->flags,
 8711					  topt->opt, !val);
 8712		mutex_unlock(&trace_types_lock);
 8713		if (ret)
 8714			return ret;
 8715	}
 8716
 8717	*ppos += cnt;
 8718
 8719	return cnt;
 8720}
 8721
 8722static int tracing_open_options(struct inode *inode, struct file *filp)
 8723{
 8724	struct trace_option_dentry *topt = inode->i_private;
 8725	int ret;
 8726
 8727	ret = tracing_check_open_get_tr(topt->tr);
 8728	if (ret)
 8729		return ret;
 8730
 8731	filp->private_data = inode->i_private;
 8732	return 0;
 8733}
 8734
 8735static int tracing_release_options(struct inode *inode, struct file *file)
 8736{
 8737	struct trace_option_dentry *topt = file->private_data;
 8738
 8739	trace_array_put(topt->tr);
 8740	return 0;
 8741}
 8742
 8743static const struct file_operations trace_options_fops = {
 8744	.open = tracing_open_options,
 8745	.read = trace_options_read,
 8746	.write = trace_options_write,
 8747	.llseek	= generic_file_llseek,
 8748	.release = tracing_release_options,
 8749};
 8750
 8751/*
 8752 * In order to pass in both the trace_array descriptor as well as the index
 8753 * to the flag that the trace option file represents, the trace_array
 8754 * has a character array of trace_flags_index[], which holds the index
 8755 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 8756 * The address of this character array is passed to the flag option file
 8757 * read/write callbacks.
 8758 *
 8759 * In order to extract both the index and the trace_array descriptor,
 8760 * get_tr_index() uses the following algorithm.
 8761 *
 8762 *   idx = *ptr;
 8763 *
 8764 * As the pointer passed in is the address of one of the index slots, and
 8765 * each slot holds its own index (remember index[1] == 1).
 8766 *
 8767 * Then to get the trace_array descriptor, by subtracting that index
 8768 * from the ptr, we get to the start of the index itself.
 8769 *
 8770 *   ptr - idx == &index[0]
 8771 *
 8772 * Then a simple container_of() from that pointer gets us to the
 8773 * trace_array descriptor.
 8774 */
 8775static void get_tr_index(void *data, struct trace_array **ptr,
 8776			 unsigned int *pindex)
 8777{
 8778	*pindex = *(unsigned char *)data;
 8779
 8780	*ptr = container_of(data - *pindex, struct trace_array,
 8781			    trace_flags_index);
 8782}
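/*
 * Worked example of the pointer math above: if data points at
 * tr->trace_flags_index[5], then *data == 5, so data - 5 is
 * &tr->trace_flags_index[0], and container_of() on that address recovers
 * the enclosing trace_array.
 */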
 8783
 8784static ssize_t
 8785trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
 8786			loff_t *ppos)
 8787{
 8788	void *tr_index = filp->private_data;
 8789	struct trace_array *tr;
 8790	unsigned int index;
 8791	char *buf;
 8792
 8793	get_tr_index(tr_index, &tr, &index);
 8794
 8795	if (tr->trace_flags & (1 << index))
 8796		buf = "1\n";
 8797	else
 8798		buf = "0\n";
 8799
 8800	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
 8801}
 8802
 8803static ssize_t
 8804trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 8805			 loff_t *ppos)
 8806{
 8807	void *tr_index = filp->private_data;
 8808	struct trace_array *tr;
 8809	unsigned int index;
 8810	unsigned long val;
 8811	int ret;
 8812
 8813	get_tr_index(tr_index, &tr, &index);
 8814
 8815	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 8816	if (ret)
 8817		return ret;
 8818
 8819	if (val != 0 && val != 1)
 8820		return -EINVAL;
 8821
 8822	mutex_lock(&event_mutex);
 8823	mutex_lock(&trace_types_lock);
 8824	ret = set_tracer_flag(tr, 1 << index, val);
 8825	mutex_unlock(&trace_types_lock);
 8826	mutex_unlock(&event_mutex);
 8827
 8828	if (ret < 0)
 8829		return ret;
 8830
 8831	*ppos += cnt;
 8832
 8833	return cnt;
 8834}
 8835
 8836static const struct file_operations trace_options_core_fops = {
 8837	.open = tracing_open_generic,
 8838	.read = trace_options_core_read,
 8839	.write = trace_options_core_write,
 8840	.llseek = generic_file_llseek,
 8841};
 8842
 8843struct dentry *trace_create_file(const char *name,
 8844				 umode_t mode,
 8845				 struct dentry *parent,
 8846				 void *data,
 8847				 const struct file_operations *fops)
 8848{
 8849	struct dentry *ret;
 8850
 8851	ret = tracefs_create_file(name, mode, parent, data, fops);
 8852	if (!ret)
 8853		pr_warn("Could not create tracefs '%s' entry\n", name);
 8854
 8855	return ret;
 8856}
 8857
 8858
 8859static struct dentry *trace_options_init_dentry(struct trace_array *tr)
 8860{
 8861	struct dentry *d_tracer;
 8862
 8863	if (tr->options)
 8864		return tr->options;
 8865
 8866	d_tracer = tracing_get_dentry(tr);
 8867	if (IS_ERR(d_tracer))
 8868		return NULL;
 8869
 8870	tr->options = tracefs_create_dir("options", d_tracer);
 8871	if (!tr->options) {
 8872		pr_warn("Could not create tracefs directory 'options'\n");
 8873		return NULL;
 8874	}
 8875
 8876	return tr->options;
 8877}
 8878
 8879static void
 8880create_trace_option_file(struct trace_array *tr,
 8881			 struct trace_option_dentry *topt,
 8882			 struct tracer_flags *flags,
 8883			 struct tracer_opt *opt)
 8884{
 8885	struct dentry *t_options;
 8886
 8887	t_options = trace_options_init_dentry(tr);
 8888	if (!t_options)
 8889		return;
 8890
 8891	topt->flags = flags;
 8892	topt->opt = opt;
 8893	topt->tr = tr;
 8894
 8895	topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
 8896					t_options, topt, &trace_options_fops);
 8897
 8898}
 8899
 8900static void
 8901create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
 8902{
 8903	struct trace_option_dentry *topts;
 8904	struct trace_options *tr_topts;
 8905	struct tracer_flags *flags;
 8906	struct tracer_opt *opts;
 8907	int cnt;
 8908	int i;
 8909
 8910	if (!tracer)
 8911		return;
 8912
 8913	flags = tracer->flags;
 8914
 8915	if (!flags || !flags->opts)
 8916		return;
 8917
 8918	/*
 8919	 * If this is an instance, only create flags for tracers
 8920	 * the instance may have.
 8921	 */
 8922	if (!trace_ok_for_array(tracer, tr))
 8923		return;
 8924
 8925	for (i = 0; i < tr->nr_topts; i++) {
 8926		/* Make sure there are no duplicate flags. */
 8927		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
 8928			return;
 8929	}
 8930
 8931	opts = flags->opts;
 8932
 8933	for (cnt = 0; opts[cnt].name; cnt++)
 8934		;
 8935
 8936	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
 8937	if (!topts)
 8938		return;
 8939
 8940	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
 8941			    GFP_KERNEL);
 8942	if (!tr_topts) {
 8943		kfree(topts);
 8944		return;
 8945	}
 8946
 8947	tr->topts = tr_topts;
 8948	tr->topts[tr->nr_topts].tracer = tracer;
 8949	tr->topts[tr->nr_topts].topts = topts;
 8950	tr->nr_topts++;
 8951
 8952	for (cnt = 0; opts[cnt].name; cnt++) {
 8953		create_trace_option_file(tr, &topts[cnt], flags,
 8954					 &opts[cnt]);
 8955		MEM_FAIL(topts[cnt].entry == NULL,
 8956			  "Failed to create trace option: %s",
 8957			  opts[cnt].name);
 8958	}
 8959}
 8960
 8961static struct dentry *
 8962create_trace_option_core_file(struct trace_array *tr,
 8963			      const char *option, long index)
 8964{
 8965	struct dentry *t_options;
 8966
 8967	t_options = trace_options_init_dentry(tr);
 8968	if (!t_options)
 8969		return NULL;
 8970
 8971	return trace_create_file(option, TRACE_MODE_WRITE, t_options,
 8972				 (void *)&tr->trace_flags_index[index],
 8973				 &trace_options_core_fops);
 8974}
 8975
 8976static void create_trace_options_dir(struct trace_array *tr)
 8977{
 8978	struct dentry *t_options;
 8979	bool top_level = tr == &global_trace;
 8980	int i;
 8981
 8982	t_options = trace_options_init_dentry(tr);
 8983	if (!t_options)
 8984		return;
 8985
 8986	for (i = 0; trace_options[i]; i++) {
 8987		if (top_level ||
 8988		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
 8989			create_trace_option_core_file(tr, trace_options[i], i);
 8990	}
 8991}
 8992
 8993static ssize_t
 8994rb_simple_read(struct file *filp, char __user *ubuf,
 8995	       size_t cnt, loff_t *ppos)
 8996{
 8997	struct trace_array *tr = filp->private_data;
 8998	char buf[64];
 8999	int r;
 9000
 9001	r = tracer_tracing_is_on(tr);
 9002	r = sprintf(buf, "%d\n", r);
 9003
 9004	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 9005}
 9006
 9007static ssize_t
 9008rb_simple_write(struct file *filp, const char __user *ubuf,
 9009		size_t cnt, loff_t *ppos)
 9010{
 9011	struct trace_array *tr = filp->private_data;
 9012	struct trace_buffer *buffer = tr->array_buffer.buffer;
 9013	unsigned long val;
 9014	int ret;
 9015
 9016	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 9017	if (ret)
 9018		return ret;
 9019
 9020	if (buffer) {
 9021		mutex_lock(&trace_types_lock);
 9022		if (!!val == tracer_tracing_is_on(tr)) {
 9023			val = 0; /* do nothing */
 9024		} else if (val) {
 9025			tracer_tracing_on(tr);
 9026			if (tr->current_trace->start)
 9027				tr->current_trace->start(tr);
 9028		} else {
 9029			tracer_tracing_off(tr);
 9030			if (tr->current_trace->stop)
 9031				tr->current_trace->stop(tr);
 9032			/* Wake up any waiters */
 9033			ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
 9034		}
 9035		mutex_unlock(&trace_types_lock);
 9036	}
 9037
 9038	(*ppos)++;
 9039
 9040	return cnt;
 9041}
 9042
 9043static const struct file_operations rb_simple_fops = {
 9044	.open		= tracing_open_generic_tr,
 9045	.read		= rb_simple_read,
 9046	.write		= rb_simple_write,
 9047	.release	= tracing_release_generic_tr,
 9048	.llseek		= default_llseek,
 9049};
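/*
 * These fops back the "tracing_on" control file; usage is simply:
 *
 *	echo 0 > tracing_on	# stop recording into the ring buffer
 *	echo 1 > tracing_on	# resume recording
 *
 * This only gates writes to the ring buffer; the current tracer itself
 * stays loaded.
 */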
 9050
 9051static ssize_t
 9052buffer_percent_read(struct file *filp, char __user *ubuf,
 9053		    size_t cnt, loff_t *ppos)
 9054{
 9055	struct trace_array *tr = filp->private_data;
 9056	char buf[64];
 9057	int r;
 9058
 9059	r = tr->buffer_percent;
 9060	r = sprintf(buf, "%d\n", r);
 9061
 9062	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 9063}
 9064
 9065static ssize_t
 9066buffer_percent_write(struct file *filp, const char __user *ubuf,
 9067		     size_t cnt, loff_t *ppos)
 9068{
 9069	struct trace_array *tr = filp->private_data;
 9070	unsigned long val;
 9071	int ret;
 9072
 9073	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 9074	if (ret)
 9075		return ret;
 9076
 9077	if (val > 100)
 9078		return -EINVAL;
 9079
 9080	tr->buffer_percent = val;
 9081
 9082	(*ppos)++;
 9083
 9084	return cnt;
 9085}
 9086
 9087static const struct file_operations buffer_percent_fops = {
 9088	.open		= tracing_open_generic_tr,
 9089	.read		= buffer_percent_read,
 9090	.write		= buffer_percent_write,
 9091	.release	= tracing_release_generic_tr,
 9092	.llseek		= default_llseek,
 9093};
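/*
 * buffer_percent controls when blocked readers of the ring buffer are
 * woken: 0 wakes them as soon as any data is present, 50 once the buffer
 * is half full, 100 only when it is completely full.  For example:
 *
 *	echo 50 > buffer_percent
 */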
 9094
 9095static ssize_t
 9096buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 9097{
 9098	struct trace_array *tr = filp->private_data;
 9099	size_t size;
 9100	char buf[64];
 9101	int order;
 9102	int r;
 9103
 9104	order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
 9105	size = (PAGE_SIZE << order) / 1024;
 9106
 9107	r = sprintf(buf, "%zd\n", size);
 9108
 9109	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 9110}
 9111
 9112static ssize_t
 9113buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
 9114			 size_t cnt, loff_t *ppos)
 9115{
 9116	struct trace_array *tr = filp->private_data;
 9117	unsigned long val;
 9118	int old_order;
 9119	int order;
 9120	int pages;
 9121	int ret;
 9122
 9123	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 9124	if (ret)
 9125		return ret;
 9126
 9127	val *= 1024; /* value passed in is in KB */
 9128
 9129	pages = DIV_ROUND_UP(val, PAGE_SIZE);
 9130	order = fls(pages - 1);
 9131
 9132	/* limit between 1 and 128 system pages */
 9133	if (order < 0 || order > 7)
 9134		return -EINVAL;
 9135
 9136	/* Do not allow tracing while changing the order of the ring buffer */
 9137	tracing_stop_tr(tr);
 9138
 9139	old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
 9140	if (old_order == order)
 9141		goto out;
 9142
 9143	ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
 9144	if (ret)
 9145		goto out;
 9146
 9147#ifdef CONFIG_TRACER_MAX_TRACE
 9148
 9149	if (!tr->allocated_snapshot)
 9150		goto out_max;
 9151
 9152	ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
 9153	if (ret) {
 9154		/* Put back the old order */
 9155		cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
 9156		if (WARN_ON_ONCE(cnt)) {
 9157			/*
 9158			 * AARGH! We are left with different orders!
 9159			 * The max buffer is our "snapshot" buffer.
 9160			 * When a tracer needs a snapshot (one of the
 9161			 * latency tracers), it swaps the max buffer
 9162			 * with the saved snapshot. We succeeded in updating
 9163			 * the order of the main buffer, but failed to
 9164			 * update the order of the max buffer. But when we tried
 9165			 * to reset the main buffer to the original size, we
 9166			 * failed there too. This is very unlikely to
 9167			 * happen, but if it does, warn and kill all
 9168			 * tracing.
 9169			 */
 9170			tracing_disabled = 1;
 9171		}
 9172		goto out;
 9173	}
 9174 out_max:
 9175#endif
 9176	(*ppos)++;
 9177 out:
 9178	if (ret)
 9179		cnt = ret;
 9180	tracing_start_tr(tr);
 9181	return cnt;
 9182}
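/*
 * Worked example of the rounding above, assuming 4K pages: writing 20
 * (KB) gives val = 20480, pages = DIV_ROUND_UP(20480, 4096) = 5 and
 * order = fls(4) = 3, so the sub-buffer becomes 8 pages = 32 KB.  Orders
 * outside 0..7 (1 to 128 pages) are rejected with -EINVAL.
 */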
 9183
 9184static const struct file_operations buffer_subbuf_size_fops = {
 9185	.open		= tracing_open_generic_tr,
 9186	.read		= buffer_subbuf_size_read,
 9187	.write		= buffer_subbuf_size_write,
 9188	.release	= tracing_release_generic_tr,
 9189	.llseek		= default_llseek,
 9190};
 9191
 9192static struct dentry *trace_instance_dir;
 9193
 9194static void
 9195init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 9196
 9197static int
 9198allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
 9199{
 9200	enum ring_buffer_flags rb_flags;
 9201
 9202	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
 9203
 9204	buf->tr = tr;
 9205
 9206	if (tr->range_addr_start && tr->range_addr_size) {
 9207		buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
 9208						      tr->range_addr_start,
 9209						      tr->range_addr_size);
 9210
 9211		ring_buffer_last_boot_delta(buf->buffer,
 9212					    &tr->text_delta, &tr->data_delta);
 9213		/*
 9214		 * This is basically the same as a mapped buffer,
 9215		 * with the same restrictions.
 9216		 */
 9217		tr->mapped++;
 9218	} else {
 9219		buf->buffer = ring_buffer_alloc(size, rb_flags);
 9220	}
 9221	if (!buf->buffer)
 9222		return -ENOMEM;
 9223
 9224	buf->data = alloc_percpu(struct trace_array_cpu);
 9225	if (!buf->data) {
 9226		ring_buffer_free(buf->buffer);
 9227		buf->buffer = NULL;
 9228		return -ENOMEM;
 9229	}
 9230
 9231	/* Allocate the first page for all buffers */
 9232	set_buffer_entries(&tr->array_buffer,
 9233			   ring_buffer_size(tr->array_buffer.buffer, 0));
 9234
 9235	return 0;
 9236}
 9237
 9238static void free_trace_buffer(struct array_buffer *buf)
 9239{
 9240	if (buf->buffer) {
 9241		ring_buffer_free(buf->buffer);
 9242		buf->buffer = NULL;
 9243		free_percpu(buf->data);
 9244		buf->data = NULL;
 9245	}
 9246}
 9247
 9248static int allocate_trace_buffers(struct trace_array *tr, int size)
 9249{
 9250	int ret;
 9251
 9252	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
 9253	if (ret)
 9254		return ret;
 9255
 9256#ifdef CONFIG_TRACER_MAX_TRACE
 9257	/* Boot mapped (fixed address) buffer trace arrays do not have snapshot buffers */
 9258	if (tr->range_addr_start)
 9259		return 0;
 9260
 9261	ret = allocate_trace_buffer(tr, &tr->max_buffer,
 9262				    allocate_snapshot ? size : 1);
 9263	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
 9264		free_trace_buffer(&tr->array_buffer);
 9265		return -ENOMEM;
 9266	}
 9267	tr->allocated_snapshot = allocate_snapshot;
 9268
 9269	allocate_snapshot = false;
 9270#endif
 9271
 9272	return 0;
 9273}
 9274
 9275static void free_trace_buffers(struct trace_array *tr)
 9276{
 9277	if (!tr)
 9278		return;
 9279
 9280	free_trace_buffer(&tr->array_buffer);
 9281
 9282#ifdef CONFIG_TRACER_MAX_TRACE
 9283	free_trace_buffer(&tr->max_buffer);
 9284#endif
 9285}
 9286
 9287static void init_trace_flags_index(struct trace_array *tr)
 9288{
 9289	int i;
 9290
 9291	/* Used by the trace options files */
 9292	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
 9293		tr->trace_flags_index[i] = i;
 9294}
 9295
 9296static void __update_tracer_options(struct trace_array *tr)
 9297{
 9298	struct tracer *t;
 9299
 9300	for (t = trace_types; t; t = t->next)
 9301		add_tracer_options(tr, t);
 9302}
 9303
 9304static void update_tracer_options(struct trace_array *tr)
 9305{
 9306	mutex_lock(&trace_types_lock);
 9307	tracer_options_updated = true;
 9308	__update_tracer_options(tr);
 9309	mutex_unlock(&trace_types_lock);
 9310}
 9311
 9312/* Must have trace_types_lock held */
 9313struct trace_array *trace_array_find(const char *instance)
 9314{
 9315	struct trace_array *tr, *found = NULL;
 9316
 9317	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 9318		if (tr->name && strcmp(tr->name, instance) == 0) {
 9319			found = tr;
 9320			break;
 9321		}
 9322	}
 9323
 9324	return found;
 9325}
 9326
 9327struct trace_array *trace_array_find_get(const char *instance)
 9328{
 9329	struct trace_array *tr;
 9330
 9331	mutex_lock(&trace_types_lock);
 9332	tr = trace_array_find(instance);
 9333	if (tr)
 9334		tr->ref++;
 9335	mutex_unlock(&trace_types_lock);
 9336
 9337	return tr;
 9338}
 9339
 9340static int trace_array_create_dir(struct trace_array *tr)
 9341{
 9342	int ret;
 9343
 9344	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
 9345	if (!tr->dir)
 9346		return -EINVAL;
 9347
 9348	ret = event_trace_add_tracer(tr->dir, tr);
 9349	if (ret) {
 9350		tracefs_remove(tr->dir);
 9351		return ret;
 9352	}
 9353
 9354	init_tracer_tracefs(tr, tr->dir);
 9355	__update_tracer_options(tr);
 9356
 9357	return ret;
 9358}
 9359
 9360static struct trace_array *
 9361trace_array_create_systems(const char *name, const char *systems,
 9362			   unsigned long range_addr_start,
 9363			   unsigned long range_addr_size)
 9364{
 9365	struct trace_array *tr;
 9366	int ret;
 9367
 9368	ret = -ENOMEM;
 9369	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
 9370	if (!tr)
 9371		return ERR_PTR(ret);
 9372
 9373	tr->name = kstrdup(name, GFP_KERNEL);
 9374	if (!tr->name)
 9375		goto out_free_tr;
 9376
 9377	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
 9378		goto out_free_tr;
 9379
 9380	if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
 9381		goto out_free_tr;
 9382
 9383	if (systems) {
 9384		tr->system_names = kstrdup_const(systems, GFP_KERNEL);
 9385		if (!tr->system_names)
 9386			goto out_free_tr;
 9387	}
 9388
 9389	/* Only for boot up memory mapped ring buffers */
 9390	tr->range_addr_start = range_addr_start;
 9391	tr->range_addr_size = range_addr_size;
 9392
 9393	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
 9394
 9395	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
 9396
 9397	raw_spin_lock_init(&tr->start_lock);
 9398
 9399	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 9400#ifdef CONFIG_TRACER_MAX_TRACE
 9401	spin_lock_init(&tr->snapshot_trigger_lock);
 9402#endif
 9403	tr->current_trace = &nop_trace;
 9404
 9405	INIT_LIST_HEAD(&tr->systems);
 9406	INIT_LIST_HEAD(&tr->events);
 9407	INIT_LIST_HEAD(&tr->hist_vars);
 9408	INIT_LIST_HEAD(&tr->err_log);
 9409
 9410	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
 9411		goto out_free_tr;
 9412
9413	/* The ring buffer is expanded by default */
 9414	trace_set_ring_buffer_expanded(tr);
 9415
 9416	if (ftrace_allocate_ftrace_ops(tr) < 0)
 9417		goto out_free_tr;
 9418
 9419	ftrace_init_trace_array(tr);
 9420
 9421	init_trace_flags_index(tr);
 9422
 9423	if (trace_instance_dir) {
 9424		ret = trace_array_create_dir(tr);
 9425		if (ret)
 9426			goto out_free_tr;
 9427	} else
 9428		__trace_early_add_events(tr);
 9429
 9430	list_add(&tr->list, &ftrace_trace_arrays);
 9431
 9432	tr->ref++;
 9433
 9434	return tr;
 9435
 9436 out_free_tr:
 9437	ftrace_free_ftrace_ops(tr);
 9438	free_trace_buffers(tr);
 9439	free_cpumask_var(tr->pipe_cpumask);
 9440	free_cpumask_var(tr->tracing_cpumask);
 9441	kfree_const(tr->system_names);
 9442	kfree(tr->name);
 9443	kfree(tr);
 9444
 9445	return ERR_PTR(ret);
 9446}
 9447
 9448static struct trace_array *trace_array_create(const char *name)
 9449{
 9450	return trace_array_create_systems(name, NULL, 0, 0);
 9451}
 9452
 9453static int instance_mkdir(const char *name)
 9454{
 9455	struct trace_array *tr;
 9456	int ret;
 9457
 9458	guard(mutex)(&event_mutex);
 9459	guard(mutex)(&trace_types_lock);
 9460
 9461	ret = -EEXIST;
 9462	if (trace_array_find(name))
 9463		return -EEXIST;
 9464
 9465	tr = trace_array_create(name);
 9466
 9467	ret = PTR_ERR_OR_ZERO(tr);
 9468
 9469	return ret;
 9470}
 9471
 9472static u64 map_pages(u64 start, u64 size)
 9473{
 9474	struct page **pages;
 9475	phys_addr_t page_start;
 9476	unsigned int page_count;
 9477	unsigned int i;
 9478	void *vaddr;
 9479
 9480	page_count = DIV_ROUND_UP(size, PAGE_SIZE);
 9481
 9482	page_start = start;
 9483	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
 9484	if (!pages)
 9485		return 0;
 9486
 9487	for (i = 0; i < page_count; i++) {
 9488		phys_addr_t addr = page_start + i * PAGE_SIZE;
 9489		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
 9490	}
 9491	vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
 9492	kfree(pages);
 9493
 9494	return (u64)(unsigned long)vaddr;
 9495}
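
/*
 * Worked example (illustrative only): with 4K pages, handing a 12M
 * boot-reserved region to map_pages() above yields
 * DIV_ROUND_UP(12M, 4K) = 3072 struct page pointers, which vmap()
 * then stitches into one contiguous kernel virtual mapping.
 */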
 9496
 9497/**
 9498 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
 9499 * @name: The name of the trace array to be looked up/created.
 9500 * @systems: A list of systems to create event directories for (NULL for all)
 9501 *
9502 * Returns a pointer to the trace array with the given name, or
9503 * NULL if it cannot be created.
 9504 *
 9505 * NOTE: This function increments the reference counter associated with the
 9506 * trace array returned. This makes sure it cannot be freed while in use.
 9507 * Use trace_array_put() once the trace array is no longer needed.
 9508 * If the trace_array is to be freed, trace_array_destroy() needs to
 9509 * be called after the trace_array_put(), or simply let user space delete
 9510 * it from the tracefs instances directory. But until the
9511 * trace_array_put() is called, user space cannot delete it.
 9512 *
 9513 */
 9514struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
 9515{
 9516	struct trace_array *tr;
 9517
 9518	guard(mutex)(&event_mutex);
 9519	guard(mutex)(&trace_types_lock);
 9520
 9521	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 9522		if (tr->name && strcmp(tr->name, name) == 0) {
 9523			tr->ref++;
 9524			return tr;
 9525		}
 9526	}
 9527
 9528	tr = trace_array_create_systems(name, systems, 0, 0);
 9529
 9530	if (IS_ERR(tr))
 9531		tr = NULL;
 9532	else
 9533		tr->ref++;
 9534
 9535	return tr;
 9536}
 9537EXPORT_SYMBOL_GPL(trace_array_get_by_name);
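
/*
 * Illustrative usage sketch (not part of this file; the instance name
 * "example" is made up): a module pairing the get, put and destroy
 * calls described above might do roughly:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("example", NULL);
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);
 */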
 9538
 9539static int __remove_instance(struct trace_array *tr)
 9540{
 9541	int i;
 9542
 9543	/* Reference counter for a newly created trace array = 1. */
 9544	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
 9545		return -EBUSY;
 9546
 9547	list_del(&tr->list);
 9548
 9549	/* Disable all the flags that were enabled coming in */
 9550	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
 9551		if ((1 << i) & ZEROED_TRACE_FLAGS)
 9552			set_tracer_flag(tr, 1 << i, 0);
 9553	}
 9554
 9555	if (printk_trace == tr)
 9556		update_printk_trace(&global_trace);
 9557
 9558	tracing_set_nop(tr);
 9559	clear_ftrace_function_probes(tr);
 9560	event_trace_del_tracer(tr);
 9561	ftrace_clear_pids(tr);
 9562	ftrace_destroy_function_files(tr);
 9563	tracefs_remove(tr->dir);
 9564	free_percpu(tr->last_func_repeats);
 9565	free_trace_buffers(tr);
 9566	clear_tracing_err_log(tr);
 9567
 9568	for (i = 0; i < tr->nr_topts; i++) {
 9569		kfree(tr->topts[i].topts);
 9570	}
 9571	kfree(tr->topts);
 9572
 9573	free_cpumask_var(tr->pipe_cpumask);
 9574	free_cpumask_var(tr->tracing_cpumask);
 9575	kfree_const(tr->system_names);
 9576	kfree(tr->name);
 9577	kfree(tr);
 9578
 9579	return 0;
 9580}
 9581
 9582int trace_array_destroy(struct trace_array *this_tr)
 9583{
 9584	struct trace_array *tr;
 9585
 9586	if (!this_tr)
 9587		return -EINVAL;
 9588
 9589	guard(mutex)(&event_mutex);
 9590	guard(mutex)(&trace_types_lock);
 9591
 9592
9593	/* Make sure the trace array exists before destroying it. */
 9594	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 9595		if (tr == this_tr)
 9596			return __remove_instance(tr);
 9597	}
 9598
 9599	return -ENODEV;
 9600}
 9601EXPORT_SYMBOL_GPL(trace_array_destroy);
 9602
 9603static int instance_rmdir(const char *name)
 9604{
 9605	struct trace_array *tr;
 9606
 9607	guard(mutex)(&event_mutex);
 9608	guard(mutex)(&trace_types_lock);
 9609
 9610	tr = trace_array_find(name);
 9611	if (!tr)
 9612		return -ENODEV;
 9613
 9614	return __remove_instance(tr);
 9615}
 9616
 9617static __init void create_trace_instances(struct dentry *d_tracer)
 9618{
 9619	struct trace_array *tr;
 9620
 9621	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
 9622							 instance_mkdir,
 9623							 instance_rmdir);
 9624	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
 9625		return;
 9626
 9627	guard(mutex)(&event_mutex);
 9628	guard(mutex)(&trace_types_lock);
 9629
 9630	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 9631		if (!tr->name)
 9632			continue;
 9633		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
 9634			     "Failed to create instance directory\n"))
 9635			return;
 9636	}
 9637}
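
/*
 * Once the "instances" directory is registered above, user space can
 * create and remove instances with plain mkdir/rmdir (paths shown for
 * illustration, assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	mkdir /sys/kernel/tracing/instances/foo    -> instance_mkdir("foo")
 *	rmdir /sys/kernel/tracing/instances/foo    -> instance_rmdir("foo")
 */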
 9638
 9639static void
 9640init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 9641{
 9642	int cpu;
 9643
 9644	trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
 9645			tr, &show_traces_fops);
 9646
 9647	trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
 9648			tr, &set_tracer_fops);
 9649
 9650	trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
 9651			  tr, &tracing_cpumask_fops);
 9652
 9653	trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
 9654			  tr, &tracing_iter_fops);
 9655
 9656	trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
 9657			  tr, &tracing_fops);
 9658
 9659	trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
 9660			  tr, &tracing_pipe_fops);
 9661
 9662	trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
 9663			  tr, &tracing_entries_fops);
 9664
 9665	trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
 9666			  tr, &tracing_total_entries_fops);
 9667
 9668	trace_create_file("free_buffer", 0200, d_tracer,
 9669			  tr, &tracing_free_buffer_fops);
 9670
 9671	trace_create_file("trace_marker", 0220, d_tracer,
 9672			  tr, &tracing_mark_fops);
 9673
 9674	tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
 9675
 9676	trace_create_file("trace_marker_raw", 0220, d_tracer,
 9677			  tr, &tracing_mark_raw_fops);
 9678
 9679	trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
 9680			  &trace_clock_fops);
 9681
 9682	trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
 9683			  tr, &rb_simple_fops);
 9684
 9685	trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
 9686			  &trace_time_stamp_mode_fops);
 9687
 9688	tr->buffer_percent = 50;
 9689
 9690	trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
 9691			tr, &buffer_percent_fops);
 9692
 9693	trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer,
 9694			  tr, &buffer_subbuf_size_fops);
 9695
 9696	create_trace_options_dir(tr);
 9697
 9698#ifdef CONFIG_TRACER_MAX_TRACE
 9699	trace_create_maxlat_file(tr, d_tracer);
 9700#endif
 9701
 9702	if (ftrace_create_function_files(tr, d_tracer))
 9703		MEM_FAIL(1, "Could not allocate function filter files");
 9704
 9705	if (tr->range_addr_start) {
 9706		trace_create_file("last_boot_info", TRACE_MODE_READ, d_tracer,
 9707				  tr, &last_boot_fops);
 9708#ifdef CONFIG_TRACER_SNAPSHOT
 9709	} else {
 9710		trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
 9711				  tr, &snapshot_fops);
 9712#endif
 9713	}
 9714
 9715	trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
 9716			  tr, &tracing_err_log_fops);
 9717
 9718	for_each_tracing_cpu(cpu)
 9719		tracing_init_tracefs_percpu(tr, cpu);
 9720
 9721	ftrace_init_tracefs(tr, d_tracer);
 9722}
 9723
9724static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
 9725{
 9726	struct vfsmount *mnt;
 9727	struct file_system_type *type;
 9728
 9729	/*
 9730	 * To maintain backward compatibility for tools that mount
 9731	 * debugfs to get to the tracing facility, tracefs is automatically
 9732	 * mounted to the debugfs/tracing directory.
 9733	 */
 9734	type = get_fs_type("tracefs");
 9735	if (!type)
 9736		return NULL;
 9737	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
 9738	put_filesystem(type);
 9739	if (IS_ERR(mnt))
 9740		return NULL;
 9741	mntget(mnt);
 9742
 9743	return mnt;
 9744}
 9745
 9746/**
 9747 * tracing_init_dentry - initialize top level trace array
 9748 *
 9749 * This is called when creating files or directories in the tracing
 9750 * directory. It is called via fs_initcall() by any of the boot up code
 9751 * and expects to return the dentry of the top level tracing directory.
 9752 */
 9753int tracing_init_dentry(void)
 9754{
 9755	struct trace_array *tr = &global_trace;
 9756
 9757	if (security_locked_down(LOCKDOWN_TRACEFS)) {
 9758		pr_warn("Tracing disabled due to lockdown\n");
 9759		return -EPERM;
 9760	}
 9761
9762	/* The top level trace array uses NULL as parent */
 9763	if (tr->dir)
 9764		return 0;
 9765
 9766	if (WARN_ON(!tracefs_initialized()))
 9767		return -ENODEV;
 9768
 9769	/*
 9770	 * As there may still be users that expect the tracing
 9771	 * files to exist in debugfs/tracing, we must automount
 9772	 * the tracefs file system there, so older tools still
 9773	 * work with the newer kernel.
 9774	 */
 9775	tr->dir = debugfs_create_automount("tracing", NULL,
 9776					   trace_automount, NULL);
 9777
 9778	return 0;
 9779}
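
/*
 * In practice the automount above means both of these paths reach the
 * same tracefs files (shown for illustration; the second assumes
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	/sys/kernel/tracing/
 *	/sys/kernel/debug/tracing/
 */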
 9780
 9781extern struct trace_eval_map *__start_ftrace_eval_maps[];
 9782extern struct trace_eval_map *__stop_ftrace_eval_maps[];
 9783
 9784static struct workqueue_struct *eval_map_wq __initdata;
 9785static struct work_struct eval_map_work __initdata;
 9786static struct work_struct tracerfs_init_work __initdata;
 9787
 9788static void __init eval_map_work_func(struct work_struct *work)
 9789{
 9790	int len;
 9791
 9792	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
 9793	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
 9794}
 9795
 9796static int __init trace_eval_init(void)
 9797{
 9798	INIT_WORK(&eval_map_work, eval_map_work_func);
 9799
 9800	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
 9801	if (!eval_map_wq) {
 9802		pr_err("Unable to allocate eval_map_wq\n");
 9803		/* Do work here */
 9804		eval_map_work_func(&eval_map_work);
 9805		return -ENOMEM;
 9806	}
 9807
 9808	queue_work(eval_map_wq, &eval_map_work);
 9809	return 0;
 9810}
 9811
 9812subsys_initcall(trace_eval_init);
 9813
 9814static int __init trace_eval_sync(void)
 9815{
 9816	/* Make sure the eval map updates are finished */
 9817	if (eval_map_wq)
 9818		destroy_workqueue(eval_map_wq);
 9819	return 0;
 9820}
 9821
 9822late_initcall_sync(trace_eval_sync);
 9823
 9824
 9825#ifdef CONFIG_MODULES
 9826static void trace_module_add_evals(struct module *mod)
 9827{
 9828	if (!mod->num_trace_evals)
 9829		return;
 9830
 9831	/*
 9832	 * Modules with bad taint do not have events created, do
 9833	 * not bother with enums either.
 9834	 */
 9835	if (trace_module_has_bad_taint(mod))
 9836		return;
 9837
 9838	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
 9839}
 9840
 9841#ifdef CONFIG_TRACE_EVAL_MAP_FILE
 9842static void trace_module_remove_evals(struct module *mod)
 9843{
 9844	union trace_eval_map_item *map;
 9845	union trace_eval_map_item **last = &trace_eval_maps;
 9846
 9847	if (!mod->num_trace_evals)
 9848		return;
 9849
 9850	guard(mutex)(&trace_eval_mutex);
 9851
 9852	map = trace_eval_maps;
 9853
 9854	while (map) {
 9855		if (map->head.mod == mod)
 9856			break;
 9857		map = trace_eval_jmp_to_tail(map);
 9858		last = &map->tail.next;
 9859		map = map->tail.next;
 9860	}
 9861	if (!map)
 9862		return;
 9863
 9864	*last = trace_eval_jmp_to_tail(map)->tail.next;
 9865	kfree(map);
 9866}
 9867#else
 9868static inline void trace_module_remove_evals(struct module *mod) { }
 9869#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
 9870
 9871static int trace_module_notify(struct notifier_block *self,
 9872			       unsigned long val, void *data)
 9873{
 9874	struct module *mod = data;
 9875
 9876	switch (val) {
 9877	case MODULE_STATE_COMING:
 9878		trace_module_add_evals(mod);
 9879		break;
 9880	case MODULE_STATE_GOING:
 9881		trace_module_remove_evals(mod);
 9882		break;
 9883	}
 9884
 9885	return NOTIFY_OK;
 9886}
 9887
 9888static struct notifier_block trace_module_nb = {
 9889	.notifier_call = trace_module_notify,
 9890	.priority = 0,
 9891};
 9892#endif /* CONFIG_MODULES */
 9893
 9894static __init void tracer_init_tracefs_work_func(struct work_struct *work)
 9895{
 9896
 9897	event_trace_init();
 9898
 9899	init_tracer_tracefs(&global_trace, NULL);
 9900	ftrace_init_tracefs_toplevel(&global_trace, NULL);
 9901
 9902	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
 9903			&global_trace, &tracing_thresh_fops);
 9904
 9905	trace_create_file("README", TRACE_MODE_READ, NULL,
 9906			NULL, &tracing_readme_fops);
 9907
 9908	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
 9909			NULL, &tracing_saved_cmdlines_fops);
 9910
 9911	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
 9912			  NULL, &tracing_saved_cmdlines_size_fops);
 9913
 9914	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
 9915			NULL, &tracing_saved_tgids_fops);
 9916
 9917	trace_create_eval_file(NULL);
 9918
 9919#ifdef CONFIG_MODULES
 9920	register_module_notifier(&trace_module_nb);
 9921#endif
 9922
 9923#ifdef CONFIG_DYNAMIC_FTRACE
 9924	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
 9925			NULL, &tracing_dyn_info_fops);
 9926#endif
 9927
 9928	create_trace_instances(NULL);
 9929
 9930	update_tracer_options(&global_trace);
 9931}
 9932
 9933static __init int tracer_init_tracefs(void)
 9934{
 9935	int ret;
 9936
 9937	trace_access_lock_init();
 9938
 9939	ret = tracing_init_dentry();
 9940	if (ret)
 9941		return 0;
 9942
 9943	if (eval_map_wq) {
 9944		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
 9945		queue_work(eval_map_wq, &tracerfs_init_work);
 9946	} else {
 9947		tracer_init_tracefs_work_func(NULL);
 9948	}
 9949
 9950	rv_init_interface();
 9951
 9952	return 0;
 9953}
 9954
 9955fs_initcall(tracer_init_tracefs);
 9956
 9957static int trace_die_panic_handler(struct notifier_block *self,
 9958				unsigned long ev, void *unused);
 9959
 9960static struct notifier_block trace_panic_notifier = {
 9961	.notifier_call = trace_die_panic_handler,
 9962	.priority = INT_MAX - 1,
 9963};
 9964
 9965static struct notifier_block trace_die_notifier = {
 9966	.notifier_call = trace_die_panic_handler,
 9967	.priority = INT_MAX - 1,
 9968};
 9969
 9970/*
 9971 * The idea is to execute the following die/panic callback early, in order
 9972 * to avoid showing irrelevant information in the trace (like other panic
 9973 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
 9974 * warnings get disabled (to prevent potential log flooding).
 9975 */
 9976static int trace_die_panic_handler(struct notifier_block *self,
 9977				unsigned long ev, void *unused)
 9978{
 9979	if (!ftrace_dump_on_oops_enabled())
 9980		return NOTIFY_DONE;
 9981
 9982	/* The die notifier requires DIE_OOPS to trigger */
 9983	if (self == &trace_die_notifier && ev != DIE_OOPS)
 9984		return NOTIFY_DONE;
 9985
 9986	ftrace_dump(DUMP_PARAM);
 9987
 9988	return NOTIFY_DONE;
 9989}
 9990
 9991/*
9992 * printk is set to a max of 1024, and we really don't need it that big.
 9993 * Nothing should be printing 1000 characters anyway.
 9994 */
 9995#define TRACE_MAX_PRINT		1000
 9996
 9997/*
9998 * Define KERN_TRACE here so that we have one place to modify
 9999 * it if we decide to change what log level the ftrace dump
10000 * should be at.
10001 */
10002#define KERN_TRACE		KERN_EMERG
10003
10004void
10005trace_printk_seq(struct trace_seq *s)
10006{
10007	/* Probably should print a warning here. */
10008	if (s->seq.len >= TRACE_MAX_PRINT)
10009		s->seq.len = TRACE_MAX_PRINT;
10010
10011	/*
10012	 * More paranoid code. Although the buffer size is set to
10013	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
10014	 * an extra layer of protection.
10015	 */
10016	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
10017		s->seq.len = s->seq.size - 1;
10018
10019	/* Should already be NUL terminated, but we are paranoid. */
10020	s->buffer[s->seq.len] = 0;
10021
10022	printk(KERN_TRACE "%s", s->buffer);
10023
10024	trace_seq_init(s);
10025}
10026
10027static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr)
10028{
10029	iter->tr = tr;
10030	iter->trace = iter->tr->current_trace;
10031	iter->cpu_file = RING_BUFFER_ALL_CPUS;
10032	iter->array_buffer = &tr->array_buffer;
10033
10034	if (iter->trace && iter->trace->open)
10035		iter->trace->open(iter);
10036
10037	/* Annotate start of buffers if we had overruns */
10038	if (ring_buffer_overruns(iter->array_buffer->buffer))
10039		iter->iter_flags |= TRACE_FILE_ANNOTATE;
10040
10041	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
10042	if (trace_clocks[iter->tr->clock_id].in_ns)
10043		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
10044
10045	/* Can not use kmalloc for iter.temp and iter.fmt */
10046	iter->temp = static_temp_buf;
10047	iter->temp_size = STATIC_TEMP_BUF_SIZE;
10048	iter->fmt = static_fmt_buf;
10049	iter->fmt_size = STATIC_FMT_BUF_SIZE;
10050}
10051
10052void trace_init_global_iter(struct trace_iterator *iter)
10053{
10054	trace_init_iter(iter, &global_trace);
10055}
10056
10057static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode)
10058{
10059	/* use static because iter can be a bit big for the stack */
10060	static struct trace_iterator iter;
10061	unsigned int old_userobj;
10062	unsigned long flags;
10063	int cnt = 0, cpu;
10064
10065	/*
10066	 * Always turn off tracing when we dump.
10067	 * We don't need to show trace output of what happens
10068	 * between multiple crashes.
10069	 *
10070	 * If the user does a sysrq-z, then they can re-enable
10071	 * tracing with echo 1 > tracing_on.
10072	 */
10073	tracer_tracing_off(tr);
10074
10075	local_irq_save(flags);
10076
10077	/* Simulate the iterator */
10078	trace_init_iter(&iter, tr);
10079
10080	for_each_tracing_cpu(cpu) {
10081		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10082	}
10083
10084	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
10085
10086	/* don't look at user memory in panic mode */
10087	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
10088
10089	if (dump_mode == DUMP_ORIG)
10090		iter.cpu_file = raw_smp_processor_id();
10091	else
10092		iter.cpu_file = RING_BUFFER_ALL_CPUS;
10093
10094	if (tr == &global_trace)
10095		printk(KERN_TRACE "Dumping ftrace buffer:\n");
10096	else
10097		printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name);
10098
10099	/* Did function tracer already get disabled? */
10100	if (ftrace_is_dead()) {
10101		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
10102		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
10103	}
10104
10105	/*
10106	 * We need to stop all tracing on all CPUs to read
10107	 * the next buffer. This is a bit expensive, but is
10108	 * not done often. We fill all that we can read,
10109	 * and then release the locks again.
10110	 */
10111
10112	while (!trace_empty(&iter)) {
10113
10114		if (!cnt)
10115			printk(KERN_TRACE "---------------------------------\n");
10116
10117		cnt++;
10118
10119		trace_iterator_reset(&iter);
10120		iter.iter_flags |= TRACE_FILE_LAT_FMT;
10121
10122		if (trace_find_next_entry_inc(&iter) != NULL) {
10123			int ret;
10124
10125			ret = print_trace_line(&iter);
10126			if (ret != TRACE_TYPE_NO_CONSUME)
10127				trace_consume(&iter);
10128		}
10129		touch_nmi_watchdog();
10130
10131		trace_printk_seq(&iter.seq);
10132	}
10133
10134	if (!cnt)
10135		printk(KERN_TRACE "   (ftrace buffer empty)\n");
10136	else
10137		printk(KERN_TRACE "---------------------------------\n");
10138
10139	tr->trace_flags |= old_userobj;
10140
10141	for_each_tracing_cpu(cpu) {
10142		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10143	}
10144	local_irq_restore(flags);
10145}
10146
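/*
 * Illustrative example of the comma separated list parsed below (the
 * instance names are hypothetical):
 *
 *	ftrace_dump_on_oops=1,foo,bar=orig_cpu
 *
 * dumps the global buffer on all CPUs, instance "foo" on all CPUs,
 * and instance "bar" only on the CPU that triggered the oops.
 */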
10147static void ftrace_dump_by_param(void)
10148{
10149	bool first_param = true;
10150	char dump_param[MAX_TRACER_SIZE];
10151	char *buf, *token, *inst_name;
10152	struct trace_array *tr;
10153
10154	strscpy(dump_param, ftrace_dump_on_oops, MAX_TRACER_SIZE);
10155	buf = dump_param;
10156
10157	while ((token = strsep(&buf, ",")) != NULL) {
10158		if (first_param) {
10159			first_param = false;
10160			if (!strcmp("0", token))
10161				continue;
10162			else if (!strcmp("1", token)) {
10163				ftrace_dump_one(&global_trace, DUMP_ALL);
10164				continue;
10165			}
10166			else if (!strcmp("2", token) ||
10167			  !strcmp("orig_cpu", token)) {
10168				ftrace_dump_one(&global_trace, DUMP_ORIG);
10169				continue;
10170			}
10171		}
10172
10173		inst_name = strsep(&token, "=");
10174		tr = trace_array_find(inst_name);
10175		if (!tr) {
10176			printk(KERN_TRACE "Instance %s not found\n", inst_name);
10177			continue;
10178		}
10179
10180		if (token && (!strcmp("2", token) ||
10181			  !strcmp("orig_cpu", token)))
10182			ftrace_dump_one(tr, DUMP_ORIG);
10183		else
10184			ftrace_dump_one(tr, DUMP_ALL);
10185	}
10186}
10187
10188void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
10189{
10190	static atomic_t dump_running;
10191
10192	/* Only allow one dump user at a time. */
10193	if (atomic_inc_return(&dump_running) != 1) {
10194		atomic_dec(&dump_running);
10195		return;
10196	}
10197
10198	switch (oops_dump_mode) {
10199	case DUMP_ALL:
10200		ftrace_dump_one(&global_trace, DUMP_ALL);
10201		break;
10202	case DUMP_ORIG:
10203		ftrace_dump_one(&global_trace, DUMP_ORIG);
10204		break;
10205	case DUMP_PARAM:
10206		ftrace_dump_by_param();
10207		break;
10208	case DUMP_NONE:
10209		break;
10210	default:
10211		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
10212		ftrace_dump_one(&global_trace, DUMP_ALL);
10213	}
10214
10215	atomic_dec(&dump_running);
10216}
10217EXPORT_SYMBOL_GPL(ftrace_dump);
10218
10219#define WRITE_BUFSIZE  4096
10220
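/*
 * Sketch of the line handling below (the command strings are made up):
 * a single write of
 *
 *	"cmd_one arg\n# a comment\ncmd_two arg\n"
 *
 * invokes createfn("cmd_one arg"), createfn("") for the comment line
 * (everything from '#' on is stripped), and createfn("cmd_two arg").
 */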
10221ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
10222				size_t count, loff_t *ppos,
10223				int (*createfn)(const char *))
10224{
10225	char *kbuf, *buf, *tmp;
10226	int ret = 0;
10227	size_t done = 0;
10228	size_t size;
10229
10230	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
10231	if (!kbuf)
10232		return -ENOMEM;
10233
10234	while (done < count) {
10235		size = count - done;
10236
10237		if (size >= WRITE_BUFSIZE)
10238			size = WRITE_BUFSIZE - 1;
10239
10240		if (copy_from_user(kbuf, buffer + done, size)) {
10241			ret = -EFAULT;
10242			goto out;
10243		}
10244		kbuf[size] = '\0';
10245		buf = kbuf;
10246		do {
10247			tmp = strchr(buf, '\n');
10248			if (tmp) {
10249				*tmp = '\0';
10250				size = tmp - buf + 1;
10251			} else {
10252				size = strlen(buf);
10253				if (done + size < count) {
10254					if (buf != kbuf)
10255						break;
10256					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
10257					pr_warn("Line length is too long: Should be less than %d\n",
10258						WRITE_BUFSIZE - 2);
10259					ret = -EINVAL;
10260					goto out;
10261				}
10262			}
10263			done += size;
10264
10265			/* Remove comments */
10266			tmp = strchr(buf, '#');
10267
10268			if (tmp)
10269				*tmp = '\0';
10270
10271			ret = createfn(buf);
10272			if (ret)
10273				goto out;
10274			buf += size;
10275
10276		} while (done < count);
10277	}
10278	ret = done;
10279
10280out:
10281	kfree(kbuf);
10282
10283	return ret;
10284}
10285
10286#ifdef CONFIG_TRACER_MAX_TRACE
10287__init static bool tr_needs_alloc_snapshot(const char *name)
10288{
10289	char *test;
10290	int len = strlen(name);
10291	bool ret;
10292
10293	if (!boot_snapshot_index)
10294		return false;
10295
10296	if (strncmp(name, boot_snapshot_info, len) == 0 &&
10297	    boot_snapshot_info[len] == '\t')
10298		return true;
10299
10300	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
10301	if (!test)
10302		return false;
10303
10304	sprintf(test, "\t%s\t", name);
10305	ret = strstr(boot_snapshot_info, test) == NULL;
10306	kfree(test);
10307	return ret;
10308}
10309
10310__init static void do_allocate_snapshot(const char *name)
10311{
10312	if (!tr_needs_alloc_snapshot(name))
10313		return;
10314
10315	/*
10316	 * When allocate_snapshot is set, the next call to
10317	 * allocate_trace_buffers() (called by trace_array_get_by_name())
10318	 * will allocate the snapshot buffer. That will also clear
10319	 * this flag.
10320	 */
10321	allocate_snapshot = true;
10322}
10323#else
10324static inline void do_allocate_snapshot(const char *name) { }
10325#endif
10326
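/*
 * Illustrative examples of the strings parsed below, as passed on the
 * kernel command line via trace_instance= (the names, addresses and
 * sizes are made up):
 *
 *	trace_instance=foo,sched:sched_switch
 *	trace_instance=bar^traceoff^traceprintk@0x1000000:2M
 *	trace_instance=boot_map@my_reserved_region
 *
 * "foo" is a normal instance with the sched_switch event enabled,
 * "bar" is mapped at a fixed physical address and starts with tracing
 * off and trace_printk() redirected to it, and "boot_map" uses a
 * region set up with the reserve_mem= boot parameter.
 */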
10327__init static void enable_instances(void)
10328{
10329	struct trace_array *tr;
10330	char *curr_str;
10331	char *name;
10332	char *str;
10333	char *tok;
10334
10335	/* A tab is always appended */
10336	boot_instance_info[boot_instance_index - 1] = '\0';
10337	str = boot_instance_info;
10338
10339	while ((curr_str = strsep(&str, "\t"))) {
10340		phys_addr_t start = 0;
10341		phys_addr_t size = 0;
10342		unsigned long addr = 0;
10343		bool traceprintk = false;
10344		bool traceoff = false;
10345		char *flag_delim;
10346		char *addr_delim;
10347
10348		tok = strsep(&curr_str, ",");
10349
10350		flag_delim = strchr(tok, '^');
10351		addr_delim = strchr(tok, '@');
10352
10353		if (addr_delim)
10354			*addr_delim++ = '\0';
10355
10356		if (flag_delim)
10357			*flag_delim++ = '\0';
10358
10359		name = tok;
10360
10361		if (flag_delim) {
10362			char *flag;
10363
10364			while ((flag = strsep(&flag_delim, "^"))) {
10365				if (strcmp(flag, "traceoff") == 0) {
10366					traceoff = true;
10367				} else if ((strcmp(flag, "printk") == 0) ||
10368					   (strcmp(flag, "traceprintk") == 0) ||
10369					   (strcmp(flag, "trace_printk") == 0)) {
10370					traceprintk = true;
10371				} else {
10372					pr_info("Tracing: Invalid instance flag '%s' for %s\n",
10373						flag, name);
10374				}
10375			}
10376		}
10377
10378		tok = addr_delim;
10379		if (tok && isdigit(*tok)) {
10380			start = memparse(tok, &tok);
10381			if (!start) {
10382				pr_warn("Tracing: Invalid boot instance address for %s\n",
10383					name);
10384				continue;
10385			}
10386			if (*tok != ':') {
10387				pr_warn("Tracing: No size specified for instance %s\n", name);
10388				continue;
10389			}
10390			tok++;
10391			size = memparse(tok, &tok);
10392			if (!size) {
10393				pr_warn("Tracing: Invalid boot instance size for %s\n",
10394					name);
10395				continue;
10396			}
10397		} else if (tok) {
10398			if (!reserve_mem_find_by_name(tok, &start, &size)) {
10399				start = 0;
10400				pr_warn("Failed to map boot instance %s to %s\n", name, tok);
10401				continue;
10402			}
10403		}
10404
10405		if (start) {
10406			addr = map_pages(start, size);
10407			if (addr) {
10408				pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n",
10409					name, &start, (unsigned long)size);
10410			} else {
10411				pr_warn("Tracing: Failed to map boot instance %s\n", name);
10412				continue;
10413			}
10414		} else {
10415		/* Only non-mapped buffers have snapshot buffers */
10416			if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
10417				do_allocate_snapshot(name);
10418		}
10419
10420		tr = trace_array_create_systems(name, NULL, addr, size);
10421		if (IS_ERR(tr)) {
10422			pr_warn("Tracing: Failed to create instance buffer %s\n", curr_str);
10423			continue;
10424		}
10425
10426		if (traceoff)
10427			tracer_tracing_off(tr);
10428
10429		if (traceprintk)
10430			update_printk_trace(tr);
10431
10432		/*
10433		 * If start is set, then this is a mapped buffer, and
10434		 * cannot be deleted by user space, so keep the reference
10435		 * to it.
10436		 */
10437		if (start) {
10438			tr->flags |= TRACE_ARRAY_FL_BOOT;
10439			tr->ref++;
10440		}
10441
10442		while ((tok = strsep(&curr_str, ","))) {
10443			early_enable_events(tr, tok, true);
10444		}
10445	}
10446}
10447
10448__init static int tracer_alloc_buffers(void)
10449{
10450	int ring_buf_size;
10451	int ret = -ENOMEM;
10452
10453
10454	if (security_locked_down(LOCKDOWN_TRACEFS)) {
10455		pr_warn("Tracing disabled due to lockdown\n");
10456		return -EPERM;
10457	}
10458
10459	/*
10460	 * Make sure we don't accidentally add more trace options
10461	 * than we have bits for.
10462	 */
10463	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
10464
10465	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
10466		goto out;
10467
10468	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
10469		goto out_free_buffer_mask;
10470
10471	/* Only allocate trace_printk buffers if a trace_printk exists */
10472	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
10473		/* Must be called before global_trace.buffer is allocated */
10474		trace_printk_init_buffers();
10475
10476	/* To save memory, keep the ring buffer size to its minimum */
10477	if (global_trace.ring_buffer_expanded)
10478		ring_buf_size = trace_buf_size;
10479	else
10480		ring_buf_size = 1;
10481
10482	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
10483	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
10484
10485	raw_spin_lock_init(&global_trace.start_lock);
10486
10487	/*
10488	 * The prepare callback allocates some memory for the ring buffer. We
10489	 * don't free the buffer if the CPU goes down. If we were to free
10490	 * the buffer, then the user would lose any trace that was in the
10491	 * buffer. The memory will be removed once the "instance" is removed.
10492	 */
10493	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10494				      "trace/RB:prepare", trace_rb_cpu_prepare,
10495				      NULL);
10496	if (ret < 0)
10497		goto out_free_cpumask;
10498	/* Used for event triggers */
10499	ret = -ENOMEM;
10500	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
10501	if (!temp_buffer)
10502		goto out_rm_hp_state;
10503
10504	if (trace_create_savedcmd() < 0)
10505		goto out_free_temp_buffer;
10506
10507	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
10508		goto out_free_savedcmd;
10509
10510	/* TODO: make the number of buffers hot pluggable with CPUS */
10511	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
10512		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
10513		goto out_free_pipe_cpumask;
10514	}
10515	if (global_trace.buffer_disabled)
10516		tracing_off();
10517
10518	if (trace_boot_clock) {
10519		ret = tracing_set_clock(&global_trace, trace_boot_clock);
10520		if (ret < 0)
10521			pr_warn("Trace clock %s not defined, going back to default\n",
10522				trace_boot_clock);
10523	}
10524
10525	/*
10526	 * register_tracer() might reference current_trace, so it
10527	 * needs to be set before we register anything. This is
10528	 * just a bootstrap of current_trace anyway.
10529	 */
10530	global_trace.current_trace = &nop_trace;
10531
10532	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10533#ifdef CONFIG_TRACER_MAX_TRACE
10534	spin_lock_init(&global_trace.snapshot_trigger_lock);
10535#endif
10536	ftrace_init_global_array_ops(&global_trace);
10537
10538	init_trace_flags_index(&global_trace);
10539
10540	register_tracer(&nop_trace);
10541
10542	/* Function tracing may start here (via kernel command line) */
10543	init_function_trace();
10544
10545	/* All seems OK, enable tracing */
10546	tracing_disabled = 0;
10547
10548	atomic_notifier_chain_register(&panic_notifier_list,
10549				       &trace_panic_notifier);
10550
10551	register_die_notifier(&trace_die_notifier);
10552
10553	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10554
10555	INIT_LIST_HEAD(&global_trace.systems);
10556	INIT_LIST_HEAD(&global_trace.events);
10557	INIT_LIST_HEAD(&global_trace.hist_vars);
10558	INIT_LIST_HEAD(&global_trace.err_log);
10559	list_add(&global_trace.list, &ftrace_trace_arrays);
10560
10561	apply_trace_boot_options();
10562
10563	register_snapshot_cmd();
10564
10565	return 0;
10566
10567out_free_pipe_cpumask:
10568	free_cpumask_var(global_trace.pipe_cpumask);
10569out_free_savedcmd:
10570	trace_free_saved_cmdlines_buffer();
10571out_free_temp_buffer:
10572	ring_buffer_free(temp_buffer);
10573out_rm_hp_state:
10574	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
10575out_free_cpumask:
10576	free_cpumask_var(global_trace.tracing_cpumask);
10577out_free_buffer_mask:
10578	free_cpumask_var(tracing_buffer_mask);
10579out:
10580	return ret;
10581}
10582
10583void __init ftrace_boot_snapshot(void)
10584{
10585#ifdef CONFIG_TRACER_MAX_TRACE
10586	struct trace_array *tr;
10587
10588	if (!snapshot_at_boot)
10589		return;
10590
10591	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
10592		if (!tr->allocated_snapshot)
10593			continue;
10594
10595		tracing_snapshot_instance(tr);
10596		trace_array_puts(tr, "** Boot snapshot taken **\n");
10597	}
10598#endif
10599}
10600
10601void __init early_trace_init(void)
10602{
10603	if (tracepoint_printk) {
10604		tracepoint_print_iter =
10605			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
10606		if (MEM_FAIL(!tracepoint_print_iter,
10607			     "Failed to allocate trace iterator\n"))
10608			tracepoint_printk = 0;
10609		else
10610			static_key_enable(&tracepoint_printk_key.key);
10611	}
10612	tracer_alloc_buffers();
10613
10614	init_events();
10615}
10616
10617void __init trace_init(void)
10618{
10619	trace_event_init();
10620
10621	if (boot_instance_index)
10622		enable_instances();
10623}
10624
10625__init static void clear_boot_tracer(void)
10626{
10627	/*
10628	 * The default bootup tracer buffer is in an init section.
10629	 * This function is called at late init. If we did not
10630	 * find the boot tracer, then clear it out, to prevent
10631	 * later registration from accessing the buffer that is
10632	 * about to be freed.
10633	 */
10634	if (!default_bootup_tracer)
10635		return;
10636
10637	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10638	       default_bootup_tracer);
10639	default_bootup_tracer = NULL;
10640}
10641
10642#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
10643__init static void tracing_set_default_clock(void)
10644{
10645	/* sched_clock_stable() is determined in late_initcall */
10646	if (!trace_boot_clock && !sched_clock_stable()) {
10647		if (security_locked_down(LOCKDOWN_TRACEFS)) {
10648			pr_warn("Can not set tracing clock due to lockdown\n");
10649			return;
10650		}
10651
10652		printk(KERN_WARNING
10653		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
10654		       "If you want to keep using the local clock, then add:\n"
10655		       "  \"trace_clock=local\"\n"
10656		       "on the kernel command line\n");
10657		tracing_set_clock(&global_trace, "global");
10658	}
10659}
10660#else
10661static inline void tracing_set_default_clock(void) { }
10662#endif
10663
10664__init static int late_trace_init(void)
10665{
10666	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10667		static_key_disable(&tracepoint_printk_key.key);
10668		tracepoint_printk = 0;
10669	}
10670
10671	tracing_set_default_clock();
10672	clear_boot_tracer();
10673	return 0;
10674}
10675
10676late_initcall_sync(late_trace_init);
v3.5.6
 
   1/*
   2 * ring buffer based function tracer
   3 *
   4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
   5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
   6 *
   7 * Originally taken from the RT patch by:
   8 *    Arnaldo Carvalho de Melo <acme@redhat.com>
   9 *
  10 * Based on code from the latency_tracer, that is:
  11 *  Copyright (C) 2004-2006 Ingo Molnar
  12 *  Copyright (C) 2004 William Lee Irwin III
  13 */
  14#include <linux/ring_buffer.h>
  15#include <generated/utsrelease.h>
  16#include <linux/stacktrace.h>
  17#include <linux/writeback.h>
  18#include <linux/kallsyms.h>
 
  19#include <linux/seq_file.h>
  20#include <linux/notifier.h>
  21#include <linux/irqflags.h>
  22#include <linux/debugfs.h>
 
  23#include <linux/pagemap.h>
  24#include <linux/hardirq.h>
  25#include <linux/linkage.h>
  26#include <linux/uaccess.h>
  27#include <linux/kprobes.h>
 
  28#include <linux/ftrace.h>
  29#include <linux/module.h>
  30#include <linux/percpu.h>
  31#include <linux/splice.h>
  32#include <linux/kdebug.h>
  33#include <linux/string.h>
 
  34#include <linux/rwsem.h>
  35#include <linux/slab.h>
  36#include <linux/ctype.h>
  37#include <linux/init.h>
 
  38#include <linux/poll.h>
  39#include <linux/nmi.h>
  40#include <linux/fs.h>
 
 
 
 
 
 
 
 
  41
  42#include "trace.h"
  43#include "trace_output.h"
  44
  45/*
  46 * On boot up, the ring buffer is set to the minimum size, so that
  47 * we do not waste memory on systems that are not using tracing.
  48 */
  49int ring_buffer_expanded;
  50
  51/*
  52 * We need to change this state when a selftest is running.
  53 * A selftest will lurk into the ring-buffer to count the
  54 * entries inserted during the selftest although some concurrent
  55 * insertions into the ring-buffer such as trace_printk could occurred
  56 * at the same time, giving false positive or negative results.
  57 */
  58static bool __read_mostly tracing_selftest_running;
  59
  60/*
  61 * If a tracer is running, we do not want to run SELFTEST.
 
  62 */
  63bool __read_mostly tracing_selftest_disabled;
  64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  65/* For tracers that don't implement custom flags */
  66static struct tracer_opt dummy_tracer_opt[] = {
  67	{ }
  68};
  69
  70static struct tracer_flags dummy_tracer_flags = {
  71	.val = 0,
  72	.opts = dummy_tracer_opt
  73};
  74
  75static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  76{
  77	return 0;
  78}
  79
  80/*
 
 
 
 
 
 
 
  81 * Kill all tracing for good (never come back).
  82 * It is initialized to 1 but will turn to zero if the initialization
  83 * of the tracer is successful. But that is the only place that sets
  84 * this back to zero.
  85 */
  86static int tracing_disabled = 1;
  87
  88DEFINE_PER_CPU(int, ftrace_cpu_disabled);
  89
  90cpumask_var_t __read_mostly	tracing_buffer_mask;
  91
  92/*
  93 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
  94 *
  95 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
  96 * is set, then ftrace_dump is called. This will output the contents
  97 * of the ftrace buffers to the console.  This is very useful for
  98 * capturing traces that lead to crashes and outputing it to a
  99 * serial console.
 100 *
 101 * It is default off, but you can enable it with either specifying
 102 * "ftrace_dump_on_oops" in the kernel command line, or setting
 103 * /proc/sys/kernel/ftrace_dump_on_oops
 104 * Set 1 if you want to dump buffers of all CPUs
 105 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 106 */
 
 
 
 
 
 107
 108enum ftrace_dump_mode ftrace_dump_on_oops;
 
 109
 110static int tracing_set_tracer(const char *buf);
 
 
 
 111
 112#define MAX_TRACER_SIZE		100
 113static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 114static char *default_bootup_tracer;
 115
 
 
 
 
 
 
 
 
 
 116static int __init set_cmdline_ftrace(char *str)
 117{
 118	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 119	default_bootup_tracer = bootup_tracer_buf;
 120	/* We are using ftrace early, expand it */
 121	ring_buffer_expanded = 1;
 122	return 1;
 123}
 124__setup("ftrace=", set_cmdline_ftrace);
 125
 
 
 
 
 
 
 
 
 126static int __init set_ftrace_dump_on_oops(char *str)
 127{
 128	if (*str++ != '=' || !*str) {
 129		ftrace_dump_on_oops = DUMP_ALL;
 
 
 
 
 
 
 130		return 1;
 131	}
 132
 133	if (!strcmp("orig_cpu", str)) {
 134		ftrace_dump_on_oops = DUMP_ORIG;
 135                return 1;
 136        }
 137
 138        return 0;
 139}
 140__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 141
 142unsigned long long ns2usecs(cycle_t nsec)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 143{
 144	nsec += 500;
 145	do_div(nsec, 1000);
 146	return nsec;
 147}
 148
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 149/*
 150 * The global_trace is the descriptor that holds the tracing
 151 * buffers for the live tracing. For each CPU, it contains
 152 * a link list of pages that will store trace entries. The
 153 * page descriptor of the pages in the memory is used to hold
 154 * the link list by linking the lru item in the page descriptor
 155 * to each of the pages in the buffer per CPU.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 156 *
 157 * For each active CPU there is a data field that holds the
 158 * pages for the buffer for that CPU. Each CPU has the same number
 159 * of pages allocated for its buffer.
 160 */
 161static struct trace_array	global_trace;
 
 
 
 
 
 
 162
 163static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
 
 164
 165int filter_current_check_discard(struct ring_buffer *buffer,
 166				 struct ftrace_event_call *call, void *rec,
 167				 struct ring_buffer_event *event)
 168{
 169	return filter_check_discard(call, rec, buffer, event);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 170}
 171EXPORT_SYMBOL_GPL(filter_current_check_discard);
 172
 173cycle_t ftrace_now(int cpu)
 174{
 175	u64 ts;
 176
 177	/* Early boot up does not have a buffer yet */
 178	if (!global_trace.buffer)
 179		return trace_clock_local();
 180
 181	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
 182	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
 183
 184	return ts;
 185}
 186
 187/*
 188 * The max_tr is used to snapshot the global_trace when a maximum
 189 * latency is reached. Some tracers will use this to store a maximum
 190 * trace while it continues examining live traces.
 191 *
 192 * The buffers for the max_tr are set up the same as the global_trace.
 193 * When a snapshot is taken, the link list of the max_tr is swapped
 194 * with the link list of the global_trace and the buffers are reset for
 195 * the global_trace so the tracing can continue.
 196 */
 197static struct trace_array	max_tr;
 198
 199static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 200
 201/* tracer_enabled is used to toggle activation of a tracer */
 202static int			tracer_enabled = 1;
 203
 204/**
 205 * tracing_is_enabled - return tracer_enabled status
 206 *
 207 * This function is used by other tracers to know the status
 208 * of the tracer_enabled flag.  Tracers may use this function
 209 * to know if it should enable their features when starting
 210 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 
 211 */
 212int tracing_is_enabled(void)
 213{
 214	return tracer_enabled;
 
 
 
 
 
 
 215}
 216
 217/*
 218 * trace_buf_size is the size in bytes that is allocated
 219 * for a buffer. Note, the number of bytes is always rounded
 220 * to page size.
 221 *
 222 * This number is purposely set to a low number of 16384.
 223 * If the dump on oops happens, it will be much appreciated
 224 * to not have to wait for all that output. Anyway this can be
 225 * boot time and run time configurable.
 226 */
 227#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
 228
 229static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 230
 231/* trace_types holds a link list of available tracers. */
 232static struct tracer		*trace_types __read_mostly;
 233
 234/* current_trace points to the tracer that is currently active */
 235static struct tracer		*current_trace __read_mostly;
 236
 237/*
 238 * trace_types_lock is used to protect the trace_types list.
 239 */
 240static DEFINE_MUTEX(trace_types_lock);
 241
 242/*
 243 * serialize the access of the ring buffer
 244 *
 245 * ring buffer serializes readers, but it is low level protection.
 246 * The validity of the events (which returns by ring_buffer_peek() ..etc)
 247 * are not protected by ring buffer.
 248 *
 249 * The content of events may become garbage if we allow other process consumes
 250 * these events concurrently:
 251 *   A) the page of the consumed events may become a normal page
 252 *      (not reader page) in ring buffer, and this page will be rewrited
 253 *      by events producer.
 254 *   B) The page of the consumed events may become a page for splice_read,
 255 *      and this page will be returned to system.
 256 *
 257 * These primitives allow multi process access to different cpu ring buffer
 258 * concurrently.
 259 *
 260 * These primitives don't distinguish read-only and read-consume access.
 261 * Multi read-only access are also serialized.
 262 */
 263
 264#ifdef CONFIG_SMP
 265static DECLARE_RWSEM(all_cpu_access_lock);
 266static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
 267
 268static inline void trace_access_lock(int cpu)
 269{
 270	if (cpu == TRACE_PIPE_ALL_CPU) {
 271		/* gain it for accessing the whole ring buffer. */
 272		down_write(&all_cpu_access_lock);
 273	} else {
 274		/* gain it for accessing a cpu ring buffer. */
 275
 276		/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
 277		down_read(&all_cpu_access_lock);
 278
 279		/* Secondly block other access to this @cpu ring buffer. */
 280		mutex_lock(&per_cpu(cpu_access_lock, cpu));
 281	}
 282}
 283
 284static inline void trace_access_unlock(int cpu)
 285{
 286	if (cpu == TRACE_PIPE_ALL_CPU) {
 287		up_write(&all_cpu_access_lock);
 288	} else {
 289		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
 290		up_read(&all_cpu_access_lock);
 291	}
 292}
 293
 294static inline void trace_access_lock_init(void)
 295{
 296	int cpu;
 297
 298	for_each_possible_cpu(cpu)
 299		mutex_init(&per_cpu(cpu_access_lock, cpu));
 300}
 301
 302#else
 303
 304static DEFINE_MUTEX(access_lock);
 305
 306static inline void trace_access_lock(int cpu)
 307{
 308	(void)cpu;
 309	mutex_lock(&access_lock);
 310}
 311
 312static inline void trace_access_unlock(int cpu)
 313{
 314	(void)cpu;
 315	mutex_unlock(&access_lock);
 316}
 317
 318static inline void trace_access_lock_init(void)
 319{
 320}
 321
 322#endif
 323
 324/* trace_wait is a waitqueue for tasks blocked on trace_poll */
 325static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 326
 327/* trace_flags holds trace_options default values */
 328unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 329	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
 330	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
 331	TRACE_ITER_IRQ_INFO;
 332
 333static int trace_stop_count;
 334static DEFINE_RAW_SPINLOCK(tracing_start_lock);
 335
 336static void wakeup_work_handler(struct work_struct *work)
 
 
 
 
 337{
 338	wake_up(&trace_wait);
 
 
 
 
 
 
 339}
 340
 341static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 342
 343/**
 344 * tracing_on - enable tracing buffers
 345 *
 346 * This function enables tracing buffers that may have been
 347 * disabled with tracing_off.
 348 */
 349void tracing_on(void)
 350{
 351	if (global_trace.buffer)
 352		ring_buffer_record_on(global_trace.buffer);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 353	/*
 354	 * This flag is only looked at when buffers haven't been
 355	 * allocated yet. We don't really care about the race
 356	 * between setting this flag and actually turning
 357	 * on the buffer.
 
 
 358	 */
 359	global_trace.buffer_disabled = 0;
 
 
 360}
 361EXPORT_SYMBOL_GPL(tracing_on);
 362
 363/**
 364 * tracing_off - turn off tracing buffers
 365 *
 366 * This function stops the tracing buffers from recording data.
 367 * It does not disable any overhead the tracers themselves may
 368 * be causing. This function simply causes all recording to
 369 * the ring buffers to fail.
 370 */
 371void tracing_off(void)
 372{
 373	if (global_trace.buffer)
 374		ring_buffer_record_off(global_trace.buffer);
 375	/*
 376	 * This flag is only looked at when buffers haven't been
 377	 * allocated yet. We don't really care about the race
 378	 * between setting this flag and actually turning
 379	 * on the buffer.
 380	 */
 381	global_trace.buffer_disabled = 1;
 382}
 383EXPORT_SYMBOL_GPL(tracing_off);
 384
 
 
 
 
 
 
 
 
 
 385/**
 386 * tracing_is_on - show state of ring buffers enabled
 
 
 
 387 */
 388int tracing_is_on(void)
 389{
 390	if (global_trace.buffer)
 391		return ring_buffer_record_is_on(global_trace.buffer);
 392	return !global_trace.buffer_disabled;
 393}
 394EXPORT_SYMBOL_GPL(tracing_is_on);
 395
 396/**
 397 * trace_wake_up - wake up tasks waiting for trace input
 398 *
 399 * Schedules a delayed work to wake up any task that is blocked on the
 400 * trace_wait queue. These is used with trace_poll for tasks polling the
 401 * trace.
 402 */
 403void trace_wake_up(void)
 404{
 405	const unsigned long delay = msecs_to_jiffies(2);
 406
 407	if (trace_flags & TRACE_ITER_BLOCK)
 408		return;
 409	schedule_delayed_work(&wakeup_work, delay);
 410}
 
 411
 412static int __init set_buf_size(char *str)
 413{
 414	unsigned long buf_size;
 415
 416	if (!str)
 417		return 0;
 418	buf_size = memparse(str, &str);
 419	/* nr_entries can not be zero */
 420	if (buf_size == 0)
 421		return 0;
 422	trace_buf_size = buf_size;
 
 
 423	return 1;
 424}
 425__setup("trace_buf_size=", set_buf_size);
 426
 427static int __init set_tracing_thresh(char *str)
 428{
 429	unsigned long threshhold;
 430	int ret;
 431
 432	if (!str)
 433		return 0;
 434	ret = strict_strtoul(str, 0, &threshhold);
 435	if (ret < 0)
 436		return 0;
 437	tracing_thresh = threshhold * 1000;
 438	return 1;
 439}
 440__setup("tracing_thresh=", set_tracing_thresh);
 441
 442unsigned long nsecs_to_usecs(unsigned long nsecs)
 443{
 444	return nsecs / 1000;
 445}
 446
 447/* These must match the bit postions in trace_iterator_flags */
 
 
 
 
 
 
 
 
 
 448static const char *trace_options[] = {
 449	"print-parent",
 450	"sym-offset",
 451	"sym-addr",
 452	"verbose",
 453	"raw",
 454	"hex",
 455	"bin",
 456	"block",
 457	"stacktrace",
 458	"trace_printk",
 459	"ftrace_preempt",
 460	"branch",
 461	"annotate",
 462	"userstacktrace",
 463	"sym-userobj",
 464	"printk-msg-only",
 465	"context-info",
 466	"latency-format",
 467	"sleep-time",
 468	"graph-time",
 469	"record-cmd",
 470	"overwrite",
 471	"disable_on_free",
 472	"irq-info",
 473	NULL
 474};
 475
 476static struct {
 477	u64 (*func)(void);
 478	const char *name;
 
 479} trace_clocks[] = {
 480	{ trace_clock_local,	"local" },
 481	{ trace_clock_global,	"global" },
 482	{ trace_clock_counter,	"counter" },
 483};
 484
 485int trace_clock_id;
 486
 487/*
 488 * trace_parser_get_init - gets the buffer for trace parser
 489 */
 490int trace_parser_get_init(struct trace_parser *parser, int size)
 491{
 492	memset(parser, 0, sizeof(*parser));
 493
 494	parser->buffer = kmalloc(size, GFP_KERNEL);
 495	if (!parser->buffer)
 496		return 1;
 497
 498	parser->size = size;
 499	return 0;
 500}
 501
 502/*
 503 * trace_parser_put - frees the buffer for trace parser
 504 */
 505void trace_parser_put(struct trace_parser *parser)
 506{
 507	kfree(parser->buffer);
 
 508}
 509
 510/*
511 * trace_get_user - reads the user input string separated by space
 512 * (matched by isspace(ch))
 513 *
 514 * For each string found the 'struct trace_parser' is updated,
 515 * and the function returns.
 516 *
 517 * Returns number of bytes read.
 518 *
 519 * See kernel/trace/trace.h for 'struct trace_parser' details.
 520 */
 521int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 522	size_t cnt, loff_t *ppos)
 523{
 524	char ch;
 525	size_t read = 0;
 526	ssize_t ret;
 527
 528	if (!*ppos)
 529		trace_parser_clear(parser);
 530
 531	ret = get_user(ch, ubuf++);
 532	if (ret)
 533		goto out;
 534
 535	read++;
 536	cnt--;
 537
538	/*
539	 * If the parser finished the previous token, skip any leading
540	 * spaces; otherwise continue reading without skipping spaces.
541	 */
 542	if (!parser->cont) {
 543		/* skip white space */
 544		while (cnt && isspace(ch)) {
 545			ret = get_user(ch, ubuf++);
 546			if (ret)
 547				goto out;
 548			read++;
 549			cnt--;
 550		}
 551
 552		/* only spaces were written */
 553		if (isspace(ch)) {
 554			*ppos += read;
 555			ret = read;
 556			goto out;
 557		}
 558
 559		parser->idx = 0;
 560	}
 561
 562	/* read the non-space input */
 563	while (cnt && !isspace(ch)) {
 564		if (parser->idx < parser->size - 1)
 565			parser->buffer[parser->idx++] = ch;
 566		else {
 567			ret = -EINVAL;
 568			goto out;
 569		}
 570		ret = get_user(ch, ubuf++);
 571		if (ret)
 572			goto out;
 573		read++;
 574		cnt--;
 575	}
 576
 577	/* We either got finished input or we have to wait for another call. */
 578	if (isspace(ch)) {
 579		parser->buffer[parser->idx] = 0;
 580		parser->cont = false;
 581	} else {
 582		parser->cont = true;
 583		parser->buffer[parser->idx++] = ch;
 584	}
 585
 586	*ppos += read;
 587	ret = read;
 588
 589out:
 590	return ret;
 591}
 592
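/*
 * Illustrative sketch (not part of the original file): a write handler for a
 * tracing control file can pull one whitespace-separated token per call with
 * trace_get_user(). Assumes it sits next to this file so trace.h is visible;
 * example_handle_token() is hypothetical.
 */
static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t ret;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	ret = trace_get_user(&parser, ubuf, cnt, ppos);
	if (ret >= 0 && trace_parser_loaded(&parser))
		example_handle_token(parser.buffer);	/* hypothetical consumer */

	trace_parser_put(&parser);
	return ret;
}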
 593ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 
 594{
 595	int len;
 596	int ret;
 597
 598	if (!cnt)
 599		return 0;
 600
 601	if (s->len <= s->readpos)
 602		return -EBUSY;
 603
 604	len = s->len - s->readpos;
 605	if (cnt > len)
 606		cnt = len;
 607	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
 608	if (ret == cnt)
 609		return -EFAULT;
 610
 611	cnt -= ret;
 612
 613	s->readpos += cnt;
 614	return cnt;
 615}
 616
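/*
 * Illustrative sketch (not part of the original file): a read() handler can
 * drain a previously filled trace_seq with trace_seq_to_user(), which
 * advances s->readpos so the next read continues where this one stopped.
 * Storing the trace_seq in filp->private_data is a hypothetical choice.
 */
static ssize_t example_read(struct file *filp, char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct trace_seq *s = filp->private_data;	/* assumed filled elsewhere */

	return trace_seq_to_user(s, ubuf, cnt);
}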
 617static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 618{
 619	int len;
 620
 621	if (s->len <= s->readpos)
 622		return -EBUSY;
 623
 624	len = s->len - s->readpos;
 625	if (cnt > len)
 626		cnt = len;
 627	memcpy(buf, s->buffer + s->readpos, cnt);
 628
 629	s->readpos += cnt;
 630	return cnt;
 631}
 632
 633/*
 634 * ftrace_max_lock is used to protect the swapping of buffers
 635 * when taking a max snapshot. The buffers themselves are
 636 * protected by per_cpu spinlocks. But the action of the swap
 637 * needs its own lock.
 638 *
 639 * This is defined as a arch_spinlock_t in order to help
 640 * with performance when lockdep debugging is enabled.
 641 *
 642 * It is also used in other places outside the update_max_tr
 643 * so it needs to be defined outside of the
 644 * CONFIG_TRACER_MAX_TRACE.
 645 */
 646static arch_spinlock_t ftrace_max_lock =
 647	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 648
 649unsigned long __read_mostly	tracing_thresh;
 650
 651#ifdef CONFIG_TRACER_MAX_TRACE
 652unsigned long __read_mostly	tracing_max_latency;
 653
 654/*
 655 * Copy the new maximum trace into the separate maximum-trace
 656 * structure. (this way the maximum trace is permanently saved,
 657 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 658 */
 659static void
 660__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 661{
 662	struct trace_array_cpu *data = tr->data[cpu];
 663	struct trace_array_cpu *max_data;
 664
 665	max_tr.cpu = cpu;
 666	max_tr.time_start = data->preempt_timestamp;
 667
 668	max_data = max_tr.data[cpu];
 669	max_data->saved_latency = tracing_max_latency;
 670	max_data->critical_start = data->critical_start;
 671	max_data->critical_end = data->critical_end;
 672
 673	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 674	max_data->pid = tsk->pid;
 675	max_data->uid = task_uid(tsk);
 676	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
 677	max_data->policy = tsk->policy;
 678	max_data->rt_priority = tsk->rt_priority;
 679
680	/* record this task's comm */
 681	tracing_record_cmdline(tsk);
 
 682}
 683
 684/**
 685 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 686 * @tr: tracer
 687 * @tsk: the task with the latency
 688 * @cpu: The cpu that initiated the trace.
 
 689 *
 690 * Flip the buffers between the @tr and the max_tr and record information
 691 * about which task was the cause of this latency.
 692 */
 693void
 694update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 695{
 696	struct ring_buffer *buf = tr->buffer;
 697
 698	if (trace_stop_count)
 699		return;
 700
 701	WARN_ON_ONCE(!irqs_disabled());
 702	if (!current_trace->use_max_tr) {
 703		WARN_ON_ONCE(1);
 704		return;
 705	}
 706	arch_spin_lock(&ftrace_max_lock);
 707
 708	tr->buffer = max_tr.buffer;
 709	max_tr.buffer = buf;
 710
 711	__update_max_tr(tr, tsk, cpu);
 712	arch_spin_unlock(&ftrace_max_lock);
 713}
 714
 715/**
 716 * update_max_tr_single - only copy one trace over, and reset the rest
717 * @tr: tracer
718 * @tsk: task with the latency
719 * @cpu: the cpu of the buffer to copy.
 720 *
 721 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 722 */
 723void
 724update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 725{
 726	int ret;
 727
 728	if (trace_stop_count)
 729		return;
 730
 731	WARN_ON_ONCE(!irqs_disabled());
 732	if (!current_trace->use_max_tr) {
 733		WARN_ON_ONCE(1);
 
 734		return;
 735	}
 736
 737	arch_spin_lock(&ftrace_max_lock);
 738
 739	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
 740
 741	if (ret == -EBUSY) {
 742		/*
 743		 * We failed to swap the buffer due to a commit taking
 744		 * place on this CPU. We fail to record, but we reset
 745		 * the max trace buffer (no one writes directly to it)
 746		 * and flag that it failed.
 
 747		 */
 748		trace_array_printk(&max_tr, _THIS_IP_,
 749			"Failed to swap buffers due to commit in progress\n");
 750	}
 751
 752	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 753
 754	__update_max_tr(tr, tsk, cpu);
 755	arch_spin_unlock(&ftrace_max_lock);
 756}
 
 757#endif /* CONFIG_TRACER_MAX_TRACE */
 758
 759/**
 760 * register_tracer - register a tracer with the ftrace system.
761 * @type: the plugin for the tracer
 762 *
 763 * Register a new plugin tracer.
 764 */
 765int register_tracer(struct tracer *type)
 766{
 767	struct tracer *t;
 768	int ret = 0;
 769
 770	if (!type->name) {
 771		pr_info("Tracer must have a name\n");
 772		return -1;
 773	}
 774
 775	if (strlen(type->name) >= MAX_TRACER_SIZE) {
 776		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
 777		return -1;
 778	}
 779
 780	mutex_lock(&trace_types_lock);
 781
 782	tracing_selftest_running = true;
 783
 784	for (t = trace_types; t; t = t->next) {
 785		if (strcmp(type->name, t->name) == 0) {
 786			/* already found */
 787			pr_info("Tracer %s already registered\n",
 788				type->name);
 789			ret = -1;
 790			goto out;
 791		}
 792	}
 793
 794	if (!type->set_flag)
 795		type->set_flag = &dummy_set_flag;
 796	if (!type->flags)
 797		type->flags = &dummy_tracer_flags;
 798	else
 799		if (!type->flags->opts)
 800			type->flags->opts = dummy_tracer_opt;
 801	if (!type->wait_pipe)
 802		type->wait_pipe = default_wait_pipe;
 803
 804
 805#ifdef CONFIG_FTRACE_STARTUP_TEST
 806	if (type->selftest && !tracing_selftest_disabled) {
 807		struct tracer *saved_tracer = current_trace;
 808		struct trace_array *tr = &global_trace;
 809
 810		/*
 811		 * Run a selftest on this tracer.
 812		 * Here we reset the trace buffer, and set the current
 813		 * tracer to be this tracer. The tracer can then run some
 814		 * internal tracing to verify that everything is in order.
 815		 * If we fail, we do not register this tracer.
 816		 */
 817		tracing_reset_online_cpus(tr);
 818
 819		current_trace = type;
 820
 821		/* If we expanded the buffers, make sure the max is expanded too */
 822		if (ring_buffer_expanded && type->use_max_tr)
 823			ring_buffer_resize(max_tr.buffer, trace_buf_size,
 824						RING_BUFFER_ALL_CPUS);
 825
 826		/* the test is responsible for initializing and enabling */
 827		pr_info("Testing tracer %s: ", type->name);
 828		ret = type->selftest(type, tr);
 829		/* the test is responsible for resetting too */
 830		current_trace = saved_tracer;
 831		if (ret) {
 832			printk(KERN_CONT "FAILED!\n");
 833			goto out;
 834		}
 835		/* Only reset on passing, to avoid touching corrupted buffers */
 836		tracing_reset_online_cpus(tr);
 837
 838		/* Shrink the max buffer again */
 839		if (ring_buffer_expanded && type->use_max_tr)
 840			ring_buffer_resize(max_tr.buffer, 1,
 841						RING_BUFFER_ALL_CPUS);
 842
 843		printk(KERN_CONT "PASSED\n");
 844	}
 845#endif
 846
 847	type->next = trace_types;
 848	trace_types = type;
 
 849
 850 out:
 851	tracing_selftest_running = false;
 852	mutex_unlock(&trace_types_lock);
 853
 854	if (ret || !default_bootup_tracer)
 855		goto out_unlock;
 856
 857	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
 858		goto out_unlock;
 859
 860	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
 861	/* Do we want this tracer to start on bootup? */
 862	tracing_set_tracer(type->name);
 863	default_bootup_tracer = NULL;
 864	/* disable other selftests, since this will break it. */
 865	tracing_selftest_disabled = 1;
 866#ifdef CONFIG_FTRACE_STARTUP_TEST
 867	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
 868	       type->name);
 869#endif
 870
 871 out_unlock:
 872	return ret;
 873}
 874
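/*
 * Illustrative sketch (not part of the original file): the smallest plugin
 * tracer only needs a name and an init callback before it is handed to
 * register_tracer(). The "example" tracer and its init function are
 * hypothetical.
 */
static int example_tracer_init(struct trace_array *tr)
{
	return 0;	/* nothing to set up in this sketch */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
};

static __init int example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
device_initcall(example_tracer_register);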
 875void unregister_tracer(struct tracer *type)
 876{
 877	struct tracer **t;
 878
 879	mutex_lock(&trace_types_lock);
 880	for (t = &trace_types; *t; t = &(*t)->next) {
 881		if (*t == type)
 882			goto found;
 883	}
 884	pr_info("Tracer %s not registered\n", type->name);
 885	goto out;
 886
 887 found:
 888	*t = (*t)->next;
 
 889
 890	if (type == current_trace && tracer_enabled) {
 891		tracer_enabled = 0;
 892		tracing_stop();
 893		if (current_trace->stop)
 894			current_trace->stop(&global_trace);
 895		current_trace = &nop_trace;
 896	}
 897out:
 898	mutex_unlock(&trace_types_lock);
 899}
 900
 901void tracing_reset(struct trace_array *tr, int cpu)
 902{
 903	struct ring_buffer *buffer = tr->buffer;
 904
 905	ring_buffer_record_disable(buffer);
 906
 907	/* Make sure all commits have finished */
 908	synchronize_sched();
 909	ring_buffer_reset_cpu(buffer, cpu);
 910
 911	ring_buffer_record_enable(buffer);
 912}
 913
 914void tracing_reset_online_cpus(struct trace_array *tr)
 915{
 916	struct ring_buffer *buffer = tr->buffer;
 917	int cpu;
 918
 919	ring_buffer_record_disable(buffer);
 920
 921	/* Make sure all commits have finished */
 922	synchronize_sched();
 923
 924	tr->time_start = ftrace_now(tr->cpu);
 925
 926	for_each_online_cpu(cpu)
 927		ring_buffer_reset_cpu(buffer, cpu);
 928
 929	ring_buffer_record_enable(buffer);
 930}
 931
 932void tracing_reset_current(int cpu)
 
 933{
 934	tracing_reset(&global_trace, cpu);
 935}
 
 936
 937void tracing_reset_current_online_cpus(void)
 938{
 939	tracing_reset_online_cpus(&global_trace);
 940}
 941
 942#define SAVED_CMDLINES 128
 943#define NO_CMDLINE_MAP UINT_MAX
 944static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 945static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 946static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 947static int cmdline_idx;
 948static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 949
 950/* temporary disable recording */
 951static atomic_t trace_record_cmdline_disabled __read_mostly;
 952
 953static void trace_init_cmdlines(void)
 954{
 955	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
 956	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
 957	cmdline_idx = 0;
 958}
 959
 960int is_tracing_stopped(void)
 961{
 962	return trace_stop_count;
 963}
 964
 965/**
 966 * ftrace_off_permanent - disable all ftrace code permanently
 967 *
968 * This should only be called when a serious anomaly has
969 * been detected.  This will turn off function tracing,
970 * ring buffers, and other tracing utilities. It takes no
 971 * locks and can be called from any context.
 972 */
 973void ftrace_off_permanent(void)
 974{
 975	tracing_disabled = 1;
 976	ftrace_stop();
 977	tracing_off_permanent();
 978}
 979
 980/**
 981 * tracing_start - quick start of the tracer
 982 *
 983 * If tracing is enabled but was stopped by tracing_stop,
 984 * this will start the tracer back up.
 985 */
 986void tracing_start(void)
 987{
 988	struct ring_buffer *buffer;
 989	unsigned long flags;
 990
 991	if (tracing_disabled)
 992		return;
 993
 994	raw_spin_lock_irqsave(&tracing_start_lock, flags);
 995	if (--trace_stop_count) {
 996		if (trace_stop_count < 0) {
 997			/* Someone screwed up their debugging */
 998			WARN_ON_ONCE(1);
 999			trace_stop_count = 0;
1000		}
1001		goto out;
1002	}
1003
1004	/* Prevent the buffers from switching */
1005	arch_spin_lock(&ftrace_max_lock);
1006
1007	buffer = global_trace.buffer;
1008	if (buffer)
1009		ring_buffer_record_enable(buffer);
1010
1011	buffer = max_tr.buffer;
 
1012	if (buffer)
1013		ring_buffer_record_enable(buffer);
 
1014
1015	arch_spin_unlock(&ftrace_max_lock);
1016
1017	ftrace_start();
1018 out:
1019	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
1020}
1021
1022/**
1023 * tracing_stop - quick stop of the tracer
1024 *
1025 * Light weight way to stop tracing. Use in conjunction with
1026 * tracing_start.
1027 */
1028void tracing_stop(void)
1029{
1030	struct ring_buffer *buffer;
1031	unsigned long flags;
1032
1033	ftrace_stop();
1034	raw_spin_lock_irqsave(&tracing_start_lock, flags);
1035	if (trace_stop_count++)
1036		goto out;
1037
1038	/* Prevent the buffers from switching */
1039	arch_spin_lock(&ftrace_max_lock);
1040
1041	buffer = global_trace.buffer;
1042	if (buffer)
1043		ring_buffer_record_disable(buffer);
1044
1045	buffer = max_tr.buffer;
 
1046	if (buffer)
1047		ring_buffer_record_disable(buffer);
 
1048
1049	arch_spin_unlock(&ftrace_max_lock);
1050
1051 out:
1052	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
1053}
1054
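/*
 * Illustrative sketch (not part of the original file): tracing_stop() and
 * tracing_start() nest via trace_stop_count, so a caller can bracket a
 * section where the buffers must stay still, e.g. while dumping them.
 * example_dump_buffers() is hypothetical.
 */
static void example_freeze_and_dump(void)
{
	tracing_stop();			/* recording off, count incremented */
	example_dump_buffers();		/* hypothetical: walk the frozen buffers */
	tracing_start();		/* recording resumes when the count hits zero */
}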
1055void trace_stop_cmdline_recording(void);
1056
1057static void trace_save_cmdline(struct task_struct *tsk)
1058{
1059	unsigned pid, idx;
1060
1061	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1062		return;
1063
1064	/*
1065	 * It's not the end of the world if we don't get
1066	 * the lock, but we also don't want to spin
1067	 * nor do we want to disable interrupts,
1068	 * so if we miss here, then better luck next time.
1069	 */
1070	if (!arch_spin_trylock(&trace_cmdline_lock))
1071		return;
1072
1073	idx = map_pid_to_cmdline[tsk->pid];
1074	if (idx == NO_CMDLINE_MAP) {
1075		idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1076
1077		/*
1078		 * Check whether the cmdline buffer at idx has a pid
1079		 * mapped. We are going to overwrite that entry so we
1080		 * need to clear the map_pid_to_cmdline. Otherwise we
1081		 * would read the new comm for the old pid.
1082		 */
1083		pid = map_cmdline_to_pid[idx];
1084		if (pid != NO_CMDLINE_MAP)
1085			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1086
1087		map_cmdline_to_pid[idx] = tsk->pid;
1088		map_pid_to_cmdline[tsk->pid] = idx;
1089
1090		cmdline_idx = idx;
1091	}
 
1092
1093	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1094
1095	arch_spin_unlock(&trace_cmdline_lock);
1096}
1097
1098void trace_find_cmdline(int pid, char comm[])
1099{
1100	unsigned map;
1101
1102	if (!pid) {
1103		strcpy(comm, "<idle>");
 
1104		return;
1105	}
1106
1107	if (WARN_ON_ONCE(pid < 0)) {
1108		strcpy(comm, "<XXX>");
1109		return;
1110	}
1111
1112	if (pid > PID_MAX_DEFAULT) {
1113		strcpy(comm, "<...>");
1114		return;
1115	}
1116
1117	preempt_disable();
1118	arch_spin_lock(&trace_cmdline_lock);
1119	map = map_pid_to_cmdline[pid];
1120	if (map != NO_CMDLINE_MAP)
1121		strcpy(comm, saved_cmdlines[map]);
1122	else
1123		strcpy(comm, "<...>");
1124
1125	arch_spin_unlock(&trace_cmdline_lock);
1126	preempt_enable();
1127}
1128
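/*
 * Illustrative sketch (not part of the original file): output code resolves
 * a pid recorded in an event back to a command name through this cmdline
 * cache. The destination buffer must hold at least TASK_COMM_LEN bytes.
 */
static void example_print_comm(struct trace_seq *s, int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	trace_seq_printf(s, "%16s-%-5d", comm, pid);
}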
1129void tracing_record_cmdline(struct task_struct *tsk)
1130{
1131	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
1132	    !tracing_is_on())
1133		return;
1134
1135	trace_save_cmdline(tsk);
1136}
1137
1138void
1139tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1140			     int pc)
1141{
1142	struct task_struct *tsk = current;
1143
1144	entry->preempt_count		= pc & 0xff;
1145	entry->pid			= (tsk) ? tsk->pid : 0;
1146	entry->padding			= 0;
1147	entry->flags =
1148#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1149		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1150#else
1151		TRACE_FLAG_IRQS_NOSUPPORT |
1152#endif
1153		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1154		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1155		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
1156}
1157EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1158
1159struct ring_buffer_event *
1160trace_buffer_lock_reserve(struct ring_buffer *buffer,
1161			  int type,
1162			  unsigned long len,
1163			  unsigned long flags, int pc)
1164{
1165	struct ring_buffer_event *event;
1166
1167	event = ring_buffer_lock_reserve(buffer, len);
1168	if (event != NULL) {
1169		struct trace_entry *ent = ring_buffer_event_data(event);
1170
1171		tracing_generic_entry_update(ent, flags, pc);
1172		ent->type = type;
1173	}
 
1174
1175	return event;
1176}
1177
1178static inline void
1179__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1180			     struct ring_buffer_event *event,
1181			     unsigned long flags, int pc,
1182			     int wake)
1183{
1184	ring_buffer_unlock_commit(buffer, event);
 
1185
1186	ftrace_trace_stack(buffer, flags, 6, pc);
1187	ftrace_trace_userstack(buffer, flags, pc);
 
1188
1189	if (wake)
1190		trace_wake_up();
1191}
1192
1193void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1194				struct ring_buffer_event *event,
1195				unsigned long flags, int pc)
1196{
1197	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1198}
1199
1200struct ring_buffer_event *
1201trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1202				  int type, unsigned long len,
1203				  unsigned long flags, int pc)
1204{
1205	*current_rb = global_trace.buffer;
1206	return trace_buffer_lock_reserve(*current_rb,
1207					 type, len, flags, pc);
1208}
1209EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1210
1211void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1212					struct ring_buffer_event *event,
1213					unsigned long flags, int pc)
1214{
1215	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1216}
1217EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1218
1219void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
1220				       struct ring_buffer_event *event,
1221				       unsigned long flags, int pc)
1222{
1223	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
1224}
1225EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
 
1226
1227void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1228					    struct ring_buffer_event *event,
1229					    unsigned long flags, int pc,
1230					    struct pt_regs *regs)
 
1231{
1232	ring_buffer_unlock_commit(buffer, event);
1233
1234	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1235	ftrace_trace_userstack(buffer, flags, pc);
1236}
1237EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
1238
1239void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1240					 struct ring_buffer_event *event)
1241{
1242	ring_buffer_discard_commit(buffer, event);
1243}
1244EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1245
1246void
1247trace_function(struct trace_array *tr,
1248	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
1249	       int pc)
1250{
1251	struct ftrace_event_call *call = &event_function;
1252	struct ring_buffer *buffer = tr->buffer;
1253	struct ring_buffer_event *event;
1254	struct ftrace_entry *entry;
1255
1256	/* If we are reading the ring buffer, don't trace */
1257	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1258		return;
1259
1260	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1261					  flags, pc);
1262	if (!event)
1263		return;
1264	entry	= ring_buffer_event_data(event);
1265	entry->ip			= ip;
1266	entry->parent_ip		= parent_ip;
1267
1268	if (!filter_check_discard(call, entry, buffer, event))
1269		ring_buffer_unlock_commit(buffer, event);
 
1270}
1271
1272void
1273ftrace(struct trace_array *tr, struct trace_array_cpu *data,
1274       unsigned long ip, unsigned long parent_ip, unsigned long flags,
1275       int pc)
1276{
1277	if (likely(!atomic_read(&data->disabled)))
1278		trace_function(tr, ip, parent_ip, flags, pc);
1279}
1280
1281#ifdef CONFIG_STACKTRACE
1282
1283#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1284struct ftrace_stack {
1285	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
1286};
1287
1288static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1289static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1290
1291static void __ftrace_trace_stack(struct ring_buffer *buffer,
1292				 unsigned long flags,
1293				 int skip, int pc, struct pt_regs *regs)
 
1294{
1295	struct ftrace_event_call *call = &event_kernel_stack;
1296	struct ring_buffer_event *event;
1297	struct stack_entry *entry;
1298	struct stack_trace trace;
1299	int use_stack;
1300	int size = FTRACE_STACK_ENTRIES;
1301
1302	trace.nr_entries	= 0;
1303	trace.skip		= skip;
1304
1305	/*
1306	 * Since events can happen in NMIs there's no safe way to
1307	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1308	 * or NMI comes in, it will just have to use the default
1309	 * FTRACE_STACK_SIZE.
1310	 */
1311	preempt_disable_notrace();
1312
1313	use_stack = ++__get_cpu_var(ftrace_stack_reserve);
1314	/*
1315	 * We don't need any atomic variables, just a barrier.
1316	 * If an interrupt comes in, we don't care, because it would
1317	 * have exited and put the counter back to what we want.
1318	 * We just need a barrier to keep gcc from moving things
1319	 * around.
1320	 */
1321	barrier();
1322	if (use_stack == 1) {
1323		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
1324		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;
1325
1326		if (regs)
1327			save_stack_trace_regs(regs, &trace);
1328		else
1329			save_stack_trace(&trace);
1330
1331		if (trace.nr_entries > size)
1332			size = trace.nr_entries;
1333	} else
1334		/* From now on, use_stack is a boolean */
1335		use_stack = 0;
 
1336
1337	size *= sizeof(unsigned long);
1338
1339	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1340					  sizeof(*entry) + size, flags, pc);
 
1341	if (!event)
1342		goto out;
1343	entry = ring_buffer_event_data(event);
1344
1345	memset(&entry->caller, 0, size);
1346
1347	if (use_stack)
1348		memcpy(&entry->caller, trace.entries,
1349		       trace.nr_entries * sizeof(unsigned long));
1350	else {
1351		trace.max_entries	= FTRACE_STACK_ENTRIES;
1352		trace.entries		= entry->caller;
1353		if (regs)
1354			save_stack_trace_regs(regs, &trace);
1355		else
1356			save_stack_trace(&trace);
1357	}
1358
1359	entry->size = trace.nr_entries;
1360
1361	if (!filter_check_discard(call, entry, buffer, event))
1362		ring_buffer_unlock_commit(buffer, event);
1363
1364 out:
1365	/* Again, don't let gcc optimize things here */
1366	barrier();
1367	__get_cpu_var(ftrace_stack_reserve)--;
1368	preempt_enable_notrace();
1369
1370}
1371
1372void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1373			     int skip, int pc, struct pt_regs *regs)
1374{
1375	if (!(trace_flags & TRACE_ITER_STACKTRACE))
1376		return;
1377
1378	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
1379}
1380
1381void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1382			int skip, int pc)
1383{
1384	if (!(trace_flags & TRACE_ITER_STACKTRACE))
1385		return;
1386
1387	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1388}
1389
1390void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1391		   int pc)
1392{
1393	__ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
1394}
1395
1396/**
1397 * trace_dump_stack - record a stack back trace in the trace buffer
 
1398 */
1399void trace_dump_stack(void)
1400{
1401	unsigned long flags;
1402
1403	if (tracing_disabled || tracing_selftest_running)
1404		return;
1405
1406	local_save_flags(flags);
1407
1408	/* skipping 3 traces seems to get us to the caller of this function */
1409	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
1410}
1411
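/*
 * Illustrative sketch (not part of the original file): any kernel code can
 * drop a kernel stack back trace into the ring buffer at a point of
 * interest. The "looks_suspicious" condition is hypothetical.
 */
static void example_checkpoint(bool looks_suspicious)
{
	if (looks_suspicious)
		trace_dump_stack();	/* records the caller's stack in the trace */
}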
1412static DEFINE_PER_CPU(int, user_stack_count);
1413
1414void
1415ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 
1416{
1417	struct ftrace_event_call *call = &event_user_stack;
1418	struct ring_buffer_event *event;
1419	struct userstack_entry *entry;
1420	struct stack_trace trace;
1421
1422	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1423		return;
1424
1425	/*
1426	 * NMIs can not handle page faults, even with fixups.
1427	 * Saving the user stack can (and often does) fault.
1428	 */
1429	if (unlikely(in_nmi()))
1430		return;
1431
1432	/*
1433	 * prevent recursion, since the user stack tracing may
1434	 * trigger other kernel events.
1435	 */
1436	preempt_disable();
1437	if (__this_cpu_read(user_stack_count))
1438		goto out;
1439
1440	__this_cpu_inc(user_stack_count);
1441
1442	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1443					  sizeof(*entry), flags, pc);
1444	if (!event)
1445		goto out_drop_count;
1446	entry	= ring_buffer_event_data(event);
1447
1448	entry->tgid		= current->tgid;
1449	memset(&entry->caller, 0, sizeof(entry->caller));
1450
1451	trace.nr_entries	= 0;
1452	trace.max_entries	= FTRACE_STACK_ENTRIES;
1453	trace.skip		= 0;
1454	trace.entries		= entry->caller;
1455
1456	save_stack_trace_user(&trace);
1457	if (!filter_check_discard(call, entry, buffer, event))
1458		ring_buffer_unlock_commit(buffer, event);
1459
1460 out_drop_count:
1461	__this_cpu_dec(user_stack_count);
1462 out:
1463	preempt_enable();
1464}
1465
1466#ifdef UNUSED
1467static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 
1468{
1469	ftrace_trace_userstack(tr, flags, preempt_count());
 
1470}
1471#endif /* UNUSED */
1472
1473#endif /* CONFIG_STACKTRACE */
1474
1475/* created for use with alloc_percpu */
1476struct trace_buffer_struct {
1477	char buffer[TRACE_BUF_SIZE];
 
1478};
1479
1480static struct trace_buffer_struct *trace_percpu_buffer;
1481static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1482static struct trace_buffer_struct *trace_percpu_irq_buffer;
1483static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1484
1485/*
1486 * The buffer used is dependent on the context. There is a per cpu
1487 * buffer for normal context, softirq context, hard irq context and
1488 * for NMI context. This allows for lockless recording.
1489 *
1490 * Note, if the buffers failed to be allocated, then this returns NULL
1491 */
1492static char *get_trace_buf(void)
1493{
1494	struct trace_buffer_struct *percpu_buffer;
1495	struct trace_buffer_struct *buffer;
1496
1497	/*
1498	 * If we have allocated per cpu buffers, then we do not
1499	 * need to do any locking.
1500	 */
1501	if (in_nmi())
1502		percpu_buffer = trace_percpu_nmi_buffer;
1503	else if (in_irq())
1504		percpu_buffer = trace_percpu_irq_buffer;
1505	else if (in_softirq())
1506		percpu_buffer = trace_percpu_sirq_buffer;
1507	else
1508		percpu_buffer = trace_percpu_buffer;
1509
1510	if (!percpu_buffer)
1511		return NULL;
1512
1513	buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
1514
1515	return buffer->buffer;
1516}
1517
1518static int alloc_percpu_trace_buffer(void)
1519{
1520	struct trace_buffer_struct *buffers;
1521	struct trace_buffer_struct *sirq_buffers;
1522	struct trace_buffer_struct *irq_buffers;
1523	struct trace_buffer_struct *nmi_buffers;
1524
1525	buffers = alloc_percpu(struct trace_buffer_struct);
1526	if (!buffers)
1527		goto err_warn;
1528
1529	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1530	if (!sirq_buffers)
1531		goto err_sirq;
1532
1533	irq_buffers = alloc_percpu(struct trace_buffer_struct);
1534	if (!irq_buffers)
1535		goto err_irq;
1536
1537	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1538	if (!nmi_buffers)
1539		goto err_nmi;
1540
1541	trace_percpu_buffer = buffers;
1542	trace_percpu_sirq_buffer = sirq_buffers;
1543	trace_percpu_irq_buffer = irq_buffers;
1544	trace_percpu_nmi_buffer = nmi_buffers;
1545
1546	return 0;
 
1547
1548 err_nmi:
1549	free_percpu(irq_buffers);
1550 err_irq:
1551	free_percpu(sirq_buffers);
1552 err_sirq:
1553	free_percpu(buffers);
1554 err_warn:
1555	WARN(1, "Could not allocate percpu trace_printk buffer");
1556	return -ENOMEM;
1557}
1558
1559void trace_printk_init_buffers(void)
1560{
1561	static int buffers_allocated;
1562
1563	if (buffers_allocated)
1564		return;
1565
1566	if (alloc_percpu_trace_buffer())
1567		return;
1568
1569	pr_info("ftrace: Allocated trace_printk buffers\n");
1570
1571	buffers_allocated = 1;
1572}
1573
1574/**
1575 * trace_vbprintk - write binary msg to tracing buffer
1576 *
1577 */
1578int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1579{
1580	struct ftrace_event_call *call = &event_bprint;
1581	struct ring_buffer_event *event;
1582	struct ring_buffer *buffer;
1583	struct trace_array *tr = &global_trace;
1584	struct bprint_entry *entry;
1585	unsigned long flags;
1586	char *tbuffer;
1587	int len = 0, size, pc;
1588
1589	if (unlikely(tracing_selftest_running || tracing_disabled))
1590		return 0;
1591
1592	/* Don't pollute graph traces with trace_vprintk internals */
1593	pause_graph_tracing();
1594
1595	pc = preempt_count();
1596	preempt_disable_notrace();
1597
1598	tbuffer = get_trace_buf();
1599	if (!tbuffer) {
1600		len = 0;
1601		goto out;
1602	}
1603
1604	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
1605
1606	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
1607		goto out;
1608
1609	local_save_flags(flags);
1610	size = sizeof(*entry) + sizeof(u32) * len;
1611	buffer = tr->buffer;
1612	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1613					  flags, pc);
 
1614	if (!event)
1615		goto out;
1616	entry = ring_buffer_event_data(event);
1617	entry->ip			= ip;
1618	entry->fmt			= fmt;
1619
1620	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
1621	if (!filter_check_discard(call, entry, buffer, event)) {
1622		ring_buffer_unlock_commit(buffer, event);
1623		ftrace_trace_stack(buffer, flags, 6, pc);
1624	}
1625
1626out:
1627	preempt_enable_notrace();
1628	unpause_graph_tracing();
1629
1630	return len;
1631}
1632EXPORT_SYMBOL_GPL(trace_vbprintk);
1633
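/*
 * Illustrative sketch (not part of the original file): kernel code normally
 * reaches trace_vbprintk() through the trace_printk() macro, which records
 * the format pointer plus binary arguments instead of a formatted string.
 * The probe function and its parameters are hypothetical.
 */
static void example_probe(int irq, unsigned long latency_ns)
{
	trace_printk("irq %d handled, latency %lu ns\n", irq, latency_ns);
}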
1634int trace_array_printk(struct trace_array *tr,
1635		       unsigned long ip, const char *fmt, ...)
1636{
1637	int ret;
1638	va_list ap;
1639
1640	if (!(trace_flags & TRACE_ITER_PRINTK))
1641		return 0;
1642
1643	va_start(ap, fmt);
1644	ret = trace_array_vprintk(tr, ip, fmt, ap);
1645	va_end(ap);
1646	return ret;
1647}
1648
1649int trace_array_vprintk(struct trace_array *tr,
1650			unsigned long ip, const char *fmt, va_list args)
1651{
1652	struct ftrace_event_call *call = &event_print;
1653	struct ring_buffer_event *event;
1654	struct ring_buffer *buffer;
1655	int len = 0, size, pc;
1656	struct print_entry *entry;
1657	unsigned long flags;
1658	char *tbuffer;
1659
1660	if (tracing_disabled || tracing_selftest_running)
1661		return 0;
1662
1663	/* Don't pollute graph traces with trace_vprintk internals */
1664	pause_graph_tracing();
1665
1666	pc = preempt_count();
1667	preempt_disable_notrace();
1668
1669
1670	tbuffer = get_trace_buf();
1671	if (!tbuffer) {
1672		len = 0;
1673		goto out;
1674	}
1675
1676	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
1677	if (len > TRACE_BUF_SIZE)
1678		goto out;
1679
1680	local_save_flags(flags);
1681	size = sizeof(*entry) + len + 1;
1682	buffer = tr->buffer;
1683	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
1684					  flags, pc);
1685	if (!event)
1686		goto out;
1687	entry = ring_buffer_event_data(event);
1688	entry->ip = ip;
1689
1690	memcpy(&entry->buf, tbuffer, len);
1691	entry->buf[len] = '\0';
1692	if (!filter_check_discard(call, entry, buffer, event)) {
1693		ring_buffer_unlock_commit(buffer, event);
1694		ftrace_trace_stack(buffer, flags, 6, pc);
1695	}
1696 out:
1697	preempt_enable_notrace();
1698	unpause_graph_tracing();
1699
1700	return len;
1701}
1702
1703int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1704{
1705	return trace_array_vprintk(&global_trace, ip, fmt, args);
1706}
1707EXPORT_SYMBOL_GPL(trace_vprintk);
1708
1709static void trace_iterator_increment(struct trace_iterator *iter)
1710{
1711	iter->idx++;
1712	if (iter->buffer_iter[iter->cpu])
1713		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1714}
1715
1716static struct trace_entry *
1717peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
1718		unsigned long *lost_events)
1719{
1720	struct ring_buffer_event *event;
1721	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
1722
1723	if (buf_iter)
1724		event = ring_buffer_iter_peek(buf_iter, ts);
1725	else
1726		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
1727					 lost_events);
 
1728
1729	if (event) {
1730		iter->ent_size = ring_buffer_event_length(event);
1731		return ring_buffer_event_data(event);
1732	}
1733	iter->ent_size = 0;
1734	return NULL;
1735}
1736
1737static struct trace_entry *
1738__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1739		  unsigned long *missing_events, u64 *ent_ts)
1740{
1741	struct ring_buffer *buffer = iter->tr->buffer;
1742	struct trace_entry *ent, *next = NULL;
1743	unsigned long lost_events = 0, next_lost = 0;
1744	int cpu_file = iter->cpu_file;
1745	u64 next_ts = 0, ts;
1746	int next_cpu = -1;
1747	int next_size = 0;
1748	int cpu;
1749
1750	/*
1751	 * If we are in a per_cpu trace file, don't bother iterating over
1752	 * all CPUs; peek at that CPU directly.
1753	 */
1754	if (cpu_file > TRACE_PIPE_ALL_CPU) {
1755		if (ring_buffer_empty_cpu(buffer, cpu_file))
1756			return NULL;
1757		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
1758		if (ent_cpu)
1759			*ent_cpu = cpu_file;
1760
1761		return ent;
1762	}
1763
1764	for_each_tracing_cpu(cpu) {
1765
1766		if (ring_buffer_empty_cpu(buffer, cpu))
1767			continue;
1768
1769		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
1770
1771		/*
1772		 * Pick the entry with the smallest timestamp:
1773		 */
1774		if (ent && (!next || ts < next_ts)) {
1775			next = ent;
1776			next_cpu = cpu;
1777			next_ts = ts;
1778			next_lost = lost_events;
1779			next_size = iter->ent_size;
1780		}
1781	}
1782
1783	iter->ent_size = next_size;
1784
1785	if (ent_cpu)
1786		*ent_cpu = next_cpu;
1787
1788	if (ent_ts)
1789		*ent_ts = next_ts;
1790
1791	if (missing_events)
1792		*missing_events = next_lost;
1793
1794	return next;
1795}
1796
1797/* Find the next real entry, without updating the iterator itself */
1798struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
1799					  int *ent_cpu, u64 *ent_ts)
1800{
1801	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
1802}
1803
1804/* Find the next real entry, and increment the iterator to the next entry */
1805void *trace_find_next_entry_inc(struct trace_iterator *iter)
1806{
1807	iter->ent = __find_next_entry(iter, &iter->cpu,
1808				      &iter->lost_events, &iter->ts);
1809
1810	if (iter->ent)
1811		trace_iterator_increment(iter);
1812
1813	return iter->ent ? iter : NULL;
1814}
1815
1816static void trace_consume(struct trace_iterator *iter)
1817{
1818	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
1819			    &iter->lost_events);
1820}
1821
1822static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1823{
1824	struct trace_iterator *iter = m->private;
1825	int i = (int)*pos;
1826	void *ent;
1827
1828	WARN_ON_ONCE(iter->leftover);
1829
1830	(*pos)++;
1831
1832	/* can't go backwards */
1833	if (iter->idx > i)
1834		return NULL;
1835
1836	if (iter->idx < 0)
1837		ent = trace_find_next_entry_inc(iter);
1838	else
1839		ent = iter;
1840
1841	while (ent && iter->idx < i)
1842		ent = trace_find_next_entry_inc(iter);
1843
1844	iter->pos = *pos;
1845
1846	return ent;
1847}
1848
1849void tracing_iter_reset(struct trace_iterator *iter, int cpu)
1850{
1851	struct trace_array *tr = iter->tr;
1852	struct ring_buffer_event *event;
1853	struct ring_buffer_iter *buf_iter;
1854	unsigned long entries = 0;
1855	u64 ts;
1856
1857	tr->data[cpu]->skipped_entries = 0;
1858
1859	if (!iter->buffer_iter[cpu])
 
1860		return;
1861
1862	buf_iter = iter->buffer_iter[cpu];
1863	ring_buffer_iter_reset(buf_iter);
1864
1865	/*
1866	 * We could have the case with the max latency tracers
1867	 * that a reset never took place on a cpu. This is evident
1868	 * by the timestamp being before the start of the buffer.
1869	 */
1870	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
1871		if (ts >= iter->tr->time_start)
1872			break;
1873		entries++;
1874		ring_buffer_read(buf_iter, NULL);
1875	}
1876
1877	tr->data[cpu]->skipped_entries = entries;
1878}
1879
1880/*
1881 * The current tracer is copied to avoid taking a global
1882 * lock all around.
1883 */
1884static void *s_start(struct seq_file *m, loff_t *pos)
1885{
1886	struct trace_iterator *iter = m->private;
1887	static struct tracer *old_tracer;
1888	int cpu_file = iter->cpu_file;
1889	void *p = NULL;
1890	loff_t l = 0;
1891	int cpu;
1892
1893	/* copy the tracer to avoid using a global lock all around */
1894	mutex_lock(&trace_types_lock);
1895	if (unlikely(old_tracer != current_trace && current_trace)) {
1896		old_tracer = current_trace;
1897		*iter->trace = *current_trace;
1898	}
1899	mutex_unlock(&trace_types_lock);
1900
1901	atomic_inc(&trace_record_cmdline_disabled);
1902
1903	if (*pos != iter->pos) {
1904		iter->ent = NULL;
1905		iter->cpu = 0;
1906		iter->idx = -1;
1907
1908		if (cpu_file == TRACE_PIPE_ALL_CPU) {
1909			for_each_tracing_cpu(cpu)
1910				tracing_iter_reset(iter, cpu);
1911		} else
1912			tracing_iter_reset(iter, cpu_file);
1913
1914		iter->leftover = 0;
1915		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1916			;
1917
1918	} else {
1919		/*
1920		 * If we overflowed the seq_file before, then we want
1921		 * to just reuse the trace_seq buffer again.
1922		 */
1923		if (iter->leftover)
1924			p = iter;
1925		else {
1926			l = *pos - 1;
1927			p = s_next(m, p, &l);
1928		}
1929	}
1930
1931	trace_event_read_lock();
1932	trace_access_lock(cpu_file);
1933	return p;
1934}
1935
1936static void s_stop(struct seq_file *m, void *p)
1937{
1938	struct trace_iterator *iter = m->private;
1939
1940	atomic_dec(&trace_record_cmdline_disabled);
1941	trace_access_unlock(iter->cpu_file);
1942	trace_event_read_unlock();
1943}
1944
1945static void
1946get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
 
1947{
1948	unsigned long count;
1949	int cpu;
1950
1951	*total = 0;
1952	*entries = 0;
1953
1954	for_each_tracing_cpu(cpu) {
1955		count = ring_buffer_entries_cpu(tr->buffer, cpu);
1956		/*
1957		 * If this buffer has skipped entries, then we hold all
1958		 * entries for the trace and we need to ignore the
1959		 * ones before the time stamp.
1960		 */
1961		if (tr->data[cpu]->skipped_entries) {
1962			count -= tr->data[cpu]->skipped_entries;
1963			/* total is the same as the entries */
1964			*total += count;
1965		} else
1966			*total += count +
1967				ring_buffer_overrun_cpu(tr->buffer, cpu);
1968		*entries += count;
1969	}
1970}
1971
1972static void print_lat_help_header(struct seq_file *m)
1973{
1974	seq_puts(m, "#                  _------=> CPU#            \n");
1975	seq_puts(m, "#                 / _-----=> irqs-off        \n");
1976	seq_puts(m, "#                | / _----=> need-resched    \n");
1977	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
1978	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
1979	seq_puts(m, "#                |||| /     delay             \n");
1980	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
1981	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
 
1982}
1983
1984static void print_event_info(struct trace_array *tr, struct seq_file *m)
1985{
1986	unsigned long total;
1987	unsigned long entries;
1988
1989	get_total_entries(tr, &total, &entries);
1990	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
1991		   entries, total, num_online_cpus());
1992	seq_puts(m, "#\n");
1993}
1994
1995static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
1996{
1997	print_event_info(tr, m);
1998	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
1999	seq_puts(m, "#              | |       |          |         |\n");
2000}
2001
2002static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
2003{
2004	print_event_info(tr, m);
2005	seq_puts(m, "#                              _-----=> irqs-off\n");
2006	seq_puts(m, "#                             / _----=> need-resched\n");
2007	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2008	seq_puts(m, "#                            || / _--=> preempt-depth\n");
2009	seq_puts(m, "#                            ||| /     delay\n");
2010	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2011	seq_puts(m, "#              | |       |   ||||       |         |\n");
2012}
2013
2014void
2015print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2016{
2017	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2018	struct trace_array *tr = iter->tr;
2019	struct trace_array_cpu *data = tr->data[tr->cpu];
2020	struct tracer *type = current_trace;
2021	unsigned long entries;
2022	unsigned long total;
2023	const char *name = "preemption";
2024
2025	if (type)
2026		name = type->name;
2027
2028	get_total_entries(tr, &total, &entries);
2029
2030	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2031		   name, UTS_RELEASE);
2032	seq_puts(m, "# -----------------------------------"
2033		 "---------------------------------\n");
2034	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2035		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2036		   nsecs_to_usecs(data->saved_latency),
2037		   entries,
2038		   total,
2039		   tr->cpu,
2040#if defined(CONFIG_PREEMPT_NONE)
2041		   "server",
2042#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2043		   "desktop",
2044#elif defined(CONFIG_PREEMPT)
2045		   "preempt",
2046#else
2047		   "unknown",
2048#endif
2049		   /* These are reserved for later use */
2050		   0, 0, 0, 0);
2051#ifdef CONFIG_SMP
2052	seq_printf(m, " #P:%d)\n", num_online_cpus());
2053#else
2054	seq_puts(m, ")\n");
2055#endif
2056	seq_puts(m, "#    -----------------\n");
2057	seq_printf(m, "#    | task: %.16s-%d "
2058		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2059		   data->comm, data->pid, data->uid, data->nice,
 
2060		   data->policy, data->rt_priority);
2061	seq_puts(m, "#    -----------------\n");
2062
2063	if (data->critical_start) {
2064		seq_puts(m, "#  => started at: ");
2065		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2066		trace_print_seq(m, &iter->seq);
2067		seq_puts(m, "\n#  => ended at:   ");
2068		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2069		trace_print_seq(m, &iter->seq);
2070		seq_puts(m, "\n#\n");
2071	}
2072
2073	seq_puts(m, "#\n");
2074}
2075
2076static void test_cpu_buff_start(struct trace_iterator *iter)
2077{
2078	struct trace_seq *s = &iter->seq;
 
2079
2080	if (!(trace_flags & TRACE_ITER_ANNOTATE))
2081		return;
2082
2083	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2084		return;
2085
2086	if (cpumask_test_cpu(iter->cpu, iter->started))
 
2087		return;
2088
2089	if (iter->tr->data[iter->cpu]->skipped_entries)
2090		return;
2091
2092	cpumask_set_cpu(iter->cpu, iter->started);
 
2093
2094	/* Don't print started cpu buffer for the first entry of the trace */
2095	if (iter->idx > 1)
2096		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2097				iter->cpu);
2098}
2099
2100static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2101{
 
2102	struct trace_seq *s = &iter->seq;
2103	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2104	struct trace_entry *entry;
2105	struct trace_event *event;
2106
2107	entry = iter->ent;
2108
2109	test_cpu_buff_start(iter);
2110
2111	event = ftrace_find_event(entry->type);
2112
2113	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2114		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2115			if (!trace_print_lat_context(iter))
2116				goto partial;
2117		} else {
2118			if (!trace_print_context(iter))
2119				goto partial;
2120		}
2121	}
2122
2123	if (event)
2124		return event->funcs->trace(iter, sym_flags, event);
 
2125
2126	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2127		goto partial;
2128
2129	return TRACE_TYPE_HANDLED;
2130partial:
2131	return TRACE_TYPE_PARTIAL_LINE;
2132}
2133
2134static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2135{
 
2136	struct trace_seq *s = &iter->seq;
2137	struct trace_entry *entry;
2138	struct trace_event *event;
2139
2140	entry = iter->ent;
2141
2142	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2143		if (!trace_seq_printf(s, "%d %d %llu ",
2144				      entry->pid, iter->cpu, iter->ts))
2145			goto partial;
2146	}
 
2147
2148	event = ftrace_find_event(entry->type);
2149	if (event)
2150		return event->funcs->raw(iter, 0, event);
2151
2152	if (!trace_seq_printf(s, "%d ?\n", entry->type))
2153		goto partial;
2154
2155	return TRACE_TYPE_HANDLED;
2156partial:
2157	return TRACE_TYPE_PARTIAL_LINE;
2158}
2159
2160static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2161{
 
2162	struct trace_seq *s = &iter->seq;
2163	unsigned char newline = '\n';
2164	struct trace_entry *entry;
2165	struct trace_event *event;
2166
2167	entry = iter->ent;
2168
2169	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2170		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2171		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2172		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2173	}
2174
2175	event = ftrace_find_event(entry->type);
2176	if (event) {
2177		enum print_line_t ret = event->funcs->hex(iter, 0, event);
2178		if (ret != TRACE_TYPE_HANDLED)
2179			return ret;
2180	}
2181
2182	SEQ_PUT_FIELD_RET(s, newline);
2183
2184	return TRACE_TYPE_HANDLED;
2185}
2186
2187static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2188{
 
2189	struct trace_seq *s = &iter->seq;
2190	struct trace_entry *entry;
2191	struct trace_event *event;
2192
2193	entry = iter->ent;
2194
2195	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2196		SEQ_PUT_FIELD_RET(s, entry->pid);
2197		SEQ_PUT_FIELD_RET(s, iter->cpu);
2198		SEQ_PUT_FIELD_RET(s, iter->ts);
2199	}
2200
2201	event = ftrace_find_event(entry->type);
2202	return event ? event->funcs->binary(iter, 0, event) :
2203		TRACE_TYPE_HANDLED;
2204}
2205
2206int trace_empty(struct trace_iterator *iter)
2207{
 
2208	int cpu;
2209
2210	/* If we are looking at one CPU buffer, only check that one */
2211	if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
2212		cpu = iter->cpu_file;
2213		if (iter->buffer_iter[cpu]) {
2214			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
 
2215				return 0;
2216		} else {
2217			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2218				return 0;
2219		}
2220		return 1;
2221	}
2222
2223	for_each_tracing_cpu(cpu) {
2224		if (iter->buffer_iter[cpu]) {
2225			if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
 
2226				return 0;
2227		} else {
2228			if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2229				return 0;
2230		}
2231	}
2232
2233	return 1;
2234}
2235
2236/*  Called with trace_event_read_lock() held. */
2237enum print_line_t print_trace_line(struct trace_iterator *iter)
2238{
2239	enum print_line_t ret;
2240
2241	if (iter->lost_events &&
2242	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2243				 iter->cpu, iter->lost_events))
2244		return TRACE_TYPE_PARTIAL_LINE;
2245
2246	if (iter->trace && iter->trace->print_line) {
2247		ret = iter->trace->print_line(iter);
2248		if (ret != TRACE_TYPE_UNHANDLED)
2249			return ret;
2250	}
2251
2252	if (iter->ent->type == TRACE_BPRINT &&
2253			trace_flags & TRACE_ITER_PRINTK &&
2254			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2255		return trace_print_bprintk_msg_only(iter);
2256
2257	if (iter->ent->type == TRACE_PRINT &&
2258			trace_flags & TRACE_ITER_PRINTK &&
2259			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2260		return trace_print_printk_msg_only(iter);
2261
2262	if (trace_flags & TRACE_ITER_BIN)
2263		return print_bin_fmt(iter);
2264
2265	if (trace_flags & TRACE_ITER_HEX)
2266		return print_hex_fmt(iter);
2267
2268	if (trace_flags & TRACE_ITER_RAW)
2269		return print_raw_fmt(iter);
2270
2271	return print_trace_fmt(iter);
2272}
2273
2274void trace_latency_header(struct seq_file *m)
2275{
2276	struct trace_iterator *iter = m->private;
 
2277
2278	/* print nothing if the buffers are empty */
2279	if (trace_empty(iter))
2280		return;
2281
2282	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2283		print_trace_header(m, iter);
2284
2285	if (!(trace_flags & TRACE_ITER_VERBOSE))
2286		print_lat_help_header(m);
2287}
2288
2289void trace_default_header(struct seq_file *m)
2290{
2291	struct trace_iterator *iter = m->private;
2292
2293	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2294		return;
2295
2296	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2297		/* print nothing if the buffers are empty */
2298		if (trace_empty(iter))
2299			return;
2300		print_trace_header(m, iter);
2301		if (!(trace_flags & TRACE_ITER_VERBOSE))
2302			print_lat_help_header(m);
2303	} else {
2304		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2305			if (trace_flags & TRACE_ITER_IRQ_INFO)
2306				print_func_help_header_irq(iter->tr, m);
 
2307			else
2308				print_func_help_header(iter->tr, m);
 
2309		}
2310	}
2311}
2312
2313static void test_ftrace_alive(struct seq_file *m)
2314{
2315	if (!ftrace_is_dead())
2316		return;
2317	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2318	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2319}
2320
2321static int s_show(struct seq_file *m, void *v)
2322{
2323	struct trace_iterator *iter = v;
2324	int ret;
2325
2326	if (iter->ent == NULL) {
2327		if (iter->tr) {
2328			seq_printf(m, "# tracer: %s\n", iter->trace->name);
2329			seq_puts(m, "#\n");
2330			test_ftrace_alive(m);
2331		}
2332		if (iter->trace && iter->trace->print_header)
2333			iter->trace->print_header(m);
2334		else
2335			trace_default_header(m);
2336
2337	} else if (iter->leftover) {
2338		/*
2339		 * If we filled the seq_file buffer earlier, we
2340		 * want to just show it now.
2341		 */
2342		ret = trace_print_seq(m, &iter->seq);
2343
2344		/* ret should this time be zero, but you never know */
2345		iter->leftover = ret;
2346
2347	} else {
2348		print_trace_line(iter);
2349		ret = trace_print_seq(m, &iter->seq);
2350		/*
2351		 * If we overflow the seq_file buffer, then it will
2352		 * ask us for this data again at start up.
2353		 * Use that instead.
2354		 *  ret is 0 if seq_file write succeeded.
2355		 *        -1 otherwise.
2356		 */
2357		iter->leftover = ret;
2358	}
2359
2360	return 0;
2361}
2362
2363static const struct seq_operations tracer_seq_ops = {
2364	.start		= s_start,
2365	.next		= s_next,
2366	.stop		= s_stop,
2367	.show		= s_show,
2368};
2369
2370static struct trace_iterator *
2371__tracing_open(struct inode *inode, struct file *file)
2372{
2373	long cpu_file = (long) inode->i_private;
2374	struct trace_iterator *iter;
2375	int cpu;
2376
2377	if (tracing_disabled)
2378		return ERR_PTR(-ENODEV);
2379
2380	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2381	if (!iter)
2382		return ERR_PTR(-ENOMEM);
2383
2384	/*
2385	 * We make a copy of the current tracer to avoid concurrent
2386 * changes to it while we are reading.
2387	 */
2388	mutex_lock(&trace_types_lock);
2389	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2390	if (!iter->trace)
2391		goto fail;
2392
2393	if (current_trace)
2394		*iter->trace = *current_trace;
2395
2396	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2397		goto fail;
2398
2399	if (current_trace && current_trace->print_max)
2400		iter->tr = &max_tr;
2401	else
2402		iter->tr = &global_trace;
2403	iter->pos = -1;
 
2404	mutex_init(&iter->mutex);
2405	iter->cpu_file = cpu_file;
2406
2407	/* Notify the tracer early; before we stop tracing. */
2408	if (iter->trace && iter->trace->open)
2409		iter->trace->open(iter);
2410
2411	/* Annotate start of buffers if we had overruns */
2412	if (ring_buffer_overruns(iter->tr->buffer))
2413		iter->iter_flags |= TRACE_FILE_ANNOTATE;
2414
2415	/* stop the trace while dumping */
2416	tracing_stop();
 
2417
2418	if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
2419		for_each_tracing_cpu(cpu) {
2420			iter->buffer_iter[cpu] =
2421				ring_buffer_read_prepare(iter->tr->buffer, cpu);
 
2422		}
2423		ring_buffer_read_prepare_sync();
2424		for_each_tracing_cpu(cpu) {
2425			ring_buffer_read_start(iter->buffer_iter[cpu]);
2426			tracing_iter_reset(iter, cpu);
2427		}
2428	} else {
2429		cpu = iter->cpu_file;
2430		iter->buffer_iter[cpu] =
2431			ring_buffer_read_prepare(iter->tr->buffer, cpu);
 
2432		ring_buffer_read_prepare_sync();
2433		ring_buffer_read_start(iter->buffer_iter[cpu]);
2434		tracing_iter_reset(iter, cpu);
2435	}
2436
2437	mutex_unlock(&trace_types_lock);
2438
2439	return iter;
2440
2441 fail:
2442	mutex_unlock(&trace_types_lock);
2443	kfree(iter->trace);
 
2444	seq_release_private(inode, file);
2445	return ERR_PTR(-ENOMEM);
2446}
2447
2448int tracing_open_generic(struct inode *inode, struct file *filp)
2449{
2450	if (tracing_disabled)
2451		return -ENODEV;
2452
2453	filp->private_data = inode->i_private;
2454	return 0;
2455}
2456
2457static int tracing_release(struct inode *inode, struct file *file)
2458{
 
2459	struct seq_file *m = file->private_data;
2460	struct trace_iterator *iter;
2461	int cpu;
2462
2463	if (!(file->f_mode & FMODE_READ))
 
2464		return 0;
 
2465
 
2466	iter = m->private;
 
2467
2468	mutex_lock(&trace_types_lock);
2469	for_each_tracing_cpu(cpu) {
2470		if (iter->buffer_iter[cpu])
2471			ring_buffer_read_finish(iter->buffer_iter[cpu]);
2472	}
2473
2474	if (iter->trace && iter->trace->close)
2475		iter->trace->close(iter);
2476
2477	/* reenable tracing if it was previously enabled */
2478	tracing_start();
 
 
 
 
2479	mutex_unlock(&trace_types_lock);
2480
2481	mutex_destroy(&iter->mutex);
2482	free_cpumask_var(iter->started);
2483	kfree(iter->trace);
2484	seq_release_private(inode, file);
2485	return 0;
2486}
2487
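/*
 * Open handler for the "trace" file: opening with O_TRUNC for write clears
 * the buffer (one CPU or all CPUs), while opening for read builds the full
 * seq_file iterator via __tracing_open().
 */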
2488static int tracing_open(struct inode *inode, struct file *file)
2489{
 
2490	struct trace_iterator *iter;
2491	int ret = 0;
 
 
 
 
2492
2493	/* If this file was open for write, then erase contents */
2494	if ((file->f_mode & FMODE_WRITE) &&
2495	    (file->f_flags & O_TRUNC)) {
2496		long cpu = (long) inode->i_private;
 
 
 
 
 
2497
2498		if (cpu == TRACE_PIPE_ALL_CPU)
2499			tracing_reset_online_cpus(&global_trace);
2500		else
2501			tracing_reset(&global_trace, cpu);
2502	}
2503
2504	if (file->f_mode & FMODE_READ) {
2505		iter = __tracing_open(inode, file);
2506		if (IS_ERR(iter))
2507			ret = PTR_ERR(iter);
2508		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2509			iter->iter_flags |= TRACE_FILE_LAT_FMT;
2510	}
 
 
 
 
2511	return ret;
2512}
2513
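/*
 * The t_*() callbacks below implement the seq_file interface for the
 * "available_tracers" file: they walk the global trace_types list under
 * trace_types_lock and print the registered tracer names, space separated,
 * on a single line.
 */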
2514static void *
2515t_next(struct seq_file *m, void *v, loff_t *pos)
2516{
 
2517	struct tracer *t = v;
2518
2519	(*pos)++;
2520
2521	if (t)
2522		t = t->next;
2523
2524	return t;
2525}
2526
2527static void *t_start(struct seq_file *m, loff_t *pos)
2528{
 
2529	struct tracer *t;
2530	loff_t l = 0;
2531
2532	mutex_lock(&trace_types_lock);
2533	for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
2534		;
 
 
2535
2536	return t;
2537}
2538
2539static void t_stop(struct seq_file *m, void *p)
2540{
2541	mutex_unlock(&trace_types_lock);
2542}
2543
2544static int t_show(struct seq_file *m, void *v)
2545{
2546	struct tracer *t = v;
2547
2548	if (!t)
2549		return 0;
2550
2551	seq_printf(m, "%s", t->name);
2552	if (t->next)
2553		seq_putc(m, ' ');
2554	else
2555		seq_putc(m, '\n');
2556
2557	return 0;
2558}
2559
2560static const struct seq_operations show_traces_seq_ops = {
2561	.start		= t_start,
2562	.next		= t_next,
2563	.stop		= t_stop,
2564	.show		= t_show,
2565};
2566
2567static int show_traces_open(struct inode *inode, struct file *file)
2568{
2569	if (tracing_disabled)
2570		return -ENODEV;
2571
2572	return seq_open(file, &show_traces_seq_ops);
 
2573}
2574
2575static ssize_t
2576tracing_write_stub(struct file *filp, const char __user *ubuf,
2577		   size_t count, loff_t *ppos)
2578{
2579	return count;
2580}
2581
2582static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
2583{
 
 
2584	if (file->f_mode & FMODE_READ)
2585		return seq_lseek(file, offset, origin);
2586	else
2587		return 0;
 
 
2588}
2589
2590static const struct file_operations tracing_fops = {
2591	.open		= tracing_open,
2592	.read		= seq_read,
 
 
2593	.write		= tracing_write_stub,
2594	.llseek		= tracing_seek,
2595	.release	= tracing_release,
2596};
2597
2598static const struct file_operations show_traces_fops = {
2599	.open		= show_traces_open,
2600	.read		= seq_read,
2601	.release	= seq_release,
2602	.llseek		= seq_lseek,
 
2603};
2604
2605/*
2606 * Only trace on a CPU if the bitmask is set:
2607 */
2608static cpumask_var_t tracing_cpumask;
2609
2610/*
2611 * The tracer itself will not take this lock, but still we want
2612 * to provide a consistent cpumask to user-space:
2613 */
2614static DEFINE_MUTEX(tracing_cpumask_update_lock);
2615
2616/*
2617 * Temporary storage for the character representation of the
2618 * CPU bitmask (and one more byte for the newline):
2619 */
2620static char mask_str[NR_CPUS + 1];
2621
2622static ssize_t
2623tracing_cpumask_read(struct file *filp, char __user *ubuf,
2624		     size_t count, loff_t *ppos)
2625{
 
 
2626	int len;
2627
2628	mutex_lock(&tracing_cpumask_update_lock);
 
 
 
 
2629
2630	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2631	if (count - len < 2) {
 
2632		count = -EINVAL;
2633		goto out_err;
2634	}
2635	len += sprintf(mask_str + len, "\n");
2636	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2637
2638out_err:
2639	mutex_unlock(&tracing_cpumask_update_lock);
2640
2641	return count;
2642}
2643
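/*
 * Writing to "tracing_cpumask" parses a cpumask from user space and, for
 * every CPU whose bit flips, pairs an update of the per-CPU disabled
 * counter with ring_buffer_record_{disable,enable}_cpu() so that only the
 * selected CPUs keep recording.
 */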
2644static ssize_t
2645tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2646		      size_t count, loff_t *ppos)
2647{
2648	int err, cpu;
2649	cpumask_var_t tracing_cpumask_new;
2650
2651	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2652		return -ENOMEM;
2653
2654	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2655	if (err)
2656		goto err_unlock;
2657
2658	mutex_lock(&tracing_cpumask_update_lock);
2659
2660	local_irq_disable();
2661	arch_spin_lock(&ftrace_max_lock);
2662	for_each_tracing_cpu(cpu) {
2663		/*
2664		 * Increase/decrease the disabled counter if we are
2665		 * about to flip a bit in the cpumask:
2666		 */
2667		if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2668				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2669			atomic_inc(&global_trace.data[cpu]->disabled);
2670			ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
 
 
 
2671		}
2672		if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2673				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2674			atomic_dec(&global_trace.data[cpu]->disabled);
2675			ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
 
 
 
2676		}
2677	}
2678	arch_spin_unlock(&ftrace_max_lock);
2679	local_irq_enable();
2680
2681	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
2682
2683	mutex_unlock(&tracing_cpumask_update_lock);
2684	free_cpumask_var(tracing_cpumask_new);
2685
2686	return count;
2687
2688err_unlock:
2689	free_cpumask_var(tracing_cpumask_new);
2690
2691	return err;
2692}
2693
2694static const struct file_operations tracing_cpumask_fops = {
2695	.open		= tracing_open_generic,
2696	.read		= tracing_cpumask_read,
2697	.write		= tracing_cpumask_write,
 
2698	.llseek		= generic_file_llseek,
2699};
2700
2701static int tracing_trace_options_show(struct seq_file *m, void *v)
2702{
2703	struct tracer_opt *trace_opts;
 
2704	u32 tracer_flags;
2705	int i;
2706
2707	mutex_lock(&trace_types_lock);
2708	tracer_flags = current_trace->flags->val;
2709	trace_opts = current_trace->flags->opts;
 
2710
2711	for (i = 0; trace_options[i]; i++) {
2712		if (trace_flags & (1 << i))
2713			seq_printf(m, "%s\n", trace_options[i]);
2714		else
2715			seq_printf(m, "no%s\n", trace_options[i]);
2716	}
2717
2718	for (i = 0; trace_opts[i].name; i++) {
2719		if (tracer_flags & trace_opts[i].bit)
2720			seq_printf(m, "%s\n", trace_opts[i].name);
2721		else
2722			seq_printf(m, "no%s\n", trace_opts[i].name);
2723	}
2724	mutex_unlock(&trace_types_lock);
2725
2726	return 0;
2727}
2728
2729static int __set_tracer_option(struct tracer *trace,
2730			       struct tracer_flags *tracer_flags,
2731			       struct tracer_opt *opts, int neg)
2732{
 
2733	int ret;
2734
2735	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
2736	if (ret)
2737		return ret;
2738
2739	if (neg)
2740		tracer_flags->val &= ~opts->bit;
2741	else
2742		tracer_flags->val |= opts->bit;
2743	return 0;
2744}
2745
2746/* Try to assign a tracer specific option */
2747static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2748{
 
2749	struct tracer_flags *tracer_flags = trace->flags;
2750	struct tracer_opt *opts = NULL;
2751	int i;
2752
2753	for (i = 0; tracer_flags->opts[i].name; i++) {
2754		opts = &tracer_flags->opts[i];
2755
2756		if (strcmp(cmp, opts->name) == 0)
2757			return __set_tracer_option(trace, trace->flags,
2758						   opts, neg);
2759	}
2760
2761	return -EINVAL;
2762}
2763
2764static void set_tracer_flags(unsigned int mask, int enabled)
2765{
2766	/* do nothing if flag is already set */
2767	if (!!(trace_flags & mask) == !!enabled)
2768		return;
2769
2770	if (enabled)
2771		trace_flags |= mask;
2772	else
2773		trace_flags &= ~mask;
2774
2775	if (mask == TRACE_ITER_RECORD_CMD)
2776		trace_event_enable_cmd_record(enabled);
2777
2778	if (mask == TRACE_ITER_OVERWRITE)
2779		ring_buffer_change_overwrite(global_trace.buffer, enabled);
2780}
2781
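/*
 * Writing "option" or "nooption" to "trace_options" first tries the generic
 * trace_options[] flags via set_tracer_flags(); if the name is not found
 * there, it falls back to the current tracer's private options through
 * set_tracer_option().
 */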
2782static ssize_t
2783tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2784			size_t cnt, loff_t *ppos)
2785{
 
 
2786	char buf[64];
2787	char *cmp;
2788	int neg = 0;
2789	int ret;
2790	int i;
2791
2792	if (cnt >= sizeof(buf))
2793		return -EINVAL;
2794
2795	if (copy_from_user(&buf, ubuf, cnt))
2796		return -EFAULT;
2797
2798	buf[cnt] = 0;
2799	cmp = strstrip(buf);
2800
2801	if (strncmp(cmp, "no", 2) == 0) {
2802		neg = 1;
2803		cmp += 2;
2804	}
2805
2806	for (i = 0; trace_options[i]; i++) {
2807		if (strcmp(cmp, trace_options[i]) == 0) {
2808			set_tracer_flags(1 << i, !neg);
2809			break;
2810		}
2811	}
2812
2813	/* If no option could be set, test the specific tracer options */
2814	if (!trace_options[i]) {
2815		mutex_lock(&trace_types_lock);
2816		ret = set_tracer_option(current_trace, cmp, neg);
2817		mutex_unlock(&trace_types_lock);
2818		if (ret)
2819			return ret;
2820	}
2821
2822	*ppos += cnt;
2823
2824	return cnt;
2825}
2826
2827static int tracing_trace_options_open(struct inode *inode, struct file *file)
2828{
2829	if (tracing_disabled)
2830		return -ENODEV;
2831	return single_open(file, tracing_trace_options_show, NULL);
2832}
2833
2834static const struct file_operations tracing_iter_fops = {
2835	.open		= tracing_trace_options_open,
2836	.read		= seq_read,
2837	.llseek		= seq_lseek,
2838	.release	= single_release,
2839	.write		= tracing_trace_options_write,
2840};
2841
2842static const char readme_msg[] =
2843	"tracing mini-HOWTO:\n\n"
2844	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
2845	"# cat /sys/kernel/debug/tracing/available_tracers\n"
2846	"wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
2847	"# cat /sys/kernel/debug/tracing/current_tracer\n"
2848	"nop\n"
2849	"# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
2850	"# cat /sys/kernel/debug/tracing/current_tracer\n"
2851	"wakeup\n"
2852	"# cat /sys/kernel/debug/tracing/trace_options\n"
2853	"noprint-parent nosym-offset nosym-addr noverbose\n"
2854	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
2855	"# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
2856	"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
2857	"# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
2858;
2859
2860static ssize_t
2861tracing_readme_read(struct file *filp, char __user *ubuf,
2862		       size_t cnt, loff_t *ppos)
2863{
2864	return simple_read_from_buffer(ubuf, cnt, ppos,
2865					readme_msg, strlen(readme_msg));
2866}
2867
2868static const struct file_operations tracing_readme_fops = {
2869	.open		= tracing_open_generic,
2870	.read		= tracing_readme_read,
2871	.llseek		= generic_file_llseek,
2872};
2873
2874static ssize_t
2875tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
2876				size_t cnt, loff_t *ppos)
2877{
2878	char *buf_comm;
2879	char *file_buf;
2880	char *buf;
2881	int len = 0;
2882	int pid;
2883	int i;
 
 
 
 
2884
2885	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
2886	if (!file_buf)
2887		return -ENOMEM;
2888
2889	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
2890	if (!buf_comm) {
2891		kfree(file_buf);
2892		return -ENOMEM;
2893	}
2894
2895	buf = file_buf;
 
 
 
2896
2897	for (i = 0; i < SAVED_CMDLINES; i++) {
2898		int r;
2899
2900		pid = map_cmdline_to_pid[i];
2901		if (pid == -1 || pid == NO_CMDLINE_MAP)
2902			continue;
2903
2904		trace_find_cmdline(pid, buf_comm);
2905		r = sprintf(buf, "%d %s\n", pid, buf_comm);
2906		buf += r;
2907		len += r;
2908	}
2909
2910	len = simple_read_from_buffer(ubuf, cnt, ppos,
2911				      file_buf, len);
2912
2913	kfree(file_buf);
2914	kfree(buf_comm);
 
2915
2916	return len;
2917}
2918
2919static const struct file_operations tracing_saved_cmdlines_fops = {
2920	.open		= tracing_open_generic,
2921	.read		= tracing_saved_cmdlines_read,
2922	.llseek		= generic_file_llseek,
2923};
2924
2925static ssize_t
2926tracing_ctrl_read(struct file *filp, char __user *ubuf,
2927		  size_t cnt, loff_t *ppos)
2928{
2929	char buf[64];
2930	int r;
 
 
 
2931
2932	r = sprintf(buf, "%u\n", tracer_enabled);
2933	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2934}
2935
2936static ssize_t
2937tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2938		   size_t cnt, loff_t *ppos)
2939{
2940	struct trace_array *tr = filp->private_data;
2941	unsigned long val;
2942	int ret;
2943
2944	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
2945	if (ret)
2946		return ret;
 
 
 
 
 
2947
2948	val = !!val;
2949
2950	mutex_lock(&trace_types_lock);
2951	if (tracer_enabled ^ val) {
2952
2953		/* Only need to warn if this is used to change the state */
2954		WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
2955
2956		if (val) {
2957			tracer_enabled = 1;
2958			if (current_trace->start)
2959				current_trace->start(tr);
2960			tracing_start();
2961		} else {
2962			tracer_enabled = 0;
2963			tracing_stop();
2964			if (current_trace->stop)
2965				current_trace->stop(tr);
2966		}
2967	}
2968	mutex_unlock(&trace_types_lock);
2969
2970	*ppos += cnt;
2971
2972	return cnt;
2973}
2974
2975static ssize_t
2976tracing_set_trace_read(struct file *filp, char __user *ubuf,
2977		       size_t cnt, loff_t *ppos)
2978{
 
2979	char buf[MAX_TRACER_SIZE+2];
2980	int r;
2981
2982	mutex_lock(&trace_types_lock);
2983	if (current_trace)
2984		r = sprintf(buf, "%s\n", current_trace->name);
2985	else
2986		r = sprintf(buf, "\n");
2987	mutex_unlock(&trace_types_lock);
2988
2989	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2990}
2991
2992int tracer_init(struct tracer *t, struct trace_array *tr)
2993{
2994	tracing_reset_online_cpus(tr);
2995	return t->init(tr);
2996}
2997
2998static void set_buffer_entries(struct trace_array *tr, unsigned long val)
2999{
3000	int cpu;
 
3001	for_each_tracing_cpu(cpu)
3002		tr->data[cpu]->entries = val;
3003}
 
3004
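/*
 * Resize the main ring buffer (and the max_tr snapshot buffer when the
 * current tracer uses it) for one CPU or for all CPUs.  If resizing max_tr
 * fails, the main buffer is put back to its previous size; if even that
 * fails, tracing is disabled entirely.
 */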
3005static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 
3006{
3007	int ret;
3008
3009	/*
3010	 * If kernel or user changes the size of the ring buffer
3011	 * we use the size that was given, and we can forget about
3012	 * expanding it later.
3013	 */
3014	ring_buffer_expanded = 1;
3015
3016	ret = ring_buffer_resize(global_trace.buffer, size, cpu);
3017	if (ret < 0)
3018		return ret;
3019
3020	if (!current_trace->use_max_tr)
 
3021		goto out;
3022
3023	ret = ring_buffer_resize(max_tr.buffer, size, cpu);
3024	if (ret < 0) {
3025		int r = 0;
3026
3027		if (cpu == RING_BUFFER_ALL_CPUS) {
3028			int i;
3029			for_each_tracing_cpu(i) {
3030				r = ring_buffer_resize(global_trace.buffer,
3031						global_trace.data[i]->entries,
3032						i);
3033				if (r < 0)
3034					break;
3035			}
3036		} else {
3037			r = ring_buffer_resize(global_trace.buffer,
3038						global_trace.data[cpu]->entries,
3039						cpu);
3040		}
3041
3042		if (r < 0) {
3043			/*
3044			 * AARGH! We are left with different
3045			 * size max buffer!!!!
3046			 * The max buffer is our "snapshot" buffer.
3047			 * When a tracer needs a snapshot (one of the
3048			 * latency tracers), it swaps the max buffer
3049			 * with the saved snapshot. We succeeded in
3050			 * updating the size of the main buffer, but failed to
3051			 * update the size of the max buffer. But when we tried
3052			 * to reset the main buffer to the original size, we
3053			 * failed there too. This is very unlikely to
3054			 * happen, but if it does, warn and kill all
3055			 * tracing.
3056			 */
3057			WARN_ON(1);
3058			tracing_disabled = 1;
3059		}
3060		return ret;
3061	}
3062
3063	if (cpu == RING_BUFFER_ALL_CPUS)
3064		set_buffer_entries(&max_tr, size);
3065	else
3066		max_tr.data[cpu]->entries = size;
3067
3068 out:
3069	if (cpu == RING_BUFFER_ALL_CPUS)
3070		set_buffer_entries(&global_trace, size);
3071	else
3072		global_trace.data[cpu]->entries = size;
3073
 
 
 
3074	return ret;
3075}
3076
3077static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
 
3078{
3079	int ret = size;
3080
3081	mutex_lock(&trace_types_lock);
3082
3083	if (cpu_id != RING_BUFFER_ALL_CPUS) {
3084		/* make sure, this cpu is enabled in the mask */
3085		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3086			ret = -EINVAL;
3087			goto out;
3088		}
3089	}
3090
3091	ret = __tracing_resize_ring_buffer(size, cpu_id);
3092	if (ret < 0)
3093		ret = -ENOMEM;
 
 
 
 
3094
3095out:
3096	mutex_unlock(&trace_types_lock);
 
 
 
 
 
3097
3098	return ret;
 
 
3099}
3100
3101
3102/**
3103 * tracing_update_buffers - used by tracing facility to expand ring buffers
 
3104 *
3105 * To save memory when tracing is never used on a system that has it
3106 * configured in, the ring buffers are initially set to a minimum size. Once
3107 * a user starts to use the tracing facility, they need to grow
3108 * to their default size.
3109 *
3110 * This function is to be called when a tracer is about to be used.
3111 */
3112int tracing_update_buffers(void)
3113{
3114	int ret = 0;
3115
3116	mutex_lock(&trace_types_lock);
3117	if (!ring_buffer_expanded)
3118		ret = __tracing_resize_ring_buffer(trace_buf_size,
 
 
 
3119						RING_BUFFER_ALL_CPUS);
3120	mutex_unlock(&trace_types_lock);
3121
3122	return ret;
3123}
3124
3125struct trace_option_dentry;
3126
3127static struct trace_option_dentry *
3128create_trace_option_files(struct tracer *tracer);
3129
3130static void
3131destroy_trace_option_files(struct trace_option_dentry *topts);
3132
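/*
 * Switch the current tracer.  The ring buffer is expanded to its default
 * size on first use, the old tracer is reset, the max_tr buffer is shrunk
 * or re-grown depending on whether the new tracer needs it, the per-tracer
 * option files are recreated, and the new tracer's init() is run last.
 */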
3133static int tracing_set_tracer(const char *buf)
3134{
3135	static struct trace_option_dentry *topts;
3136	struct trace_array *tr = &global_trace;
3137	struct tracer *t;
3138	int ret = 0;
 
 
 
 
 
3139
3140	mutex_lock(&trace_types_lock);
3141
3142	if (!ring_buffer_expanded) {
3143		ret = __tracing_resize_ring_buffer(trace_buf_size,
3144						RING_BUFFER_ALL_CPUS);
3145		if (ret < 0)
3146			goto out;
3147		ret = 0;
3148	}
3149
3150	for (t = trace_types; t; t = t->next) {
3151		if (strcmp(t->name, buf) == 0)
3152			break;
3153	}
3154	if (!t) {
3155		ret = -EINVAL;
3156		goto out;
3157	}
3158	if (t == current_trace)
3159		goto out;
3160
3161	trace_branch_disable();
3162	if (current_trace && current_trace->reset)
3163		current_trace->reset(tr);
3164	if (current_trace && current_trace->use_max_tr) {
3165		/*
3166		 * We don't free the ring buffer; instead, we resize it because
3167		 * the max_tr ring buffer has some state (e.g. ring->clock) and
3168		 * we want to preserve it.
3169		 */
3170		ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
3171		set_buffer_entries(&max_tr, 1);
 
3172	}
3173	destroy_trace_option_files(topts);
3174
3175	current_trace = t;
3176
3177	topts = create_trace_option_files(current_trace);
3178	if (current_trace->use_max_tr) {
3179		int cpu;
3180		/* we need to make per cpu buffer sizes equivalent */
3181		for_each_tracing_cpu(cpu) {
3182			ret = ring_buffer_resize(max_tr.buffer,
3183						global_trace.data[cpu]->entries,
3184						cpu);
3185			if (ret < 0)
3186				goto out;
3187			max_tr.data[cpu]->entries =
3188					global_trace.data[cpu]->entries;
3189		}
3190	}
 
 
 
3191
3192	if (t->init) {
3193		ret = tracer_init(t, tr);
3194		if (ret)
3195			goto out;
 
 
 
 
 
3196	}
3197
 
 
3198	trace_branch_enable(tr);
3199 out:
3200	mutex_unlock(&trace_types_lock);
3201
3202	return ret;
3203}
3204
3205static ssize_t
3206tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3207			size_t cnt, loff_t *ppos)
3208{
 
3209	char buf[MAX_TRACER_SIZE+1];
3210	int i;
3211	size_t ret;
3212	int err;
3213
3214	ret = cnt;
3215
3216	if (cnt > MAX_TRACER_SIZE)
3217		cnt = MAX_TRACER_SIZE;
3218
3219	if (copy_from_user(&buf, ubuf, cnt))
3220		return -EFAULT;
3221
3222	buf[cnt] = 0;
3223
3224	/* strip ending whitespace. */
3225	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3226		buf[i] = 0;
3227
3228	err = tracing_set_tracer(buf);
3229	if (err)
3230		return err;
3231
3232	*ppos += ret;
3233
3234	return ret;
3235}
3236
3237static ssize_t
3238tracing_max_lat_read(struct file *filp, char __user *ubuf,
3239		     size_t cnt, loff_t *ppos)
3240{
3241	unsigned long *ptr = filp->private_data;
3242	char buf[64];
3243	int r;
3244
3245	r = snprintf(buf, sizeof(buf), "%ld\n",
3246		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
3247	if (r > sizeof(buf))
3248		r = sizeof(buf);
3249	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3250}
3251
3252static ssize_t
3253tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3254		      size_t cnt, loff_t *ppos)
3255{
3256	unsigned long *ptr = filp->private_data;
3257	unsigned long val;
3258	int ret;
3259
3260	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3261	if (ret)
3262		return ret;
3263
3264	*ptr = val * 1000;
3265
3266	return cnt;
3267}
3268
3269static int tracing_open_pipe(struct inode *inode, struct file *filp)
3270{
3271	long cpu_file = (long) inode->i_private;
3272	struct trace_iterator *iter;
3273	int ret = 0;
 
3274
3275	if (tracing_disabled)
3276		return -ENODEV;
 
3277
3278	mutex_lock(&trace_types_lock);
 
 
 
 
3279
3280	/* create a buffer to store the information to pass to userspace */
3281	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3282	if (!iter) {
3283		ret = -ENOMEM;
3284		goto out;
3285	}
3286
3287	/*
3288	 * We make a copy of the current tracer to avoid concurrent
3289	 * changes on it while we are reading.
3290	 */
3291	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3292	if (!iter->trace) {
3293		ret = -ENOMEM;
3294		goto fail;
3295	}
3296	if (current_trace)
3297		*iter->trace = *current_trace;
3298
3299	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3300		ret = -ENOMEM;
3301		goto fail;
3302	}
3303
3304	/* trace pipe does not show start of buffer */
3305	cpumask_setall(iter->started);
3306
3307	if (trace_flags & TRACE_ITER_LATENCY_FMT)
3308		iter->iter_flags |= TRACE_FILE_LAT_FMT;
3309
3310	iter->cpu_file = cpu_file;
3311	iter->tr = &global_trace;
 
 
 
 
 
3312	mutex_init(&iter->mutex);
3313	filp->private_data = iter;
3314
3315	if (iter->trace->pipe_open)
3316		iter->trace->pipe_open(iter);
3317
3318	nonseekable_open(inode, filp);
3319out:
 
 
3320	mutex_unlock(&trace_types_lock);
3321	return ret;
3322
3323fail:
3324	kfree(iter->trace);
3325	kfree(iter);
 
 
 
 
3326	mutex_unlock(&trace_types_lock);
3327	return ret;
3328}
3329
3330static int tracing_release_pipe(struct inode *inode, struct file *file)
3331{
3332	struct trace_iterator *iter = file->private_data;
 
3333
3334	mutex_lock(&trace_types_lock);
3335
 
 
3336	if (iter->trace->pipe_close)
3337		iter->trace->pipe_close(iter);
3338
3339	mutex_unlock(&trace_types_lock);
3340
3341	free_cpumask_var(iter->started);
3342	mutex_destroy(&iter->mutex);
3343	kfree(iter->trace);
3344	kfree(iter);
3345
 
 
3346	return 0;
3347}
3348
3349static unsigned int
3350tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3351{
3352	struct trace_iterator *iter = filp->private_data;
 
 
 
 
3353
3354	if (trace_flags & TRACE_ITER_BLOCK) {
3355		/*
3356		 * Always select as readable when in blocking mode
3357		 */
3358		return POLLIN | POLLRDNORM;
3359	} else {
3360		if (!trace_empty(iter))
3361			return POLLIN | POLLRDNORM;
3362		poll_wait(filp, &trace_wait, poll_table);
3363		if (!trace_empty(iter))
3364			return POLLIN | POLLRDNORM;
3365
3366		return 0;
3367	}
3368}
3369
3370
3371void default_wait_pipe(struct trace_iterator *iter)
3372{
3373	DEFINE_WAIT(wait);
3374
3375	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
3376
3377	if (trace_empty(iter))
3378		schedule();
3379
3380	finish_wait(&trace_wait, &wait);
3381}
3382
3383/*
3384 * This is a make-shift waitqueue.
3385 * A tracer might use this callback in some rare cases:
3386 *
3387 *  1) the current tracer might hold the runqueue lock when it wakes up
3388 *     a reader, hence a deadlock (sched, function, and function graph tracers)
3389 *  2) the function tracers trace all functions, and we don't want
3390 *     the overhead of calling wake_up and friends
3391 *     (and tracing them too)
3392 *
3393 *     Anyway, this is a really primitive wakeup.
3394 */
3395void poll_wait_pipe(struct trace_iterator *iter)
3396{
3397	set_current_state(TASK_INTERRUPTIBLE);
3398	/* sleep for 100 msecs, and try again. */
3399	schedule_timeout(HZ / 10);
3400}
3401
3402/* Must be called with trace_types_lock mutex held. */
3403static int tracing_wait_pipe(struct file *filp)
3404{
3405	struct trace_iterator *iter = filp->private_data;
 
3406
3407	while (trace_empty(iter)) {
3408
3409		if ((filp->f_flags & O_NONBLOCK)) {
3410			return -EAGAIN;
3411		}
3412
3413		mutex_unlock(&iter->mutex);
3414
3415		iter->trace->wait_pipe(iter);
3416
3417		mutex_lock(&iter->mutex);
3418
3419		if (signal_pending(current))
3420			return -EINTR;
3421
3422		/*
3423		 * We block until we read something and tracing is disabled.
3424		 * We still block if tracing is disabled, but we have never
3425		 * read anything. This allows a user to cat this file, and
3426		 * then enable tracing. But after we have read something,
3427		 * we give an EOF when tracing is again disabled.
3428		 *
3429		 * iter->pos will be 0 if we haven't read anything.
3430		 */
3431		if (!tracer_enabled && iter->pos)
3432			break;
3433	}
3434
3435	return 1;
3436}
3437
3438/*
3439 * Consumer reader.
3440 */
3441static ssize_t
3442tracing_read_pipe(struct file *filp, char __user *ubuf,
3443		  size_t cnt, loff_t *ppos)
3444{
3445	struct trace_iterator *iter = filp->private_data;
3446	static struct tracer *old_tracer;
3447	ssize_t sret;
3448
3449	/* return any leftover data */
3450	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3451	if (sret != -EBUSY)
3452		return sret;
3453
3454	trace_seq_init(&iter->seq);
3455
3456	/* copy the tracer to avoid using a global lock all around */
3457	mutex_lock(&trace_types_lock);
3458	if (unlikely(old_tracer != current_trace && current_trace)) {
3459		old_tracer = current_trace;
3460		*iter->trace = *current_trace;
3461	}
3462	mutex_unlock(&trace_types_lock);
3463
3464	/*
3465	 * Avoid more than one consumer on a single file descriptor
3466	 * This is just a matter of traces coherency, the ring buffer itself
3467	 * is protected.
3468	 */
3469	mutex_lock(&iter->mutex);
3470	if (iter->trace->read) {
3471		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
3472		if (sret)
3473			goto out;
3474	}
3475
3476waitagain:
3477	sret = tracing_wait_pipe(filp);
3478	if (sret <= 0)
3479		goto out;
3480
3481	/* stop when tracing is finished */
3482	if (trace_empty(iter)) {
3483		sret = 0;
3484		goto out;
3485	}
3486
3487	if (cnt >= PAGE_SIZE)
3488		cnt = PAGE_SIZE - 1;
3489
3490	/* reset all but tr, trace, and overruns */
3491	memset(&iter->seq, 0,
3492	       sizeof(struct trace_iterator) -
3493	       offsetof(struct trace_iterator, seq));
3494	iter->pos = -1;
3495
3496	trace_event_read_lock();
3497	trace_access_lock(iter->cpu_file);
3498	while (trace_find_next_entry_inc(iter) != NULL) {
3499		enum print_line_t ret;
3500		int len = iter->seq.len;
3501
3502		ret = print_trace_line(iter);
3503		if (ret == TRACE_TYPE_PARTIAL_LINE) {
3504			/* don't print partial lines */
3505			iter->seq.len = len;
3506			break;
3507		}
3508		if (ret != TRACE_TYPE_NO_CONSUME)
3509			trace_consume(iter);
3510
3511		if (iter->seq.len >= cnt)
3512			break;
3513
3514		/*
3515		 * Setting the full flag means we reached the trace_seq buffer
3516		 * size and we should leave by partial output condition above.
3517		 * One of the trace_seq_* functions is not used properly.
3518		 */
3519		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
3520			  iter->ent->type);
3521	}
3522	trace_access_unlock(iter->cpu_file);
3523	trace_event_read_unlock();
3524
3525	/* Now copy what we have to the user */
3526	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3527	if (iter->seq.readpos >= iter->seq.len)
3528		trace_seq_init(&iter->seq);
3529
3530	/*
3531	 * If there was nothing to send to user, in spite of consuming trace
3532	 * entries, go back to wait for more entries.
3533	 */
3534	if (sret == -EBUSY)
3535		goto waitagain;
3536
3537out:
3538	mutex_unlock(&iter->mutex);
3539
3540	return sret;
3541}
3542
3543static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
3544				     struct pipe_buffer *buf)
3545{
3546	__free_page(buf->page);
3547}
3548
3549static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3550				     unsigned int idx)
3551{
3552	__free_page(spd->pages[idx]);
3553}
3554
3555static const struct pipe_buf_operations tracing_pipe_buf_ops = {
3556	.can_merge		= 0,
3557	.map			= generic_pipe_buf_map,
3558	.unmap			= generic_pipe_buf_unmap,
3559	.confirm		= generic_pipe_buf_confirm,
3560	.release		= tracing_pipe_buf_release,
3561	.steal			= generic_pipe_buf_steal,
3562	.get			= generic_pipe_buf_get,
3563};
3564
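/*
 * Fill the iterator's seq buffer with as many formatted trace lines as fit
 * in the remaining splice budget (at most one page), consuming entries as
 * they are printed.  Returns how much of the budget is left.
 */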
3565static size_t
3566tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
3567{
3568	size_t count;
 
3569	int ret;
3570
3571	/* Seq buffer is page-sized, exactly what we need. */
3572	for (;;) {
3573		count = iter->seq.len;
3574		ret = print_trace_line(iter);
3575		count = iter->seq.len - count;
3576		if (rem < count) {
3577			rem = 0;
3578			iter->seq.len -= count;
3579			break;
3580		}
3581		if (ret == TRACE_TYPE_PARTIAL_LINE) {
3582			iter->seq.len -= count;
3583			break;
3584		}
3585
3586		if (ret != TRACE_TYPE_NO_CONSUME)
3587			trace_consume(iter);
3588		rem -= count;
3589		if (!trace_find_next_entry_inc(iter))	{
3590			rem = 0;
3591			iter->ent = NULL;
3592			break;
3593		}
3594	}
3595
3596	return rem;
3597}
3598
3599static ssize_t tracing_splice_read_pipe(struct file *filp,
3600					loff_t *ppos,
3601					struct pipe_inode_info *pipe,
3602					size_t len,
3603					unsigned int flags)
3604{
3605	struct page *pages_def[PIPE_DEF_BUFFERS];
3606	struct partial_page partial_def[PIPE_DEF_BUFFERS];
3607	struct trace_iterator *iter = filp->private_data;
3608	struct splice_pipe_desc spd = {
3609		.pages		= pages_def,
3610		.partial	= partial_def,
3611		.nr_pages	= 0, /* This gets updated below. */
3612		.nr_pages_max	= PIPE_DEF_BUFFERS,
3613		.flags		= flags,
3614		.ops		= &tracing_pipe_buf_ops,
3615		.spd_release	= tracing_spd_release_pipe,
3616	};
3617	static struct tracer *old_tracer;
3618	ssize_t ret;
3619	size_t rem;
3620	unsigned int i;
3621
3622	if (splice_grow_spd(pipe, &spd))
3623		return -ENOMEM;
3624
3625	/* copy the tracer to avoid using a global lock all around */
3626	mutex_lock(&trace_types_lock);
3627	if (unlikely(old_tracer != current_trace && current_trace)) {
3628		old_tracer = current_trace;
3629		*iter->trace = *current_trace;
3630	}
3631	mutex_unlock(&trace_types_lock);
3632
3633	mutex_lock(&iter->mutex);
3634
3635	if (iter->trace->splice_read) {
3636		ret = iter->trace->splice_read(iter, filp,
3637					       ppos, pipe, len, flags);
3638		if (ret)
3639			goto out_err;
3640	}
3641
3642	ret = tracing_wait_pipe(filp);
3643	if (ret <= 0)
3644		goto out_err;
3645
3646	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3647		ret = -EFAULT;
3648		goto out_err;
3649	}
3650
3651	trace_event_read_lock();
3652	trace_access_lock(iter->cpu_file);
3653
3654	/* Fill as many pages as possible. */
3655	for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
3656		spd.pages[i] = alloc_page(GFP_KERNEL);
3657		if (!spd.pages[i])
3658			break;
3659
3660		rem = tracing_fill_pipe_page(rem, iter);
3661
3662		/* Copy the data into the page, so we can start over. */
3663		ret = trace_seq_to_buffer(&iter->seq,
3664					  page_address(spd.pages[i]),
3665					  iter->seq.len);
3666		if (ret < 0) {
3667			__free_page(spd.pages[i]);
3668			break;
3669		}
3670		spd.partial[i].offset = 0;
3671		spd.partial[i].len = iter->seq.len;
3672
3673		trace_seq_init(&iter->seq);
3674	}
3675
3676	trace_access_unlock(iter->cpu_file);
3677	trace_event_read_unlock();
3678	mutex_unlock(&iter->mutex);
3679
3680	spd.nr_pages = i;
3681
3682	ret = splice_to_pipe(pipe, &spd);
 
 
 
3683out:
3684	splice_shrink_spd(&spd);
3685	return ret;
3686
3687out_err:
3688	mutex_unlock(&iter->mutex);
3689	goto out;
3690}
3691
3692struct ftrace_entries_info {
3693	struct trace_array	*tr;
3694	int			cpu;
3695};
3696
3697static int tracing_entries_open(struct inode *inode, struct file *filp)
3698{
3699	struct ftrace_entries_info *info;
3700
3701	if (tracing_disabled)
3702		return -ENODEV;
3703
3704	info = kzalloc(sizeof(*info), GFP_KERNEL);
3705	if (!info)
3706		return -ENOMEM;
3707
3708	info->tr = &global_trace;
3709	info->cpu = (unsigned long)inode->i_private;
3710
3711	filp->private_data = info;
3712
3713	return 0;
3714}
3715
3716static ssize_t
3717tracing_entries_read(struct file *filp, char __user *ubuf,
3718		     size_t cnt, loff_t *ppos)
3719{
3720	struct ftrace_entries_info *info = filp->private_data;
3721	struct trace_array *tr = info->tr;
 
3722	char buf[64];
3723	int r = 0;
3724	ssize_t ret;
3725
3726	mutex_lock(&trace_types_lock);
3727
3728	if (info->cpu == RING_BUFFER_ALL_CPUS) {
3729		int cpu, buf_size_same;
3730		unsigned long size;
3731
3732		size = 0;
3733		buf_size_same = 1;
3734		/* check if all cpu sizes are same */
3735		for_each_tracing_cpu(cpu) {
3736			/* fill in the size from first enabled cpu */
3737			if (size == 0)
3738				size = tr->data[cpu]->entries;
3739			if (size != tr->data[cpu]->entries) {
3740				buf_size_same = 0;
3741				break;
3742			}
3743		}
3744
3745		if (buf_size_same) {
3746			if (!ring_buffer_expanded)
3747				r = sprintf(buf, "%lu (expanded: %lu)\n",
3748					    size >> 10,
3749					    trace_buf_size >> 10);
3750			else
3751				r = sprintf(buf, "%lu\n", size >> 10);
3752		} else
3753			r = sprintf(buf, "X\n");
3754	} else
3755		r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
3756
3757	mutex_unlock(&trace_types_lock);
3758
3759	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3760	return ret;
3761}
3762
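/*
 * Writing to "buffer_size_kb" takes a size in kilobytes (must be non-zero),
 * converts it to bytes, and resizes the ring buffer for the CPU this file
 * represents (or for all CPUs from the top-level file).
 */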
3763static ssize_t
3764tracing_entries_write(struct file *filp, const char __user *ubuf,
3765		      size_t cnt, loff_t *ppos)
3766{
3767	struct ftrace_entries_info *info = filp->private_data;
 
3768	unsigned long val;
3769	int ret;
3770
3771	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3772	if (ret)
3773		return ret;
3774
3775	/* must have at least 1 entry */
3776	if (!val)
3777		return -EINVAL;
3778
3779	/* value is in KB */
3780	val <<= 10;
3781
3782	ret = tracing_resize_ring_buffer(val, info->cpu);
3783	if (ret < 0)
3784		return ret;
3785
3786	*ppos += cnt;
3787
3788	return cnt;
3789}
3790
3791static int
3792tracing_entries_release(struct inode *inode, struct file *filp)
3793{
3794	struct ftrace_entries_info *info = filp->private_data;
3795
3796	kfree(info);
3797
3798	return 0;
3799}
3800
3801static ssize_t
3802tracing_total_entries_read(struct file *filp, char __user *ubuf,
3803				size_t cnt, loff_t *ppos)
3804{
3805	struct trace_array *tr = filp->private_data;
3806	char buf[64];
3807	int r, cpu;
3808	unsigned long size = 0, expanded_size = 0;
3809
3810	mutex_lock(&trace_types_lock);
3811	for_each_tracing_cpu(cpu) {
3812		size += tr->data[cpu]->entries >> 10;
3813		if (!ring_buffer_expanded)
3814			expanded_size += trace_buf_size >> 10;
3815	}
3816	if (ring_buffer_expanded)
3817		r = sprintf(buf, "%lu\n", size);
3818	else
3819		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
3820	mutex_unlock(&trace_types_lock);
3821
3822	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3823}
3824
3825static ssize_t
3826tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
3827			  size_t cnt, loff_t *ppos)
3828{
3829	/*
3830	 * There is no need to read what the user has written; this function
3831	 * is just here to make sure that there is no error when "echo" is used
3832	 */
3833
3834	*ppos += cnt;
3835
3836	return cnt;
3837}
3838
3839static int
3840tracing_free_buffer_release(struct inode *inode, struct file *filp)
3841{
 
 
3842	/* disable tracing ? */
3843	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
3844		tracing_off();
3845	/* resize the ring buffer to 0 */
3846	tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
 
 
3847
3848	return 0;
3849}
3850
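/*
 * Write handler for "trace_marker": user data is copied straight into a
 * TRACE_PRINT event in the ring buffer.  See the comment in the function
 * body for why the user pages are pinned and mapped instead of going
 * through a temporary kernel buffer.
 */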
3851static ssize_t
3852tracing_mark_write(struct file *filp, const char __user *ubuf,
3853					size_t cnt, loff_t *fpos)
3854{
3855	unsigned long addr = (unsigned long)ubuf;
3856	struct ring_buffer_event *event;
3857	struct ring_buffer *buffer;
 
3858	struct print_entry *entry;
3859	unsigned long irq_flags;
3860	struct page *pages[2];
3861	void *map_page[2];
3862	int nr_pages = 1;
3863	ssize_t written;
3864	int offset;
3865	int size;
3866	int len;
3867	int ret;
3868	int i;
 
 
3869
3870	if (tracing_disabled)
3871		return -EINVAL;
3872
3873	if (cnt > TRACE_BUF_SIZE)
3874		cnt = TRACE_BUF_SIZE;
3875
3876	/*
3877	 * Userspace is injecting traces into the kernel trace buffer.
3878	 * We want to be as non-intrusive as possible.
3879	 * To do so, we do not want to allocate any special buffers
3880	 * or take any locks, but instead write the userspace data
3881	 * straight into the ring buffer.
3882	 *
3883	 * First we need to pin the userspace buffer into memory,
3884	 * which it most likely already is, because the process just referenced it.
3885	 * But there's no guarantee that it is. By using get_user_pages_fast()
3886	 * and kmap_atomic/kunmap_atomic() we can get access to the
3887	 * pages directly. We then write the data directly into the
3888	 * ring buffer.
3889	 */
3890	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
3891
3892	/* check if we cross pages */
3893	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
3894		nr_pages = 2;
3895
3896	offset = addr & (PAGE_SIZE - 1);
3897	addr &= PAGE_MASK;
3898
3899	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
3900	if (ret < nr_pages) {
3901		while (--ret >= 0)
3902			put_page(pages[ret]);
3903		written = -EFAULT;
3904		goto out;
3905	}
3906
3907	for (i = 0; i < nr_pages; i++)
3908		map_page[i] = kmap_atomic(pages[i]);
3909
3910	local_save_flags(irq_flags);
3911	size = sizeof(*entry) + cnt + 2; /* possible \n added */
3912	buffer = global_trace.buffer;
3913	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3914					  irq_flags, preempt_count());
3915	if (!event) {
3916		/* Ring buffer disabled, return as if not open for write */
3917		written = -EBADF;
3918		goto out_unlock;
3919	}
3920
3921	entry = ring_buffer_event_data(event);
3922	entry->ip = _THIS_IP_;
3923
3924	if (nr_pages == 2) {
3925		len = PAGE_SIZE - offset;
3926		memcpy(&entry->buf, map_page[0] + offset, len);
3927		memcpy(&entry->buf[len], map_page[1], cnt - len);
 
3928	} else
3929		memcpy(&entry->buf, map_page[0] + offset, cnt);
3930
3931	if (entry->buf[cnt - 1] != '\n') {
3932		entry->buf[cnt] = '\n';
3933		entry->buf[cnt + 1] = '\0';
3934	} else
3935		entry->buf[cnt] = '\0';
3936
3937	ring_buffer_unlock_commit(buffer, event);
3938
3939	written = cnt;
3940
3941	*fpos += written;
3942
3943 out_unlock:
3944	for (i = 0; i < nr_pages; i++){
3945		kunmap_atomic(map_page[i]);
3946		put_page(pages[i]);
3947	}
3948 out:
3949	return written;
3950}
3951
3952static int tracing_clock_show(struct seq_file *m, void *v)
3953{
 
3954	int i;
3955
3956	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3957		seq_printf(m,
3958			"%s%s%s%s", i ? " " : "",
3959			i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3960			i == trace_clock_id ? "]" : "");
3961	seq_putc(m, '\n');
3962
3963	return 0;
3964}
3965
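/*
 * Writing a clock name to "trace_clock" looks it up in trace_clocks[] and
 * installs that clock on both the main and the max_tr ring buffers.
 */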
3966static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3967				   size_t cnt, loff_t *fpos)
3968{
 
 
3969	char buf[64];
3970	const char *clockstr;
3971	int i;
3972
3973	if (cnt >= sizeof(buf))
3974		return -EINVAL;
3975
3976	if (copy_from_user(&buf, ubuf, cnt))
3977		return -EFAULT;
3978
3979	buf[cnt] = 0;
3980
3981	clockstr = strstrip(buf);
3982
3983	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
3984		if (strcmp(trace_clocks[i].name, clockstr) == 0)
3985			break;
3986	}
3987	if (i == ARRAY_SIZE(trace_clocks))
3988		return -EINVAL;
3989
3990	trace_clock_id = i;
 
 
3991
3992	mutex_lock(&trace_types_lock);
3993
3994	ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
3995	if (max_tr.buffer)
3996		ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
 
3997
3998	mutex_unlock(&trace_types_lock);
3999
4000	*fpos += cnt;
4001
4002	return cnt;
4003}
4004
4005static int tracing_clock_open(struct inode *inode, struct file *file)
4006{
4007	if (tracing_disabled)
4008		return -ENODEV;
4009	return single_open(file, tracing_clock_show, NULL);
4010}
4011
4012static const struct file_operations tracing_max_lat_fops = {
 
 
 
4013	.open		= tracing_open_generic,
4014	.read		= tracing_max_lat_read,
4015	.write		= tracing_max_lat_write,
4016	.llseek		= generic_file_llseek,
4017};
4018
4019static const struct file_operations tracing_ctrl_fops = {
4020	.open		= tracing_open_generic,
4021	.read		= tracing_ctrl_read,
4022	.write		= tracing_ctrl_write,
 
4023	.llseek		= generic_file_llseek,
 
4024};
 
4025
4026static const struct file_operations set_tracer_fops = {
4027	.open		= tracing_open_generic,
4028	.read		= tracing_set_trace_read,
4029	.write		= tracing_set_trace_write,
4030	.llseek		= generic_file_llseek,
 
4031};
4032
4033static const struct file_operations tracing_pipe_fops = {
4034	.open		= tracing_open_pipe,
4035	.poll		= tracing_poll_pipe,
4036	.read		= tracing_read_pipe,
4037	.splice_read	= tracing_splice_read_pipe,
4038	.release	= tracing_release_pipe,
4039	.llseek		= no_llseek,
4040};
4041
4042static const struct file_operations tracing_entries_fops = {
4043	.open		= tracing_entries_open,
4044	.read		= tracing_entries_read,
4045	.write		= tracing_entries_write,
4046	.release	= tracing_entries_release,
4047	.llseek		= generic_file_llseek,
4048};
4049
4050static const struct file_operations tracing_total_entries_fops = {
4051	.open		= tracing_open_generic,
4052	.read		= tracing_total_entries_read,
4053	.llseek		= generic_file_llseek,
 
4054};
4055
4056static const struct file_operations tracing_free_buffer_fops = {
 
4057	.write		= tracing_free_buffer_write,
4058	.release	= tracing_free_buffer_release,
4059};
4060
4061static const struct file_operations tracing_mark_fops = {
4062	.open		= tracing_open_generic,
4063	.write		= tracing_mark_write,
4064	.llseek		= generic_file_llseek,
4065};
4066
4067static const struct file_operations trace_clock_fops = {
4068	.open		= tracing_clock_open,
4069	.read		= seq_read,
4070	.llseek		= seq_lseek,
4071	.release	= single_release,
4072	.write		= tracing_clock_write,
4073};
4074
4075struct ftrace_buffer_info {
4076	struct trace_array	*tr;
4077	void			*spare;
4078	int			cpu;
4079	unsigned int		read;
4080};
4081
4082static int tracing_buffers_open(struct inode *inode, struct file *filp)
4083{
4084	int cpu = (int)(long)inode->i_private;
4085	struct ftrace_buffer_info *info;
 
4086
4087	if (tracing_disabled)
4088		return -ENODEV;
 
4089
4090	info = kzalloc(sizeof(*info), GFP_KERNEL);
4091	if (!info)
 
4092		return -ENOMEM;
 
4093
4094	info->tr	= &global_trace;
4095	info->cpu	= cpu;
4096	info->spare	= NULL;
 
 
 
 
4097	/* Force reading ring buffer for first read */
4098	info->read	= (unsigned int)-1;
4099
4100	filp->private_data = info;
4101
4102	return nonseekable_open(inode, filp);
4103}
4104
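/*
 * Read handler for "trace_pipe_raw": whole ring buffer pages are copied
 * into a spare page with ring_buffer_read_page() and then handed to user
 * space, so the data keeps the binary ring buffer format.
 */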
4105static ssize_t
4106tracing_buffers_read(struct file *filp, char __user *ubuf,
4107		     size_t count, loff_t *ppos)
4108{
4109	struct ftrace_buffer_info *info = filp->private_data;
4110	ssize_t ret;
4111	size_t size;
 
 
 
4112
4113	if (!count)
4114		return 0;
4115
4116	if (!info->spare)
4117		info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
4118	if (!info->spare)
4119		return -ENOMEM;
4120
4121	/* Do we have previous read data to read? */
4122	if (info->read < PAGE_SIZE)
4123		goto read;
4124
4125	trace_access_lock(info->cpu);
4126	ret = ring_buffer_read_page(info->tr->buffer,
4127				    &info->spare,
 
4128				    count,
4129				    info->cpu, 0);
4130	trace_access_unlock(info->cpu);
4131	if (ret < 0)
4132		return 0;
 
4133
4134	info->read = 0;
4135
4136read:
4137	size = PAGE_SIZE - info->read;
4138	if (size > count)
4139		size = count;
4140
4141	ret = copy_to_user(ubuf, info->spare + info->read, size);
4142	if (ret == size)
4143		return -EFAULT;
 
4144	size -= ret;
4145
4146	*ppos += size;
4147	info->read += size;
4148
4149	return size;
4150}
4151
4152static int tracing_buffers_release(struct inode *inode, struct file *file)
4153{
4154	struct ftrace_buffer_info *info = file->private_data;
4155
4156	if (info->spare)
4157		ring_buffer_free_read_page(info->tr->buffer, info->spare);
4158	kfree(info);
 
 
 
4159
4160	return 0;
4161}
4162
4163struct buffer_ref {
4164	struct ring_buffer	*buffer;
4165	void			*page;
4166	int			ref;
 
4167};
4168
4169static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
4170				    struct pipe_buffer *buf)
4171{
4172	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4173
4174	if (--ref->ref)
4175		return;
4176
4177	ring_buffer_free_read_page(ref->buffer, ref->page);
4178	kfree(ref);
4179	buf->private = 0;
4180}
4181
4182static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
4183				 struct pipe_buffer *buf)
4184{
4185	return 1;
4186}
4187
4188static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
4189				struct pipe_buffer *buf)
4190{
4191	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4192
4193	ref->ref++;
 
 
 
 
4194}
4195
4196/* Pipe buffer operations for a buffer. */
4197static const struct pipe_buf_operations buffer_pipe_buf_ops = {
4198	.can_merge		= 0,
4199	.map			= generic_pipe_buf_map,
4200	.unmap			= generic_pipe_buf_unmap,
4201	.confirm		= generic_pipe_buf_confirm,
4202	.release		= buffer_pipe_buf_release,
4203	.steal			= buffer_pipe_buf_steal,
4204	.get			= buffer_pipe_buf_get,
4205};
4206
4207/*
4208 * Callback from splice_to_pipe(), if we need to release some pages
4209 * at the end of the spd in case we error'ed out in filling the pipe.
4210 */
4211static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
4212{
4213	struct buffer_ref *ref =
4214		(struct buffer_ref *)spd->partial[i].private;
4215
4216	if (--ref->ref)
4217		return;
4218
4219	ring_buffer_free_read_page(ref->buffer, ref->page);
4220	kfree(ref);
4221	spd->partial[i].private = 0;
4222}
4223
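/*
 * Splice handler for "trace_pipe_raw": ring buffer pages are handed to the
 * pipe without copying.  Each page is wrapped in a reference-counted
 * buffer_ref so it is returned to the ring buffer only when the last pipe
 * buffer referencing it is released.
 */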
4224static ssize_t
4225tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4226			    struct pipe_inode_info *pipe, size_t len,
4227			    unsigned int flags)
4228{
4229	struct ftrace_buffer_info *info = file->private_data;
 
4230	struct partial_page partial_def[PIPE_DEF_BUFFERS];
4231	struct page *pages_def[PIPE_DEF_BUFFERS];
4232	struct splice_pipe_desc spd = {
4233		.pages		= pages_def,
4234		.partial	= partial_def,
4235		.nr_pages_max	= PIPE_DEF_BUFFERS,
4236		.flags		= flags,
4237		.ops		= &buffer_pipe_buf_ops,
4238		.spd_release	= buffer_spd_release,
4239	};
4240	struct buffer_ref *ref;
4241	int entries, size, i;
4242	size_t ret;
4243
4244	if (splice_grow_spd(pipe, &spd))
4245		return -ENOMEM;
 
4246
4247	if (*ppos & (PAGE_SIZE - 1)) {
4248		WARN_ONCE(1, "Ftrace: previous read must page-align\n");
4249		ret = -EINVAL;
4250		goto out;
4251	}
4252
4253	if (len & (PAGE_SIZE - 1)) {
4254		WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
4255		if (len < PAGE_SIZE) {
4256			ret = -EINVAL;
4257			goto out;
4258		}
4259		len &= PAGE_MASK;
4260	}
4261
4262	trace_access_lock(info->cpu);
4263	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 
4264
4265	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
4266		struct page *page;
4267		int r;
4268
4269		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
4270		if (!ref)
 
4271			break;
 
4272
4273		ref->ref = 1;
4274		ref->buffer = info->tr->buffer;
4275		ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
4276		if (!ref->page) {
 
 
4277			kfree(ref);
4278			break;
4279		}
 
4280
4281		r = ring_buffer_read_page(ref->buffer, &ref->page,
4282					  len, info->cpu, 1);
4283		if (r < 0) {
4284			ring_buffer_free_read_page(ref->buffer, ref->page);
 
4285			kfree(ref);
4286			break;
4287		}
4288
4289		/*
4290		 * Zero out any leftover data; this is going to
4291		 * user land.
4292		 */
4293		size = ring_buffer_page_len(ref->page);
4294		if (size < PAGE_SIZE)
4295			memset(ref->page + size, 0, PAGE_SIZE - size);
4296
4297		page = virt_to_page(ref->page);
4298
4299		spd.pages[i] = page;
4300		spd.partial[i].len = PAGE_SIZE;
4301		spd.partial[i].offset = 0;
4302		spd.partial[i].private = (unsigned long)ref;
4303		spd.nr_pages++;
4304		*ppos += PAGE_SIZE;
4305
4306		entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4307	}
4308
4309	trace_access_unlock(info->cpu);
4310	spd.nr_pages = i;
4311
4312	/* did we read anything? */
4313	if (!spd.nr_pages) {
4314		if (flags & SPLICE_F_NONBLOCK)
4315			ret = -EAGAIN;
4316		else
4317			ret = 0;
4318		/* TODO: block */
4319		goto out;
4320	}
4321
4322	ret = splice_to_pipe(pipe, &spd);
 
4323	splice_shrink_spd(&spd);
4324out:
4325	return ret;
4326}
4327
4328static const struct file_operations tracing_buffers_fops = {
4329	.open		= tracing_buffers_open,
4330	.read		= tracing_buffers_read,
 
4331	.release	= tracing_buffers_release,
 
4332	.splice_read	= tracing_buffers_splice_read,
4333	.llseek		= no_llseek,
 
4334};
4335
4336static ssize_t
4337tracing_stats_read(struct file *filp, char __user *ubuf,
4338		   size_t count, loff_t *ppos)
4339{
4340	unsigned long cpu = (unsigned long)filp->private_data;
4341	struct trace_array *tr = &global_trace;
 
 
4342	struct trace_seq *s;
4343	unsigned long cnt;
4344	unsigned long long t;
4345	unsigned long usec_rem;
4346
4347	s = kmalloc(sizeof(*s), GFP_KERNEL);
4348	if (!s)
4349		return -ENOMEM;
4350
4351	trace_seq_init(s);
4352
4353	cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
4354	trace_seq_printf(s, "entries: %ld\n", cnt);
4355
4356	cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
4357	trace_seq_printf(s, "overrun: %ld\n", cnt);
4358
4359	cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
4360	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
4361
4362	cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
4363	trace_seq_printf(s, "bytes: %ld\n", cnt);
4364
4365	t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
4366	usec_rem = do_div(t, USEC_PER_SEC);
4367	trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
4368
4369	t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
4370	usec_rem = do_div(t, USEC_PER_SEC);
4371	trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
4372
4373	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
4374
4375	kfree(s);
4376
4377	return count;
4378}
4379
4380static const struct file_operations tracing_stats_fops = {
4381	.open		= tracing_open_generic,
4382	.read		= tracing_stats_read,
4383	.llseek		= generic_file_llseek,
 
4384};
4385
4386#ifdef CONFIG_DYNAMIC_FTRACE
4387
4388int __weak ftrace_arch_read_dyn_info(char *buf, int size)
4389{
4390	return 0;
4391}
4392
4393static ssize_t
4394tracing_read_dyn_info(struct file *filp, char __user *ubuf,
4395		  size_t cnt, loff_t *ppos)
4396{
4397	static char ftrace_dyn_info_buffer[1024];
4398	static DEFINE_MUTEX(dyn_info_mutex);
4399	unsigned long *p = filp->private_data;
4400	char *buf = ftrace_dyn_info_buffer;
4401	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
4402	int r;
4403
4404	mutex_lock(&dyn_info_mutex);
4405	r = sprintf(buf, "%ld ", *p);
4406
4407	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
4408	buf[r++] = '\n';
 
4409
4410	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4411
4412	mutex_unlock(&dyn_info_mutex);
4413
4414	return r;
4415}
4416
4417static const struct file_operations tracing_dyn_info_fops = {
4418	.open		= tracing_open_generic,
4419	.read		= tracing_read_dyn_info,
4420	.llseek		= generic_file_llseek,
4421};
4422#endif
4423
4424static struct dentry *d_tracer;
4425
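/*
 * Lazily create the top-level "tracing" debugfs directory (typically
 * /sys/kernel/debug/tracing once debugfs is mounted).  The dentry is
 * cached in d_tracer; NULL is returned, with a one-time warning, if
 * debugfs is not available or the directory cannot be created.
 */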
4426struct dentry *tracing_init_dentry(void)
4427{
4428	static int once;
4429
4430	if (d_tracer)
4431		return d_tracer;
4432
4433	if (!debugfs_initialized())
4434		return NULL;
4435
4436	d_tracer = debugfs_create_dir("tracing", NULL);
4437
4438	if (!d_tracer && !once) {
4439		once = 1;
4440		pr_warning("Could not create debugfs directory 'tracing'\n");
4441		return NULL;
4442	}
4443
4444	return d_tracer;
4445}
4446
4447static struct dentry *d_percpu;
4448
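/*
 * Lazily create the "per_cpu" directory below the tracing root.  Like
 * tracing_init_dentry(), the result is cached and a failure is only
 * warned about once.
 */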
4449struct dentry *tracing_dentry_percpu(void)
4450{
4451	static int once;
4452	struct dentry *d_tracer;
4453
4454	if (d_percpu)
4455		return d_percpu;
4456
4457	d_tracer = tracing_init_dentry();
4458
4459	if (!d_tracer)
4460		return NULL;
4461
4462	d_percpu = debugfs_create_dir("per_cpu", d_tracer);
4463
4464	if (!d_percpu && !once) {
4465		once = 1;
4466		pr_warning("Could not create debugfs directory 'per_cpu'\n");
4467		return NULL;
4468	}
4469
4470	return d_percpu;
4471}
4472
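/*
 * Populate per_cpu/cpu<N> with the per-CPU control files: trace_pipe,
 * trace, trace_pipe_raw, stats and buffer_size_kb.  The CPU number is
 * handed to the file operations through the data pointer.
 */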
4473static void tracing_init_debugfs_percpu(long cpu)
4474{
4475	struct dentry *d_percpu = tracing_dentry_percpu();
4476	struct dentry *d_cpu;
4477	char cpu_dir[30]; /* 30 characters should be more than enough */
4478
4479	if (!d_percpu)
4480		return;
4481
4482	snprintf(cpu_dir, 30, "cpu%ld", cpu);
4483	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
4484	if (!d_cpu) {
4485		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
4486		return;
4487	}
4488
4489	/* per cpu trace_pipe */
4490	trace_create_file("trace_pipe", 0444, d_cpu,
4491			(void *) cpu, &tracing_pipe_fops);
4492
4493	/* per cpu trace */
4494	trace_create_file("trace", 0644, d_cpu,
4495			(void *) cpu, &tracing_fops);
4496
4497	trace_create_file("trace_pipe_raw", 0444, d_cpu,
4498			(void *) cpu, &tracing_buffers_fops);
4499
4500	trace_create_file("stats", 0444, d_cpu,
4501			(void *) cpu, &tracing_stats_fops);
4502
4503	trace_create_file("buffer_size_kb", 0444, d_cpu,
4504			(void *) cpu, &tracing_entries_fops);
4505}
4506
4507#ifdef CONFIG_FTRACE_SELFTEST
4508/* Let selftest have access to static functions in this file */
4509#include "trace_selftest.c"
4510#endif
4511
4512struct trace_option_dentry {
4513	struct tracer_opt		*opt;
4514	struct tracer_flags		*flags;
4515	struct dentry			*entry;
4516};
4517
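/*
 * Read/write handlers for the tracer-specific option files under
 * options/.  Reads report "0\n" or "1\n"; writes accept only 0 or 1
 * and update the option through __set_tracer_option() under
 * trace_types_lock.
 */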
4518static ssize_t
4519trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
4520			loff_t *ppos)
4521{
4522	struct trace_option_dentry *topt = filp->private_data;
4523	char *buf;
4524
4525	if (topt->flags->val & topt->opt->bit)
4526		buf = "1\n";
4527	else
4528		buf = "0\n";
4529
4530	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4531}
4532
4533static ssize_t
4534trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
4535			 loff_t *ppos)
4536{
4537	struct trace_option_dentry *topt = filp->private_data;
4538	unsigned long val;
4539	int ret;
4540
4541	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4542	if (ret)
4543		return ret;
4544
4545	if (val != 0 && val != 1)
4546		return -EINVAL;
4547
4548	if (!!(topt->flags->val & topt->opt->bit) != val) {
4549		mutex_lock(&trace_types_lock);
4550		ret = __set_tracer_option(current_trace, topt->flags,
4551					  topt->opt, !val);
4552		mutex_unlock(&trace_types_lock);
4553		if (ret)
4554			return ret;
4555	}
4556
4557	*ppos += cnt;
4558
4559	return cnt;
4560}
4561
4562
4563static const struct file_operations trace_options_fops = {
4564	.open = tracing_open_generic,
4565	.read = trace_options_read,
4566	.write = trace_options_write,
4567	.llseek	= generic_file_llseek,
4568};
4569
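/*
 * Read/write handlers for the core trace_options bits (the global
 * trace_flags mask).  The bit index is passed in via the file's
 * private_data; writes of 0 or 1 go through set_tracer_flags().
 */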
4570static ssize_t
4571trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
4572			loff_t *ppos)
4573{
4574	long index = (long)filp->private_data;
4575	char *buf;
4576
4577	if (trace_flags & (1 << index))
4578		buf = "1\n";
4579	else
4580		buf = "0\n";
4581
4582	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4583}
4584
4585static ssize_t
4586trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
4587			 loff_t *ppos)
4588{
4589	long index = (long)filp->private_data;
4590	unsigned long val;
4591	int ret;
4592
4593	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4594	if (ret)
4595		return ret;
4596
4597	if (val != 0 && val != 1)
4598		return -EINVAL;
4599	set_tracer_flags(1 << index, val);
4600
4601	*ppos += cnt;
4602
4603	return cnt;
4604}
4605
4606static const struct file_operations trace_options_core_fops = {
4607	.open = tracing_open_generic,
4608	.read = trace_options_core_read,
4609	.write = trace_options_core_write,
4610	.llseek = generic_file_llseek,
4611};
4612
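/*
 * Thin wrapper around debugfs_create_file() that warns when a tracing
 * file cannot be created; used for every file the tracer exposes.
 */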
4613struct dentry *trace_create_file(const char *name,
4614				 umode_t mode,
4615				 struct dentry *parent,
4616				 void *data,
4617				 const struct file_operations *fops)
4618{
4619	struct dentry *ret;
4620
4621	ret = debugfs_create_file(name, mode, parent, data, fops);
4622	if (!ret)
4623		pr_warning("Could not create debugfs '%s' entry\n", name);
4624
4625	return ret;
4626}
4627
4628
4629static struct dentry *trace_options_init_dentry(void)
4630{
4631	struct dentry *d_tracer;
4632	static struct dentry *t_options;
4633
4634	if (t_options)
4635		return t_options;
4636
4637	d_tracer = tracing_init_dentry();
4638	if (!d_tracer)
4639		return NULL;
4640
4641	t_options = debugfs_create_dir("options", d_tracer);
4642	if (!t_options) {
4643		pr_warning("Could not create debugfs directory 'options'\n");
4644		return NULL;
4645	}
4646
4647	return t_options;
4648}
4649
4650static void
4651create_trace_option_file(struct trace_option_dentry *topt,
4652			 struct tracer_flags *flags,
4653			 struct tracer_opt *opt)
4654{
4655	struct dentry *t_options;
4656
4657	t_options = trace_options_init_dentry();
4658	if (!t_options)
4659		return;
4660
4661	topt->flags = flags;
4662	topt->opt = opt;
4663
4664	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
4665				    &trace_options_fops);
4666
4667}
4668
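/*
 * Build one debugfs file per option exposed by the tracer's
 * flags->opts array.  The returned array has one extra zeroed slot so
 * that destroy_trace_option_files() can stop at the first NULL opt.
 */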
4669static struct trace_option_dentry *
4670create_trace_option_files(struct tracer *tracer)
4671{
4672	struct trace_option_dentry *topts;
4673	struct tracer_flags *flags;
4674	struct tracer_opt *opts;
4675	int cnt;
4676
4677	if (!tracer)
4678		return NULL;
4679
4680	flags = tracer->flags;
4681
4682	if (!flags || !flags->opts)
4683		return NULL;
4684
4685	opts = flags->opts;
4686
4687	for (cnt = 0; opts[cnt].name; cnt++)
4688		;
4689
4690	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
4691	if (!topts)
4692		return NULL;
4693
4694	for (cnt = 0; opts[cnt].name; cnt++)
4695		create_trace_option_file(&topts[cnt], flags,
4696					 &opts[cnt]);
4697
4698	return topts;
4699}
4700
4701static void
4702destroy_trace_option_files(struct trace_option_dentry *topts)
4703{
4704	int cnt;
4705
4706	if (!topts)
4707		return;
4708
4709	for (cnt = 0; topts[cnt].opt; cnt++) {
4710		if (topts[cnt].entry)
4711			debugfs_remove(topts[cnt].entry);
4712	}
4713
4714	kfree(topts);
4715}
4716
4717static struct dentry *
4718create_trace_option_core_file(const char *option, long index)
4719{
4720	struct dentry *t_options;
4721
4722	t_options = trace_options_init_dentry();
4723	if (!t_options)
4724		return NULL;
4725
4726	return trace_create_file(option, 0644, t_options, (void *)index,
4727				    &trace_options_core_fops);
4728}
4729
4730static __init void create_trace_options_dir(void)
4731{
4732	struct dentry *t_options;
4733	int i;
4734
4735	t_options = trace_options_init_dentry();
4736	if (!t_options)
4737		return;
4738
4739	for (i = 0; trace_options[i]; i++)
4740		create_trace_option_core_file(trace_options[i], i);
4741}
4742
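/*
 * Back end for the "tracing_on" file: reads report whether the ring
 * buffer is currently recording, writes of 0/1 switch recording off or
 * on without discarding the buffer contents.
 */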
4743static ssize_t
4744rb_simple_read(struct file *filp, char __user *ubuf,
4745	       size_t cnt, loff_t *ppos)
4746{
4747	struct trace_array *tr = filp->private_data;
4748	struct ring_buffer *buffer = tr->buffer;
4749	char buf[64];
4750	int r;
4751
4752	if (buffer)
4753		r = ring_buffer_record_is_on(buffer);
4754	else
4755		r = 0;
4756
4757	r = sprintf(buf, "%d\n", r);
4758
4759	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4760}
4761
4762static ssize_t
4763rb_simple_write(struct file *filp, const char __user *ubuf,
4764		size_t cnt, loff_t *ppos)
4765{
4766	struct trace_array *tr = filp->private_data;
4767	struct ring_buffer *buffer = tr->buffer;
4768	unsigned long val;
4769	int ret;
4770
4771	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4772	if (ret)
4773		return ret;
4774
4775	if (buffer) {
4776		if (val)
4777			ring_buffer_record_on(buffer);
4778		else
4779			ring_buffer_record_off(buffer);
4780	}
4781
4782	(*ppos)++;
4783
4784	return cnt;
4785}
4786
4787static const struct file_operations rb_simple_fops = {
4788	.open		= tracing_open_generic,
4789	.read		= rb_simple_read,
4790	.write		= rb_simple_write,
4791	.llseek		= default_llseek,
4792};
4793
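/*
 * Create the main tracing control files (trace, trace_pipe,
 * current_tracer, trace_options, buffer_size_kb, tracing_on, ...)
 * under the tracing debugfs root, then the options/ directory and the
 * per_cpu/cpu<N> directories.  Runs as an fs_initcall, so debugfs
 * itself is already registered by the time this is called.
 */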
4794static __init int tracer_init_debugfs(void)
4795{
4796	struct dentry *d_tracer;
4797	int cpu;
4798
4799	trace_access_lock_init();
4800
4801	d_tracer = tracing_init_dentry();
4802
4803	trace_create_file("tracing_enabled", 0644, d_tracer,
4804			&global_trace, &tracing_ctrl_fops);
4805
4806	trace_create_file("trace_options", 0644, d_tracer,
4807			NULL, &tracing_iter_fops);
4808
4809	trace_create_file("tracing_cpumask", 0644, d_tracer,
4810			NULL, &tracing_cpumask_fops);
4811
4812	trace_create_file("trace", 0644, d_tracer,
4813			(void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
4814
4815	trace_create_file("available_tracers", 0444, d_tracer,
4816			&global_trace, &show_traces_fops);
4817
4818	trace_create_file("current_tracer", 0644, d_tracer,
4819			&global_trace, &set_tracer_fops);
4820
4821#ifdef CONFIG_TRACER_MAX_TRACE
4822	trace_create_file("tracing_max_latency", 0644, d_tracer,
4823			&tracing_max_latency, &tracing_max_lat_fops);
4824#endif
4825
4826	trace_create_file("tracing_thresh", 0644, d_tracer,
4827			&tracing_thresh, &tracing_max_lat_fops);
4828
4829	trace_create_file("README", 0444, d_tracer,
4830			NULL, &tracing_readme_fops);
4831
4832	trace_create_file("trace_pipe", 0444, d_tracer,
4833			(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
4834
4835	trace_create_file("buffer_size_kb", 0644, d_tracer,
4836			(void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
4837
4838	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
4839			&global_trace, &tracing_total_entries_fops);
4840
4841	trace_create_file("free_buffer", 0644, d_tracer,
4842			&global_trace, &tracing_free_buffer_fops);
4843
4844	trace_create_file("trace_marker", 0220, d_tracer,
4845			NULL, &tracing_mark_fops);
4846
4847	trace_create_file("saved_cmdlines", 0444, d_tracer,
4848			NULL, &tracing_saved_cmdlines_fops);
4849
4850	trace_create_file("trace_clock", 0644, d_tracer, NULL,
4851			  &trace_clock_fops);
4852
4853	trace_create_file("tracing_on", 0644, d_tracer,
4854			    &global_trace, &rb_simple_fops);
4855
4856#ifdef CONFIG_DYNAMIC_FTRACE
4857	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4858			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
4859#endif
4860
4861	create_trace_options_dir();
4862
4863	for_each_tracing_cpu(cpu)
4864		tracing_init_debugfs_percpu(cpu);
4865
4866	return 0;
4867}
4868
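/*
 * Panic and die notifiers: when ftrace_dump_on_oops is set, dump the
 * ftrace ring buffer to the console so the trace leading up to the
 * crash is preserved in the log.
 */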
4869static int trace_panic_handler(struct notifier_block *this,
4870			       unsigned long event, void *unused)
4871{
4872	if (ftrace_dump_on_oops)
4873		ftrace_dump(ftrace_dump_on_oops);
4874	return NOTIFY_OK;
4875}
4876
4877static struct notifier_block trace_panic_notifier = {
4878	.notifier_call  = trace_panic_handler,
4879	.next           = NULL,
4880	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
4881};
4882
4883static int trace_die_handler(struct notifier_block *self,
4884			     unsigned long val,
4885			     void *data)
4886{
4887	switch (val) {
4888	case DIE_OOPS:
4889		if (ftrace_dump_on_oops)
4890			ftrace_dump(ftrace_dump_on_oops);
4891		break;
4892	default:
4893		break;
4894	}
4895	return NOTIFY_OK;
4896}
4897
4898static struct notifier_block trace_die_notifier = {
4899	.notifier_call = trace_die_handler,
4900	.priority = 200
4901};
4902
4903/*
4904	 * The printk buffer is capped at 1024 characters; we really don't need it that big.
4905	 * Nothing should be printing 1000 characters anyway.
4906 */
4907#define TRACE_MAX_PRINT		1000
4908
4909/*
4910 * Define here KERN_TRACE so that we have one place to modify
4911 * it if we decide to change what log level the ftrace dump
4912 * should be at.
4913 */
4914#define KERN_TRACE		KERN_EMERG
4915
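/*
 * Print a trace_seq to the console at KERN_TRACE (KERN_EMERG) level.
 * The length is clamped to 1000 bytes (the TRACE_MAX_PRINT limit) and
 * the buffer is NUL terminated before printing; the seq is reset
 * afterwards.
 */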
4916void
4917trace_printk_seq(struct trace_seq *s)
4918{
4919	/* Probably should print a warning here. */
4920	if (s->len >= 1000)
4921		s->len = 1000;
4922
4923	/* should be NUL terminated already, but we are paranoid. */
4924	s->buffer[s->len] = 0;
4925
4926	printk(KERN_TRACE "%s", s->buffer);
4927
4928	trace_seq_init(s);
4929}
4930
4931void trace_init_global_iter(struct trace_iterator *iter)
4932{
4933	iter->tr = &global_trace;
4934	iter->trace = current_trace;
4935	iter->cpu_file = TRACE_PIPE_ALL_CPU;
4936}
4937
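/*
 * Dump the whole ftrace ring buffer to the console.  Only one dump is
 * ever performed (dump_ran): interrupts are disabled and an arch
 * spinlock is taken so the dump can run from panic-like contexts,
 * per-CPU tracing is disabled while the entries are printed, and
 * tracing is re-enabled afterwards only when disable_tracing is false.
 */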
4938static void
4939__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
4940{
4941	static arch_spinlock_t ftrace_dump_lock =
4942		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
4943	/* use static because iter can be a bit big for the stack */
4944	static struct trace_iterator iter;
4945	unsigned int old_userobj;
4946	static int dump_ran;
4947	unsigned long flags;
4948	int cnt = 0, cpu;
4949
4950	/* only one dump */
4951	local_irq_save(flags);
4952	arch_spin_lock(&ftrace_dump_lock);
4953	if (dump_ran)
4954		goto out;
4955
4956	dump_ran = 1;
4957
4958	tracing_off();
4959
4960	/* Did function tracer already get disabled? */
4961	if (ftrace_is_dead()) {
4962		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
4963		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
4964	}
4965
4966	if (disable_tracing)
4967		ftrace_kill();
4968
4969	trace_init_global_iter(&iter);
4970
4971	for_each_tracing_cpu(cpu) {
4972		atomic_inc(&iter.tr->data[cpu]->disabled);
4973	}
4974
4975	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
4976
4977	/* don't look at user memory in panic mode */
4978	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
4979
4980	/* Simulate the iterator */
4981	iter.tr = &global_trace;
4982	iter.trace = current_trace;
4983
4984	switch (oops_dump_mode) {
4985	case DUMP_ALL:
4986		iter.cpu_file = TRACE_PIPE_ALL_CPU;
4987		break;
4988	case DUMP_ORIG:
4989		iter.cpu_file = raw_smp_processor_id();
4990		break;
4991	case DUMP_NONE:
4992		goto out_enable;
4993	default:
4994		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
4995		iter.cpu_file = TRACE_PIPE_ALL_CPU;
4996	}
4997
4998	printk(KERN_TRACE "Dumping ftrace buffer:\n");
4999
5000	/*
5001	 * We need to stop all tracing on all CPUs to read
5002	 * the next buffer. This is a bit expensive, but is
5003	 * not done often. We read out everything we can,
5004	 * and then release the locks again.
5005	 */
5006
5007	while (!trace_empty(&iter)) {
5008
5009		if (!cnt)
5010			printk(KERN_TRACE "---------------------------------\n");
5011
5012		cnt++;
5013
5014		/* reset all but tr, trace, and overruns */
5015		memset(&iter.seq, 0,
5016		       sizeof(struct trace_iterator) -
5017		       offsetof(struct trace_iterator, seq));
5018		iter.iter_flags |= TRACE_FILE_LAT_FMT;
5019		iter.pos = -1;
5020
5021		if (trace_find_next_entry_inc(&iter) != NULL) {
5022			int ret;
5023
5024			ret = print_trace_line(&iter);
5025			if (ret != TRACE_TYPE_NO_CONSUME)
5026				trace_consume(&iter);
5027		}
5028		touch_nmi_watchdog();
5029
5030		trace_printk_seq(&iter.seq);
5031	}
5032
5033	if (!cnt)
5034		printk(KERN_TRACE "   (ftrace buffer empty)\n");
5035	else
5036		printk(KERN_TRACE "---------------------------------\n");
5037
5038 out_enable:
5039	/* Re-enable tracing if requested */
5040	if (!disable_tracing) {
5041		trace_flags |= old_userobj;
5042
5043		for_each_tracing_cpu(cpu) {
5044			atomic_dec(&iter.tr->data[cpu]->disabled);
5045		}
5046		tracing_on();
5047	}
5048
5049 out:
5050	arch_spin_unlock(&ftrace_dump_lock);
5051	local_irq_restore(flags);
5052}
5053
5054/* By default: disable tracing after the dump */
5055void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
5056{
5057	__ftrace_dump(true, oops_dump_mode);
5058}
5059EXPORT_SYMBOL_GPL(ftrace_dump);
5060
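/*
 * Early boot setup (early_initcall): allocate the tracing cpumasks and
 * the global ring buffer (plus the max-latency buffer when
 * CONFIG_TRACER_MAX_TRACE is enabled), wire up the per-CPU data,
 * install the nop tracer as the current tracer and register the
 * panic/die notifiers.
 */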
5061__init static int tracer_alloc_buffers(void)
5062{
5063	int ring_buf_size;
5064	enum ring_buffer_flags rb_flags;
5065	int i;
5066	int ret = -ENOMEM;
5067
5068
5069	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
5070		goto out;
5071
5072	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
5073		goto out_free_buffer_mask;
5074
5075	/* Only allocate trace_printk buffers if a trace_printk exists */
5076	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
5077		trace_printk_init_buffers();
5078
5079	/* To save memory, keep the ring buffer size to its minimum */
5080	if (ring_buffer_expanded)
5081		ring_buf_size = trace_buf_size;
5082	else
5083		ring_buf_size = 1;
5084
5085	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5086
5087	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
5088	cpumask_copy(tracing_cpumask, cpu_all_mask);
5089
5090	/* TODO: make the number of buffers hot pluggable with CPUS */
5091	global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
5092	if (!global_trace.buffer) {
5093		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
5094		WARN_ON(1);
5095		goto out_free_cpumask;
5096	}
5097	if (global_trace.buffer_disabled)
5098		tracing_off();
5099
5100
5101#ifdef CONFIG_TRACER_MAX_TRACE
5102	max_tr.buffer = ring_buffer_alloc(1, rb_flags);
5103	if (!max_tr.buffer) {
5104		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
5105		WARN_ON(1);
5106		ring_buffer_free(global_trace.buffer);
5107		goto out_free_cpumask;
5108	}
5109#endif
5110
5111	/* Allocate the first page for all buffers */
5112	for_each_tracing_cpu(i) {
5113		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
5114		max_tr.data[i] = &per_cpu(max_tr_data, i);
5115	}
5116
5117	set_buffer_entries(&global_trace,
5118			   ring_buffer_size(global_trace.buffer, 0));
5119#ifdef CONFIG_TRACER_MAX_TRACE
5120	set_buffer_entries(&max_tr, 1);
5121#endif
5122
5123	trace_init_cmdlines();
5124
5125	register_tracer(&nop_trace);
5126	current_trace = &nop_trace;
5127	/* All seems OK, enable tracing */
5128	tracing_disabled = 0;
5129
5130	atomic_notifier_chain_register(&panic_notifier_list,
5131				       &trace_panic_notifier);
5132
5133	register_die_notifier(&trace_die_notifier);
5134
5135	return 0;
5136
5137out_free_cpumask:
5138	free_cpumask_var(tracing_cpumask);
5139out_free_buffer_mask:
5140	free_cpumask_var(tracing_buffer_mask);
5141out:
5142	return ret;
5143}
5144
5145__init static int clear_boot_tracer(void)
5146{
5147	/*
5148	 * The buffer holding the default boot-up tracer name lives in an
5149	 * init section. This function runs at late_initcall time; if the
5150	 * boot tracer was never found, clear the pointer so that a later
5151	 * registration does not access the buffer that is about to be
5152	 * freed.
5153	 */
5154	if (!default_bootup_tracer)
5155		return 0;
5156
5157	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
5158	       default_bootup_tracer);
5159	default_bootup_tracer = NULL;
5160
5161	return 0;
5162}
5163
5164early_initcall(tracer_alloc_buffers);
5165fs_initcall(tracer_init_debugfs);
5166late_initcall(clear_boot_tracer);