    1// SPDX-License-Identifier: GPL-2.0
    2/*
    3 * ring buffer based function tracer
    4 *
    5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
    6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
    7 *
    8 * Originally taken from the RT patch by:
    9 *    Arnaldo Carvalho de Melo <acme@redhat.com>
   10 *
   11 * Based on code from the latency_tracer, that is:
   12 *  Copyright (C) 2004-2006 Ingo Molnar
   13 *  Copyright (C) 2004 Nadia Yvette Chambers
   14 */
   15#include <linux/ring_buffer.h>
   16#include <generated/utsrelease.h>
   17#include <linux/stacktrace.h>
   18#include <linux/writeback.h>
   19#include <linux/kallsyms.h>
   20#include <linux/security.h>
   21#include <linux/seq_file.h>
   22#include <linux/irqflags.h>
   23#include <linux/debugfs.h>
   24#include <linux/tracefs.h>
   25#include <linux/pagemap.h>
   26#include <linux/hardirq.h>
   27#include <linux/linkage.h>
   28#include <linux/uaccess.h>
   29#include <linux/vmalloc.h>
   30#include <linux/ftrace.h>
   31#include <linux/module.h>
   32#include <linux/percpu.h>
   33#include <linux/splice.h>
   34#include <linux/kdebug.h>
   35#include <linux/string.h>
   36#include <linux/mount.h>
   37#include <linux/rwsem.h>
   38#include <linux/slab.h>
   39#include <linux/ctype.h>
   40#include <linux/init.h>
   41#include <linux/panic_notifier.h>
   42#include <linux/kmemleak.h>
   43#include <linux/poll.h>
   44#include <linux/nmi.h>
   45#include <linux/fs.h>
   46#include <linux/trace.h>
   47#include <linux/sched/clock.h>
   48#include <linux/sched/rt.h>
   49#include <linux/fsnotify.h>
   50#include <linux/irq_work.h>
   51#include <linux/workqueue.h>
   52
   53#include <asm/setup.h> /* COMMAND_LINE_SIZE */
   54
   55#include "trace.h"
   56#include "trace_output.h"
   57
   58#ifdef CONFIG_FTRACE_STARTUP_TEST
   59/*
   60 * We need to change this state when a selftest is running.
    61 * A selftest will look into the ring-buffer to count the
    62 * entries inserted during the selftest, although some concurrent
    63 * insertions into the ring-buffer, such as trace_printk(), could occur
    64 * at the same time, giving false positive or negative results.
   65 */
   66static bool __read_mostly tracing_selftest_running;
   67
   68/*
   69 * If boot-time tracing including tracers/events via kernel cmdline
   70 * is running, we do not want to run SELFTEST.
   71 */
   72bool __read_mostly tracing_selftest_disabled;
   73
   74void __init disable_tracing_selftest(const char *reason)
   75{
   76	if (!tracing_selftest_disabled) {
   77		tracing_selftest_disabled = true;
   78		pr_info("Ftrace startup test is disabled due to %s\n", reason);
   79	}
   80}
   81#else
   82#define tracing_selftest_running	0
   83#define tracing_selftest_disabled	0
   84#endif
   85
   86/* Pipe tracepoints to printk */
   87static struct trace_iterator *tracepoint_print_iter;
   88int tracepoint_printk;
   89static bool tracepoint_printk_stop_on_boot __initdata;
   90static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
   91
   92/* For tracers that don't implement custom flags */
   93static struct tracer_opt dummy_tracer_opt[] = {
   94	{ }
   95};
   96
   97static int
   98dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
   99{
  100	return 0;
  101}
  102
  103/*
  104 * To prevent the comm cache from being overwritten when no
  105 * tracing is active, only save the comm when a trace event
  106 * occurred.
  107 */
  108static DEFINE_PER_CPU(bool, trace_taskinfo_save);
  109
  110/*
  111 * Kill all tracing for good (never come back).
  112 * It is initialized to 1 but will turn to zero if the initialization
  113 * of the tracer is successful. But that is the only place that sets
  114 * this back to zero.
  115 */
  116static int tracing_disabled = 1;
  117
  118cpumask_var_t __read_mostly	tracing_buffer_mask;
  119
  120/*
  121 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
  122 *
  123 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
  124 * is set, then ftrace_dump is called. This will output the contents
  125 * of the ftrace buffers to the console.  This is very useful for
   126 * capturing traces that lead to crashes and outputting them to a
  127 * serial console.
  128 *
   129 * It is off by default, but you can enable it either by specifying
   130 * "ftrace_dump_on_oops" on the kernel command line, or by setting
   131 * /proc/sys/kernel/ftrace_dump_on_oops
  132 * Set 1 if you want to dump buffers of all CPUs
  133 * Set 2 if you want to dump the buffer of the CPU that triggered oops
  134 */
  135
  136enum ftrace_dump_mode ftrace_dump_on_oops;
  137
  138/* When set, tracing will stop when a WARN*() is hit */
  139int __disable_trace_on_warning;
  140
  141#ifdef CONFIG_TRACE_EVAL_MAP_FILE
  142/* Map of enums to their values, for "eval_map" file */
  143struct trace_eval_map_head {
  144	struct module			*mod;
  145	unsigned long			length;
  146};
  147
  148union trace_eval_map_item;
  149
  150struct trace_eval_map_tail {
  151	/*
  152	 * "end" is first and points to NULL as it must be different
  153	 * than "mod" or "eval_string"
  154	 */
  155	union trace_eval_map_item	*next;
  156	const char			*end;	/* points to NULL */
  157};
  158
  159static DEFINE_MUTEX(trace_eval_mutex);
  160
  161/*
  162 * The trace_eval_maps are saved in an array with two extra elements,
  163 * one at the beginning, and one at the end. The beginning item contains
  164 * the count of the saved maps (head.length), and the module they
  165 * belong to if not built in (head.mod). The ending item contains a
  166 * pointer to the next array of saved eval_map items.
  167 */
  168union trace_eval_map_item {
  169	struct trace_eval_map		map;
  170	struct trace_eval_map_head	head;
  171	struct trace_eval_map_tail	tail;
  172};
  173
  174static union trace_eval_map_item *trace_eval_maps;
  175#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
  176
  177int tracing_set_tracer(struct trace_array *tr, const char *buf);
  178static void ftrace_trace_userstack(struct trace_array *tr,
  179				   struct trace_buffer *buffer,
  180				   unsigned int trace_ctx);
  181
  182#define MAX_TRACER_SIZE		100
  183static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
  184static char *default_bootup_tracer;
  185
  186static bool allocate_snapshot;
  187static bool snapshot_at_boot;
  188
  189static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
  190static int boot_instance_index;
  191
  192static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
  193static int boot_snapshot_index;
  194
  195static int __init set_cmdline_ftrace(char *str)
  196{
  197	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
  198	default_bootup_tracer = bootup_tracer_buf;
  199	/* We are using ftrace early, expand it */
  200	trace_set_ring_buffer_expanded(NULL);
  201	return 1;
  202}
  203__setup("ftrace=", set_cmdline_ftrace);
  204
  205static int __init set_ftrace_dump_on_oops(char *str)
  206{
  207	if (*str++ != '=' || !*str || !strcmp("1", str)) {
  208		ftrace_dump_on_oops = DUMP_ALL;
  209		return 1;
  210	}
  211
  212	if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
  213		ftrace_dump_on_oops = DUMP_ORIG;
   214		return 1;
   215	}
   216
   217	return 0;
  218}
  219__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
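/*
 * Editor's note: a minimal usage sketch, not part of this file. The two
 * boot parameters handled above are typically combined on the kernel
 * command line, e.g.:
 *
 *	ftrace=function_graph ftrace_dump_on_oops=orig_cpu
 *
 * which starts the function_graph tracer early in boot and, on an oops,
 * dumps only the buffer of the CPU that triggered it to the console.
 */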
  220
  221static int __init stop_trace_on_warning(char *str)
  222{
  223	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
  224		__disable_trace_on_warning = 1;
  225	return 1;
  226}
  227__setup("traceoff_on_warning", stop_trace_on_warning);
  228
  229static int __init boot_alloc_snapshot(char *str)
  230{
  231	char *slot = boot_snapshot_info + boot_snapshot_index;
  232	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
  233	int ret;
  234
  235	if (str[0] == '=') {
  236		str++;
  237		if (strlen(str) >= left)
  238			return -1;
  239
  240		ret = snprintf(slot, left, "%s\t", str);
  241		boot_snapshot_index += ret;
  242	} else {
  243		allocate_snapshot = true;
  244		/* We also need the main ring buffer expanded */
  245		trace_set_ring_buffer_expanded(NULL);
  246	}
  247	return 1;
  248}
  249__setup("alloc_snapshot", boot_alloc_snapshot);
  250
  251
  252static int __init boot_snapshot(char *str)
  253{
  254	snapshot_at_boot = true;
  255	boot_alloc_snapshot(str);
  256	return 1;
  257}
  258__setup("ftrace_boot_snapshot", boot_snapshot);
  259
  260
  261static int __init boot_instance(char *str)
  262{
  263	char *slot = boot_instance_info + boot_instance_index;
  264	int left = sizeof(boot_instance_info) - boot_instance_index;
  265	int ret;
  266
  267	if (strlen(str) >= left)
  268		return -1;
  269
  270	ret = snprintf(slot, left, "%s\t", str);
  271	boot_instance_index += ret;
  272
  273	return 1;
  274}
  275__setup("trace_instance=", boot_instance);
  276
  277
  278static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
  279
  280static int __init set_trace_boot_options(char *str)
  281{
  282	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
  283	return 1;
  284}
  285__setup("trace_options=", set_trace_boot_options);
  286
  287static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
  288static char *trace_boot_clock __initdata;
  289
  290static int __init set_trace_boot_clock(char *str)
  291{
  292	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
  293	trace_boot_clock = trace_boot_clock_buf;
  294	return 1;
  295}
  296__setup("trace_clock=", set_trace_boot_clock);
  297
  298static int __init set_tracepoint_printk(char *str)
  299{
  300	/* Ignore the "tp_printk_stop_on_boot" param */
  301	if (*str == '_')
  302		return 0;
  303
  304	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
  305		tracepoint_printk = 1;
  306	return 1;
  307}
  308__setup("tp_printk", set_tracepoint_printk);
  309
  310static int __init set_tracepoint_printk_stop(char *str)
  311{
  312	tracepoint_printk_stop_on_boot = true;
  313	return 1;
  314}
  315__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
  316
  317unsigned long long ns2usecs(u64 nsec)
  318{
  319	nsec += 500;
  320	do_div(nsec, 1000);
  321	return nsec;
  322}
  323
  324static void
  325trace_process_export(struct trace_export *export,
  326	       struct ring_buffer_event *event, int flag)
  327{
  328	struct trace_entry *entry;
  329	unsigned int size = 0;
  330
  331	if (export->flags & flag) {
  332		entry = ring_buffer_event_data(event);
  333		size = ring_buffer_event_length(event);
  334		export->write(export, entry, size);
  335	}
  336}
  337
  338static DEFINE_MUTEX(ftrace_export_lock);
  339
  340static struct trace_export __rcu *ftrace_exports_list __read_mostly;
  341
  342static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
  343static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
  344static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
  345
  346static inline void ftrace_exports_enable(struct trace_export *export)
  347{
  348	if (export->flags & TRACE_EXPORT_FUNCTION)
  349		static_branch_inc(&trace_function_exports_enabled);
  350
  351	if (export->flags & TRACE_EXPORT_EVENT)
  352		static_branch_inc(&trace_event_exports_enabled);
  353
  354	if (export->flags & TRACE_EXPORT_MARKER)
  355		static_branch_inc(&trace_marker_exports_enabled);
  356}
  357
  358static inline void ftrace_exports_disable(struct trace_export *export)
  359{
  360	if (export->flags & TRACE_EXPORT_FUNCTION)
  361		static_branch_dec(&trace_function_exports_enabled);
  362
  363	if (export->flags & TRACE_EXPORT_EVENT)
  364		static_branch_dec(&trace_event_exports_enabled);
  365
  366	if (export->flags & TRACE_EXPORT_MARKER)
  367		static_branch_dec(&trace_marker_exports_enabled);
  368}
  369
  370static void ftrace_exports(struct ring_buffer_event *event, int flag)
  371{
  372	struct trace_export *export;
  373
  374	preempt_disable_notrace();
  375
  376	export = rcu_dereference_raw_check(ftrace_exports_list);
  377	while (export) {
  378		trace_process_export(export, event, flag);
  379		export = rcu_dereference_raw_check(export->next);
  380	}
  381
  382	preempt_enable_notrace();
  383}
  384
  385static inline void
  386add_trace_export(struct trace_export **list, struct trace_export *export)
  387{
  388	rcu_assign_pointer(export->next, *list);
  389	/*
  390	 * We are entering export into the list but another
  391	 * CPU might be walking that list. We need to make sure
  392	 * the export->next pointer is valid before another CPU sees
  393	 * the export pointer included into the list.
  394	 */
  395	rcu_assign_pointer(*list, export);
  396}
  397
  398static inline int
  399rm_trace_export(struct trace_export **list, struct trace_export *export)
  400{
  401	struct trace_export **p;
  402
  403	for (p = list; *p != NULL; p = &(*p)->next)
  404		if (*p == export)
  405			break;
  406
  407	if (*p != export)
  408		return -1;
  409
  410	rcu_assign_pointer(*p, (*p)->next);
  411
  412	return 0;
  413}
  414
  415static inline void
  416add_ftrace_export(struct trace_export **list, struct trace_export *export)
  417{
  418	ftrace_exports_enable(export);
  419
  420	add_trace_export(list, export);
  421}
  422
  423static inline int
  424rm_ftrace_export(struct trace_export **list, struct trace_export *export)
  425{
  426	int ret;
  427
  428	ret = rm_trace_export(list, export);
  429	ftrace_exports_disable(export);
  430
  431	return ret;
  432}
  433
  434int register_ftrace_export(struct trace_export *export)
  435{
  436	if (WARN_ON_ONCE(!export->write))
  437		return -1;
  438
  439	mutex_lock(&ftrace_export_lock);
  440
  441	add_ftrace_export(&ftrace_exports_list, export);
  442
  443	mutex_unlock(&ftrace_export_lock);
  444
  445	return 0;
  446}
  447EXPORT_SYMBOL_GPL(register_ftrace_export);
  448
  449int unregister_ftrace_export(struct trace_export *export)
  450{
  451	int ret;
  452
  453	mutex_lock(&ftrace_export_lock);
  454
  455	ret = rm_ftrace_export(&ftrace_exports_list, export);
  456
  457	mutex_unlock(&ftrace_export_lock);
  458
  459	return ret;
  460}
  461EXPORT_SYMBOL_GPL(unregister_ftrace_export);
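/*
 * Editor's note: an illustrative sketch of how a module might use the
 * export hooks above. It is not part of this file, and the callback and
 * variable names are made up for the example. A trace_export forwards
 * raw trace entries to an external sink:
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int len)
 *	{
 *		// push the raw entry to some out-of-band transport
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */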
  462
  463/* trace_flags holds trace_options default values */
  464#define TRACE_DEFAULT_FLAGS						\
  465	(FUNCTION_DEFAULT_FLAGS |					\
  466	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
  467	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
  468	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
  469	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
  470	 TRACE_ITER_HASH_PTR)
  471
  472/* trace_options that are only supported by global_trace */
  473#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
  474	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
  475
  476/* trace_flags that are default zero for instances */
  477#define ZEROED_TRACE_FLAGS \
  478	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
  479
  480/*
  481 * The global_trace is the descriptor that holds the top-level tracing
  482 * buffers for the live tracing.
  483 */
  484static struct trace_array global_trace = {
  485	.trace_flags = TRACE_DEFAULT_FLAGS,
  486};
  487
  488void trace_set_ring_buffer_expanded(struct trace_array *tr)
  489{
  490	if (!tr)
  491		tr = &global_trace;
  492	tr->ring_buffer_expanded = true;
  493}
  494
  495LIST_HEAD(ftrace_trace_arrays);
  496
  497int trace_array_get(struct trace_array *this_tr)
  498{
  499	struct trace_array *tr;
  500	int ret = -ENODEV;
  501
  502	mutex_lock(&trace_types_lock);
  503	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
  504		if (tr == this_tr) {
  505			tr->ref++;
  506			ret = 0;
  507			break;
  508		}
  509	}
  510	mutex_unlock(&trace_types_lock);
  511
  512	return ret;
  513}
  514
  515static void __trace_array_put(struct trace_array *this_tr)
  516{
  517	WARN_ON(!this_tr->ref);
  518	this_tr->ref--;
  519}
  520
  521/**
  522 * trace_array_put - Decrement the reference counter for this trace array.
  523 * @this_tr : pointer to the trace array
  524 *
  525 * NOTE: Use this when we no longer need the trace array returned by
  526 * trace_array_get_by_name(). This ensures the trace array can be later
  527 * destroyed.
  528 *
  529 */
  530void trace_array_put(struct trace_array *this_tr)
  531{
  532	if (!this_tr)
  533		return;
  534
  535	mutex_lock(&trace_types_lock);
  536	__trace_array_put(this_tr);
  537	mutex_unlock(&trace_types_lock);
  538}
  539EXPORT_SYMBOL_GPL(trace_array_put);
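/*
 * Editor's note: a usage sketch, not part of this file. A module that
 * logs into its own tracing instance pairs the get/put as the kernel-doc
 * above describes (the instance name here is arbitrary, and the exact
 * trace_array_get_by_name() signature differs between kernel versions):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (tr) {
 *		... write events into the instance ...
 *		trace_array_put(tr);
 *	}
 */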
  540
  541int tracing_check_open_get_tr(struct trace_array *tr)
  542{
  543	int ret;
  544
  545	ret = security_locked_down(LOCKDOWN_TRACEFS);
  546	if (ret)
  547		return ret;
  548
  549	if (tracing_disabled)
  550		return -ENODEV;
  551
  552	if (tr && trace_array_get(tr) < 0)
  553		return -ENODEV;
  554
  555	return 0;
  556}
  557
  558int call_filter_check_discard(struct trace_event_call *call, void *rec,
  559			      struct trace_buffer *buffer,
  560			      struct ring_buffer_event *event)
  561{
  562	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
  563	    !filter_match_preds(call->filter, rec)) {
  564		__trace_event_discard_commit(buffer, event);
  565		return 1;
  566	}
  567
  568	return 0;
  569}
  570
  571/**
  572 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
  573 * @filtered_pids: The list of pids to check
  574 * @search_pid: The PID to find in @filtered_pids
  575 *
  576 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
  577 */
  578bool
  579trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
  580{
  581	return trace_pid_list_is_set(filtered_pids, search_pid);
  582}
  583
  584/**
  585 * trace_ignore_this_task - should a task be ignored for tracing
  586 * @filtered_pids: The list of pids to check
  587 * @filtered_no_pids: The list of pids not to be traced
  588 * @task: The task that should be ignored if not filtered
  589 *
  590 * Checks if @task should be traced or not from @filtered_pids.
  591 * Returns true if @task should *NOT* be traced.
  592 * Returns false if @task should be traced.
  593 */
  594bool
  595trace_ignore_this_task(struct trace_pid_list *filtered_pids,
  596		       struct trace_pid_list *filtered_no_pids,
  597		       struct task_struct *task)
  598{
  599	/*
  600	 * If filtered_no_pids is not empty, and the task's pid is listed
  601	 * in filtered_no_pids, then return true.
  602	 * Otherwise, if filtered_pids is empty, that means we can
  603	 * trace all tasks. If it has content, then only trace pids
  604	 * within filtered_pids.
  605	 */
  606
  607	return (filtered_pids &&
  608		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
  609		(filtered_no_pids &&
  610		 trace_find_filtered_pid(filtered_no_pids, task->pid));
  611}
  612
  613/**
  614 * trace_filter_add_remove_task - Add or remove a task from a pid_list
  615 * @pid_list: The list to modify
  616 * @self: The current task for fork or NULL for exit
  617 * @task: The task to add or remove
  618 *
  619 * If adding a task, if @self is defined, the task is only added if @self
  620 * is also included in @pid_list. This happens on fork and tasks should
  621 * only be added when the parent is listed. If @self is NULL, then the
  622 * @task pid will be removed from the list, which would happen on exit
  623 * of a task.
  624 */
  625void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
  626				  struct task_struct *self,
  627				  struct task_struct *task)
  628{
  629	if (!pid_list)
  630		return;
  631
  632	/* For forks, we only add if the forking task is listed */
  633	if (self) {
  634		if (!trace_find_filtered_pid(pid_list, self->pid))
  635			return;
  636	}
  637
  638	/* "self" is set for forks, and NULL for exits */
  639	if (self)
  640		trace_pid_list_set(pid_list, task->pid);
  641	else
  642		trace_pid_list_clear(pid_list, task->pid);
  643}
  644
  645/**
  646 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
  647 * @pid_list: The pid list to show
  648 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
  649 * @pos: The position of the file
  650 *
  651 * This is used by the seq_file "next" operation to iterate the pids
  652 * listed in a trace_pid_list structure.
  653 *
  654 * Returns the pid+1 as we want to display pid of zero, but NULL would
  655 * stop the iteration.
  656 */
  657void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
  658{
  659	long pid = (unsigned long)v;
  660	unsigned int next;
  661
  662	(*pos)++;
  663
  664	/* pid already is +1 of the actual previous bit */
  665	if (trace_pid_list_next(pid_list, pid, &next) < 0)
  666		return NULL;
  667
  668	pid = next;
  669
  670	/* Return pid + 1 to allow zero to be represented */
  671	return (void *)(pid + 1);
  672}
  673
  674/**
  675 * trace_pid_start - Used for seq_file to start reading pid lists
  676 * @pid_list: The pid list to show
  677 * @pos: The position of the file
  678 *
  679 * This is used by seq_file "start" operation to start the iteration
  680 * of listing pids.
  681 *
  682 * Returns the pid+1 as we want to display pid of zero, but NULL would
  683 * stop the iteration.
  684 */
  685void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
  686{
  687	unsigned long pid;
  688	unsigned int first;
  689	loff_t l = 0;
  690
  691	if (trace_pid_list_first(pid_list, &first) < 0)
  692		return NULL;
  693
  694	pid = first;
  695
  696	/* Return pid + 1 so that zero can be the exit value */
  697	for (pid++; pid && l < *pos;
  698	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
  699		;
  700	return (void *)pid;
  701}
  702
  703/**
  704 * trace_pid_show - show the current pid in seq_file processing
  705 * @m: The seq_file structure to write into
  706 * @v: A void pointer of the pid (+1) value to display
  707 *
  708 * Can be directly used by seq_file operations to display the current
  709 * pid value.
  710 */
  711int trace_pid_show(struct seq_file *m, void *v)
  712{
  713	unsigned long pid = (unsigned long)v - 1;
  714
  715	seq_printf(m, "%lu\n", pid);
  716	return 0;
  717}
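/*
 * Editor's note: a sketch of how the three helpers above are meant to be
 * wired together; it is not part of this file, and the wrapper names are
 * made up (real callers live in the event and function tracer code). A
 * pid-list file typically uses seq_operations whose ->start/->next look
 * up the current pid list under RCU and then call trace_pid_start() /
 * trace_pid_next(), while ->show can be trace_pid_show() directly:
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		struct trace_pid_list *pid_list = ...;	// rcu_dereference of the list
 *		return trace_pid_start(pid_list, pos);
 *	}
 *
 *	static const struct seq_operations pid_seq_ops = {
 *		.start	= p_start,
 *		.next	= p_next,		// wraps trace_pid_next()
 *		.stop	= p_stop,
 *		.show	= trace_pid_show,
 *	};
 */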
  718
  719/* 128 should be much more than enough */
  720#define PID_BUF_SIZE		127
  721
  722int trace_pid_write(struct trace_pid_list *filtered_pids,
  723		    struct trace_pid_list **new_pid_list,
  724		    const char __user *ubuf, size_t cnt)
  725{
  726	struct trace_pid_list *pid_list;
  727	struct trace_parser parser;
  728	unsigned long val;
  729	int nr_pids = 0;
  730	ssize_t read = 0;
  731	ssize_t ret;
  732	loff_t pos;
  733	pid_t pid;
  734
  735	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
  736		return -ENOMEM;
  737
  738	/*
  739	 * Always recreate a new array. The write is an all or nothing
  740	 * operation. Always create a new array when adding new pids by
  741	 * the user. If the operation fails, then the current list is
  742	 * not modified.
  743	 */
  744	pid_list = trace_pid_list_alloc();
  745	if (!pid_list) {
  746		trace_parser_put(&parser);
  747		return -ENOMEM;
  748	}
  749
  750	if (filtered_pids) {
  751		/* copy the current bits to the new max */
  752		ret = trace_pid_list_first(filtered_pids, &pid);
  753		while (!ret) {
  754			trace_pid_list_set(pid_list, pid);
  755			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
  756			nr_pids++;
  757		}
  758	}
  759
  760	ret = 0;
  761	while (cnt > 0) {
  762
  763		pos = 0;
  764
  765		ret = trace_get_user(&parser, ubuf, cnt, &pos);
  766		if (ret < 0)
  767			break;
  768
  769		read += ret;
  770		ubuf += ret;
  771		cnt -= ret;
  772
  773		if (!trace_parser_loaded(&parser))
  774			break;
  775
  776		ret = -EINVAL;
  777		if (kstrtoul(parser.buffer, 0, &val))
  778			break;
  779
  780		pid = (pid_t)val;
  781
  782		if (trace_pid_list_set(pid_list, pid) < 0) {
  783			ret = -1;
  784			break;
  785		}
  786		nr_pids++;
  787
  788		trace_parser_clear(&parser);
  789		ret = 0;
  790	}
  791	trace_parser_put(&parser);
  792
  793	if (ret < 0) {
  794		trace_pid_list_free(pid_list);
  795		return ret;
  796	}
  797
  798	if (!nr_pids) {
  799		/* Cleared the list of pids */
  800		trace_pid_list_free(pid_list);
  801		pid_list = NULL;
  802	}
  803
  804	*new_pid_list = pid_list;
  805
  806	return read;
  807}
  808
  809static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
  810{
  811	u64 ts;
  812
  813	/* Early boot up does not have a buffer yet */
  814	if (!buf->buffer)
  815		return trace_clock_local();
  816
  817	ts = ring_buffer_time_stamp(buf->buffer);
  818	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
  819
  820	return ts;
  821}
  822
  823u64 ftrace_now(int cpu)
  824{
  825	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
  826}
  827
  828/**
  829 * tracing_is_enabled - Show if global_trace has been enabled
  830 *
  831 * Shows if the global trace has been enabled or not. It uses the
  832 * mirror flag "buffer_disabled" to be used in fast paths such as for
  833 * the irqsoff tracer. But it may be inaccurate due to races. If you
  834 * need to know the accurate state, use tracing_is_on() which is a little
  835 * slower, but accurate.
  836 */
  837int tracing_is_enabled(void)
  838{
  839	/*
  840	 * For quick access (irqsoff uses this in fast path), just
  841	 * return the mirror variable of the state of the ring buffer.
  842	 * It's a little racy, but we don't really care.
  843	 */
  844	smp_rmb();
  845	return !global_trace.buffer_disabled;
  846}
  847
  848/*
  849 * trace_buf_size is the size in bytes that is allocated
  850 * for a buffer. Note, the number of bytes is always rounded
  851 * to page size.
  852 *
  853 * This number is purposely set to a low number of 16384.
  854 * If the dump on oops happens, it will be much appreciated
   855 * to not have to wait for all that output. Anyway, this is
   856 * configurable at both boot time and run time.
  857 */
  858#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
  859
  860static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
  861
  862/* trace_types holds a link list of available tracers. */
  863static struct tracer		*trace_types __read_mostly;
  864
  865/*
  866 * trace_types_lock is used to protect the trace_types list.
  867 */
  868DEFINE_MUTEX(trace_types_lock);
  869
  870/*
  871 * serialize the access of the ring buffer
  872 *
   873 * The ring buffer serializes readers, but that is only low-level protection.
   874 * The validity of the events (returned by ring_buffer_peek() etc.)
   875 * is not protected by the ring buffer.
   876 *
   877 * The content of events may become garbage if we allow another process to
   878 * consume these events concurrently:
   879 *   A) the page of the consumed events may become a normal page
   880 *      (not a reader page) in the ring buffer, and this page will be
   881 *      rewritten by the events producer.
   882 *   B) The page of the consumed events may become a page for splice_read,
   883 *      and this page will be returned to the system.
   884 *
   885 * These primitives allow multiple processes to access different cpu ring
   886 * buffers concurrently.
   887 *
   888 * These primitives don't distinguish read-only and read-consume access.
   889 * Multiple read-only accesses are also serialized.
  890 */
  891
  892#ifdef CONFIG_SMP
  893static DECLARE_RWSEM(all_cpu_access_lock);
  894static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
  895
  896static inline void trace_access_lock(int cpu)
  897{
  898	if (cpu == RING_BUFFER_ALL_CPUS) {
  899		/* gain it for accessing the whole ring buffer. */
  900		down_write(&all_cpu_access_lock);
  901	} else {
  902		/* gain it for accessing a cpu ring buffer. */
  903
  904		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
  905		down_read(&all_cpu_access_lock);
  906
  907		/* Secondly block other access to this @cpu ring buffer. */
  908		mutex_lock(&per_cpu(cpu_access_lock, cpu));
  909	}
  910}
  911
  912static inline void trace_access_unlock(int cpu)
  913{
  914	if (cpu == RING_BUFFER_ALL_CPUS) {
  915		up_write(&all_cpu_access_lock);
  916	} else {
  917		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
  918		up_read(&all_cpu_access_lock);
  919	}
  920}
  921
  922static inline void trace_access_lock_init(void)
  923{
  924	int cpu;
  925
  926	for_each_possible_cpu(cpu)
  927		mutex_init(&per_cpu(cpu_access_lock, cpu));
  928}
  929
  930#else
  931
  932static DEFINE_MUTEX(access_lock);
  933
  934static inline void trace_access_lock(int cpu)
  935{
  936	(void)cpu;
  937	mutex_lock(&access_lock);
  938}
  939
  940static inline void trace_access_unlock(int cpu)
  941{
  942	(void)cpu;
  943	mutex_unlock(&access_lock);
  944}
  945
  946static inline void trace_access_lock_init(void)
  947{
  948}
  949
  950#endif
  951
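/*
 * Editor's note: a usage sketch, not part of this file. Readers in this
 * file bracket ring-buffer consumption with these helpers, e.g.:
 *
 *	trace_access_lock(iter->cpu_file);
 *	// peek at or consume events for that CPU (or all CPUs)
 *	trace_access_unlock(iter->cpu_file);
 *
 * Passing RING_BUFFER_ALL_CPUS takes the lock exclusively across all
 * CPUs; a specific CPU number only excludes other readers of that CPU's
 * buffer plus any all-CPU reader.
 */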
  952#ifdef CONFIG_STACKTRACE
  953static void __ftrace_trace_stack(struct trace_buffer *buffer,
  954				 unsigned int trace_ctx,
  955				 int skip, struct pt_regs *regs);
  956static inline void ftrace_trace_stack(struct trace_array *tr,
  957				      struct trace_buffer *buffer,
  958				      unsigned int trace_ctx,
  959				      int skip, struct pt_regs *regs);
  960
  961#else
  962static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
  963					unsigned int trace_ctx,
  964					int skip, struct pt_regs *regs)
  965{
  966}
  967static inline void ftrace_trace_stack(struct trace_array *tr,
  968				      struct trace_buffer *buffer,
  969				      unsigned long trace_ctx,
  970				      int skip, struct pt_regs *regs)
  971{
  972}
  973
  974#endif
  975
  976static __always_inline void
  977trace_event_setup(struct ring_buffer_event *event,
  978		  int type, unsigned int trace_ctx)
  979{
  980	struct trace_entry *ent = ring_buffer_event_data(event);
  981
  982	tracing_generic_entry_update(ent, type, trace_ctx);
  983}
  984
  985static __always_inline struct ring_buffer_event *
  986__trace_buffer_lock_reserve(struct trace_buffer *buffer,
  987			  int type,
  988			  unsigned long len,
  989			  unsigned int trace_ctx)
  990{
  991	struct ring_buffer_event *event;
  992
  993	event = ring_buffer_lock_reserve(buffer, len);
  994	if (event != NULL)
  995		trace_event_setup(event, type, trace_ctx);
  996
  997	return event;
  998}
  999
 1000void tracer_tracing_on(struct trace_array *tr)
 1001{
 1002	if (tr->array_buffer.buffer)
 1003		ring_buffer_record_on(tr->array_buffer.buffer);
 1004	/*
 1005	 * This flag is looked at when buffers haven't been allocated
 1006	 * yet, or by some tracers (like irqsoff), that just want to
 1007	 * know if the ring buffer has been disabled, but it can handle
 1008	 * races of where it gets disabled but we still do a record.
 1009	 * As the check is in the fast path of the tracers, it is more
 1010	 * important to be fast than accurate.
 1011	 */
 1012	tr->buffer_disabled = 0;
 1013	/* Make the flag seen by readers */
 1014	smp_wmb();
 1015}
 1016
 1017/**
 1018 * tracing_on - enable tracing buffers
 1019 *
 1020 * This function enables tracing buffers that may have been
 1021 * disabled with tracing_off.
 1022 */
 1023void tracing_on(void)
 1024{
 1025	tracer_tracing_on(&global_trace);
 1026}
 1027EXPORT_SYMBOL_GPL(tracing_on);
 1028
 1029
 1030static __always_inline void
 1031__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
 1032{
 1033	__this_cpu_write(trace_taskinfo_save, true);
 1034
 1035	/* If this is the temp buffer, we need to commit fully */
 1036	if (this_cpu_read(trace_buffered_event) == event) {
 1037		/* Length is in event->array[0] */
 1038		ring_buffer_write(buffer, event->array[0], &event->array[1]);
 1039		/* Release the temp buffer */
 1040		this_cpu_dec(trace_buffered_event_cnt);
 1041		/* ring_buffer_unlock_commit() enables preemption */
 1042		preempt_enable_notrace();
 1043	} else
 1044		ring_buffer_unlock_commit(buffer);
 1045}
 1046
 1047int __trace_array_puts(struct trace_array *tr, unsigned long ip,
 1048		       const char *str, int size)
 1049{
 1050	struct ring_buffer_event *event;
 1051	struct trace_buffer *buffer;
 1052	struct print_entry *entry;
 1053	unsigned int trace_ctx;
 1054	int alloc;
 1055
 1056	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
 1057		return 0;
 1058
 1059	if (unlikely(tracing_selftest_running && tr == &global_trace))
 1060		return 0;
 1061
 1062	if (unlikely(tracing_disabled))
 1063		return 0;
 1064
 1065	alloc = sizeof(*entry) + size + 2; /* possible \n added */
 1066
 1067	trace_ctx = tracing_gen_ctx();
 1068	buffer = tr->array_buffer.buffer;
 1069	ring_buffer_nest_start(buffer);
 1070	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
 1071					    trace_ctx);
 1072	if (!event) {
 1073		size = 0;
 1074		goto out;
 1075	}
 1076
 1077	entry = ring_buffer_event_data(event);
 1078	entry->ip = ip;
 1079
 1080	memcpy(&entry->buf, str, size);
 1081
 1082	/* Add a newline if necessary */
 1083	if (entry->buf[size - 1] != '\n') {
 1084		entry->buf[size] = '\n';
 1085		entry->buf[size + 1] = '\0';
 1086	} else
 1087		entry->buf[size] = '\0';
 1088
 1089	__buffer_unlock_commit(buffer, event);
 1090	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
 1091 out:
 1092	ring_buffer_nest_end(buffer);
 1093	return size;
 1094}
 1095EXPORT_SYMBOL_GPL(__trace_array_puts);
 1096
 1097/**
 1098 * __trace_puts - write a constant string into the trace buffer.
 1099 * @ip:	   The address of the caller
 1100 * @str:   The constant string to write
 1101 * @size:  The size of the string.
 1102 */
 1103int __trace_puts(unsigned long ip, const char *str, int size)
 1104{
 1105	return __trace_array_puts(&global_trace, ip, str, size);
 1106}
 1107EXPORT_SYMBOL_GPL(__trace_puts);
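/*
 * Editor's note: __trace_puts() is normally reached through the
 * trace_puts() macro, which picks __trace_bputs() for compile-time
 * constant strings. A typical debugging use (a sketch, not part of this
 * file) is:
 *
 *	trace_puts("reached the suspect path\n");
 *
 * which drops a single marker line into the trace buffer with less
 * overhead than a trace_printk() that has to parse a format string.
 */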
 1108
 1109/**
 1110 * __trace_bputs - write the pointer to a constant string into trace buffer
 1111 * @ip:	   The address of the caller
 1112 * @str:   The constant string to write to the buffer to
 1113 */
 1114int __trace_bputs(unsigned long ip, const char *str)
 1115{
 1116	struct ring_buffer_event *event;
 1117	struct trace_buffer *buffer;
 1118	struct bputs_entry *entry;
 1119	unsigned int trace_ctx;
 1120	int size = sizeof(struct bputs_entry);
 1121	int ret = 0;
 1122
 1123	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
 1124		return 0;
 1125
 1126	if (unlikely(tracing_selftest_running || tracing_disabled))
 1127		return 0;
 1128
 1129	trace_ctx = tracing_gen_ctx();
 1130	buffer = global_trace.array_buffer.buffer;
 1131
 1132	ring_buffer_nest_start(buffer);
 1133	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
 1134					    trace_ctx);
 1135	if (!event)
 1136		goto out;
 1137
 1138	entry = ring_buffer_event_data(event);
 1139	entry->ip			= ip;
 1140	entry->str			= str;
 1141
 1142	__buffer_unlock_commit(buffer, event);
 1143	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
 1144
 1145	ret = 1;
 1146 out:
 1147	ring_buffer_nest_end(buffer);
 1148	return ret;
 1149}
 1150EXPORT_SYMBOL_GPL(__trace_bputs);
 1151
 1152#ifdef CONFIG_TRACER_SNAPSHOT
 1153static void tracing_snapshot_instance_cond(struct trace_array *tr,
 1154					   void *cond_data)
 1155{
 1156	struct tracer *tracer = tr->current_trace;
 1157	unsigned long flags;
 1158
 1159	if (in_nmi()) {
 1160		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
 1161		trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
 1162		return;
 1163	}
 1164
 1165	if (!tr->allocated_snapshot) {
 1166		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
 1167		trace_array_puts(tr, "*** stopping trace here!   ***\n");
 1168		tracer_tracing_off(tr);
 1169		return;
 1170	}
 1171
 1172	/* Note, snapshot can not be used when the tracer uses it */
 1173	if (tracer->use_max_tr) {
 1174		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
 1175		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
 1176		return;
 1177	}
 1178
 1179	local_irq_save(flags);
 1180	update_max_tr(tr, current, smp_processor_id(), cond_data);
 1181	local_irq_restore(flags);
 1182}
 1183
 1184void tracing_snapshot_instance(struct trace_array *tr)
 1185{
 1186	tracing_snapshot_instance_cond(tr, NULL);
 1187}
 1188
 1189/**
 1190 * tracing_snapshot - take a snapshot of the current buffer.
 1191 *
 1192 * This causes a swap between the snapshot buffer and the current live
 1193 * tracing buffer. You can use this to take snapshots of the live
 1194 * trace when some condition is triggered, but continue to trace.
 1195 *
 1196 * Note, make sure to allocate the snapshot with either
 1197 * a tracing_snapshot_alloc(), or by doing it manually
 1198 * with: echo 1 > /sys/kernel/tracing/snapshot
 1199 *
 1200 * If the snapshot buffer is not allocated, it will stop tracing.
 1201 * Basically making a permanent snapshot.
 1202 */
 1203void tracing_snapshot(void)
 1204{
 1205	struct trace_array *tr = &global_trace;
 1206
 1207	tracing_snapshot_instance(tr);
 1208}
 1209EXPORT_SYMBOL_GPL(tracing_snapshot);
 1210
 1211/**
 1212 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 1213 * @tr:		The tracing instance to snapshot
 1214 * @cond_data:	The data to be tested conditionally, and possibly saved
 1215 *
 1216 * This is the same as tracing_snapshot() except that the snapshot is
 1217 * conditional - the snapshot will only happen if the
 1218 * cond_snapshot.update() implementation receiving the cond_data
 1219 * returns true, which means that the trace array's cond_snapshot
 1220 * update() operation used the cond_data to determine whether the
 1221 * snapshot should be taken, and if it was, presumably saved it along
 1222 * with the snapshot.
 1223 */
 1224void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
 1225{
 1226	tracing_snapshot_instance_cond(tr, cond_data);
 1227}
 1228EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
 1229
 1230/**
 1231 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 1232 * @tr:		The tracing instance
 1233 *
 1234 * When the user enables a conditional snapshot using
 1235 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 1236 * with the snapshot.  This accessor is used to retrieve it.
 1237 *
 1238 * Should not be called from cond_snapshot.update(), since it takes
 1239 * the tr->max_lock lock, which the code calling
 1240 * cond_snapshot.update() has already done.
 1241 *
 1242 * Returns the cond_data associated with the trace array's snapshot.
 1243 */
 1244void *tracing_cond_snapshot_data(struct trace_array *tr)
 1245{
 1246	void *cond_data = NULL;
 1247
 1248	local_irq_disable();
 1249	arch_spin_lock(&tr->max_lock);
 1250
 1251	if (tr->cond_snapshot)
 1252		cond_data = tr->cond_snapshot->cond_data;
 1253
 1254	arch_spin_unlock(&tr->max_lock);
 1255	local_irq_enable();
 1256
 1257	return cond_data;
 1258}
 1259EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
 1260
 1261static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
 1262					struct array_buffer *size_buf, int cpu_id);
 1263static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
 1264
 1265int tracing_alloc_snapshot_instance(struct trace_array *tr)
 1266{
 1267	int order;
 1268	int ret;
 1269
 1270	if (!tr->allocated_snapshot) {
 1271
 1272		/* Make the snapshot buffer have the same order as main buffer */
 1273		order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
 1274		ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
 1275		if (ret < 0)
 1276			return ret;
 1277
 1278		/* allocate spare buffer */
 1279		ret = resize_buffer_duplicate_size(&tr->max_buffer,
 1280				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
 1281		if (ret < 0)
 1282			return ret;
 1283
 1284		tr->allocated_snapshot = true;
 1285	}
 1286
 1287	return 0;
 1288}
 1289
 1290static void free_snapshot(struct trace_array *tr)
 1291{
 1292	/*
  1293	 * We don't free the ring buffer; instead, we resize it because the
  1294	 * max_tr ring buffer has some state (e.g. ring->clock) and we want
  1295	 * to preserve it.
 1296	 */
 1297	ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0);
 1298	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
 1299	set_buffer_entries(&tr->max_buffer, 1);
 1300	tracing_reset_online_cpus(&tr->max_buffer);
 1301	tr->allocated_snapshot = false;
 1302}
 1303
 1304/**
 1305 * tracing_alloc_snapshot - allocate snapshot buffer.
 1306 *
 1307 * This only allocates the snapshot buffer if it isn't already
 1308 * allocated - it doesn't also take a snapshot.
 1309 *
 1310 * This is meant to be used in cases where the snapshot buffer needs
 1311 * to be set up for events that can't sleep but need to be able to
 1312 * trigger a snapshot.
 1313 */
 1314int tracing_alloc_snapshot(void)
 1315{
 1316	struct trace_array *tr = &global_trace;
 1317	int ret;
 1318
 1319	ret = tracing_alloc_snapshot_instance(tr);
 1320	WARN_ON(ret < 0);
 1321
 1322	return ret;
 1323}
 1324EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 1325
 1326/**
 1327 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 1328 *
 1329 * This is similar to tracing_snapshot(), but it will allocate the
 1330 * snapshot buffer if it isn't already allocated. Use this only
 1331 * where it is safe to sleep, as the allocation may sleep.
 1332 *
 1333 * This causes a swap between the snapshot buffer and the current live
 1334 * tracing buffer. You can use this to take snapshots of the live
 1335 * trace when some condition is triggered, but continue to trace.
 1336 */
 1337void tracing_snapshot_alloc(void)
 1338{
 1339	int ret;
 1340
 1341	ret = tracing_alloc_snapshot();
 1342	if (ret < 0)
 1343		return;
 1344
 1345	tracing_snapshot();
 1346}
 1347EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
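/*
 * Editor's note: a typical pattern based on the kernel-doc above (a
 * sketch, not part of this file): allocate the snapshot buffer once from
 * a context that may sleep, then take snapshots from fast paths:
 *
 *	// at init time (may sleep)
 *	if (tracing_alloc_snapshot() < 0)
 *		return;
 *
 *	// later, e.g. in an interrupt handler when a rare condition hits
 *	tracing_snapshot();
 *
 * The captured buffer can then be read from /sys/kernel/tracing/snapshot.
 */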
 1348
 1349/**
 1350 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 1351 * @tr:		The tracing instance
 1352 * @cond_data:	User data to associate with the snapshot
 1353 * @update:	Implementation of the cond_snapshot update function
 1354 *
 1355 * Check whether the conditional snapshot for the given instance has
 1356 * already been enabled, or if the current tracer is already using a
 1357 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 1358 * save the cond_data and update function inside.
 1359 *
 1360 * Returns 0 if successful, error otherwise.
 1361 */
 1362int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
 1363				 cond_update_fn_t update)
 1364{
 1365	struct cond_snapshot *cond_snapshot;
 1366	int ret = 0;
 1367
 1368	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
 1369	if (!cond_snapshot)
 1370		return -ENOMEM;
 1371
 1372	cond_snapshot->cond_data = cond_data;
 1373	cond_snapshot->update = update;
 1374
 1375	mutex_lock(&trace_types_lock);
 1376
 1377	ret = tracing_alloc_snapshot_instance(tr);
 1378	if (ret)
 1379		goto fail_unlock;
 1380
 1381	if (tr->current_trace->use_max_tr) {
 1382		ret = -EBUSY;
 1383		goto fail_unlock;
 1384	}
 1385
 1386	/*
 1387	 * The cond_snapshot can only change to NULL without the
 1388	 * trace_types_lock. We don't care if we race with it going
 1389	 * to NULL, but we want to make sure that it's not set to
 1390	 * something other than NULL when we get here, which we can
 1391	 * do safely with only holding the trace_types_lock and not
 1392	 * having to take the max_lock.
 1393	 */
 1394	if (tr->cond_snapshot) {
 1395		ret = -EBUSY;
 1396		goto fail_unlock;
 1397	}
 1398
 1399	local_irq_disable();
 1400	arch_spin_lock(&tr->max_lock);
 1401	tr->cond_snapshot = cond_snapshot;
 1402	arch_spin_unlock(&tr->max_lock);
 1403	local_irq_enable();
 1404
 1405	mutex_unlock(&trace_types_lock);
 1406
 1407	return ret;
 1408
 1409 fail_unlock:
 1410	mutex_unlock(&trace_types_lock);
 1411	kfree(cond_snapshot);
 1412	return ret;
 1413}
 1414EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
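/*
 * Editor's note: a conditional-snapshot sketch, not part of this file;
 * the callback name and cond_data layout are made up. The update()
 * callback decides, per call, whether the swap should really happen:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_state *s = cond_data;
 *
 *		return s->hit_count > s->threshold;	// snapshot only then
 *	}
 *
 *	tracing_snapshot_cond_enable(tr, &my_state, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_state);	// swaps only if my_update() agrees
 *	...
 *	tracing_snapshot_cond_disable(tr);
 */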
 1415
 1416/**
 1417 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 1418 * @tr:		The tracing instance
 1419 *
 1420 * Check whether the conditional snapshot for the given instance is
 1421 * enabled; if so, free the cond_snapshot associated with it,
 1422 * otherwise return -EINVAL.
 1423 *
 1424 * Returns 0 if successful, error otherwise.
 1425 */
 1426int tracing_snapshot_cond_disable(struct trace_array *tr)
 1427{
 1428	int ret = 0;
 1429
 1430	local_irq_disable();
 1431	arch_spin_lock(&tr->max_lock);
 1432
 1433	if (!tr->cond_snapshot)
 1434		ret = -EINVAL;
 1435	else {
 1436		kfree(tr->cond_snapshot);
 1437		tr->cond_snapshot = NULL;
 1438	}
 1439
 1440	arch_spin_unlock(&tr->max_lock);
 1441	local_irq_enable();
 1442
 1443	return ret;
 1444}
 1445EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
 1446#else
 1447void tracing_snapshot(void)
 1448{
 1449	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
 1450}
 1451EXPORT_SYMBOL_GPL(tracing_snapshot);
 1452void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
 1453{
 1454	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
 1455}
 1456EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
 1457int tracing_alloc_snapshot(void)
 1458{
 1459	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
 1460	return -ENODEV;
 1461}
 1462EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 1463void tracing_snapshot_alloc(void)
 1464{
 1465	/* Give warning */
 1466	tracing_snapshot();
 1467}
 1468EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 1469void *tracing_cond_snapshot_data(struct trace_array *tr)
 1470{
 1471	return NULL;
 1472}
 1473EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
 1474int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
 1475{
 1476	return -ENODEV;
 1477}
 1478EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
 1479int tracing_snapshot_cond_disable(struct trace_array *tr)
 1480{
 1481	return false;
 1482}
 1483EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
 1484#define free_snapshot(tr)	do { } while (0)
 1485#endif /* CONFIG_TRACER_SNAPSHOT */
 1486
 1487void tracer_tracing_off(struct trace_array *tr)
 1488{
 1489	if (tr->array_buffer.buffer)
 1490		ring_buffer_record_off(tr->array_buffer.buffer);
 1491	/*
 1492	 * This flag is looked at when buffers haven't been allocated
 1493	 * yet, or by some tracers (like irqsoff), that just want to
 1494	 * know if the ring buffer has been disabled, but it can handle
 1495	 * races of where it gets disabled but we still do a record.
 1496	 * As the check is in the fast path of the tracers, it is more
 1497	 * important to be fast than accurate.
 1498	 */
 1499	tr->buffer_disabled = 1;
 1500	/* Make the flag seen by readers */
 1501	smp_wmb();
 1502}
 1503
 1504/**
 1505 * tracing_off - turn off tracing buffers
 1506 *
 1507 * This function stops the tracing buffers from recording data.
 1508 * It does not disable any overhead the tracers themselves may
 1509 * be causing. This function simply causes all recording to
 1510 * the ring buffers to fail.
 1511 */
 1512void tracing_off(void)
 1513{
 1514	tracer_tracing_off(&global_trace);
 1515}
 1516EXPORT_SYMBOL_GPL(tracing_off);
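/*
 * Editor's note: a common debugging pattern (a sketch, not part of this
 * file) is to stop the buffers right where a problem is detected, so the
 * events leading up to it are preserved:
 *
 *	if (WARN_ON(broken_condition))
 *		tracing_off();
 *
 * Tracing can then be re-enabled from user space with
 * "echo 1 > /sys/kernel/tracing/tracing_on" or from code via tracing_on().
 */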
 1517
 1518void disable_trace_on_warning(void)
 1519{
 1520	if (__disable_trace_on_warning) {
 1521		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
 1522			"Disabling tracing due to warning\n");
 1523		tracing_off();
 1524	}
 1525}
 1526
 1527/**
 1528 * tracer_tracing_is_on - show real state of ring buffer enabled
 1529 * @tr : the trace array to know if ring buffer is enabled
 1530 *
 1531 * Shows real state of the ring buffer if it is enabled or not.
 1532 */
 1533bool tracer_tracing_is_on(struct trace_array *tr)
 1534{
 1535	if (tr->array_buffer.buffer)
 1536		return ring_buffer_record_is_set_on(tr->array_buffer.buffer);
 1537	return !tr->buffer_disabled;
 1538}
 1539
 1540/**
 1541 * tracing_is_on - show state of ring buffers enabled
 1542 */
 1543int tracing_is_on(void)
 1544{
 1545	return tracer_tracing_is_on(&global_trace);
 1546}
 1547EXPORT_SYMBOL_GPL(tracing_is_on);
 1548
 1549static int __init set_buf_size(char *str)
 1550{
 1551	unsigned long buf_size;
 1552
 1553	if (!str)
 1554		return 0;
 1555	buf_size = memparse(str, &str);
 1556	/*
 1557	 * nr_entries can not be zero and the startup
 1558	 * tests require some buffer space. Therefore
 1559	 * ensure we have at least 4096 bytes of buffer.
 1560	 */
 1561	trace_buf_size = max(4096UL, buf_size);
 1562	return 1;
 1563}
 1564__setup("trace_buf_size=", set_buf_size);
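/*
 * Editor's note: memparse() above accepts the usual K/M/G suffixes, so
 * the boot parameter can be given as, e.g.:
 *
 *	trace_buf_size=16M
 *
 * The size applies per CPU; at run time the same value is exposed through
 * /sys/kernel/tracing/buffer_size_kb.
 */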
 1565
 1566static int __init set_tracing_thresh(char *str)
 1567{
 1568	unsigned long threshold;
 1569	int ret;
 1570
 1571	if (!str)
 1572		return 0;
 1573	ret = kstrtoul(str, 0, &threshold);
 1574	if (ret < 0)
 1575		return 0;
 1576	tracing_thresh = threshold * 1000;
 1577	return 1;
 1578}
 1579__setup("tracing_thresh=", set_tracing_thresh);
 1580
 1581unsigned long nsecs_to_usecs(unsigned long nsecs)
 1582{
 1583	return nsecs / 1000;
 1584}
 1585
 1586/*
 1587 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 1588 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 1589 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 1590 * of strings in the order that the evals (enum) were defined.
 1591 */
 1592#undef C
 1593#define C(a, b) b
 1594
 1595/* These must match the bit positions in trace_iterator_flags */
 1596static const char *trace_options[] = {
 1597	TRACE_FLAGS
 1598	NULL
 1599};
 1600
 1601static struct {
 1602	u64 (*func)(void);
 1603	const char *name;
 1604	int in_ns;		/* is this clock in nanoseconds? */
 1605} trace_clocks[] = {
 1606	{ trace_clock_local,		"local",	1 },
 1607	{ trace_clock_global,		"global",	1 },
 1608	{ trace_clock_counter,		"counter",	0 },
 1609	{ trace_clock_jiffies,		"uptime",	0 },
 1610	{ trace_clock,			"perf",		1 },
 1611	{ ktime_get_mono_fast_ns,	"mono",		1 },
 1612	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
 1613	{ ktime_get_boot_fast_ns,	"boot",		1 },
 1614	{ ktime_get_tai_fast_ns,	"tai",		1 },
 1615	ARCH_TRACE_CLOCKS
 1616};
 1617
 1618bool trace_clock_in_ns(struct trace_array *tr)
 1619{
 1620	if (trace_clocks[tr->clock_id].in_ns)
 1621		return true;
 1622
 1623	return false;
 1624}
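/*
 * Editor's note: the clocks in the trace_clocks[] table above are the
 * ones selectable at run time through the trace_clock file, e.g.:
 *
 *	echo mono > /sys/kernel/tracing/trace_clock
 *	cat /sys/kernel/tracing/trace_clock
 *
 * Clocks with in_ns == 0 (counter, uptime) are reported as raw counts
 * rather than nanosecond timestamps.
 */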
 1625
 1626/*
 1627 * trace_parser_get_init - gets the buffer for trace parser
 1628 */
 1629int trace_parser_get_init(struct trace_parser *parser, int size)
 1630{
 1631	memset(parser, 0, sizeof(*parser));
 1632
 1633	parser->buffer = kmalloc(size, GFP_KERNEL);
 1634	if (!parser->buffer)
 1635		return 1;
 1636
 1637	parser->size = size;
 1638	return 0;
 1639}
 1640
 1641/*
 1642 * trace_parser_put - frees the buffer for trace parser
 1643 */
 1644void trace_parser_put(struct trace_parser *parser)
 1645{
 1646	kfree(parser->buffer);
 1647	parser->buffer = NULL;
 1648}
 1649
 1650/*
  1651 * trace_get_user - reads the user input string separated by space
 1652 * (matched by isspace(ch))
 1653 *
 1654 * For each string found the 'struct trace_parser' is updated,
 1655 * and the function returns.
 1656 *
 1657 * Returns number of bytes read.
 1658 *
 1659 * See kernel/trace/trace.h for 'struct trace_parser' details.
 1660 */
 1661int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 1662	size_t cnt, loff_t *ppos)
 1663{
 1664	char ch;
 1665	size_t read = 0;
 1666	ssize_t ret;
 1667
 1668	if (!*ppos)
 1669		trace_parser_clear(parser);
 1670
 1671	ret = get_user(ch, ubuf++);
 1672	if (ret)
 1673		goto out;
 1674
 1675	read++;
 1676	cnt--;
 1677
 1678	/*
 1679	 * The parser is not finished with the last write,
 1680	 * continue reading the user input without skipping spaces.
 1681	 */
 1682	if (!parser->cont) {
 1683		/* skip white space */
 1684		while (cnt && isspace(ch)) {
 1685			ret = get_user(ch, ubuf++);
 1686			if (ret)
 1687				goto out;
 1688			read++;
 1689			cnt--;
 1690		}
 1691
 1692		parser->idx = 0;
 1693
 1694		/* only spaces were written */
 1695		if (isspace(ch) || !ch) {
 1696			*ppos += read;
 1697			ret = read;
 1698			goto out;
 1699		}
 1700	}
 1701
 1702	/* read the non-space input */
 1703	while (cnt && !isspace(ch) && ch) {
 1704		if (parser->idx < parser->size - 1)
 1705			parser->buffer[parser->idx++] = ch;
 1706		else {
 1707			ret = -EINVAL;
 1708			goto out;
 1709		}
 1710		ret = get_user(ch, ubuf++);
 1711		if (ret)
 1712			goto out;
 1713		read++;
 1714		cnt--;
 1715	}
 1716
 1717	/* We either got finished input or we have to wait for another call. */
 1718	if (isspace(ch) || !ch) {
 1719		parser->buffer[parser->idx] = 0;
 1720		parser->cont = false;
 1721	} else if (parser->idx < parser->size - 1) {
 1722		parser->cont = true;
 1723		parser->buffer[parser->idx++] = ch;
 1724		/* Make sure the parsed string always terminates with '\0'. */
 1725		parser->buffer[parser->idx] = 0;
 1726	} else {
 1727		ret = -EINVAL;
 1728		goto out;
 1729	}
 1730
 1731	*ppos += read;
 1732	ret = read;
 1733
 1734out:
 1735	return ret;
 1736}
 1737
 1738/* TODO add a seq_buf_to_buffer() */
 1739static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 1740{
 1741	int len;
 1742
 1743	if (trace_seq_used(s) <= s->readpos)
 1744		return -EBUSY;
 1745
 1746	len = trace_seq_used(s) - s->readpos;
 1747	if (cnt > len)
 1748		cnt = len;
 1749	memcpy(buf, s->buffer + s->readpos, cnt);
 1750
 1751	s->readpos += cnt;
 1752	return cnt;
 1753}
 1754
 1755unsigned long __read_mostly	tracing_thresh;
 1756
 1757#ifdef CONFIG_TRACER_MAX_TRACE
 1758static const struct file_operations tracing_max_lat_fops;
 1759
 1760#ifdef LATENCY_FS_NOTIFY
 1761
 1762static struct workqueue_struct *fsnotify_wq;
 1763
 1764static void latency_fsnotify_workfn(struct work_struct *work)
 1765{
 1766	struct trace_array *tr = container_of(work, struct trace_array,
 1767					      fsnotify_work);
 1768	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
 1769}
 1770
 1771static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
 1772{
 1773	struct trace_array *tr = container_of(iwork, struct trace_array,
 1774					      fsnotify_irqwork);
 1775	queue_work(fsnotify_wq, &tr->fsnotify_work);
 1776}
 1777
 1778static void trace_create_maxlat_file(struct trace_array *tr,
 1779				     struct dentry *d_tracer)
 1780{
 1781	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
 1782	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
 1783	tr->d_max_latency = trace_create_file("tracing_max_latency",
 1784					      TRACE_MODE_WRITE,
 1785					      d_tracer, tr,
 1786					      &tracing_max_lat_fops);
 1787}
 1788
 1789__init static int latency_fsnotify_init(void)
 1790{
 1791	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
 1792				      WQ_UNBOUND | WQ_HIGHPRI, 0);
 1793	if (!fsnotify_wq) {
 1794		pr_err("Unable to allocate tr_max_lat_wq\n");
 1795		return -ENOMEM;
 1796	}
 1797	return 0;
 1798}
 1799
 1800late_initcall_sync(latency_fsnotify_init);
 1801
 1802void latency_fsnotify(struct trace_array *tr)
 1803{
 1804	if (!fsnotify_wq)
 1805		return;
 1806	/*
 1807	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
 1808	 * possible that we are called from __schedule() or do_idle(), which
 1809	 * could cause a deadlock.
 1810	 */
 1811	irq_work_queue(&tr->fsnotify_irqwork);
 1812}
 1813
 1814#else /* !LATENCY_FS_NOTIFY */
 1815
 1816#define trace_create_maxlat_file(tr, d_tracer)				\
 1817	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
 1818			  d_tracer, tr, &tracing_max_lat_fops)
 1819
 1820#endif
 1821
 1822/*
 1823 * Copy the new maximum trace into the separate maximum-trace
 1824 * structure. (this way the maximum trace is permanently saved,
 1825 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 1826 */
 1827static void
 1828__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 1829{
 1830	struct array_buffer *trace_buf = &tr->array_buffer;
 1831	struct array_buffer *max_buf = &tr->max_buffer;
 1832	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
 1833	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
 1834
 1835	max_buf->cpu = cpu;
 1836	max_buf->time_start = data->preempt_timestamp;
 1837
 1838	max_data->saved_latency = tr->max_latency;
 1839	max_data->critical_start = data->critical_start;
 1840	max_data->critical_end = data->critical_end;
 1841
 1842	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 1843	max_data->pid = tsk->pid;
 1844	/*
 1845	 * If tsk == current, then use current_uid(), as that does not use
 1846	 * RCU. The irq tracer can be called out of RCU scope.
 1847	 */
 1848	if (tsk == current)
 1849		max_data->uid = current_uid();
 1850	else
 1851		max_data->uid = task_uid(tsk);
 1852
 1853	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
 1854	max_data->policy = tsk->policy;
 1855	max_data->rt_priority = tsk->rt_priority;
 1856
1857	/* record this task's comm */
 1858	tracing_record_cmdline(tsk);
 1859	latency_fsnotify(tr);
 1860}
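/*
 * Illustrative sketch (not part of trace.c): __update_max_tr() above derives
 * the task's nice value from static_prio with the usual scheduler arithmetic
 * (static_prio = MAX_RT_PRIO + 20 + nice). A minimal inversion of that
 * formula, assuming the conventional MAX_RT_PRIO of 100:
 */
#define EXAMPLE_MAX_RT_PRIO	100

static inline int example_static_prio_to_nice(int static_prio)
{
	/* 120 -> 0, 100 -> -20, 139 -> 19 */
	return static_prio - 20 - EXAMPLE_MAX_RT_PRIO;
}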
 1861
 1862/**
 1863 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 1864 * @tr: tracer
 1865 * @tsk: the task with the latency
 1866 * @cpu: The cpu that initiated the trace.
 1867 * @cond_data: User data associated with a conditional snapshot
 1868 *
 1869 * Flip the buffers between the @tr and the max_tr and record information
 1870 * about which task was the cause of this latency.
 1871 */
 1872void
 1873update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
 1874	      void *cond_data)
 1875{
 1876	if (tr->stop_count)
 1877		return;
 1878
 1879	WARN_ON_ONCE(!irqs_disabled());
 1880
 1881	if (!tr->allocated_snapshot) {
 1882		/* Only the nop tracer should hit this when disabling */
 1883		WARN_ON_ONCE(tr->current_trace != &nop_trace);
 1884		return;
 1885	}
 1886
 1887	arch_spin_lock(&tr->max_lock);
 1888
 1889	/* Inherit the recordable setting from array_buffer */
 1890	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
 1891		ring_buffer_record_on(tr->max_buffer.buffer);
 1892	else
 1893		ring_buffer_record_off(tr->max_buffer.buffer);
 1894
 1895#ifdef CONFIG_TRACER_SNAPSHOT
 1896	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
 1897		arch_spin_unlock(&tr->max_lock);
 1898		return;
 1899	}
 1900#endif
 1901	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
 1902
 1903	__update_max_tr(tr, tsk, cpu);
 1904
 1905	arch_spin_unlock(&tr->max_lock);
 1906
 1907	/* Any waiters on the old snapshot buffer need to wake up */
 1908	ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
 1909}
 1910
 1911/**
 1912 * update_max_tr_single - only copy one trace over, and reset the rest
 1913 * @tr: tracer
 1914 * @tsk: task with the latency
 1915 * @cpu: the cpu of the buffer to copy.
 1916 *
 1917 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 1918 */
 1919void
 1920update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 1921{
 1922	int ret;
 1923
 1924	if (tr->stop_count)
 1925		return;
 1926
 1927	WARN_ON_ONCE(!irqs_disabled());
 1928	if (!tr->allocated_snapshot) {
 1929		/* Only the nop tracer should hit this when disabling */
 1930		WARN_ON_ONCE(tr->current_trace != &nop_trace);
 1931		return;
 1932	}
 1933
 1934	arch_spin_lock(&tr->max_lock);
 1935
 1936	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
 1937
 1938	if (ret == -EBUSY) {
 1939		/*
 1940		 * We failed to swap the buffer due to a commit taking
 1941		 * place on this CPU. We fail to record, but we reset
 1942		 * the max trace buffer (no one writes directly to it)
 1943		 * and flag that it failed.
1944		 * Another cause is a resize in progress.
 1945		 */
 1946		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
 1947			"Failed to swap buffers due to commit or resize in progress\n");
 1948	}
 1949
 1950	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 1951
 1952	__update_max_tr(tr, tsk, cpu);
 1953	arch_spin_unlock(&tr->max_lock);
 1954}
 1955
 1956#endif /* CONFIG_TRACER_MAX_TRACE */
 1957
 1958static int wait_on_pipe(struct trace_iterator *iter, int full)
 1959{
 1960	int ret;
 1961
1962	/* Iterators are static; they should be filled or empty */
 1963	if (trace_buffer_iter(iter, iter->cpu_file))
 1964		return 0;
 1965
 1966	ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full);
 1967
 1968#ifdef CONFIG_TRACER_MAX_TRACE
 1969	/*
 1970	 * Make sure this is still the snapshot buffer, as if a snapshot were
 1971	 * to happen, this would now be the main buffer.
 1972	 */
 1973	if (iter->snapshot)
 1974		iter->array_buffer = &iter->tr->max_buffer;
 1975#endif
 1976	return ret;
 1977}
 1978
 1979#ifdef CONFIG_FTRACE_STARTUP_TEST
 1980static bool selftests_can_run;
 1981
 1982struct trace_selftests {
 1983	struct list_head		list;
 1984	struct tracer			*type;
 1985};
 1986
 1987static LIST_HEAD(postponed_selftests);
 1988
 1989static int save_selftest(struct tracer *type)
 1990{
 1991	struct trace_selftests *selftest;
 1992
 1993	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
 1994	if (!selftest)
 1995		return -ENOMEM;
 1996
 1997	selftest->type = type;
 1998	list_add(&selftest->list, &postponed_selftests);
 1999	return 0;
 2000}
 2001
 2002static int run_tracer_selftest(struct tracer *type)
 2003{
 2004	struct trace_array *tr = &global_trace;
 2005	struct tracer *saved_tracer = tr->current_trace;
 2006	int ret;
 2007
 2008	if (!type->selftest || tracing_selftest_disabled)
 2009		return 0;
 2010
 2011	/*
 2012	 * If a tracer registers early in boot up (before scheduling is
 2013	 * initialized and such), then do not run its selftests yet.
2014	 * Instead, run them a little later in the boot process.
 2015	 */
 2016	if (!selftests_can_run)
 2017		return save_selftest(type);
 2018
 2019	if (!tracing_is_on()) {
 2020		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
 2021			type->name);
 2022		return 0;
 2023	}
 2024
 2025	/*
 2026	 * Run a selftest on this tracer.
 2027	 * Here we reset the trace buffer, and set the current
 2028	 * tracer to be this tracer. The tracer can then run some
 2029	 * internal tracing to verify that everything is in order.
 2030	 * If we fail, we do not register this tracer.
 2031	 */
 2032	tracing_reset_online_cpus(&tr->array_buffer);
 2033
 2034	tr->current_trace = type;
 2035
 2036#ifdef CONFIG_TRACER_MAX_TRACE
 2037	if (type->use_max_tr) {
 2038		/* If we expanded the buffers, make sure the max is expanded too */
 2039		if (tr->ring_buffer_expanded)
 2040			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
 2041					   RING_BUFFER_ALL_CPUS);
 2042		tr->allocated_snapshot = true;
 2043	}
 2044#endif
 2045
 2046	/* the test is responsible for initializing and enabling */
 2047	pr_info("Testing tracer %s: ", type->name);
 2048	ret = type->selftest(type, tr);
 2049	/* the test is responsible for resetting too */
 2050	tr->current_trace = saved_tracer;
 2051	if (ret) {
 2052		printk(KERN_CONT "FAILED!\n");
 2053		/* Add the warning after printing 'FAILED' */
 2054		WARN_ON(1);
 2055		return -1;
 2056	}
 2057	/* Only reset on passing, to avoid touching corrupted buffers */
 2058	tracing_reset_online_cpus(&tr->array_buffer);
 2059
 2060#ifdef CONFIG_TRACER_MAX_TRACE
 2061	if (type->use_max_tr) {
 2062		tr->allocated_snapshot = false;
 2063
 2064		/* Shrink the max buffer again */
 2065		if (tr->ring_buffer_expanded)
 2066			ring_buffer_resize(tr->max_buffer.buffer, 1,
 2067					   RING_BUFFER_ALL_CPUS);
 2068	}
 2069#endif
 2070
 2071	printk(KERN_CONT "PASSED\n");
 2072	return 0;
 2073}
 2074
 2075static int do_run_tracer_selftest(struct tracer *type)
 2076{
 2077	int ret;
 2078
 2079	/*
 2080	 * Tests can take a long time, especially if they are run one after the
 2081	 * other, as does happen during bootup when all the tracers are
 2082	 * registered. This could cause the soft lockup watchdog to trigger.
 2083	 */
 2084	cond_resched();
 2085
 2086	tracing_selftest_running = true;
 2087	ret = run_tracer_selftest(type);
 2088	tracing_selftest_running = false;
 2089
 2090	return ret;
 2091}
 2092
 2093static __init int init_trace_selftests(void)
 2094{
 2095	struct trace_selftests *p, *n;
 2096	struct tracer *t, **last;
 2097	int ret;
 2098
 2099	selftests_can_run = true;
 2100
 2101	mutex_lock(&trace_types_lock);
 2102
 2103	if (list_empty(&postponed_selftests))
 2104		goto out;
 2105
 2106	pr_info("Running postponed tracer tests:\n");
 2107
 2108	tracing_selftest_running = true;
 2109	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
 2110		/* This loop can take minutes when sanitizers are enabled, so
2111		 * let's make sure we allow RCU processing.
 2112		 */
 2113		cond_resched();
 2114		ret = run_tracer_selftest(p->type);
 2115		/* If the test fails, then warn and remove from available_tracers */
 2116		if (ret < 0) {
 2117			WARN(1, "tracer: %s failed selftest, disabling\n",
 2118			     p->type->name);
 2119			last = &trace_types;
 2120			for (t = trace_types; t; t = t->next) {
 2121				if (t == p->type) {
 2122					*last = t->next;
 2123					break;
 2124				}
 2125				last = &t->next;
 2126			}
 2127		}
 2128		list_del(&p->list);
 2129		kfree(p);
 2130	}
 2131	tracing_selftest_running = false;
 2132
 2133 out:
 2134	mutex_unlock(&trace_types_lock);
 2135
 2136	return 0;
 2137}
 2138core_initcall(init_trace_selftests);
 2139#else
 2140static inline int run_tracer_selftest(struct tracer *type)
 2141{
 2142	return 0;
 2143}
 2144static inline int do_run_tracer_selftest(struct tracer *type)
 2145{
 2146	return 0;
 2147}
 2148#endif /* CONFIG_FTRACE_STARTUP_TEST */
 2149
 2150static void add_tracer_options(struct trace_array *tr, struct tracer *t);
 2151
 2152static void __init apply_trace_boot_options(void);
 2153
 2154/**
 2155 * register_tracer - register a tracer with the ftrace system.
 2156 * @type: the plugin for the tracer
 2157 *
 2158 * Register a new plugin tracer.
 2159 */
 2160int __init register_tracer(struct tracer *type)
 2161{
 2162	struct tracer *t;
 2163	int ret = 0;
 2164
 2165	if (!type->name) {
 2166		pr_info("Tracer must have a name\n");
 2167		return -1;
 2168	}
 2169
 2170	if (strlen(type->name) >= MAX_TRACER_SIZE) {
 2171		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
 2172		return -1;
 2173	}
 2174
 2175	if (security_locked_down(LOCKDOWN_TRACEFS)) {
 2176		pr_warn("Can not register tracer %s due to lockdown\n",
 2177			   type->name);
 2178		return -EPERM;
 2179	}
 2180
 2181	mutex_lock(&trace_types_lock);
 2182
 2183	for (t = trace_types; t; t = t->next) {
 2184		if (strcmp(type->name, t->name) == 0) {
 2185			/* already found */
 2186			pr_info("Tracer %s already registered\n",
 2187				type->name);
 2188			ret = -1;
 2189			goto out;
 2190		}
 2191	}
 2192
 2193	if (!type->set_flag)
 2194		type->set_flag = &dummy_set_flag;
 2195	if (!type->flags) {
2196		/* allocate a dummy tracer_flags */
 2197		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
 2198		if (!type->flags) {
 2199			ret = -ENOMEM;
 2200			goto out;
 2201		}
 2202		type->flags->val = 0;
 2203		type->flags->opts = dummy_tracer_opt;
 2204	} else
 2205		if (!type->flags->opts)
 2206			type->flags->opts = dummy_tracer_opt;
 2207
 2208	/* store the tracer for __set_tracer_option */
 2209	type->flags->trace = type;
 2210
 2211	ret = do_run_tracer_selftest(type);
 2212	if (ret < 0)
 2213		goto out;
 2214
 2215	type->next = trace_types;
 2216	trace_types = type;
 2217	add_tracer_options(&global_trace, type);
 2218
 2219 out:
 2220	mutex_unlock(&trace_types_lock);
 2221
 2222	if (ret || !default_bootup_tracer)
 2223		goto out_unlock;
 2224
 2225	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
 2226		goto out_unlock;
 2227
 2228	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
 2229	/* Do we want this tracer to start on bootup? */
 2230	tracing_set_tracer(&global_trace, type->name);
 2231	default_bootup_tracer = NULL;
 2232
 2233	apply_trace_boot_options();
 2234
2235	/* Disable other selftests, since running this tracer will break them. */
 2236	disable_tracing_selftest("running a tracer");
 2237
 2238 out_unlock:
 2239	return ret;
 2240}
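/*
 * Hedged sketch (not part of trace.c): what a minimal built-in tracer's
 * registration could look like from the caller's side. "my_tracer" and its
 * callbacks are hypothetical, and a real tracer normally fills in more of
 * struct tracer (selftest, print hooks, ...); the dummy set_flag and flags
 * are supplied by register_tracer() above when left NULL.
 */
static int my_tracer_init(struct trace_array *tr)
{
	return 0;		/* hypothetical: arm whatever hooks the tracer needs */
}

static void my_tracer_reset(struct trace_array *tr)
{
	/* hypothetical: undo my_tracer_init() */
}

static struct tracer my_tracer __read_mostly = {
	.name	= "my_tracer",	/* must be shorter than MAX_TRACER_SIZE */
	.init	= my_tracer_init,
	.reset	= my_tracer_reset,
};

static __init int my_tracer_register(void)
{
	return register_tracer(&my_tracer);
}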
 2241
 2242static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
 2243{
 2244	struct trace_buffer *buffer = buf->buffer;
 2245
 2246	if (!buffer)
 2247		return;
 2248
 2249	ring_buffer_record_disable(buffer);
 2250
 2251	/* Make sure all commits have finished */
 2252	synchronize_rcu();
 2253	ring_buffer_reset_cpu(buffer, cpu);
 2254
 2255	ring_buffer_record_enable(buffer);
 2256}
 2257
 2258void tracing_reset_online_cpus(struct array_buffer *buf)
 2259{
 2260	struct trace_buffer *buffer = buf->buffer;
 2261
 2262	if (!buffer)
 2263		return;
 2264
 2265	ring_buffer_record_disable(buffer);
 2266
 2267	/* Make sure all commits have finished */
 2268	synchronize_rcu();
 2269
 2270	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
 2271
 2272	ring_buffer_reset_online_cpus(buffer);
 2273
 2274	ring_buffer_record_enable(buffer);
 2275}
 2276
 2277/* Must have trace_types_lock held */
 2278void tracing_reset_all_online_cpus_unlocked(void)
 2279{
 2280	struct trace_array *tr;
 2281
 2282	lockdep_assert_held(&trace_types_lock);
 2283
 2284	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 2285		if (!tr->clear_trace)
 2286			continue;
 2287		tr->clear_trace = false;
 2288		tracing_reset_online_cpus(&tr->array_buffer);
 2289#ifdef CONFIG_TRACER_MAX_TRACE
 2290		tracing_reset_online_cpus(&tr->max_buffer);
 2291#endif
 2292	}
 2293}
 2294
 2295void tracing_reset_all_online_cpus(void)
 2296{
 2297	mutex_lock(&trace_types_lock);
 2298	tracing_reset_all_online_cpus_unlocked();
 2299	mutex_unlock(&trace_types_lock);
 2300}
 2301
 2302/*
 2303 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
 2304 * is the tgid last observed corresponding to pid=i.
 2305 */
 2306static int *tgid_map;
 2307
 2308/* The maximum valid index into tgid_map. */
 2309static size_t tgid_map_max;
 2310
 2311#define SAVED_CMDLINES_DEFAULT 128
 2312#define NO_CMDLINE_MAP UINT_MAX
 2313/*
 2314 * Preemption must be disabled before acquiring trace_cmdline_lock.
 2315 * The various trace_arrays' max_lock must be acquired in a context
2316 * where interrupts are disabled.
 2317 */
 2318static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 2319struct saved_cmdlines_buffer {
 2320	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 2321	unsigned *map_cmdline_to_pid;
 2322	unsigned cmdline_num;
 2323	int cmdline_idx;
 2324	char saved_cmdlines[];
 2325};
 2326static struct saved_cmdlines_buffer *savedcmd;
 2327
 2328static inline char *get_saved_cmdlines(int idx)
 2329{
 2330	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
 2331}
 2332
 2333static inline void set_cmdline(int idx, const char *cmdline)
 2334{
 2335	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
 2336}
 2337
 2338static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
 2339{
 2340	int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN);
 2341
 2342	kfree(s->map_cmdline_to_pid);
 2343	kmemleak_free(s);
 2344	free_pages((unsigned long)s, order);
 2345}
 2346
 2347static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val)
 2348{
 2349	struct saved_cmdlines_buffer *s;
 2350	struct page *page;
 2351	int orig_size, size;
 2352	int order;
 2353
 2354	/* Figure out how much is needed to hold the given number of cmdlines */
 2355	orig_size = sizeof(*s) + val * TASK_COMM_LEN;
 2356	order = get_order(orig_size);
 2357	size = 1 << (order + PAGE_SHIFT);
 2358	page = alloc_pages(GFP_KERNEL, order);
 2359	if (!page)
 2360		return NULL;
 2361
 2362	s = page_address(page);
 2363	kmemleak_alloc(s, size, 1, GFP_KERNEL);
 2364	memset(s, 0, sizeof(*s));
 2365
 2366	/* Round up to actual allocation */
 2367	val = (size - sizeof(*s)) / TASK_COMM_LEN;
 2368	s->cmdline_num = val;
 2369
 2370	s->map_cmdline_to_pid = kmalloc_array(val,
 2371					      sizeof(*s->map_cmdline_to_pid),
 2372					      GFP_KERNEL);
 2373	if (!s->map_cmdline_to_pid) {
 2374		free_saved_cmdlines_buffer(s);
 2375		return NULL;
 2376	}
 2377
 2378	s->cmdline_idx = 0;
 2379	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
 2380	       sizeof(s->map_pid_to_cmdline));
 2381	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
 2382	       val * sizeof(*s->map_cmdline_to_pid));
 2383
 2384	return s;
 2385}
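/*
 * Illustrative sketch (not part of trace.c): allocate_cmdlines_buffer() above
 * rounds the request up to a power-of-two number of pages and then recomputes
 * how many cmdline slots actually fit, so callers may get more than they
 * asked for. A standalone user-space rendition of the same arithmetic; the
 * header size is a made-up placeholder, not sizeof(struct saved_cmdlines_buffer).
 */
#include <stdio.h>
#include <unistd.h>

#define EX_TASK_COMM_LEN	16
#define EX_HEADER_SIZE		256	/* hypothetical header size */

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	unsigned int requested = 128;	/* SAVED_CMDLINES_DEFAULT */
	size_t orig_size = EX_HEADER_SIZE + (size_t)requested * EX_TASK_COMM_LEN;
	size_t alloc = page;

	/* get_order()-style round up to a power-of-two number of pages */
	while (alloc < orig_size)
		alloc <<= 1;

	printf("requested %u cmdlines, allocation holds %zu\n",
	       requested, (alloc - EX_HEADER_SIZE) / EX_TASK_COMM_LEN);
	return 0;
}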
 2386
 2387static int trace_create_savedcmd(void)
 2388{
 2389	savedcmd = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT);
 2390
 2391	return savedcmd ? 0 : -ENOMEM;
 2392}
 2393
 2394int is_tracing_stopped(void)
 2395{
 2396	return global_trace.stop_count;
 2397}
 2398
 2399static void tracing_start_tr(struct trace_array *tr)
 2400{
 2401	struct trace_buffer *buffer;
 2402	unsigned long flags;
 2403
 2404	if (tracing_disabled)
 2405		return;
 2406
 2407	raw_spin_lock_irqsave(&tr->start_lock, flags);
 2408	if (--tr->stop_count) {
 2409		if (WARN_ON_ONCE(tr->stop_count < 0)) {
 2410			/* Someone screwed up their debugging */
 2411			tr->stop_count = 0;
 2412		}
 2413		goto out;
 2414	}
 2415
 2416	/* Prevent the buffers from switching */
 2417	arch_spin_lock(&tr->max_lock);
 2418
 2419	buffer = tr->array_buffer.buffer;
 2420	if (buffer)
 2421		ring_buffer_record_enable(buffer);
 2422
 2423#ifdef CONFIG_TRACER_MAX_TRACE
 2424	buffer = tr->max_buffer.buffer;
 2425	if (buffer)
 2426		ring_buffer_record_enable(buffer);
 2427#endif
 2428
 2429	arch_spin_unlock(&tr->max_lock);
 2430
 2431 out:
 2432	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 2433}
 2434
 2435/**
 2436 * tracing_start - quick start of the tracer
 2437 *
 2438 * If tracing is enabled but was stopped by tracing_stop,
 2439 * this will start the tracer back up.
 2440 */
 2441void tracing_start(void)
 2442
 2443{
 2444	return tracing_start_tr(&global_trace);
 2445}
 2446
 2447static void tracing_stop_tr(struct trace_array *tr)
 2448{
 2449	struct trace_buffer *buffer;
 2450	unsigned long flags;
 2451
 2452	raw_spin_lock_irqsave(&tr->start_lock, flags);
 2453	if (tr->stop_count++)
 2454		goto out;
 2455
 2456	/* Prevent the buffers from switching */
 2457	arch_spin_lock(&tr->max_lock);
 2458
 2459	buffer = tr->array_buffer.buffer;
 2460	if (buffer)
 2461		ring_buffer_record_disable(buffer);
 2462
 2463#ifdef CONFIG_TRACER_MAX_TRACE
 2464	buffer = tr->max_buffer.buffer;
 2465	if (buffer)
 2466		ring_buffer_record_disable(buffer);
 2467#endif
 2468
 2469	arch_spin_unlock(&tr->max_lock);
 2470
 2471 out:
 2472	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 2473}
 2474
 2475/**
 2476 * tracing_stop - quick stop of the tracer
 2477 *
2478 * Lightweight way to stop tracing. Use in conjunction with
 2479 * tracing_start.
 2480 */
 2481void tracing_stop(void)
 2482{
 2483	return tracing_stop_tr(&global_trace);
 2484}
 2485
 2486static int trace_save_cmdline(struct task_struct *tsk)
 2487{
 2488	unsigned tpid, idx;
 2489
 2490	/* treat recording of idle task as a success */
 2491	if (!tsk->pid)
 2492		return 1;
 2493
 2494	tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
 2495
 2496	/*
 2497	 * It's not the end of the world if we don't get
 2498	 * the lock, but we also don't want to spin
 2499	 * nor do we want to disable interrupts,
 2500	 * so if we miss here, then better luck next time.
 2501	 *
2502	 * This is called from within the scheduler and from wakeups, so interrupts
2503	 * had better be disabled and the run queue lock held.
 2504	 */
 2505	lockdep_assert_preemption_disabled();
 2506	if (!arch_spin_trylock(&trace_cmdline_lock))
 2507		return 0;
 2508
 2509	idx = savedcmd->map_pid_to_cmdline[tpid];
 2510	if (idx == NO_CMDLINE_MAP) {
 2511		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
 2512
 2513		savedcmd->map_pid_to_cmdline[tpid] = idx;
 2514		savedcmd->cmdline_idx = idx;
 2515	}
 2516
 2517	savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
 2518	set_cmdline(idx, tsk->comm);
 2519
 2520	arch_spin_unlock(&trace_cmdline_lock);
 2521
 2522	return 1;
 2523}
 2524
 2525static void __trace_find_cmdline(int pid, char comm[])
 2526{
 2527	unsigned map;
 2528	int tpid;
 2529
 2530	if (!pid) {
 2531		strcpy(comm, "<idle>");
 2532		return;
 2533	}
 2534
 2535	if (WARN_ON_ONCE(pid < 0)) {
 2536		strcpy(comm, "<XXX>");
 2537		return;
 2538	}
 2539
 2540	tpid = pid & (PID_MAX_DEFAULT - 1);
 2541	map = savedcmd->map_pid_to_cmdline[tpid];
 2542	if (map != NO_CMDLINE_MAP) {
 2543		tpid = savedcmd->map_cmdline_to_pid[map];
 2544		if (tpid == pid) {
 2545			strscpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
 2546			return;
 2547		}
 2548	}
 2549	strcpy(comm, "<...>");
 2550}
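/*
 * Illustrative sketch (not part of trace.c): the cmdline cache indexes
 * map_pid_to_cmdline[] with the low bits of the PID, so PIDs that differ by
 * PID_MAX_DEFAULT share a slot. The reverse map_cmdline_to_pid[] check in
 * __trace_find_cmdline() above is what turns a stale (collided) slot into
 * "<...>" instead of the wrong comm. Standalone user-space demo of the collision:
 */
#include <stdio.h>

#define EX_PID_MAX_DEFAULT	0x8000	/* the usual PID_MAX_DEFAULT */

int main(void)
{
	int pid_a = 1234;
	int pid_b = pid_a + EX_PID_MAX_DEFAULT;	/* maps to the same slot */

	printf("pid %d -> slot %d, pid %d -> slot %d\n",
	       pid_a, pid_a & (EX_PID_MAX_DEFAULT - 1),
	       pid_b, pid_b & (EX_PID_MAX_DEFAULT - 1));
	return 0;
}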
 2551
 2552void trace_find_cmdline(int pid, char comm[])
 2553{
 2554	preempt_disable();
 2555	arch_spin_lock(&trace_cmdline_lock);
 2556
 2557	__trace_find_cmdline(pid, comm);
 2558
 2559	arch_spin_unlock(&trace_cmdline_lock);
 2560	preempt_enable();
 2561}
 2562
 2563static int *trace_find_tgid_ptr(int pid)
 2564{
 2565	/*
 2566	 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
 2567	 * if we observe a non-NULL tgid_map then we also observe the correct
 2568	 * tgid_map_max.
 2569	 */
 2570	int *map = smp_load_acquire(&tgid_map);
 2571
 2572	if (unlikely(!map || pid > tgid_map_max))
 2573		return NULL;
 2574
 2575	return &map[pid];
 2576}
 2577
 2578int trace_find_tgid(int pid)
 2579{
 2580	int *ptr = trace_find_tgid_ptr(pid);
 2581
 2582	return ptr ? *ptr : 0;
 2583}
 2584
 2585static int trace_save_tgid(struct task_struct *tsk)
 2586{
 2587	int *ptr;
 2588
 2589	/* treat recording of idle task as a success */
 2590	if (!tsk->pid)
 2591		return 1;
 2592
 2593	ptr = trace_find_tgid_ptr(tsk->pid);
 2594	if (!ptr)
 2595		return 0;
 2596
 2597	*ptr = tsk->tgid;
 2598	return 1;
 2599}
 2600
 2601static bool tracing_record_taskinfo_skip(int flags)
 2602{
 2603	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
 2604		return true;
 2605	if (!__this_cpu_read(trace_taskinfo_save))
 2606		return true;
 2607	return false;
 2608}
 2609
 2610/**
 2611 * tracing_record_taskinfo - record the task info of a task
 2612 *
 2613 * @task:  task to record
 2614 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2615 *         TRACE_RECORD_TGID for recording tgid
 2616 */
 2617void tracing_record_taskinfo(struct task_struct *task, int flags)
 2618{
 2619	bool done;
 2620
 2621	if (tracing_record_taskinfo_skip(flags))
 2622		return;
 2623
 2624	/*
 2625	 * Record as much task information as possible. If some fail, continue
 2626	 * to try to record the others.
 2627	 */
 2628	done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
 2629	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
 2630
 2631	/* If recording any information failed, retry again soon. */
 2632	if (!done)
 2633		return;
 2634
 2635	__this_cpu_write(trace_taskinfo_save, false);
 2636}
 2637
 2638/**
 2639 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
 2640 *
 2641 * @prev: previous task during sched_switch
 2642 * @next: next task during sched_switch
 2643 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2644 *         TRACE_RECORD_TGID for recording tgid
 2645 */
 2646void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
 2647					  struct task_struct *next, int flags)
 2648{
 2649	bool done;
 2650
 2651	if (tracing_record_taskinfo_skip(flags))
 2652		return;
 2653
 2654	/*
 2655	 * Record as much task information as possible. If some fail, continue
 2656	 * to try to record the others.
 2657	 */
 2658	done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
 2659	done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
 2660	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
 2661	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
 2662
 2663	/* If recording any information failed, retry again soon. */
 2664	if (!done)
 2665		return;
 2666
 2667	__this_cpu_write(trace_taskinfo_save, false);
 2668}
 2669
2670/* Helpers to record a specific piece of task information */
 2671void tracing_record_cmdline(struct task_struct *task)
 2672{
 2673	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
 2674}
 2675
 2676void tracing_record_tgid(struct task_struct *task)
 2677{
 2678	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
 2679}
 2680
 2681/*
 2682 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 2683 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 2684 * simplifies those functions and keeps them in sync.
 2685 */
 2686enum print_line_t trace_handle_return(struct trace_seq *s)
 2687{
 2688	return trace_seq_has_overflowed(s) ?
 2689		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
 2690}
 2691EXPORT_SYMBOL_GPL(trace_handle_return);
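/*
 * Hedged usage sketch (not part of trace.c): an event's .trace() output
 * callback writes into iter->seq and lets trace_handle_return() above turn an
 * overflowed trace_seq into TRACE_TYPE_PARTIAL_LINE. The event and message
 * below are hypothetical.
 */
static enum print_line_t my_event_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_puts(s, "my_event fired\n");
	return trace_handle_return(s);	/* PARTIAL_LINE if the seq overflowed */
}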
 2692
 2693static unsigned short migration_disable_value(void)
 2694{
 2695#if defined(CONFIG_SMP)
 2696	return current->migration_disabled;
 2697#else
 2698	return 0;
 2699#endif
 2700}
 2701
 2702unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
 2703{
 2704	unsigned int trace_flags = irqs_status;
 2705	unsigned int pc;
 2706
 2707	pc = preempt_count();
 2708
 2709	if (pc & NMI_MASK)
 2710		trace_flags |= TRACE_FLAG_NMI;
 2711	if (pc & HARDIRQ_MASK)
 2712		trace_flags |= TRACE_FLAG_HARDIRQ;
 2713	if (in_serving_softirq())
 2714		trace_flags |= TRACE_FLAG_SOFTIRQ;
 2715	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
 2716		trace_flags |= TRACE_FLAG_BH_OFF;
 2717
 2718	if (tif_need_resched())
 2719		trace_flags |= TRACE_FLAG_NEED_RESCHED;
 2720	if (test_preempt_need_resched())
 2721		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
 2722	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
 2723		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
 2724}
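/*
 * Illustrative sketch (not part of trace.c): how the word built by
 * tracing_gen_ctx_irq_test() above is laid out - TRACE_FLAG_* bits in the
 * upper 16 bits, the clamped preempt count in the low nibble, and the clamped
 * migrate-disable count in the next nibble.
 */
static inline void example_decode_trace_ctx(unsigned int trace_ctx,
					    unsigned int *flags,
					    unsigned int *preempt_cnt,
					    unsigned int *migrate_disable)
{
	*flags		 = trace_ctx >> 16;
	*preempt_cnt	 = trace_ctx & 0xf;
	*migrate_disable = (trace_ctx >> 4) & 0xf;
}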
 2725
 2726struct ring_buffer_event *
 2727trace_buffer_lock_reserve(struct trace_buffer *buffer,
 2728			  int type,
 2729			  unsigned long len,
 2730			  unsigned int trace_ctx)
 2731{
 2732	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
 2733}
 2734
 2735DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
 2736DEFINE_PER_CPU(int, trace_buffered_event_cnt);
 2737static int trace_buffered_event_ref;
 2738
 2739/**
 2740 * trace_buffered_event_enable - enable buffering events
 2741 *
 2742 * When events are being filtered, it is quicker to use a temporary
 2743 * buffer to write the event data into if there's a likely chance
2744 * that it will not be committed. Discarding an event from the ring
2745 * buffer is slower than committing it, and much slower than copying
2746 * the data and then committing.
2747 *
2748 * When an event is to be filtered, allocate per cpu buffers to
2749 * write the event data into; if the event is then filtered and discarded,
2750 * it is simply dropped, otherwise the entire data is committed
2751 * in one shot.
 2752 */
 2753void trace_buffered_event_enable(void)
 2754{
 2755	struct ring_buffer_event *event;
 2756	struct page *page;
 2757	int cpu;
 2758
 2759	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
 2760
 2761	if (trace_buffered_event_ref++)
 2762		return;
 2763
 2764	for_each_tracing_cpu(cpu) {
 2765		page = alloc_pages_node(cpu_to_node(cpu),
 2766					GFP_KERNEL | __GFP_NORETRY, 0);
 2767		/* This is just an optimization and can handle failures */
 2768		if (!page) {
 2769			pr_err("Failed to allocate event buffer\n");
 2770			break;
 2771		}
 2772
 2773		event = page_address(page);
 2774		memset(event, 0, sizeof(*event));
 2775
 2776		per_cpu(trace_buffered_event, cpu) = event;
 2777
 2778		preempt_disable();
 2779		if (cpu == smp_processor_id() &&
 2780		    __this_cpu_read(trace_buffered_event) !=
 2781		    per_cpu(trace_buffered_event, cpu))
 2782			WARN_ON_ONCE(1);
 2783		preempt_enable();
 2784	}
 2785}
 2786
 2787static void enable_trace_buffered_event(void *data)
 2788{
 2789	/* Probably not needed, but do it anyway */
 2790	smp_rmb();
 2791	this_cpu_dec(trace_buffered_event_cnt);
 2792}
 2793
 2794static void disable_trace_buffered_event(void *data)
 2795{
 2796	this_cpu_inc(trace_buffered_event_cnt);
 2797}
 2798
 2799/**
 2800 * trace_buffered_event_disable - disable buffering events
 2801 *
 2802 * When a filter is removed, it is faster to not use the buffered
 2803 * events, and to commit directly into the ring buffer. Free up
 2804 * the temp buffers when there are no more users. This requires
 2805 * special synchronization with current events.
 2806 */
 2807void trace_buffered_event_disable(void)
 2808{
 2809	int cpu;
 2810
 2811	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
 2812
 2813	if (WARN_ON_ONCE(!trace_buffered_event_ref))
 2814		return;
 2815
 2816	if (--trace_buffered_event_ref)
 2817		return;
 2818
 2819	/* For each CPU, set the buffer as used. */
 2820	on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
 2821			 NULL, true);
 2822
 2823	/* Wait for all current users to finish */
 2824	synchronize_rcu();
 2825
 2826	for_each_tracing_cpu(cpu) {
 2827		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
 2828		per_cpu(trace_buffered_event, cpu) = NULL;
 2829	}
 2830
 2831	/*
2832	 * Wait for all CPUs that may have started checking whether they can use
2833	 * their event buffer only after the previous synchronize_rcu() call and
2834	 * that still read a valid pointer from trace_buffered_event. They must
2835	 * not see a cleared trace_buffered_event_cnt, or they could wrongly
2836	 * decide to use the pointed-to buffer, which is now freed.
 2837	 */
 2838	synchronize_rcu();
 2839
 2840	/* For each CPU, relinquish the buffer */
 2841	on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
 2842			 true);
 2843}
 2844
 2845static struct trace_buffer *temp_buffer;
 2846
 2847struct ring_buffer_event *
 2848trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
 2849			  struct trace_event_file *trace_file,
 2850			  int type, unsigned long len,
 2851			  unsigned int trace_ctx)
 2852{
 2853	struct ring_buffer_event *entry;
 2854	struct trace_array *tr = trace_file->tr;
 2855	int val;
 2856
 2857	*current_rb = tr->array_buffer.buffer;
 2858
 2859	if (!tr->no_filter_buffering_ref &&
 2860	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
 2861		preempt_disable_notrace();
 2862		/*
 2863		 * Filtering is on, so try to use the per cpu buffer first.
 2864		 * This buffer will simulate a ring_buffer_event,
 2865		 * where the type_len is zero and the array[0] will
 2866		 * hold the full length.
2867		 * (see include/linux/ring_buffer.h for details on
 2868		 *  how the ring_buffer_event is structured).
 2869		 *
 2870		 * Using a temp buffer during filtering and copying it
 2871		 * on a matched filter is quicker than writing directly
 2872		 * into the ring buffer and then discarding it when
 2873		 * it doesn't match. That is because the discard
 2874		 * requires several atomic operations to get right.
 2875		 * Copying on match and doing nothing on a failed match
 2876		 * is still quicker than no copy on match, but having
 2877		 * to discard out of the ring buffer on a failed match.
 2878		 */
 2879		if ((entry = __this_cpu_read(trace_buffered_event))) {
 2880			int max_len = PAGE_SIZE - struct_size(entry, array, 1);
 2881
 2882			val = this_cpu_inc_return(trace_buffered_event_cnt);
 2883
 2884			/*
 2885			 * Preemption is disabled, but interrupts and NMIs
 2886			 * can still come in now. If that happens after
 2887			 * the above increment, then it will have to go
 2888			 * back to the old method of allocating the event
 2889			 * on the ring buffer, and if the filter fails, it
 2890			 * will have to call ring_buffer_discard_commit()
 2891			 * to remove it.
 2892			 *
 2893			 * Need to also check the unlikely case that the
 2894			 * length is bigger than the temp buffer size.
 2895			 * If that happens, then the reserve is pretty much
 2896			 * guaranteed to fail, as the ring buffer currently
 2897			 * only allows events less than a page. But that may
 2898			 * change in the future, so let the ring buffer reserve
 2899			 * handle the failure in that case.
 2900			 */
 2901			if (val == 1 && likely(len <= max_len)) {
 2902				trace_event_setup(entry, type, trace_ctx);
 2903				entry->array[0] = len;
 2904				/* Return with preemption disabled */
 2905				return entry;
 2906			}
 2907			this_cpu_dec(trace_buffered_event_cnt);
 2908		}
 2909		/* __trace_buffer_lock_reserve() disables preemption */
 2910		preempt_enable_notrace();
 2911	}
 2912
 2913	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
 2914					    trace_ctx);
 2915	/*
2916	 * If tracing is off but we have triggers enabled,
2917	 * we still need to look at the event data. Use the temp_buffer
2918	 * to store the trace event for the trigger to use. It's recursion
2919	 * safe and will not be recorded anywhere.
 2920	 */
 2921	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
 2922		*current_rb = temp_buffer;
 2923		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
 2924						    trace_ctx);
 2925	}
 2926	return entry;
 2927}
 2928EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
 2929
 2930static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
 2931static DEFINE_MUTEX(tracepoint_printk_mutex);
 2932
 2933static void output_printk(struct trace_event_buffer *fbuffer)
 2934{
 2935	struct trace_event_call *event_call;
 2936	struct trace_event_file *file;
 2937	struct trace_event *event;
 2938	unsigned long flags;
 2939	struct trace_iterator *iter = tracepoint_print_iter;
 2940
 2941	/* We should never get here if iter is NULL */
 2942	if (WARN_ON_ONCE(!iter))
 2943		return;
 2944
 2945	event_call = fbuffer->trace_file->event_call;
 2946	if (!event_call || !event_call->event.funcs ||
 2947	    !event_call->event.funcs->trace)
 2948		return;
 2949
 2950	file = fbuffer->trace_file;
 2951	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
 2952	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
 2953	     !filter_match_preds(file->filter, fbuffer->entry)))
 2954		return;
 2955
 2956	event = &fbuffer->trace_file->event_call->event;
 2957
 2958	raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
 2959	trace_seq_init(&iter->seq);
 2960	iter->ent = fbuffer->entry;
 2961	event_call->event.funcs->trace(iter, 0, event);
 2962	trace_seq_putc(&iter->seq, 0);
 2963	printk("%s", iter->seq.buffer);
 2964
 2965	raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
 2966}
 2967
 2968int tracepoint_printk_sysctl(struct ctl_table *table, int write,
 2969			     void *buffer, size_t *lenp,
 2970			     loff_t *ppos)
 2971{
 2972	int save_tracepoint_printk;
 2973	int ret;
 2974
 2975	mutex_lock(&tracepoint_printk_mutex);
 2976	save_tracepoint_printk = tracepoint_printk;
 2977
 2978	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 2979
 2980	/*
 2981	 * This will force exiting early, as tracepoint_printk
2982	 * is always zero when tracepoint_print_iter is not allocated.
 2983	 */
 2984	if (!tracepoint_print_iter)
 2985		tracepoint_printk = 0;
 2986
 2987	if (save_tracepoint_printk == tracepoint_printk)
 2988		goto out;
 2989
 2990	if (tracepoint_printk)
 2991		static_key_enable(&tracepoint_printk_key.key);
 2992	else
 2993		static_key_disable(&tracepoint_printk_key.key);
 2994
 2995 out:
 2996	mutex_unlock(&tracepoint_printk_mutex);
 2997
 2998	return ret;
 2999}
 3000
 3001void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 3002{
 3003	enum event_trigger_type tt = ETT_NONE;
 3004	struct trace_event_file *file = fbuffer->trace_file;
 3005
 3006	if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
 3007			fbuffer->entry, &tt))
 3008		goto discard;
 3009
 3010	if (static_key_false(&tracepoint_printk_key.key))
 3011		output_printk(fbuffer);
 3012
 3013	if (static_branch_unlikely(&trace_event_exports_enabled))
 3014		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
 3015
 3016	trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
 3017			fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
 3018
 3019discard:
 3020	if (tt)
 3021		event_triggers_post_call(file, tt);
 3022
 3023}
 3024EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 3025
 3026/*
 3027 * Skip 3:
 3028 *
 3029 *   trace_buffer_unlock_commit_regs()
 3030 *   trace_event_buffer_commit()
 3031 *   trace_event_raw_event_xxx()
 3032 */
 3033# define STACK_SKIP 3
 3034
 3035void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 3036				     struct trace_buffer *buffer,
 3037				     struct ring_buffer_event *event,
 3038				     unsigned int trace_ctx,
 3039				     struct pt_regs *regs)
 3040{
 3041	__buffer_unlock_commit(buffer, event);
 3042
 3043	/*
 3044	 * If regs is not set, then skip the necessary functions.
 3045	 * Note, we can still get here via blktrace, wakeup tracer
 3046	 * and mmiotrace, but that's ok if they lose a function or
 3047	 * two. They are not that meaningful.
 3048	 */
 3049	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
 3050	ftrace_trace_userstack(tr, buffer, trace_ctx);
 3051}
 3052
 3053/*
 3054 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 3055 */
 3056void
 3057trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 3058				   struct ring_buffer_event *event)
 3059{
 3060	__buffer_unlock_commit(buffer, event);
 3061}
 3062
 3063void
 3064trace_function(struct trace_array *tr, unsigned long ip, unsigned long
 3065	       parent_ip, unsigned int trace_ctx)
 3066{
 3067	struct trace_event_call *call = &event_function;
 3068	struct trace_buffer *buffer = tr->array_buffer.buffer;
 3069	struct ring_buffer_event *event;
 3070	struct ftrace_entry *entry;
 3071
 3072	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 3073					    trace_ctx);
 3074	if (!event)
 3075		return;
 3076	entry	= ring_buffer_event_data(event);
 3077	entry->ip			= ip;
 3078	entry->parent_ip		= parent_ip;
 3079
 3080	if (!call_filter_check_discard(call, entry, buffer, event)) {
 3081		if (static_branch_unlikely(&trace_function_exports_enabled))
 3082			ftrace_exports(event, TRACE_EXPORT_FUNCTION);
 3083		__buffer_unlock_commit(buffer, event);
 3084	}
 3085}
 3086
 3087#ifdef CONFIG_STACKTRACE
 3088
 3089/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
 3090#define FTRACE_KSTACK_NESTING	4
 3091
 3092#define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)
 3093
 3094struct ftrace_stack {
 3095	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
 3096};
 3097
 3098
 3099struct ftrace_stacks {
 3100	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
 3101};
 3102
 3103static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
 3104static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 3105
 3106static void __ftrace_trace_stack(struct trace_buffer *buffer,
 3107				 unsigned int trace_ctx,
 3108				 int skip, struct pt_regs *regs)
 3109{
 3110	struct trace_event_call *call = &event_kernel_stack;
 3111	struct ring_buffer_event *event;
 3112	unsigned int size, nr_entries;
 3113	struct ftrace_stack *fstack;
 3114	struct stack_entry *entry;
 3115	int stackidx;
 3116
 3117	/*
3118	 * Add one, for this function and the call to save_stack_trace().
 3119	 * If regs is set, then these functions will not be in the way.
 3120	 */
 3121#ifndef CONFIG_UNWINDER_ORC
 3122	if (!regs)
 3123		skip++;
 3124#endif
 3125
 3126	preempt_disable_notrace();
 3127
 3128	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
 3129
 3130	/* This should never happen. If it does, yell once and skip */
 3131	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
 3132		goto out;
 3133
 3134	/*
 3135	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
 3136	 * interrupt will either see the value pre increment or post
 3137	 * increment. If the interrupt happens pre increment it will have
 3138	 * restored the counter when it returns.  We just need a barrier to
 3139	 * keep gcc from moving things around.
 3140	 */
 3141	barrier();
 3142
 3143	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
 3144	size = ARRAY_SIZE(fstack->calls);
 3145
 3146	if (regs) {
 3147		nr_entries = stack_trace_save_regs(regs, fstack->calls,
 3148						   size, skip);
 3149	} else {
 3150		nr_entries = stack_trace_save(fstack->calls, size, skip);
 3151	}
 3152
 3153	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
 3154				    struct_size(entry, caller, nr_entries),
 3155				    trace_ctx);
 3156	if (!event)
 3157		goto out;
 3158	entry = ring_buffer_event_data(event);
 3159
 3160	entry->size = nr_entries;
 3161	memcpy(&entry->caller, fstack->calls,
 3162	       flex_array_size(entry, caller, nr_entries));
 3163
 3164	if (!call_filter_check_discard(call, entry, buffer, event))
 3165		__buffer_unlock_commit(buffer, event);
 3166
 3167 out:
 3168	/* Again, don't let gcc optimize things here */
 3169	barrier();
 3170	__this_cpu_dec(ftrace_stack_reserve);
 3171	preempt_enable_notrace();
 3172
 3173}
 3174
 3175static inline void ftrace_trace_stack(struct trace_array *tr,
 3176				      struct trace_buffer *buffer,
 3177				      unsigned int trace_ctx,
 3178				      int skip, struct pt_regs *regs)
 3179{
 3180	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
 3181		return;
 3182
 3183	__ftrace_trace_stack(buffer, trace_ctx, skip, regs);
 3184}
 3185
 3186void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
 3187		   int skip)
 3188{
 3189	struct trace_buffer *buffer = tr->array_buffer.buffer;
 3190
 3191	if (rcu_is_watching()) {
 3192		__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
 3193		return;
 3194	}
 3195
 3196	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
 3197		return;
 3198
 3199	/*
 3200	 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
 3201	 * but if the above rcu_is_watching() failed, then the NMI
 3202	 * triggered someplace critical, and ct_irq_enter() should
 3203	 * not be called from NMI.
 3204	 */
 3205	if (unlikely(in_nmi()))
 3206		return;
 3207
 3208	ct_irq_enter_irqson();
 3209	__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
 3210	ct_irq_exit_irqson();
 3211}
 3212
 3213/**
 3214 * trace_dump_stack - record a stack back trace in the trace buffer
 3215 * @skip: Number of functions to skip (helper handlers)
 3216 */
 3217void trace_dump_stack(int skip)
 3218{
 3219	if (tracing_disabled || tracing_selftest_running)
 3220		return;
 3221
 3222#ifndef CONFIG_UNWINDER_ORC
 3223	/* Skip 1 to skip this function. */
 3224	skip++;
 3225#endif
 3226	__ftrace_trace_stack(global_trace.array_buffer.buffer,
 3227			     tracing_gen_ctx(), skip, NULL);
 3228}
 3229EXPORT_SYMBOL_GPL(trace_dump_stack);
 3230
 3231#ifdef CONFIG_USER_STACKTRACE_SUPPORT
 3232static DEFINE_PER_CPU(int, user_stack_count);
 3233
 3234static void
 3235ftrace_trace_userstack(struct trace_array *tr,
 3236		       struct trace_buffer *buffer, unsigned int trace_ctx)
 3237{
 3238	struct trace_event_call *call = &event_user_stack;
 3239	struct ring_buffer_event *event;
 3240	struct userstack_entry *entry;
 3241
 3242	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
 3243		return;
 3244
 3245	/*
3246	 * NMIs cannot handle page faults, even with fixups.
3247	 * Saving the user stack can (and often does) fault.
 3248	 */
 3249	if (unlikely(in_nmi()))
 3250		return;
 3251
 3252	/*
3253	 * Prevent recursion, since user stack tracing may
 3254	 * trigger other kernel events.
 3255	 */
 3256	preempt_disable();
 3257	if (__this_cpu_read(user_stack_count))
 3258		goto out;
 3259
 3260	__this_cpu_inc(user_stack_count);
 3261
 3262	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 3263					    sizeof(*entry), trace_ctx);
 3264	if (!event)
 3265		goto out_drop_count;
 3266	entry	= ring_buffer_event_data(event);
 3267
 3268	entry->tgid		= current->tgid;
 3269	memset(&entry->caller, 0, sizeof(entry->caller));
 3270
 3271	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
 3272	if (!call_filter_check_discard(call, entry, buffer, event))
 3273		__buffer_unlock_commit(buffer, event);
 3274
 3275 out_drop_count:
 3276	__this_cpu_dec(user_stack_count);
 3277 out:
 3278	preempt_enable();
 3279}
 3280#else /* CONFIG_USER_STACKTRACE_SUPPORT */
 3281static void ftrace_trace_userstack(struct trace_array *tr,
 3282				   struct trace_buffer *buffer,
 3283				   unsigned int trace_ctx)
 3284{
 3285}
 3286#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
 3287
 3288#endif /* CONFIG_STACKTRACE */
 3289
 3290static inline void
 3291func_repeats_set_delta_ts(struct func_repeats_entry *entry,
 3292			  unsigned long long delta)
 3293{
 3294	entry->bottom_delta_ts = delta & U32_MAX;
 3295	entry->top_delta_ts = (delta >> 32);
 3296}
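/*
 * Illustrative sketch (not part of trace.c): the reader side reassembles the
 * delta split by func_repeats_set_delta_ts() above from the two 32-bit
 * fields of the event.
 */
static inline u64 example_func_repeats_get_delta_ts(struct func_repeats_entry *entry)
{
	return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
}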
 3297
 3298void trace_last_func_repeats(struct trace_array *tr,
 3299			     struct trace_func_repeats *last_info,
 3300			     unsigned int trace_ctx)
 3301{
 3302	struct trace_buffer *buffer = tr->array_buffer.buffer;
 3303	struct func_repeats_entry *entry;
 3304	struct ring_buffer_event *event;
 3305	u64 delta;
 3306
 3307	event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
 3308					    sizeof(*entry), trace_ctx);
 3309	if (!event)
 3310		return;
 3311
 3312	delta = ring_buffer_event_time_stamp(buffer, event) -
 3313		last_info->ts_last_call;
 3314
 3315	entry = ring_buffer_event_data(event);
 3316	entry->ip = last_info->ip;
 3317	entry->parent_ip = last_info->parent_ip;
 3318	entry->count = last_info->count;
 3319	func_repeats_set_delta_ts(entry, delta);
 3320
 3321	__buffer_unlock_commit(buffer, event);
 3322}
 3323
 3324/* created for use with alloc_percpu */
 3325struct trace_buffer_struct {
 3326	int nesting;
 3327	char buffer[4][TRACE_BUF_SIZE];
 3328};
 3329
 3330static struct trace_buffer_struct __percpu *trace_percpu_buffer;
 3331
 3332/*
 3333 * This allows for lockless recording.  If we're nested too deeply, then
 3334 * this returns NULL.
 3335 */
 3336static char *get_trace_buf(void)
 3337{
 3338	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
 3339
 3340	if (!trace_percpu_buffer || buffer->nesting >= 4)
 3341		return NULL;
 3342
 3343	buffer->nesting++;
 3344
 3345	/* Interrupts must see nesting incremented before we use the buffer */
 3346	barrier();
 3347	return &buffer->buffer[buffer->nesting - 1][0];
 3348}
 3349
 3350static void put_trace_buf(void)
 3351{
 3352	/* Don't let the decrement of nesting leak before this */
 3353	barrier();
 3354	this_cpu_dec(trace_percpu_buffer->nesting);
 3355}
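/*
 * Illustrative sketch (not part of trace.c): the get_trace_buf()/put_trace_buf()
 * pattern above - a fixed number of nesting levels guarded only by a counter -
 * rendered as standalone user-space C with a thread-local in place of the
 * per-CPU variable (so no barrier()/preemption concerns apply here).
 */
#include <stddef.h>

#define EX_NESTING	4
#define EX_BUF_SIZE	1024

struct ex_scratch {
	int  nesting;
	char buf[EX_NESTING][EX_BUF_SIZE];
};

static __thread struct ex_scratch ex_scratch;

static char *ex_get_buf(void)
{
	if (ex_scratch.nesting >= EX_NESTING)
		return NULL;	/* nested too deeply; caller must bail out */
	return ex_scratch.buf[ex_scratch.nesting++];
}

static void ex_put_buf(void)
{
	ex_scratch.nesting--;
}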
 3356
 3357static int alloc_percpu_trace_buffer(void)
 3358{
 3359	struct trace_buffer_struct __percpu *buffers;
 3360
 3361	if (trace_percpu_buffer)
 3362		return 0;
 3363
 3364	buffers = alloc_percpu(struct trace_buffer_struct);
 3365	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
 3366		return -ENOMEM;
 3367
 3368	trace_percpu_buffer = buffers;
 3369	return 0;
 3370}
 3371
 3372static int buffers_allocated;
 3373
 3374void trace_printk_init_buffers(void)
 3375{
 3376	if (buffers_allocated)
 3377		return;
 3378
 3379	if (alloc_percpu_trace_buffer())
 3380		return;
 3381
 3382	/* trace_printk() is for debug use only. Don't use it in production. */
 3383
 3384	pr_warn("\n");
 3385	pr_warn("**********************************************************\n");
 3386	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
 3387	pr_warn("**                                                      **\n");
 3388	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
 3389	pr_warn("**                                                      **\n");
 3390	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
 3391	pr_warn("** unsafe for production use.                           **\n");
 3392	pr_warn("**                                                      **\n");
 3393	pr_warn("** If you see this message and you are not debugging    **\n");
 3394	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
 3395	pr_warn("**                                                      **\n");
 3396	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
 3397	pr_warn("**********************************************************\n");
 3398
 3399	/* Expand the buffers to set size */
 3400	tracing_update_buffers(&global_trace);
 3401
 3402	buffers_allocated = 1;
 3403
 3404	/*
 3405	 * trace_printk_init_buffers() can be called by modules.
 3406	 * If that happens, then we need to start cmdline recording
 3407	 * directly here. If the global_trace.buffer is already
 3408	 * allocated here, then this was called by module code.
 3409	 */
 3410	if (global_trace.array_buffer.buffer)
 3411		tracing_start_cmdline_record();
 3412}
 3413EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
 3414
 3415void trace_printk_start_comm(void)
 3416{
 3417	/* Start tracing comms if trace printk is set */
 3418	if (!buffers_allocated)
 3419		return;
 3420	tracing_start_cmdline_record();
 3421}
 3422
 3423static void trace_printk_start_stop_comm(int enabled)
 3424{
 3425	if (!buffers_allocated)
 3426		return;
 3427
 3428	if (enabled)
 3429		tracing_start_cmdline_record();
 3430	else
 3431		tracing_stop_cmdline_record();
 3432}
 3433
 3434/**
 3435 * trace_vbprintk - write binary msg to tracing buffer
 3436 * @ip:    The address of the caller
 3437 * @fmt:   The string format to write to the buffer
 3438 * @args:  Arguments for @fmt
 3439 */
 3440int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 3441{
 3442	struct trace_event_call *call = &event_bprint;
 3443	struct ring_buffer_event *event;
 3444	struct trace_buffer *buffer;
 3445	struct trace_array *tr = &global_trace;
 3446	struct bprint_entry *entry;
 3447	unsigned int trace_ctx;
 3448	char *tbuffer;
 3449	int len = 0, size;
 3450
 3451	if (unlikely(tracing_selftest_running || tracing_disabled))
 3452		return 0;
 3453
 3454	/* Don't pollute graph traces with trace_vprintk internals */
 3455	pause_graph_tracing();
 3456
 3457	trace_ctx = tracing_gen_ctx();
 3458	preempt_disable_notrace();
 3459
 3460	tbuffer = get_trace_buf();
 3461	if (!tbuffer) {
 3462		len = 0;
 3463		goto out_nobuffer;
 3464	}
 3465
 3466	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
 3467
 3468	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
 3469		goto out_put;
 3470
 3471	size = sizeof(*entry) + sizeof(u32) * len;
 3472	buffer = tr->array_buffer.buffer;
 3473	ring_buffer_nest_start(buffer);
 3474	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
 3475					    trace_ctx);
 3476	if (!event)
 3477		goto out;
 3478	entry = ring_buffer_event_data(event);
 3479	entry->ip			= ip;
 3480	entry->fmt			= fmt;
 3481
 3482	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 3483	if (!call_filter_check_discard(call, entry, buffer, event)) {
 3484		__buffer_unlock_commit(buffer, event);
 3485		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
 3486	}
 3487
 3488out:
 3489	ring_buffer_nest_end(buffer);
 3490out_put:
 3491	put_trace_buf();
 3492
 3493out_nobuffer:
 3494	preempt_enable_notrace();
 3495	unpause_graph_tracing();
 3496
 3497	return len;
 3498}
 3499EXPORT_SYMBOL_GPL(trace_vbprintk);
 3500
 3501__printf(3, 0)
 3502static int
 3503__trace_array_vprintk(struct trace_buffer *buffer,
 3504		      unsigned long ip, const char *fmt, va_list args)
 3505{
 3506	struct trace_event_call *call = &event_print;
 3507	struct ring_buffer_event *event;
 3508	int len = 0, size;
 3509	struct print_entry *entry;
 3510	unsigned int trace_ctx;
 3511	char *tbuffer;
 3512
 3513	if (tracing_disabled)
 3514		return 0;
 3515
 3516	/* Don't pollute graph traces with trace_vprintk internals */
 3517	pause_graph_tracing();
 3518
 3519	trace_ctx = tracing_gen_ctx();
 3520	preempt_disable_notrace();
 3521
 3522
 3523	tbuffer = get_trace_buf();
 3524	if (!tbuffer) {
 3525		len = 0;
 3526		goto out_nobuffer;
 3527	}
 3528
 3529	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 3530
 3531	size = sizeof(*entry) + len + 1;
 3532	ring_buffer_nest_start(buffer);
 3533	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 3534					    trace_ctx);
 3535	if (!event)
 3536		goto out;
 3537	entry = ring_buffer_event_data(event);
 3538	entry->ip = ip;
 3539
 3540	memcpy(&entry->buf, tbuffer, len + 1);
 3541	if (!call_filter_check_discard(call, entry, buffer, event)) {
 3542		__buffer_unlock_commit(buffer, event);
 3543		ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
 3544	}
 3545
 3546out:
 3547	ring_buffer_nest_end(buffer);
 3548	put_trace_buf();
 3549
 3550out_nobuffer:
 3551	preempt_enable_notrace();
 3552	unpause_graph_tracing();
 3553
 3554	return len;
 3555}
 3556
 3557__printf(3, 0)
 3558int trace_array_vprintk(struct trace_array *tr,
 3559			unsigned long ip, const char *fmt, va_list args)
 3560{
 3561	if (tracing_selftest_running && tr == &global_trace)
 3562		return 0;
 3563
 3564	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
 3565}
 3566
 3567/**
 3568 * trace_array_printk - Print a message to a specific instance
 3569 * @tr: The instance trace_array descriptor
 3570 * @ip: The instruction pointer that this is called from.
 3571 * @fmt: The format to print (printf format)
 3572 *
3573 * If a subsystem sets up its own instance, it has the right to
3574 * printk strings into its tracing instance buffer using this
3575 * function. Note, this function will not write into the top level
3576 * buffer (use trace_printk() for that), as the top level buffer
3577 * should only contain events that can be individually disabled.
3578 * trace_printk() is only used for debugging a kernel, and should
3579 * never be incorporated in normal use.
 3580 *
 3581 * trace_array_printk() can be used, as it will not add noise to the
 3582 * top level tracing buffer.
 3583 *
 3584 * Note, trace_array_init_printk() must be called on @tr before this
 3585 * can be used.
 3586 */
 3587__printf(3, 0)
 3588int trace_array_printk(struct trace_array *tr,
 3589		       unsigned long ip, const char *fmt, ...)
 3590{
 3591	int ret;
 3592	va_list ap;
 3593
 3594	if (!tr)
 3595		return -ENOENT;
 3596
 3597	/* This is only allowed for created instances */
 3598	if (tr == &global_trace)
 3599		return 0;
 3600
 3601	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
 3602		return 0;
 3603
 3604	va_start(ap, fmt);
 3605	ret = trace_array_vprintk(tr, ip, fmt, ap);
 3606	va_end(ap);
 3607	return ret;
 3608}
 3609EXPORT_SYMBOL_GPL(trace_array_printk);
 3610
 3611/**
 3612 * trace_array_init_printk - Initialize buffers for trace_array_printk()
 3613 * @tr: The trace array to initialize the buffers for
 3614 *
3615 * As trace_array_printk() only writes into instances, calls to it are OK
3616 * to have in the kernel (unlike trace_printk()). This needs to be called
 3617 * before trace_array_printk() can be used on a trace_array.
 3618 */
 3619int trace_array_init_printk(struct trace_array *tr)
 3620{
 3621	if (!tr)
 3622		return -ENOENT;
 3623
 3624	/* This is only allowed for created instances */
 3625	if (tr == &global_trace)
 3626		return -EINVAL;
 3627
 3628	return alloc_percpu_trace_buffer();
 3629}
 3630EXPORT_SYMBOL_GPL(trace_array_init_printk);
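/*
 * Hedged usage sketch (not part of trace.c): the documented flow for a module
 * printk-ing into its own instance. trace_array_get_by_name() and
 * trace_array_put() live outside this excerpt and are assumed to have their
 * usual one-argument forms; "my_instance" is hypothetical.
 */
static int __init my_instance_init(void)
{
	struct trace_array *tr;

	tr = trace_array_get_by_name("my_instance");
	if (!tr)
		return -ENOMEM;

	if (trace_array_init_printk(tr)) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	trace_array_printk(tr, _THIS_IP_, "hello from %s\n", "my_module");
	return 0;
}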
 3631
 3632__printf(3, 4)
 3633int trace_array_printk_buf(struct trace_buffer *buffer,
 3634			   unsigned long ip, const char *fmt, ...)
 3635{
 3636	int ret;
 3637	va_list ap;
 3638
 3639	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
 3640		return 0;
 3641
 3642	va_start(ap, fmt);
 3643	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
 3644	va_end(ap);
 3645	return ret;
 3646}
 3647
 3648__printf(2, 0)
 3649int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 3650{
 3651	return trace_array_vprintk(&global_trace, ip, fmt, args);
 3652}
 3653EXPORT_SYMBOL_GPL(trace_vprintk);
 3654
 3655static void trace_iterator_increment(struct trace_iterator *iter)
 3656{
 3657	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
 3658
 3659	iter->idx++;
 3660	if (buf_iter)
 3661		ring_buffer_iter_advance(buf_iter);
 3662}
 3663
 3664static struct trace_entry *
 3665peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 3666		unsigned long *lost_events)
 3667{
 3668	struct ring_buffer_event *event;
 3669	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
 3670
 3671	if (buf_iter) {
 3672		event = ring_buffer_iter_peek(buf_iter, ts);
 3673		if (lost_events)
 3674			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
 3675				(unsigned long)-1 : 0;
 3676	} else {
 3677		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
 3678					 lost_events);
 3679	}
 3680
 3681	if (event) {
 3682		iter->ent_size = ring_buffer_event_length(event);
 3683		return ring_buffer_event_data(event);
 3684	}
 3685	iter->ent_size = 0;
 3686	return NULL;
 3687}
 3688
 3689static struct trace_entry *
 3690__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
 3691		  unsigned long *missing_events, u64 *ent_ts)
 3692{
 3693	struct trace_buffer *buffer = iter->array_buffer->buffer;
 3694	struct trace_entry *ent, *next = NULL;
 3695	unsigned long lost_events = 0, next_lost = 0;
 3696	int cpu_file = iter->cpu_file;
 3697	u64 next_ts = 0, ts;
 3698	int next_cpu = -1;
 3699	int next_size = 0;
 3700	int cpu;
 3701
 3702	/*
  3703	 * If we are in a per_cpu trace file, don't bother iterating over
  3704	 * all CPUs; just peek at that CPU directly.
 3705	 */
 3706	if (cpu_file > RING_BUFFER_ALL_CPUS) {
 3707		if (ring_buffer_empty_cpu(buffer, cpu_file))
 3708			return NULL;
 3709		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
 3710		if (ent_cpu)
 3711			*ent_cpu = cpu_file;
 3712
 3713		return ent;
 3714	}
 3715
 3716	for_each_tracing_cpu(cpu) {
 3717
 3718		if (ring_buffer_empty_cpu(buffer, cpu))
 3719			continue;
 3720
 3721		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
 3722
 3723		/*
 3724		 * Pick the entry with the smallest timestamp:
 3725		 */
 3726		if (ent && (!next || ts < next_ts)) {
 3727			next = ent;
 3728			next_cpu = cpu;
 3729			next_ts = ts;
 3730			next_lost = lost_events;
 3731			next_size = iter->ent_size;
 3732		}
 3733	}
 3734
 3735	iter->ent_size = next_size;
 3736
 3737	if (ent_cpu)
 3738		*ent_cpu = next_cpu;
 3739
 3740	if (ent_ts)
 3741		*ent_ts = next_ts;
 3742
 3743	if (missing_events)
 3744		*missing_events = next_lost;
 3745
 3746	return next;
 3747}
 3748
 3749#define STATIC_FMT_BUF_SIZE	128
 3750static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
 3751
 3752char *trace_iter_expand_format(struct trace_iterator *iter)
 3753{
 3754	char *tmp;
 3755
 3756	/*
 3757	 * iter->tr is NULL when used with tp_printk, which makes
 3758	 * this get called where it is not safe to call krealloc().
 3759	 */
 3760	if (!iter->tr || iter->fmt == static_fmt_buf)
 3761		return NULL;
 3762
 3763	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
 3764		       GFP_KERNEL);
 3765	if (tmp) {
 3766		iter->fmt_size += STATIC_FMT_BUF_SIZE;
 3767		iter->fmt = tmp;
 3768	}
 3769
 3770	return tmp;
 3771}
 3772
 3773/* Returns true if the string is safe to dereference from an event */
 3774static bool trace_safe_str(struct trace_iterator *iter, const char *str,
 3775			   bool star, int len)
 3776{
 3777	unsigned long addr = (unsigned long)str;
 3778	struct trace_event *trace_event;
 3779	struct trace_event_call *event;
 3780
 3781	/* Ignore strings with no length */
 3782	if (star && !len)
 3783		return true;
 3784
 3785	/* OK if part of the event data */
 3786	if ((addr >= (unsigned long)iter->ent) &&
 3787	    (addr < (unsigned long)iter->ent + iter->ent_size))
 3788		return true;
 3789
 3790	/* OK if part of the temp seq buffer */
 3791	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
 3792	    (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE))
 3793		return true;
 3794
 3795	/* Core rodata can not be freed */
 3796	if (is_kernel_rodata(addr))
 3797		return true;
 3798
 3799	if (trace_is_tracepoint_string(str))
 3800		return true;
 3801
 3802	/*
 3803	 * Now this could be a module event, referencing core module
 3804	 * data, which is OK.
 3805	 */
 3806	if (!iter->ent)
 3807		return false;
 3808
 3809	trace_event = ftrace_find_event(iter->ent->type);
 3810	if (!trace_event)
 3811		return false;
 3812
 3813	event = container_of(trace_event, struct trace_event_call, event);
 3814	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
 3815		return false;
 3816
 3817	/* Would rather have rodata, but this will suffice */
 3818	if (within_module_core(addr, event->module))
 3819		return true;
 3820
 3821	return false;
 3822}
 3823
 3824static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
 3825
 3826static int test_can_verify_check(const char *fmt, ...)
 3827{
 3828	char buf[16];
 3829	va_list ap;
 3830	int ret;
 3831
 3832	/*
  3833	 * The verifier depends on vsnprintf() modifying the va_list
  3834	 * passed to it, i.e. on it being passed by reference. Some
  3835	 * architectures (like x86_32) pass it by value, which means that
  3836	 * vsnprintf() does not modify the caller's va_list, and the
  3837	 * verifier would then need to be able to understand all the
  3838	 * values that vsnprintf can use. If it is passed by value, then
  3839	 * the verifier is disabled.
 3840	 */
 3841	va_start(ap, fmt);
 3842	vsnprintf(buf, 16, "%d", ap);
 3843	ret = va_arg(ap, int);
 3844	va_end(ap);
 3845
 3846	return ret;
 3847}
 3848
 3849static void test_can_verify(void)
 3850{
 3851	if (!test_can_verify_check("%d %d", 0, 1)) {
 3852		pr_info("trace event string verifier disabled\n");
 3853		static_branch_inc(&trace_no_verify);
 3854	}
 3855}
 3856
 3857/**
 3858 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
 3859 * @iter: The iterator that holds the seq buffer and the event being printed
 3860 * @fmt: The format used to print the event
 3861 * @ap: The va_list holding the data to print from @fmt.
 3862 *
 3863 * This writes the data into the @iter->seq buffer using the data from
 3864 * @fmt and @ap. If the format has a %s, then the source of the string
 3865 * is examined to make sure it is safe to print, otherwise it will
  3866 * warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string
 3867 * pointer.
 3868 */
 3869void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
 3870			 va_list ap)
 3871{
 3872	const char *p = fmt;
 3873	const char *str;
 3874	int i, j;
 3875
 3876	if (WARN_ON_ONCE(!fmt))
 3877		return;
 3878
 3879	if (static_branch_unlikely(&trace_no_verify))
 3880		goto print;
 3881
 3882	/* Don't bother checking when doing a ftrace_dump() */
 3883	if (iter->fmt == static_fmt_buf)
 3884		goto print;
 3885
 3886	while (*p) {
 3887		bool star = false;
 3888		int len = 0;
 3889
 3890		j = 0;
 3891
 3892		/* We only care about %s and variants */
 3893		for (i = 0; p[i]; i++) {
 3894			if (i + 1 >= iter->fmt_size) {
 3895				/*
 3896				 * If we can't expand the copy buffer,
 3897				 * just print it.
 3898				 */
 3899				if (!trace_iter_expand_format(iter))
 3900					goto print;
 3901			}
 3902
 3903			if (p[i] == '\\' && p[i+1]) {
 3904				i++;
 3905				continue;
 3906			}
 3907			if (p[i] == '%') {
 3908				/* Need to test cases like %08.*s */
 3909				for (j = 1; p[i+j]; j++) {
 3910					if (isdigit(p[i+j]) ||
 3911					    p[i+j] == '.')
 3912						continue;
 3913					if (p[i+j] == '*') {
 3914						star = true;
 3915						continue;
 3916					}
 3917					break;
 3918				}
 3919				if (p[i+j] == 's')
 3920					break;
 3921				star = false;
 3922			}
 3923			j = 0;
 3924		}
 3925		/* If no %s found then just print normally */
 3926		if (!p[i])
 3927			break;
 3928
 3929		/* Copy up to the %s, and print that */
 3930		strncpy(iter->fmt, p, i);
 3931		iter->fmt[i] = '\0';
 3932		trace_seq_vprintf(&iter->seq, iter->fmt, ap);
 3933
 3934		/*
 3935		 * If iter->seq is full, the above call no longer guarantees
 3936		 * that ap is in sync with fmt processing, and further calls
 3937		 * to va_arg() can return wrong positional arguments.
 3938		 *
 3939		 * Ensure that ap is no longer used in this case.
 3940		 */
 3941		if (iter->seq.full) {
 3942			p = "";
 3943			break;
 3944		}
 3945
 3946		if (star)
 3947			len = va_arg(ap, int);
 3948
 3949		/* The ap now points to the string data of the %s */
 3950		str = va_arg(ap, const char *);
 3951
 3952		/*
 3953		 * If you hit this warning, it is likely that the
 3954		 * trace event in question used %s on a string that
 3955		 * was saved at the time of the event, but may not be
 3956		 * around when the trace is read. Use __string(),
 3957		 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
 3958		 * instead. See samples/trace_events/trace-events-sample.h
 3959		 * for reference.
 3960		 */
 3961		if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
 3962			      "fmt: '%s' current_buffer: '%s'",
 3963			      fmt, seq_buf_str(&iter->seq.seq))) {
 3964			int ret;
 3965
 3966			/* Try to safely read the string */
 3967			if (star) {
 3968				if (len + 1 > iter->fmt_size)
 3969					len = iter->fmt_size - 1;
 3970				if (len < 0)
 3971					len = 0;
 3972				ret = copy_from_kernel_nofault(iter->fmt, str, len);
 3973				iter->fmt[len] = 0;
 3974				star = false;
 3975			} else {
 3976				ret = strncpy_from_kernel_nofault(iter->fmt, str,
 3977								  iter->fmt_size);
 3978			}
 3979			if (ret < 0)
 3980				trace_seq_printf(&iter->seq, "(0x%px)", str);
 3981			else
 3982				trace_seq_printf(&iter->seq, "(0x%px:%s)",
 3983						 str, iter->fmt);
 3984			str = "[UNSAFE-MEMORY]";
 3985			strcpy(iter->fmt, "%s");
 3986		} else {
 3987			strncpy(iter->fmt, p + i, j + 1);
 3988			iter->fmt[j+1] = '\0';
 3989		}
 3990		if (star)
 3991			trace_seq_printf(&iter->seq, iter->fmt, len, str);
 3992		else
 3993			trace_seq_printf(&iter->seq, iter->fmt, str);
 3994
 3995		p += i + j + 1;
 3996	}
 3997 print:
 3998	if (*p)
 3999		trace_seq_vprintf(&iter->seq, p, ap);
 4000}
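/*
 * A sketch of the TRACE_EVENT() pattern that the warning above points
 * to; see samples/trace_events/trace-events-sample.h for the maintained
 * example. The event and field names here are illustrative:
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(const char *name),
 *		TP_ARGS(name),
 *		TP_STRUCT__entry(
 *			__string(name, name)
 *		),
 *		TP_fast_assign(
 *			__assign_str(name, name);
 *		),
 *		TP_printk("name=%s", __get_str(name))
 *	);
 */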
 4001
 4002const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
 4003{
 4004	const char *p, *new_fmt;
 4005	char *q;
 4006
 4007	if (WARN_ON_ONCE(!fmt))
 4008		return fmt;
 4009
 4010	if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
 4011		return fmt;
 4012
 4013	p = fmt;
 4014	new_fmt = q = iter->fmt;
 4015	while (*p) {
 4016		if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
 4017			if (!trace_iter_expand_format(iter))
 4018				return fmt;
 4019
 4020			q += iter->fmt - new_fmt;
 4021			new_fmt = iter->fmt;
 4022		}
 4023
 4024		*q++ = *p++;
 4025
 4026		/* Replace %p with %px */
 4027		if (p[-1] == '%') {
 4028			if (p[0] == '%') {
 4029				*q++ = *p++;
 4030			} else if (p[0] == 'p' && !isalnum(p[1])) {
 4031				*q++ = *p++;
 4032				*q++ = 'x';
 4033			}
 4034		}
 4035	}
 4036	*q = '\0';
 4037
 4038	return new_fmt;
 4039}
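/*
 * For illustration (format strings made up): with the hash-ptr trace
 * option cleared, trace_event_format() turns "ptr=%p flags=%pK n=%%p"
 * into "ptr=%px flags=%pK n=%%p" -- only a bare %p gains the 'x';
 * %p followed by an alphanumeric extension (%pK, %ps, ...) and a
 * literal %%p are left untouched.
 */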
 4040
 4041#define STATIC_TEMP_BUF_SIZE	128
 4042static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
 4043
 4044/* Find the next real entry, without updating the iterator itself */
 4045struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 4046					  int *ent_cpu, u64 *ent_ts)
 4047{
 4048	/* __find_next_entry will reset ent_size */
 4049	int ent_size = iter->ent_size;
 4050	struct trace_entry *entry;
 4051
 4052	/*
 4053	 * If called from ftrace_dump(), then the iter->temp buffer
 4054	 * will be the static_temp_buf and not created from kmalloc.
 4055	 * If the entry size is greater than the buffer, we can
 4056	 * not save it. Just return NULL in that case. This is only
 4057	 * used to add markers when two consecutive events' time
 4058	 * stamps have a large delta. See trace_print_lat_context()
 4059	 */
 4060	if (iter->temp == static_temp_buf &&
 4061	    STATIC_TEMP_BUF_SIZE < ent_size)
 4062		return NULL;
 4063
 4064	/*
 4065	 * The __find_next_entry() may call peek_next_entry(), which may
 4066	 * call ring_buffer_peek() that may make the contents of iter->ent
 4067	 * undefined. Need to copy iter->ent now.
 4068	 */
 4069	if (iter->ent && iter->ent != iter->temp) {
 4070		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
 4071		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
 4072			void *temp;
 4073			temp = kmalloc(iter->ent_size, GFP_KERNEL);
 4074			if (!temp)
 4075				return NULL;
 4076			kfree(iter->temp);
 4077			iter->temp = temp;
 4078			iter->temp_size = iter->ent_size;
 4079		}
 4080		memcpy(iter->temp, iter->ent, iter->ent_size);
 4081		iter->ent = iter->temp;
 4082	}
 4083	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
 4084	/* Put back the original ent_size */
 4085	iter->ent_size = ent_size;
 4086
 4087	return entry;
 4088}
 4089
 4090/* Find the next real entry, and increment the iterator to the next entry */
 4091void *trace_find_next_entry_inc(struct trace_iterator *iter)
 4092{
 4093	iter->ent = __find_next_entry(iter, &iter->cpu,
 4094				      &iter->lost_events, &iter->ts);
 4095
 4096	if (iter->ent)
 4097		trace_iterator_increment(iter);
 4098
 4099	return iter->ent ? iter : NULL;
 4100}
 4101
 4102static void trace_consume(struct trace_iterator *iter)
 4103{
 4104	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
 4105			    &iter->lost_events);
 4106}
 4107
 4108static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 4109{
 4110	struct trace_iterator *iter = m->private;
 4111	int i = (int)*pos;
 4112	void *ent;
 4113
 4114	WARN_ON_ONCE(iter->leftover);
 4115
 4116	(*pos)++;
 4117
 4118	/* can't go backwards */
 4119	if (iter->idx > i)
 4120		return NULL;
 4121
 4122	if (iter->idx < 0)
 4123		ent = trace_find_next_entry_inc(iter);
 4124	else
 4125		ent = iter;
 4126
 4127	while (ent && iter->idx < i)
 4128		ent = trace_find_next_entry_inc(iter);
 4129
 4130	iter->pos = *pos;
 4131
 4132	return ent;
 4133}
 4134
 4135void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 4136{
 4137	struct ring_buffer_iter *buf_iter;
 4138	unsigned long entries = 0;
 4139	u64 ts;
 4140
 4141	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
 4142
 4143	buf_iter = trace_buffer_iter(iter, cpu);
 4144	if (!buf_iter)
 4145		return;
 4146
 4147	ring_buffer_iter_reset(buf_iter);
 4148
 4149	/*
 4150	 * We could have the case with the max latency tracers
  4151	 * that a reset never took place on a CPU. This is evidenced
 4152	 * by the timestamp being before the start of the buffer.
 4153	 */
 4154	while (ring_buffer_iter_peek(buf_iter, &ts)) {
 4155		if (ts >= iter->array_buffer->time_start)
 4156			break;
 4157		entries++;
 4158		ring_buffer_iter_advance(buf_iter);
 4159	}
 4160
 4161	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
 4162}
 4163
 4164/*
  4165 * The current tracer is copied to avoid holding a global lock
  4166 * across the whole iteration.
 4167 */
 4168static void *s_start(struct seq_file *m, loff_t *pos)
 4169{
 4170	struct trace_iterator *iter = m->private;
 4171	struct trace_array *tr = iter->tr;
 4172	int cpu_file = iter->cpu_file;
 4173	void *p = NULL;
 4174	loff_t l = 0;
 4175	int cpu;
 4176
 4177	mutex_lock(&trace_types_lock);
 4178	if (unlikely(tr->current_trace != iter->trace)) {
 4179		/* Close iter->trace before switching to the new current tracer */
 4180		if (iter->trace->close)
 4181			iter->trace->close(iter);
 4182		iter->trace = tr->current_trace;
 4183		/* Reopen the new current tracer */
 4184		if (iter->trace->open)
 4185			iter->trace->open(iter);
 4186	}
 4187	mutex_unlock(&trace_types_lock);
 4188
 4189#ifdef CONFIG_TRACER_MAX_TRACE
 4190	if (iter->snapshot && iter->trace->use_max_tr)
 4191		return ERR_PTR(-EBUSY);
 4192#endif
 4193
 4194	if (*pos != iter->pos) {
 4195		iter->ent = NULL;
 4196		iter->cpu = 0;
 4197		iter->idx = -1;
 4198
 4199		if (cpu_file == RING_BUFFER_ALL_CPUS) {
 4200			for_each_tracing_cpu(cpu)
 4201				tracing_iter_reset(iter, cpu);
 4202		} else
 4203			tracing_iter_reset(iter, cpu_file);
 4204
 4205		iter->leftover = 0;
 4206		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 4207			;
 4208
 4209	} else {
 4210		/*
 4211		 * If we overflowed the seq_file before, then we want
 4212		 * to just reuse the trace_seq buffer again.
 4213		 */
 4214		if (iter->leftover)
 4215			p = iter;
 4216		else {
 4217			l = *pos - 1;
 4218			p = s_next(m, p, &l);
 4219		}
 4220	}
 4221
 4222	trace_event_read_lock();
 4223	trace_access_lock(cpu_file);
 4224	return p;
 4225}
 4226
 4227static void s_stop(struct seq_file *m, void *p)
 4228{
 4229	struct trace_iterator *iter = m->private;
 4230
 4231#ifdef CONFIG_TRACER_MAX_TRACE
 4232	if (iter->snapshot && iter->trace->use_max_tr)
 4233		return;
 4234#endif
 4235
 4236	trace_access_unlock(iter->cpu_file);
 4237	trace_event_read_unlock();
 4238}
 4239
 4240static void
 4241get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
 4242		      unsigned long *entries, int cpu)
 4243{
 4244	unsigned long count;
 4245
 4246	count = ring_buffer_entries_cpu(buf->buffer, cpu);
 4247	/*
 4248	 * If this buffer has skipped entries, then we hold all
 4249	 * entries for the trace and we need to ignore the
 4250	 * ones before the time stamp.
 4251	 */
 4252	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
 4253		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
 4254		/* total is the same as the entries */
 4255		*total = count;
 4256	} else
 4257		*total = count +
 4258			ring_buffer_overrun_cpu(buf->buffer, cpu);
 4259	*entries = count;
 4260}
 4261
 4262static void
 4263get_total_entries(struct array_buffer *buf,
 4264		  unsigned long *total, unsigned long *entries)
 4265{
 4266	unsigned long t, e;
 4267	int cpu;
 4268
 4269	*total = 0;
 4270	*entries = 0;
 4271
 4272	for_each_tracing_cpu(cpu) {
 4273		get_total_entries_cpu(buf, &t, &e, cpu);
 4274		*total += t;
 4275		*entries += e;
 4276	}
 4277}
 4278
 4279unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
 4280{
 4281	unsigned long total, entries;
 4282
 4283	if (!tr)
 4284		tr = &global_trace;
 4285
 4286	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
 4287
 4288	return entries;
 4289}
 4290
 4291unsigned long trace_total_entries(struct trace_array *tr)
 4292{
 4293	unsigned long total, entries;
 4294
 4295	if (!tr)
 4296		tr = &global_trace;
 4297
 4298	get_total_entries(&tr->array_buffer, &total, &entries);
 4299
 4300	return entries;
 4301}
 4302
 4303static void print_lat_help_header(struct seq_file *m)
 4304{
 4305	seq_puts(m, "#                    _------=> CPU#            \n"
 4306		    "#                   / _-----=> irqs-off/BH-disabled\n"
 4307		    "#                  | / _----=> need-resched    \n"
 4308		    "#                  || / _---=> hardirq/softirq \n"
 4309		    "#                  ||| / _--=> preempt-depth   \n"
 4310		    "#                  |||| / _-=> migrate-disable \n"
 4311		    "#                  ||||| /     delay           \n"
 4312		    "#  cmd     pid     |||||| time  |   caller     \n"
 4313		    "#     \\   /        ||||||  \\    |    /       \n");
 4314}
 4315
 4316static void print_event_info(struct array_buffer *buf, struct seq_file *m)
 4317{
 4318	unsigned long total;
 4319	unsigned long entries;
 4320
 4321	get_total_entries(buf, &total, &entries);
 4322	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
 4323		   entries, total, num_online_cpus());
 4324	seq_puts(m, "#\n");
 4325}
 4326
 4327static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
 4328				   unsigned int flags)
 4329{
 4330	bool tgid = flags & TRACE_ITER_RECORD_TGID;
 4331
 4332	print_event_info(buf, m);
 4333
 4334	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
 4335	seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
 4336}
 4337
 4338static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
 4339				       unsigned int flags)
 4340{
 4341	bool tgid = flags & TRACE_ITER_RECORD_TGID;
 4342	static const char space[] = "            ";
 4343	int prec = tgid ? 12 : 2;
 4344
 4345	print_event_info(buf, m);
 4346
 4347	seq_printf(m, "#                            %.*s  _-----=> irqs-off/BH-disabled\n", prec, space);
 4348	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
 4349	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
 4350	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
 4351	seq_printf(m, "#                            %.*s||| / _-=> migrate-disable\n", prec, space);
 4352	seq_printf(m, "#                            %.*s|||| /     delay\n", prec, space);
 4353	seq_printf(m, "#           TASK-PID  %.*s CPU#  |||||  TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
 4354	seq_printf(m, "#              | |    %.*s   |   |||||     |         |\n", prec, "       |    ");
 4355}
 4356
 4357void
 4358print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 4359{
 4360	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
 4361	struct array_buffer *buf = iter->array_buffer;
 4362	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
 4363	struct tracer *type = iter->trace;
 4364	unsigned long entries;
 4365	unsigned long total;
 4366	const char *name = type->name;
 4367
 4368	get_total_entries(buf, &total, &entries);
 4369
 4370	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
 4371		   name, UTS_RELEASE);
 4372	seq_puts(m, "# -----------------------------------"
 4373		 "---------------------------------\n");
 4374	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
 4375		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
 4376		   nsecs_to_usecs(data->saved_latency),
 4377		   entries,
 4378		   total,
 4379		   buf->cpu,
 4380		   preempt_model_none()      ? "server" :
 4381		   preempt_model_voluntary() ? "desktop" :
 4382		   preempt_model_full()      ? "preempt" :
 4383		   preempt_model_rt()        ? "preempt_rt" :
 4384		   "unknown",
 4385		   /* These are reserved for later use */
 4386		   0, 0, 0, 0);
 4387#ifdef CONFIG_SMP
 4388	seq_printf(m, " #P:%d)\n", num_online_cpus());
 4389#else
 4390	seq_puts(m, ")\n");
 4391#endif
 4392	seq_puts(m, "#    -----------------\n");
 4393	seq_printf(m, "#    | task: %.16s-%d "
 4394		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
 4395		   data->comm, data->pid,
 4396		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
 4397		   data->policy, data->rt_priority);
 4398	seq_puts(m, "#    -----------------\n");
 4399
 4400	if (data->critical_start) {
 4401		seq_puts(m, "#  => started at: ");
 4402		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
 4403		trace_print_seq(m, &iter->seq);
 4404		seq_puts(m, "\n#  => ended at:   ");
 4405		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
 4406		trace_print_seq(m, &iter->seq);
 4407		seq_puts(m, "\n#\n");
 4408	}
 4409
 4410	seq_puts(m, "#\n");
 4411}
 4412
 4413static void test_cpu_buff_start(struct trace_iterator *iter)
 4414{
 4415	struct trace_seq *s = &iter->seq;
 4416	struct trace_array *tr = iter->tr;
 4417
 4418	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
 4419		return;
 4420
 4421	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
 4422		return;
 4423
 4424	if (cpumask_available(iter->started) &&
 4425	    cpumask_test_cpu(iter->cpu, iter->started))
 4426		return;
 4427
 4428	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
 4429		return;
 4430
 4431	if (cpumask_available(iter->started))
 4432		cpumask_set_cpu(iter->cpu, iter->started);
 4433
 4434	/* Don't print started cpu buffer for the first entry of the trace */
 4435	if (iter->idx > 1)
 4436		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
 4437				iter->cpu);
 4438}
 4439
 4440static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 4441{
 4442	struct trace_array *tr = iter->tr;
 4443	struct trace_seq *s = &iter->seq;
 4444	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
 4445	struct trace_entry *entry;
 4446	struct trace_event *event;
 4447
 4448	entry = iter->ent;
 4449
 4450	test_cpu_buff_start(iter);
 4451
 4452	event = ftrace_find_event(entry->type);
 4453
 4454	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 4455		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
 4456			trace_print_lat_context(iter);
 4457		else
 4458			trace_print_context(iter);
 4459	}
 4460
 4461	if (trace_seq_has_overflowed(s))
 4462		return TRACE_TYPE_PARTIAL_LINE;
 4463
 4464	if (event) {
 4465		if (tr->trace_flags & TRACE_ITER_FIELDS)
 4466			return print_event_fields(iter, event);
 4467		return event->funcs->trace(iter, sym_flags, event);
 4468	}
 4469
 4470	trace_seq_printf(s, "Unknown type %d\n", entry->type);
 4471
 4472	return trace_handle_return(s);
 4473}
 4474
 4475static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 4476{
 4477	struct trace_array *tr = iter->tr;
 4478	struct trace_seq *s = &iter->seq;
 4479	struct trace_entry *entry;
 4480	struct trace_event *event;
 4481
 4482	entry = iter->ent;
 4483
 4484	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
 4485		trace_seq_printf(s, "%d %d %llu ",
 4486				 entry->pid, iter->cpu, iter->ts);
 4487
 4488	if (trace_seq_has_overflowed(s))
 4489		return TRACE_TYPE_PARTIAL_LINE;
 4490
 4491	event = ftrace_find_event(entry->type);
 4492	if (event)
 4493		return event->funcs->raw(iter, 0, event);
 4494
 4495	trace_seq_printf(s, "%d ?\n", entry->type);
 4496
 4497	return trace_handle_return(s);
 4498}
 4499
 4500static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 4501{
 4502	struct trace_array *tr = iter->tr;
 4503	struct trace_seq *s = &iter->seq;
 4504	unsigned char newline = '\n';
 4505	struct trace_entry *entry;
 4506	struct trace_event *event;
 4507
 4508	entry = iter->ent;
 4509
 4510	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 4511		SEQ_PUT_HEX_FIELD(s, entry->pid);
 4512		SEQ_PUT_HEX_FIELD(s, iter->cpu);
 4513		SEQ_PUT_HEX_FIELD(s, iter->ts);
 4514		if (trace_seq_has_overflowed(s))
 4515			return TRACE_TYPE_PARTIAL_LINE;
 4516	}
 4517
 4518	event = ftrace_find_event(entry->type);
 4519	if (event) {
 4520		enum print_line_t ret = event->funcs->hex(iter, 0, event);
 4521		if (ret != TRACE_TYPE_HANDLED)
 4522			return ret;
 4523	}
 4524
 4525	SEQ_PUT_FIELD(s, newline);
 4526
 4527	return trace_handle_return(s);
 4528}
 4529
 4530static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 4531{
 4532	struct trace_array *tr = iter->tr;
 4533	struct trace_seq *s = &iter->seq;
 4534	struct trace_entry *entry;
 4535	struct trace_event *event;
 4536
 4537	entry = iter->ent;
 4538
 4539	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 4540		SEQ_PUT_FIELD(s, entry->pid);
 4541		SEQ_PUT_FIELD(s, iter->cpu);
 4542		SEQ_PUT_FIELD(s, iter->ts);
 4543		if (trace_seq_has_overflowed(s))
 4544			return TRACE_TYPE_PARTIAL_LINE;
 4545	}
 4546
 4547	event = ftrace_find_event(entry->type);
 4548	return event ? event->funcs->binary(iter, 0, event) :
 4549		TRACE_TYPE_HANDLED;
 4550}
 4551
 4552int trace_empty(struct trace_iterator *iter)
 4553{
 4554	struct ring_buffer_iter *buf_iter;
 4555	int cpu;
 4556
 4557	/* If we are looking at one CPU buffer, only check that one */
 4558	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
 4559		cpu = iter->cpu_file;
 4560		buf_iter = trace_buffer_iter(iter, cpu);
 4561		if (buf_iter) {
 4562			if (!ring_buffer_iter_empty(buf_iter))
 4563				return 0;
 4564		} else {
 4565			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
 4566				return 0;
 4567		}
 4568		return 1;
 4569	}
 4570
 4571	for_each_tracing_cpu(cpu) {
 4572		buf_iter = trace_buffer_iter(iter, cpu);
 4573		if (buf_iter) {
 4574			if (!ring_buffer_iter_empty(buf_iter))
 4575				return 0;
 4576		} else {
 4577			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
 4578				return 0;
 4579		}
 4580	}
 4581
 4582	return 1;
 4583}
 4584
 4585/*  Called with trace_event_read_lock() held. */
 4586enum print_line_t print_trace_line(struct trace_iterator *iter)
 4587{
 4588	struct trace_array *tr = iter->tr;
 4589	unsigned long trace_flags = tr->trace_flags;
 4590	enum print_line_t ret;
 4591
 4592	if (iter->lost_events) {
 4593		if (iter->lost_events == (unsigned long)-1)
 4594			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
 4595					 iter->cpu);
 4596		else
 4597			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
 4598					 iter->cpu, iter->lost_events);
 4599		if (trace_seq_has_overflowed(&iter->seq))
 4600			return TRACE_TYPE_PARTIAL_LINE;
 4601	}
 4602
 4603	if (iter->trace && iter->trace->print_line) {
 4604		ret = iter->trace->print_line(iter);
 4605		if (ret != TRACE_TYPE_UNHANDLED)
 4606			return ret;
 4607	}
 4608
 4609	if (iter->ent->type == TRACE_BPUTS &&
 4610			trace_flags & TRACE_ITER_PRINTK &&
 4611			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
 4612		return trace_print_bputs_msg_only(iter);
 4613
 4614	if (iter->ent->type == TRACE_BPRINT &&
 4615			trace_flags & TRACE_ITER_PRINTK &&
 4616			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
 4617		return trace_print_bprintk_msg_only(iter);
 4618
 4619	if (iter->ent->type == TRACE_PRINT &&
 4620			trace_flags & TRACE_ITER_PRINTK &&
 4621			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
 4622		return trace_print_printk_msg_only(iter);
 4623
 4624	if (trace_flags & TRACE_ITER_BIN)
 4625		return print_bin_fmt(iter);
 4626
 4627	if (trace_flags & TRACE_ITER_HEX)
 4628		return print_hex_fmt(iter);
 4629
 4630	if (trace_flags & TRACE_ITER_RAW)
 4631		return print_raw_fmt(iter);
 4632
 4633	return print_trace_fmt(iter);
 4634}
 4635
 4636void trace_latency_header(struct seq_file *m)
 4637{
 4638	struct trace_iterator *iter = m->private;
 4639	struct trace_array *tr = iter->tr;
 4640
 4641	/* print nothing if the buffers are empty */
 4642	if (trace_empty(iter))
 4643		return;
 4644
 4645	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
 4646		print_trace_header(m, iter);
 4647
 4648	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
 4649		print_lat_help_header(m);
 4650}
 4651
 4652void trace_default_header(struct seq_file *m)
 4653{
 4654	struct trace_iterator *iter = m->private;
 4655	struct trace_array *tr = iter->tr;
 4656	unsigned long trace_flags = tr->trace_flags;
 4657
 4658	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
 4659		return;
 4660
 4661	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
 4662		/* print nothing if the buffers are empty */
 4663		if (trace_empty(iter))
 4664			return;
 4665		print_trace_header(m, iter);
 4666		if (!(trace_flags & TRACE_ITER_VERBOSE))
 4667			print_lat_help_header(m);
 4668	} else {
 4669		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
 4670			if (trace_flags & TRACE_ITER_IRQ_INFO)
 4671				print_func_help_header_irq(iter->array_buffer,
 4672							   m, trace_flags);
 4673			else
 4674				print_func_help_header(iter->array_buffer, m,
 4675						       trace_flags);
 4676		}
 4677	}
 4678}
 4679
 4680static void test_ftrace_alive(struct seq_file *m)
 4681{
 4682	if (!ftrace_is_dead())
 4683		return;
 4684	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
 4685		    "#          MAY BE MISSING FUNCTION EVENTS\n");
 4686}
 4687
 4688#ifdef CONFIG_TRACER_MAX_TRACE
 4689static void show_snapshot_main_help(struct seq_file *m)
 4690{
 4691	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
 4692		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
 4693		    "#                      Takes a snapshot of the main buffer.\n"
 4694		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
  4695		    "#                      (Doesn't have to be '2'; works with any number that\n"
 4696		    "#                       is not a '0' or '1')\n");
 4697}
 4698
 4699static void show_snapshot_percpu_help(struct seq_file *m)
 4700{
 4701	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
 4702#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
 4703	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
 4704		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
 4705#else
 4706	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
 4707		    "#                     Must use main snapshot file to allocate.\n");
 4708#endif
 4709	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
  4710		    "#                      (Doesn't have to be '2'; works with any number that\n"
 4711		    "#                       is not a '0' or '1')\n");
 4712}
 4713
 4714static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
 4715{
 4716	if (iter->tr->allocated_snapshot)
 4717		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
 4718	else
 4719		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
 4720
 4721	seq_puts(m, "# Snapshot commands:\n");
 4722	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
 4723		show_snapshot_main_help(m);
 4724	else
 4725		show_snapshot_percpu_help(m);
 4726}
 4727#else
 4728/* Should never be called */
 4729static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
 4730#endif
 4731
 4732static int s_show(struct seq_file *m, void *v)
 4733{
 4734	struct trace_iterator *iter = v;
 4735	int ret;
 4736
 4737	if (iter->ent == NULL) {
 4738		if (iter->tr) {
 4739			seq_printf(m, "# tracer: %s\n", iter->trace->name);
 4740			seq_puts(m, "#\n");
 4741			test_ftrace_alive(m);
 4742		}
 4743		if (iter->snapshot && trace_empty(iter))
 4744			print_snapshot_help(m, iter);
 4745		else if (iter->trace && iter->trace->print_header)
 4746			iter->trace->print_header(m);
 4747		else
 4748			trace_default_header(m);
 4749
 4750	} else if (iter->leftover) {
 4751		/*
 4752		 * If we filled the seq_file buffer earlier, we
 4753		 * want to just show it now.
 4754		 */
 4755		ret = trace_print_seq(m, &iter->seq);
 4756
 4757		/* ret should this time be zero, but you never know */
 4758		iter->leftover = ret;
 4759
 4760	} else {
 4761		ret = print_trace_line(iter);
 4762		if (ret == TRACE_TYPE_PARTIAL_LINE) {
 4763			iter->seq.full = 0;
 4764			trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
 4765		}
 4766		ret = trace_print_seq(m, &iter->seq);
 4767		/*
 4768		 * If we overflow the seq_file buffer, then it will
 4769		 * ask us for this data again at start up.
 4770		 * Use that instead.
 4771		 *  ret is 0 if seq_file write succeeded.
 4772		 *        -1 otherwise.
 4773		 */
 4774		iter->leftover = ret;
 4775	}
 4776
 4777	return 0;
 4778}
 4779
 4780/*
  4781 * Should be used after trace_array_get(); trace_types_lock
  4782 * ensures that i_cdev was already initialized.
 4783 */
 4784static inline int tracing_get_cpu(struct inode *inode)
 4785{
 4786	if (inode->i_cdev) /* See trace_create_cpu_file() */
 4787		return (long)inode->i_cdev - 1;
 4788	return RING_BUFFER_ALL_CPUS;
 4789}
 4790
 4791static const struct seq_operations tracer_seq_ops = {
 4792	.start		= s_start,
 4793	.next		= s_next,
 4794	.stop		= s_stop,
 4795	.show		= s_show,
 4796};
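/*
 * Roughly how the seq_file core drives these callbacks when the "trace"
 * file is read (a sketch of the generic seq_file flow, not code taken
 * from this file):
 *
 *	p = s_start(m, &pos);
 *	while (p && !IS_ERR(p)) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 */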
 4797
 4798/*
 4799 * Note, as iter itself can be allocated and freed in different
 4800 * ways, this function is only used to free its content, and not
  4801 * the iterator itself. The only requirement on all the allocations
  4802 * is that they must zero all fields (kzalloc), as freeing works with
  4803 * either allocated content or NULL.
 4804 */
 4805static void free_trace_iter_content(struct trace_iterator *iter)
 4806{
 4807	/* The fmt is either NULL, allocated or points to static_fmt_buf */
 4808	if (iter->fmt != static_fmt_buf)
 4809		kfree(iter->fmt);
 4810
 4811	kfree(iter->temp);
 4812	kfree(iter->buffer_iter);
 4813	mutex_destroy(&iter->mutex);
 4814	free_cpumask_var(iter->started);
 4815}
 4816
 4817static struct trace_iterator *
 4818__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 4819{
 4820	struct trace_array *tr = inode->i_private;
 4821	struct trace_iterator *iter;
 4822	int cpu;
 4823
 4824	if (tracing_disabled)
 4825		return ERR_PTR(-ENODEV);
 4826
 4827	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
 4828	if (!iter)
 4829		return ERR_PTR(-ENOMEM);
 4830
 4831	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
 4832				    GFP_KERNEL);
 4833	if (!iter->buffer_iter)
 4834		goto release;
 4835
 4836	/*
 4837	 * trace_find_next_entry() may need to save off iter->ent.
 4838	 * It will place it into the iter->temp buffer. As most
  4839	 * events are less than 128 bytes, allocate a buffer of that size.
 4840	 * If one is greater, then trace_find_next_entry() will
 4841	 * allocate a new buffer to adjust for the bigger iter->ent.
 4842	 * It's not critical if it fails to get allocated here.
 4843	 */
 4844	iter->temp = kmalloc(128, GFP_KERNEL);
 4845	if (iter->temp)
 4846		iter->temp_size = 128;
 4847
 4848	/*
  4849	 * trace_event_printf() may need to modify the given format
  4850	 * string to replace %p with %px so that it shows the real address
  4851	 * instead of the hashed value. However, that is only needed for
  4852	 * event tracing; other tracers may not need it. Defer the
  4853	 * allocation until it is needed.
 4854	 */
 4855	iter->fmt = NULL;
 4856	iter->fmt_size = 0;
 4857
 4858	mutex_lock(&trace_types_lock);
 4859	iter->trace = tr->current_trace;
 4860
 4861	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 4862		goto fail;
 4863
 4864	iter->tr = tr;
 4865
 4866#ifdef CONFIG_TRACER_MAX_TRACE
 4867	/* Currently only the top directory has a snapshot */
 4868	if (tr->current_trace->print_max || snapshot)
 4869		iter->array_buffer = &tr->max_buffer;
 4870	else
 4871#endif
 4872		iter->array_buffer = &tr->array_buffer;
 4873	iter->snapshot = snapshot;
 4874	iter->pos = -1;
 4875	iter->cpu_file = tracing_get_cpu(inode);
 4876	mutex_init(&iter->mutex);
 4877
 4878	/* Notify the tracer early; before we stop tracing. */
 4879	if (iter->trace->open)
 4880		iter->trace->open(iter);
 4881
 4882	/* Annotate start of buffers if we had overruns */
 4883	if (ring_buffer_overruns(iter->array_buffer->buffer))
 4884		iter->iter_flags |= TRACE_FILE_ANNOTATE;
 4885
 4886	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
 4887	if (trace_clocks[tr->clock_id].in_ns)
 4888		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 4889
 4890	/*
 4891	 * If pause-on-trace is enabled, then stop the trace while
 4892	 * dumping, unless this is the "snapshot" file
 4893	 */
 4894	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
 4895		tracing_stop_tr(tr);
 4896
 4897	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 4898		for_each_tracing_cpu(cpu) {
 4899			iter->buffer_iter[cpu] =
 4900				ring_buffer_read_prepare(iter->array_buffer->buffer,
 4901							 cpu, GFP_KERNEL);
 4902		}
 4903		ring_buffer_read_prepare_sync();
 4904		for_each_tracing_cpu(cpu) {
 4905			ring_buffer_read_start(iter->buffer_iter[cpu]);
 4906			tracing_iter_reset(iter, cpu);
 4907		}
 4908	} else {
 4909		cpu = iter->cpu_file;
 4910		iter->buffer_iter[cpu] =
 4911			ring_buffer_read_prepare(iter->array_buffer->buffer,
 4912						 cpu, GFP_KERNEL);
 4913		ring_buffer_read_prepare_sync();
 4914		ring_buffer_read_start(iter->buffer_iter[cpu]);
 4915		tracing_iter_reset(iter, cpu);
 4916	}
 4917
 4918	mutex_unlock(&trace_types_lock);
 4919
 4920	return iter;
 4921
 4922 fail:
 4923	mutex_unlock(&trace_types_lock);
 4924	free_trace_iter_content(iter);
 4925release:
 4926	seq_release_private(inode, file);
 4927	return ERR_PTR(-ENOMEM);
 4928}
 4929
 4930int tracing_open_generic(struct inode *inode, struct file *filp)
 4931{
 4932	int ret;
 4933
 4934	ret = tracing_check_open_get_tr(NULL);
 4935	if (ret)
 4936		return ret;
 4937
 4938	filp->private_data = inode->i_private;
 4939	return 0;
 4940}
 4941
 4942bool tracing_is_disabled(void)
 4943{
  4944	return tracing_disabled;
 4945}
 4946
 4947/*
 4948 * Open and update trace_array ref count.
 4949 * Must have the current trace_array passed to it.
 4950 */
 4951int tracing_open_generic_tr(struct inode *inode, struct file *filp)
 4952{
 4953	struct trace_array *tr = inode->i_private;
 4954	int ret;
 4955
 4956	ret = tracing_check_open_get_tr(tr);
 4957	if (ret)
 4958		return ret;
 4959
 4960	filp->private_data = inode->i_private;
 4961
 4962	return 0;
 4963}
 4964
 4965/*
 4966 * The private pointer of the inode is the trace_event_file.
 4967 * Update the tr ref count associated to it.
 4968 */
 4969int tracing_open_file_tr(struct inode *inode, struct file *filp)
 4970{
 4971	struct trace_event_file *file = inode->i_private;
 4972	int ret;
 4973
 4974	ret = tracing_check_open_get_tr(file->tr);
 4975	if (ret)
 4976		return ret;
 4977
 4978	mutex_lock(&event_mutex);
 4979
 4980	/* Fail if the file is marked for removal */
 4981	if (file->flags & EVENT_FILE_FL_FREED) {
 4982		trace_array_put(file->tr);
 4983		ret = -ENODEV;
 4984	} else {
 4985		event_file_get(file);
 4986	}
 4987
 4988	mutex_unlock(&event_mutex);
 4989	if (ret)
 4990		return ret;
 4991
 4992	filp->private_data = inode->i_private;
 4993
 4994	return 0;
 4995}
 4996
 4997int tracing_release_file_tr(struct inode *inode, struct file *filp)
 4998{
 4999	struct trace_event_file *file = inode->i_private;
 5000
 5001	trace_array_put(file->tr);
 5002	event_file_put(file);
 5003
 5004	return 0;
 5005}
 5006
 5007int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
 5008{
 5009	tracing_release_file_tr(inode, filp);
 5010	return single_release(inode, filp);
 5011}
 5012
 5013static int tracing_mark_open(struct inode *inode, struct file *filp)
 5014{
 5015	stream_open(inode, filp);
 5016	return tracing_open_generic_tr(inode, filp);
 5017}
 5018
 5019static int tracing_release(struct inode *inode, struct file *file)
 5020{
 5021	struct trace_array *tr = inode->i_private;
 5022	struct seq_file *m = file->private_data;
 5023	struct trace_iterator *iter;
 5024	int cpu;
 5025
 5026	if (!(file->f_mode & FMODE_READ)) {
 5027		trace_array_put(tr);
 5028		return 0;
 5029	}
 5030
 5031	/* Writes do not use seq_file */
 5032	iter = m->private;
 5033	mutex_lock(&trace_types_lock);
 5034
 5035	for_each_tracing_cpu(cpu) {
 5036		if (iter->buffer_iter[cpu])
 5037			ring_buffer_read_finish(iter->buffer_iter[cpu]);
 5038	}
 5039
 5040	if (iter->trace && iter->trace->close)
 5041		iter->trace->close(iter);
 5042
 5043	if (!iter->snapshot && tr->stop_count)
 5044		/* reenable tracing if it was previously enabled */
 5045		tracing_start_tr(tr);
 5046
 5047	__trace_array_put(tr);
 5048
 5049	mutex_unlock(&trace_types_lock);
 5050
 5051	free_trace_iter_content(iter);
 5052	seq_release_private(inode, file);
 5053
 5054	return 0;
 5055}
 5056
 5057int tracing_release_generic_tr(struct inode *inode, struct file *file)
 5058{
 5059	struct trace_array *tr = inode->i_private;
 5060
 5061	trace_array_put(tr);
 5062	return 0;
 5063}
 5064
 5065static int tracing_single_release_tr(struct inode *inode, struct file *file)
 5066{
 5067	struct trace_array *tr = inode->i_private;
 5068
 5069	trace_array_put(tr);
 5070
 5071	return single_release(inode, file);
 5072}
 5073
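/*
 * Open handler for the "trace" file. Opening it for write with O_TRUNC
 * (e.g. "echo > trace" from a shell) erases the buffer contents below,
 * before any read iterator is set up.
 */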
 5074static int tracing_open(struct inode *inode, struct file *file)
 5075{
 5076	struct trace_array *tr = inode->i_private;
 5077	struct trace_iterator *iter;
 5078	int ret;
 5079
 5080	ret = tracing_check_open_get_tr(tr);
 5081	if (ret)
 5082		return ret;
 5083
 5084	/* If this file was open for write, then erase contents */
 5085	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 5086		int cpu = tracing_get_cpu(inode);
 5087		struct array_buffer *trace_buf = &tr->array_buffer;
 5088
 5089#ifdef CONFIG_TRACER_MAX_TRACE
 5090		if (tr->current_trace->print_max)
 5091			trace_buf = &tr->max_buffer;
 5092#endif
 5093
 5094		if (cpu == RING_BUFFER_ALL_CPUS)
 5095			tracing_reset_online_cpus(trace_buf);
 5096		else
 5097			tracing_reset_cpu(trace_buf, cpu);
 5098	}
 5099
 5100	if (file->f_mode & FMODE_READ) {
 5101		iter = __tracing_open(inode, file, false);
 5102		if (IS_ERR(iter))
 5103			ret = PTR_ERR(iter);
 5104		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 5105			iter->iter_flags |= TRACE_FILE_LAT_FMT;
 5106	}
 5107
 5108	if (ret < 0)
 5109		trace_array_put(tr);
 5110
 5111	return ret;
 5112}
 5113
 5114/*
 5115 * Some tracers are not suitable for instance buffers.
 5116 * A tracer is always available for the global array (toplevel)
 5117 * or if it explicitly states that it is.
 5118 */
 5119static bool
 5120trace_ok_for_array(struct tracer *t, struct trace_array *tr)
 5121{
 5122	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
 5123}
 5124
 5125/* Find the next tracer that this trace array may use */
 5126static struct tracer *
 5127get_tracer_for_array(struct trace_array *tr, struct tracer *t)
 5128{
 5129	while (t && !trace_ok_for_array(t, tr))
 5130		t = t->next;
 5131
 5132	return t;
 5133}
 5134
 5135static void *
 5136t_next(struct seq_file *m, void *v, loff_t *pos)
 5137{
 5138	struct trace_array *tr = m->private;
 5139	struct tracer *t = v;
 5140
 5141	(*pos)++;
 5142
 5143	if (t)
 5144		t = get_tracer_for_array(tr, t->next);
 5145
 5146	return t;
 5147}
 5148
 5149static void *t_start(struct seq_file *m, loff_t *pos)
 5150{
 5151	struct trace_array *tr = m->private;
 5152	struct tracer *t;
 5153	loff_t l = 0;
 5154
 5155	mutex_lock(&trace_types_lock);
 5156
 5157	t = get_tracer_for_array(tr, trace_types);
 5158	for (; t && l < *pos; t = t_next(m, t, &l))
 5159			;
 5160
 5161	return t;
 5162}
 5163
 5164static void t_stop(struct seq_file *m, void *p)
 5165{
 5166	mutex_unlock(&trace_types_lock);
 5167}
 5168
 5169static int t_show(struct seq_file *m, void *v)
 5170{
 5171	struct tracer *t = v;
 5172
 5173	if (!t)
 5174		return 0;
 5175
 5176	seq_puts(m, t->name);
 5177	if (t->next)
 5178		seq_putc(m, ' ');
 5179	else
 5180		seq_putc(m, '\n');
 5181
 5182	return 0;
 5183}
 5184
 5185static const struct seq_operations show_traces_seq_ops = {
 5186	.start		= t_start,
 5187	.next		= t_next,
 5188	.stop		= t_stop,
 5189	.show		= t_show,
 5190};
 5191
 5192static int show_traces_open(struct inode *inode, struct file *file)
 5193{
 5194	struct trace_array *tr = inode->i_private;
 5195	struct seq_file *m;
 5196	int ret;
 5197
 5198	ret = tracing_check_open_get_tr(tr);
 5199	if (ret)
 5200		return ret;
 5201
 5202	ret = seq_open(file, &show_traces_seq_ops);
 5203	if (ret) {
 5204		trace_array_put(tr);
 5205		return ret;
 5206	}
 5207
 5208	m = file->private_data;
 5209	m->private = tr;
 5210
 5211	return 0;
 5212}
 5213
 5214static int show_traces_release(struct inode *inode, struct file *file)
 5215{
 5216	struct trace_array *tr = inode->i_private;
 5217
 5218	trace_array_put(tr);
 5219	return seq_release(inode, file);
 5220}
 5221
 5222static ssize_t
 5223tracing_write_stub(struct file *filp, const char __user *ubuf,
 5224		   size_t count, loff_t *ppos)
 5225{
 5226	return count;
 5227}
 5228
 5229loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
 5230{
 5231	int ret;
 5232
 5233	if (file->f_mode & FMODE_READ)
 5234		ret = seq_lseek(file, offset, whence);
 5235	else
 5236		file->f_pos = ret = 0;
 5237
 5238	return ret;
 5239}
 5240
 5241static const struct file_operations tracing_fops = {
 5242	.open		= tracing_open,
 5243	.read		= seq_read,
 5244	.read_iter	= seq_read_iter,
 5245	.splice_read	= copy_splice_read,
 5246	.write		= tracing_write_stub,
 5247	.llseek		= tracing_lseek,
 5248	.release	= tracing_release,
 5249};
 5250
 5251static const struct file_operations show_traces_fops = {
 5252	.open		= show_traces_open,
 5253	.read		= seq_read,
 5254	.llseek		= seq_lseek,
 5255	.release	= show_traces_release,
 5256};
 5257
 5258static ssize_t
 5259tracing_cpumask_read(struct file *filp, char __user *ubuf,
 5260		     size_t count, loff_t *ppos)
 5261{
 5262	struct trace_array *tr = file_inode(filp)->i_private;
 5263	char *mask_str;
 5264	int len;
 5265
 5266	len = snprintf(NULL, 0, "%*pb\n",
 5267		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
 5268	mask_str = kmalloc(len, GFP_KERNEL);
 5269	if (!mask_str)
 5270		return -ENOMEM;
 5271
 5272	len = snprintf(mask_str, len, "%*pb\n",
 5273		       cpumask_pr_args(tr->tracing_cpumask));
 5274	if (len >= count) {
 5275		count = -EINVAL;
 5276		goto out_err;
 5277	}
 5278	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
 5279
 5280out_err:
 5281	kfree(mask_str);
 5282
 5283	return count;
 5284}
 5285
 5286int tracing_set_cpumask(struct trace_array *tr,
 5287			cpumask_var_t tracing_cpumask_new)
 5288{
 5289	int cpu;
 5290
 5291	if (!tr)
 5292		return -EINVAL;
 5293
 5294	local_irq_disable();
 5295	arch_spin_lock(&tr->max_lock);
 5296	for_each_tracing_cpu(cpu) {
 5297		/*
 5298		 * Increase/decrease the disabled counter if we are
 5299		 * about to flip a bit in the cpumask:
 5300		 */
 5301		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 5302				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 5303			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
 5304			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
 5305#ifdef CONFIG_TRACER_MAX_TRACE
 5306			ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
 5307#endif
 5308		}
 5309		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 5310				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 5311			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
 5312			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
 5313#ifdef CONFIG_TRACER_MAX_TRACE
 5314			ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
 5315#endif
 5316		}
 5317	}
 5318	arch_spin_unlock(&tr->max_lock);
 5319	local_irq_enable();
 5320
 5321	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
 5322
 5323	return 0;
 5324}
 5325
 5326static ssize_t
 5327tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 5328		      size_t count, loff_t *ppos)
 5329{
 5330	struct trace_array *tr = file_inode(filp)->i_private;
 5331	cpumask_var_t tracing_cpumask_new;
 5332	int err;
 5333
 5334	if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
 5335		return -ENOMEM;
 5336
 5337	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
 5338	if (err)
 5339		goto err_free;
 5340
 5341	err = tracing_set_cpumask(tr, tracing_cpumask_new);
 5342	if (err)
 5343		goto err_free;
 5344
 5345	free_cpumask_var(tracing_cpumask_new);
 5346
 5347	return count;
 5348
 5349err_free:
 5350	free_cpumask_var(tracing_cpumask_new);
 5351
 5352	return err;
 5353}
 5354
 5355static const struct file_operations tracing_cpumask_fops = {
 5356	.open		= tracing_open_generic_tr,
 5357	.read		= tracing_cpumask_read,
 5358	.write		= tracing_cpumask_write,
 5359	.release	= tracing_release_generic_tr,
 5360	.llseek		= generic_file_llseek,
 5361};
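/*
 * For reference, an illustrative shell session: "tracing_cpumask" reads
 * and writes a hexadecimal CPU mask (parsed by cpumask_parse_user()
 * above), so on a 4-CPU system one might see:
 *
 *	# cat tracing_cpumask
 *	f
 *	# echo 3 > tracing_cpumask	(limit tracing to CPUs 0 and 1)
 */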
 5362
 5363static int tracing_trace_options_show(struct seq_file *m, void *v)
 5364{
 5365	struct tracer_opt *trace_opts;
 5366	struct trace_array *tr = m->private;
 5367	u32 tracer_flags;
 5368	int i;
 5369
 5370	mutex_lock(&trace_types_lock);
 5371	tracer_flags = tr->current_trace->flags->val;
 5372	trace_opts = tr->current_trace->flags->opts;
 5373
 5374	for (i = 0; trace_options[i]; i++) {
 5375		if (tr->trace_flags & (1 << i))
 5376			seq_printf(m, "%s\n", trace_options[i]);
 5377		else
 5378			seq_printf(m, "no%s\n", trace_options[i]);
 5379	}
 5380
 5381	for (i = 0; trace_opts[i].name; i++) {
 5382		if (tracer_flags & trace_opts[i].bit)
 5383			seq_printf(m, "%s\n", trace_opts[i].name);
 5384		else
 5385			seq_printf(m, "no%s\n", trace_opts[i].name);
 5386	}
 5387	mutex_unlock(&trace_types_lock);
 5388
 5389	return 0;
 5390}
 5391
 5392static int __set_tracer_option(struct trace_array *tr,
 5393			       struct tracer_flags *tracer_flags,
 5394			       struct tracer_opt *opts, int neg)
 5395{
 5396	struct tracer *trace = tracer_flags->trace;
 5397	int ret;
 5398
 5399	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
 5400	if (ret)
 5401		return ret;
 5402
 5403	if (neg)
 5404		tracer_flags->val &= ~opts->bit;
 5405	else
 5406		tracer_flags->val |= opts->bit;
 5407	return 0;
 5408}
 5409
 5410/* Try to assign a tracer specific option */
 5411static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
 5412{
 5413	struct tracer *trace = tr->current_trace;
 5414	struct tracer_flags *tracer_flags = trace->flags;
 5415	struct tracer_opt *opts = NULL;
 5416	int i;
 5417
 5418	for (i = 0; tracer_flags->opts[i].name; i++) {
 5419		opts = &tracer_flags->opts[i];
 5420
 5421		if (strcmp(cmp, opts->name) == 0)
 5422			return __set_tracer_option(tr, trace->flags, opts, neg);
 5423	}
 5424
 5425	return -EINVAL;
 5426}
 5427
 5428/* Some tracers require overwrite to stay enabled */
 5429int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
 5430{
 5431	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
 5432		return -1;
 5433
 5434	return 0;
 5435}
 5436
 5437int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 5438{
 5439	int *map;
 5440
 5441	if ((mask == TRACE_ITER_RECORD_TGID) ||
 5442	    (mask == TRACE_ITER_RECORD_CMD))
 5443		lockdep_assert_held(&event_mutex);
 5444
 5445	/* do nothing if flag is already set */
 5446	if (!!(tr->trace_flags & mask) == !!enabled)
 5447		return 0;
 5448
 5449	/* Give the tracer a chance to approve the change */
 5450	if (tr->current_trace->flag_changed)
 5451		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
 5452			return -EINVAL;
 5453
 5454	if (enabled)
 5455		tr->trace_flags |= mask;
 5456	else
 5457		tr->trace_flags &= ~mask;
 5458
 5459	if (mask == TRACE_ITER_RECORD_CMD)
 5460		trace_event_enable_cmd_record(enabled);
 5461
 5462	if (mask == TRACE_ITER_RECORD_TGID) {
 5463		if (!tgid_map) {
 5464			tgid_map_max = pid_max;
 5465			map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
 5466				       GFP_KERNEL);
 5467
 5468			/*
 5469			 * Pairs with smp_load_acquire() in
 5470			 * trace_find_tgid_ptr() to ensure that if it observes
 5471			 * the tgid_map we just allocated then it also observes
 5472			 * the corresponding tgid_map_max value.
 5473			 */
 5474			smp_store_release(&tgid_map, map);
 5475		}
 5476		if (!tgid_map) {
 5477			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
 5478			return -ENOMEM;
 5479		}
 5480
 5481		trace_event_enable_tgid_record(enabled);
 5482	}
 5483
 5484	if (mask == TRACE_ITER_EVENT_FORK)
 5485		trace_event_follow_fork(tr, enabled);
 5486
 5487	if (mask == TRACE_ITER_FUNC_FORK)
 5488		ftrace_pid_follow_fork(tr, enabled);
 5489
 5490	if (mask == TRACE_ITER_OVERWRITE) {
 5491		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
 5492#ifdef CONFIG_TRACER_MAX_TRACE
 5493		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
 5494#endif
 5495	}
 5496
 5497	if (mask == TRACE_ITER_PRINTK) {
 5498		trace_printk_start_stop_comm(enabled);
 5499		trace_printk_control(enabled);
 5500	}
 5501
 5502	return 0;
 5503}
 5504
 5505int trace_set_options(struct trace_array *tr, char *option)
 5506{
 5507	char *cmp;
 5508	int neg = 0;
 5509	int ret;
 5510	size_t orig_len = strlen(option);
 5511	int len;
 5512
 5513	cmp = strstrip(option);
 5514
 5515	len = str_has_prefix(cmp, "no");
 5516	if (len)
 5517		neg = 1;
 5518
 5519	cmp += len;
 5520
 5521	mutex_lock(&event_mutex);
 5522	mutex_lock(&trace_types_lock);
 5523
 5524	ret = match_string(trace_options, -1, cmp);
 5525	/* If no option could be set, test the specific tracer options */
 5526	if (ret < 0)
 5527		ret = set_tracer_option(tr, cmp, neg);
 5528	else
 5529		ret = set_tracer_flag(tr, 1 << ret, !neg);
 5530
 5531	mutex_unlock(&trace_types_lock);
 5532	mutex_unlock(&event_mutex);
 5533
 5534	/*
 5535	 * If the first trailing whitespace is replaced with '\0' by strstrip,
 5536	 * turn it back into a space.
 5537	 */
 5538	if (orig_len > strlen(option))
 5539		option[strlen(option)] = ' ';
 5540
 5541	return ret;
 5542}
 5543
 5544static void __init apply_trace_boot_options(void)
 5545{
 5546	char *buf = trace_boot_options_buf;
 5547	char *option;
 5548
 5549	while (true) {
 5550		option = strsep(&buf, ",");
 5551
 5552		if (!option)
 5553			break;
 5554
 5555		if (*option)
 5556			trace_set_options(&global_trace, option);
 5557
 5558		/* Put back the comma to allow this to be called again */
 5559		if (buf)
 5560			*(buf - 1) = ',';
 5561	}
 5562}
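/*
 * Illustration: apply_trace_boot_options() above walks the buffer
 * filled from the "trace_options=" kernel command line parameter, so
 * booting with, for example (an arbitrary selection of options):
 *
 *	trace_options=sym-addr,stacktrace,noprint-parent
 *
 * hands each comma-separated token to trace_set_options() just as if it
 * had been written to the trace_options file.
 */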
 5563
 5564static ssize_t
 5565tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 5566			size_t cnt, loff_t *ppos)
 5567{
 5568	struct seq_file *m = filp->private_data;
 5569	struct trace_array *tr = m->private;
 5570	char buf[64];
 5571	int ret;
 5572
 5573	if (cnt >= sizeof(buf))
 5574		return -EINVAL;
 5575
 5576	if (copy_from_user(buf, ubuf, cnt))
 5577		return -EFAULT;
 5578
 5579	buf[cnt] = 0;
 5580
 5581	ret = trace_set_options(tr, buf);
 5582	if (ret < 0)
 5583		return ret;
 5584
 5585	*ppos += cnt;
 5586
 5587	return cnt;
 5588}
 5589
 5590static int tracing_trace_options_open(struct inode *inode, struct file *file)
 5591{
 5592	struct trace_array *tr = inode->i_private;
 5593	int ret;
 5594
 5595	ret = tracing_check_open_get_tr(tr);
 5596	if (ret)
 5597		return ret;
 5598
 5599	ret = single_open(file, tracing_trace_options_show, inode->i_private);
 5600	if (ret < 0)
 5601		trace_array_put(tr);
 5602
 5603	return ret;
 5604}
 5605
 5606static const struct file_operations tracing_iter_fops = {
 5607	.open		= tracing_trace_options_open,
 5608	.read		= seq_read,
 5609	.llseek		= seq_lseek,
 5610	.release	= tracing_single_release_tr,
 5611	.write		= tracing_trace_options_write,
 5612};
 5613
 5614static const char readme_msg[] =
 5615	"tracing mini-HOWTO:\n\n"
 5616	"# echo 0 > tracing_on : quick way to disable tracing\n"
 5617	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
 5618	" Important files:\n"
 5619	"  trace\t\t\t- The static contents of the buffer\n"
 5620	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
 5621	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
 5622	"  current_tracer\t- function and latency tracers\n"
 5623	"  available_tracers\t- list of configured tracers for current_tracer\n"
 5624	"  error_log\t- error log for failed commands (that support it)\n"
 5625	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
 5626	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
 5627	"  trace_clock\t\t- change the clock used to order events\n"
 5628	"       local:   Per cpu clock but may not be synced across CPUs\n"
 5629	"      global:   Synced across CPUs but slows tracing down.\n"
 5630	"     counter:   Not a clock, but just an increment\n"
 5631	"      uptime:   Jiffy counter from time of boot\n"
 5632	"        perf:   Same clock that perf events use\n"
 5633#ifdef CONFIG_X86_64
 5634	"     x86-tsc:   TSC cycle counter\n"
 5635#endif
 5636	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
 5637	"       delta:   Delta difference against a buffer-wide timestamp\n"
 5638	"    absolute:   Absolute (standalone) timestamp\n"
5639	"\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
5640	"\n  trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
 5641	"  tracing_cpumask\t- Limit which CPUs to trace\n"
 5642	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
 5643	"\t\t\t  Remove sub-buffer with rmdir\n"
 5644	"  trace_options\t\t- Set format or modify how tracing happens\n"
 5645	"\t\t\t  Disable an option by prefixing 'no' to the\n"
 5646	"\t\t\t  option name\n"
 5647	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
 5648#ifdef CONFIG_DYNAMIC_FTRACE
 5649	"\n  available_filter_functions - list of functions that can be filtered on\n"
 5650	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
 5651	"\t\t\t  functions\n"
 5652	"\t     accepts: func_full_name or glob-matching-pattern\n"
 5653	"\t     modules: Can select a group via module\n"
 5654	"\t      Format: :mod:<module-name>\n"
 5655	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
 5656	"\t    triggers: a command to perform when function is hit\n"
 5657	"\t      Format: <function>:<trigger>[:count]\n"
 5658	"\t     trigger: traceon, traceoff\n"
 5659	"\t\t      enable_event:<system>:<event>\n"
 5660	"\t\t      disable_event:<system>:<event>\n"
 5661#ifdef CONFIG_STACKTRACE
 5662	"\t\t      stacktrace\n"
 5663#endif
 5664#ifdef CONFIG_TRACER_SNAPSHOT
 5665	"\t\t      snapshot\n"
 5666#endif
 5667	"\t\t      dump\n"
 5668	"\t\t      cpudump\n"
 5669	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
 5670	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
 5671	"\t     The first one will disable tracing every time do_fault is hit\n"
 5672	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
5673	"\t       The first time do_trap is hit and it disables tracing, the\n"
 5674	"\t       counter will decrement to 2. If tracing is already disabled,\n"
 5675	"\t       the counter will not decrement. It only decrements when the\n"
 5676	"\t       trigger did work\n"
5677	"\t     To remove a trigger without a count:\n"
5678	"\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
5679	"\t     To remove a trigger with a count:\n"
5680	"\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
 5681	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
 5682	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
 5683	"\t    modules: Can select a group via module command :mod:\n"
 5684	"\t    Does not accept triggers\n"
 5685#endif /* CONFIG_DYNAMIC_FTRACE */
 5686#ifdef CONFIG_FUNCTION_TRACER
 5687	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
 5688	"\t\t    (function)\n"
 5689	"  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
 5690	"\t\t    (function)\n"
 5691#endif
 5692#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 5693	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
 5694	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
 5695	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
 5696#endif
 5697#ifdef CONFIG_TRACER_SNAPSHOT
 5698	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
 5699	"\t\t\t  snapshot buffer. Read the contents for more\n"
 5700	"\t\t\t  information\n"
 5701#endif
 5702#ifdef CONFIG_STACK_TRACER
 5703	"  stack_trace\t\t- Shows the max stack trace when active\n"
 5704	"  stack_max_size\t- Shows current max stack size that was traced\n"
 5705	"\t\t\t  Write into this file to reset the max size (trigger a\n"
 5706	"\t\t\t  new trace)\n"
 5707#ifdef CONFIG_DYNAMIC_FTRACE
 5708	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
 5709	"\t\t\t  traces\n"
 5710#endif
 5711#endif /* CONFIG_STACK_TRACER */
 5712#ifdef CONFIG_DYNAMIC_EVENTS
 5713	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
 5714	"\t\t\t  Write into this file to define/undefine new trace events.\n"
 5715#endif
 5716#ifdef CONFIG_KPROBE_EVENTS
 5717	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
 5718	"\t\t\t  Write into this file to define/undefine new trace events.\n"
 5719#endif
 5720#ifdef CONFIG_UPROBE_EVENTS
 5721	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
 5722	"\t\t\t  Write into this file to define/undefine new trace events.\n"
 5723#endif
 5724#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
 5725    defined(CONFIG_FPROBE_EVENTS)
 5726	"\t  accepts: event-definitions (one definition per line)\n"
 5727#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
 5728	"\t   Format: p[:[<group>/][<event>]] <place> [<args>]\n"
 5729	"\t           r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
 5730#endif
 5731#ifdef CONFIG_FPROBE_EVENTS
 5732	"\t           f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
 5733	"\t           t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
 5734#endif
 5735#ifdef CONFIG_HIST_TRIGGERS
 5736	"\t           s:[synthetic/]<event> <field> [<field>]\n"
 5737#endif
 5738	"\t           e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
 5739	"\t           -:[<group>/][<event>]\n"
 5740#ifdef CONFIG_KPROBE_EVENTS
 5741	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5742	"\t     place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
 5743#endif
 5744#ifdef CONFIG_UPROBE_EVENTS
5745	"\t     place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
 5746#endif
 5747	"\t     args: <name>=fetcharg[:type]\n"
 5748	"\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
 5749#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
 5750#ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
 5751	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
 5752	"\t           <argname>[->field[->field|.field...]],\n"
 5753#else
 5754	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
 5755#endif
 5756#else
 5757	"\t           $stack<index>, $stack, $retval, $comm,\n"
 5758#endif
 5759	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
 5760	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
 5761	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
 5762	"\t           symstr, <type>\\[<array-size>\\]\n"
 5763#ifdef CONFIG_HIST_TRIGGERS
 5764	"\t    field: <stype> <name>;\n"
 5765	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
 5766	"\t           [unsigned] char/int/long\n"
 5767#endif
5768	"\t    efield: For event probes ('e' types), the field is one of the fields\n"
 5769	"\t            of the <attached-group>/<attached-event>.\n"
 5770#endif
 5771	"  events/\t\t- Directory containing all trace event subsystems:\n"
 5772	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
 5773	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
 5774	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
 5775	"\t\t\t  events\n"
 5776	"      filter\t\t- If set, only events passing filter are traced\n"
 5777	"  events/<system>/<event>/\t- Directory containing control files for\n"
 5778	"\t\t\t  <event>:\n"
 5779	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
 5780	"      filter\t\t- If set, only events passing filter are traced\n"
 5781	"      trigger\t\t- If set, a command to perform when event is hit\n"
 5782	"\t    Format: <trigger>[:count][if <filter>]\n"
 5783	"\t   trigger: traceon, traceoff\n"
 5784	"\t            enable_event:<system>:<event>\n"
 5785	"\t            disable_event:<system>:<event>\n"
 5786#ifdef CONFIG_HIST_TRIGGERS
 5787	"\t            enable_hist:<system>:<event>\n"
 5788	"\t            disable_hist:<system>:<event>\n"
 5789#endif
 5790#ifdef CONFIG_STACKTRACE
 5791	"\t\t    stacktrace\n"
 5792#endif
 5793#ifdef CONFIG_TRACER_SNAPSHOT
 5794	"\t\t    snapshot\n"
 5795#endif
 5796#ifdef CONFIG_HIST_TRIGGERS
 5797	"\t\t    hist (see below)\n"
 5798#endif
 5799	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
 5800	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
 5801	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
 5802	"\t                  events/block/block_unplug/trigger\n"
 5803	"\t   The first disables tracing every time block_unplug is hit.\n"
 5804	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
 5805	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
5806	"\t     is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
 5807	"\t   Like function triggers, the counter is only decremented if it\n"
 5808	"\t    enabled or disabled tracing.\n"
 5809	"\t   To remove a trigger without a count:\n"
5810	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
 5811	"\t   To remove a trigger with a count:\n"
5812	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
 5813	"\t   Filters can be ignored when removing a trigger.\n"
 5814#ifdef CONFIG_HIST_TRIGGERS
 5815	"      hist trigger\t- If set, event hits are aggregated into a hash table\n"
 5816	"\t    Format: hist:keys=<field1[,field2,...]>\n"
 5817	"\t            [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
 5818	"\t            [:values=<field1[,field2,...]>]\n"
 5819	"\t            [:sort=<field1[,field2,...]>]\n"
 5820	"\t            [:size=#entries]\n"
 5821	"\t            [:pause][:continue][:clear]\n"
 5822	"\t            [:name=histname1]\n"
 5823	"\t            [:nohitcount]\n"
 5824	"\t            [:<handler>.<action>]\n"
 5825	"\t            [if <filter>]\n\n"
 5826	"\t    Note, special fields can be used as well:\n"
 5827	"\t            common_timestamp - to record current timestamp\n"
 5828	"\t            common_cpu - to record the CPU the event happened on\n"
 5829	"\n"
 5830	"\t    A hist trigger variable can be:\n"
 5831	"\t        - a reference to a field e.g. x=current_timestamp,\n"
 5832	"\t        - a reference to another variable e.g. y=$x,\n"
 5833	"\t        - a numeric literal: e.g. ms_per_sec=1000,\n"
 5834	"\t        - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
 5835	"\n"
 5836	"\t    hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
 5837	"\t    multiplication(*) and division(/) operators. An operand can be either a\n"
 5838	"\t    variable reference, field or numeric literal.\n"
 5839	"\n"
 5840	"\t    When a matching event is hit, an entry is added to a hash\n"
 5841	"\t    table using the key(s) and value(s) named, and the value of a\n"
 5842	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
 5843	"\t    correspond to fields in the event's format description.  Keys\n"
 5844	"\t    can be any field, or the special string 'common_stacktrace'.\n"
 5845	"\t    Compound keys consisting of up to two fields can be specified\n"
 5846	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
 5847	"\t    fields.  Sort keys consisting of up to two fields can be\n"
 5848	"\t    specified using the 'sort' keyword.  The sort direction can\n"
 5849	"\t    be modified by appending '.descending' or '.ascending' to a\n"
 5850	"\t    sort field.  The 'size' parameter can be used to specify more\n"
 5851	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
 5852	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
 5853	"\t    its histogram data will be shared with other triggers of the\n"
 5854	"\t    same name, and trigger hits will update this common data.\n\n"
 5855	"\t    Reading the 'hist' file for the event will dump the hash\n"
 5856	"\t    table in its entirety to stdout.  If there are multiple hist\n"
 5857	"\t    triggers attached to an event, there will be a table for each\n"
 5858	"\t    trigger in the output.  The table displayed for a named\n"
 5859	"\t    trigger will be the same as any other instance having the\n"
 5860	"\t    same name.  The default format used to display a given field\n"
 5861	"\t    can be modified by appending any of the following modifiers\n"
 5862	"\t    to the field name, as applicable:\n\n"
 5863	"\t            .hex        display a number as a hex value\n"
 5864	"\t            .sym        display an address as a symbol\n"
 5865	"\t            .sym-offset display an address as a symbol and offset\n"
 5866	"\t            .execname   display a common_pid as a program name\n"
 5867	"\t            .syscall    display a syscall id as a syscall name\n"
 5868	"\t            .log2       display log2 value rather than raw number\n"
 5869	"\t            .buckets=size  display values in groups of size rather than raw number\n"
 5870	"\t            .usecs      display a common_timestamp in microseconds\n"
5871	"\t            .percent    display a number as a percentage value\n"
 5872	"\t            .graph      display a bar-graph of a value\n\n"
 5873	"\t    The 'pause' parameter can be used to pause an existing hist\n"
 5874	"\t    trigger or to start a hist trigger but not log any events\n"
 5875	"\t    until told to do so.  'continue' can be used to start or\n"
 5876	"\t    restart a paused hist trigger.\n\n"
 5877	"\t    The 'clear' parameter will clear the contents of a running\n"
 5878	"\t    hist trigger and leave its current paused/active state\n"
 5879	"\t    unchanged.\n\n"
 5880	"\t    The 'nohitcount' (or NOHC) parameter will suppress display of\n"
 5881	"\t    raw hitcount in the histogram.\n\n"
 5882	"\t    The enable_hist and disable_hist triggers can be used to\n"
 5883	"\t    have one event conditionally start and stop another event's\n"
 5884	"\t    already-attached hist trigger.  The syntax is analogous to\n"
 5885	"\t    the enable_event and disable_event triggers.\n\n"
5886	"\t    Hist trigger handlers and actions are executed whenever\n"
 5887	"\t    a histogram entry is added or updated.  They take the form:\n\n"
 5888	"\t        <handler>.<action>\n\n"
 5889	"\t    The available handlers are:\n\n"
 5890	"\t        onmatch(matching.event)  - invoke on addition or update\n"
 5891	"\t        onmax(var)               - invoke if var exceeds current max\n"
 5892	"\t        onchange(var)            - invoke action if var changes\n\n"
 5893	"\t    The available actions are:\n\n"
 5894	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
 5895	"\t        save(field,...)                      - save current event fields\n"
 5896#ifdef CONFIG_TRACER_SNAPSHOT
 5897	"\t        snapshot()                           - snapshot the trace buffer\n\n"
 5898#endif
 5899#ifdef CONFIG_SYNTH_EVENTS
 5900	"  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
 5901	"\t  Write into this file to define/undefine new synthetic events.\n"
 5902	"\t     example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
 5903#endif
 5904#endif
 5905;
 5906
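/*
 * The mini-HOWTO above is exposed read-only through tracefs (as the
 * README file); tracing_readme_read() simply copies it to user space.
 */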
 5907static ssize_t
 5908tracing_readme_read(struct file *filp, char __user *ubuf,
 5909		       size_t cnt, loff_t *ppos)
 5910{
 5911	return simple_read_from_buffer(ubuf, cnt, ppos,
 5912					readme_msg, strlen(readme_msg));
 5913}
 5914
 5915static const struct file_operations tracing_readme_fops = {
 5916	.open		= tracing_open_generic,
 5917	.read		= tracing_readme_read,
 5918	.llseek		= generic_file_llseek,
 5919};
 5920
 5921static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
 5922{
 5923	int pid = ++(*pos);
 5924
 5925	return trace_find_tgid_ptr(pid);
 5926}
 5927
 5928static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
 5929{
 5930	int pid = *pos;
 5931
 5932	return trace_find_tgid_ptr(pid);
 5933}
 5934
 5935static void saved_tgids_stop(struct seq_file *m, void *v)
 5936{
 5937}
 5938
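/*
 * Each line of saved_tgids is "<pid> <tgid>".  Entries whose tgid is
 * still 0 were never recorded and are skipped.
 */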
 5939static int saved_tgids_show(struct seq_file *m, void *v)
 5940{
 5941	int *entry = (int *)v;
 5942	int pid = entry - tgid_map;
 5943	int tgid = *entry;
 5944
 5945	if (tgid == 0)
 5946		return SEQ_SKIP;
 5947
 5948	seq_printf(m, "%d %d\n", pid, tgid);
 5949	return 0;
 5950}
 5951
 5952static const struct seq_operations tracing_saved_tgids_seq_ops = {
 5953	.start		= saved_tgids_start,
 5954	.stop		= saved_tgids_stop,
 5955	.next		= saved_tgids_next,
 5956	.show		= saved_tgids_show,
 5957};
 5958
 5959static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
 5960{
 5961	int ret;
 5962
 5963	ret = tracing_check_open_get_tr(NULL);
 5964	if (ret)
 5965		return ret;
 5966
 5967	return seq_open(filp, &tracing_saved_tgids_seq_ops);
 5968}
 5969
 5970
 5971static const struct file_operations tracing_saved_tgids_fops = {
 5972	.open		= tracing_saved_tgids_open,
 5973	.read		= seq_read,
 5974	.llseek		= seq_lseek,
 5975	.release	= seq_release,
 5976};
 5977
 5978static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
 5979{
 5980	unsigned int *ptr = v;
 5981
 5982	if (*pos || m->count)
 5983		ptr++;
 5984
 5985	(*pos)++;
 5986
 5987	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
 5988	     ptr++) {
 5989		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
 5990			continue;
 5991
 5992		return ptr;
 5993	}
 5994
 5995	return NULL;
 5996}
 5997
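/*
 * The saved cmdlines map is protected by trace_cmdline_lock, an arch
 * spinlock, so preemption stays disabled for the whole seq_file walk:
 * start() takes the lock and stop() releases it.
 */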
 5998static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
 5999{
 6000	void *v;
 6001	loff_t l = 0;
 6002
 6003	preempt_disable();
 6004	arch_spin_lock(&trace_cmdline_lock);
 6005
 6006	v = &savedcmd->map_cmdline_to_pid[0];
 6007	while (l <= *pos) {
 6008		v = saved_cmdlines_next(m, v, &l);
 6009		if (!v)
 6010			return NULL;
 6011	}
 6012
 6013	return v;
 6014}
 6015
 6016static void saved_cmdlines_stop(struct seq_file *m, void *v)
 6017{
 6018	arch_spin_unlock(&trace_cmdline_lock);
 6019	preempt_enable();
 6020}
 6021
 6022static int saved_cmdlines_show(struct seq_file *m, void *v)
 6023{
 6024	char buf[TASK_COMM_LEN];
 6025	unsigned int *pid = v;
 6026
 6027	__trace_find_cmdline(*pid, buf);
 6028	seq_printf(m, "%d %s\n", *pid, buf);
 6029	return 0;
 6030}
 6031
 6032static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
 6033	.start		= saved_cmdlines_start,
 6034	.next		= saved_cmdlines_next,
 6035	.stop		= saved_cmdlines_stop,
 6036	.show		= saved_cmdlines_show,
 6037};
 6038
 6039static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
 6040{
 6041	int ret;
 6042
 6043	ret = tracing_check_open_get_tr(NULL);
 6044	if (ret)
 6045		return ret;
 6046
 6047	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
 6048}
 6049
 6050static const struct file_operations tracing_saved_cmdlines_fops = {
 6051	.open		= tracing_saved_cmdlines_open,
 6052	.read		= seq_read,
 6053	.llseek		= seq_lseek,
 6054	.release	= seq_release,
 6055};
 6056
 6057static ssize_t
 6058tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
 6059				 size_t cnt, loff_t *ppos)
 6060{
 6061	char buf[64];
 6062	int r;
 6063
 6064	preempt_disable();
 6065	arch_spin_lock(&trace_cmdline_lock);
 6066	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
 6067	arch_spin_unlock(&trace_cmdline_lock);
 6068	preempt_enable();
 6069
 6070	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 6071}
 6072
 6073static int tracing_resize_saved_cmdlines(unsigned int val)
 6074{
 6075	struct saved_cmdlines_buffer *s, *savedcmd_temp;
 6076
 6077	s = allocate_cmdlines_buffer(val);
 6078	if (!s)
 6079		return -ENOMEM;
 6080
 6081	preempt_disable();
 6082	arch_spin_lock(&trace_cmdline_lock);
 6083	savedcmd_temp = savedcmd;
 6084	savedcmd = s;
 6085	arch_spin_unlock(&trace_cmdline_lock);
 6086	preempt_enable();
 6087	free_saved_cmdlines_buffer(savedcmd_temp);
 6088
 6089	return 0;
 6090}
 6091
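/*
 * Resize the saved cmdlines map, e.g. "echo 1024 > saved_cmdlines_size".
 * A new buffer is allocated first and then swapped in under
 * trace_cmdline_lock, so readers never see a half-built map.
 */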
 6092static ssize_t
 6093tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
 6094				  size_t cnt, loff_t *ppos)
 6095{
 6096	unsigned long val;
 6097	int ret;
 6098
 6099	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 6100	if (ret)
 6101		return ret;
 6102
6103	/* must have at least 1 entry and no more than PID_MAX_DEFAULT entries */
 6104	if (!val || val > PID_MAX_DEFAULT)
 6105		return -EINVAL;
 6106
 6107	ret = tracing_resize_saved_cmdlines((unsigned int)val);
 6108	if (ret < 0)
 6109		return ret;
 6110
 6111	*ppos += cnt;
 6112
 6113	return cnt;
 6114}
 6115
 6116static const struct file_operations tracing_saved_cmdlines_size_fops = {
 6117	.open		= tracing_open_generic,
 6118	.read		= tracing_saved_cmdlines_size_read,
 6119	.write		= tracing_saved_cmdlines_size_write,
 6120};
 6121
 6122#ifdef CONFIG_TRACE_EVAL_MAP_FILE
 6123static union trace_eval_map_item *
 6124update_eval_map(union trace_eval_map_item *ptr)
 6125{
 6126	if (!ptr->map.eval_string) {
 6127		if (ptr->tail.next) {
 6128			ptr = ptr->tail.next;
 6129			/* Set ptr to the next real item (skip head) */
 6130			ptr++;
 6131		} else
 6132			return NULL;
 6133	}
 6134	return ptr;
 6135}
 6136
 6137static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
 6138{
 6139	union trace_eval_map_item *ptr = v;
 6140
 6141	/*
 6142	 * Paranoid! If ptr points to end, we don't want to increment past it.
 6143	 * This really should never happen.
 6144	 */
 6145	(*pos)++;
 6146	ptr = update_eval_map(ptr);
 6147	if (WARN_ON_ONCE(!ptr))
 6148		return NULL;
 6149
 6150	ptr++;
 6151	ptr = update_eval_map(ptr);
 6152
 6153	return ptr;
 6154}
 6155
 6156static void *eval_map_start(struct seq_file *m, loff_t *pos)
 6157{
 6158	union trace_eval_map_item *v;
 6159	loff_t l = 0;
 6160
 6161	mutex_lock(&trace_eval_mutex);
 6162
 6163	v = trace_eval_maps;
 6164	if (v)
 6165		v++;
 6166
 6167	while (v && l < *pos) {
 6168		v = eval_map_next(m, v, &l);
 6169	}
 6170
 6171	return v;
 6172}
 6173
 6174static void eval_map_stop(struct seq_file *m, void *v)
 6175{
 6176	mutex_unlock(&trace_eval_mutex);
 6177}
 6178
 6179static int eval_map_show(struct seq_file *m, void *v)
 6180{
 6181	union trace_eval_map_item *ptr = v;
 6182
 6183	seq_printf(m, "%s %ld (%s)\n",
 6184		   ptr->map.eval_string, ptr->map.eval_value,
 6185		   ptr->map.system);
 6186
 6187	return 0;
 6188}
 6189
 6190static const struct seq_operations tracing_eval_map_seq_ops = {
 6191	.start		= eval_map_start,
 6192	.next		= eval_map_next,
 6193	.stop		= eval_map_stop,
 6194	.show		= eval_map_show,
 6195};
 6196
 6197static int tracing_eval_map_open(struct inode *inode, struct file *filp)
 6198{
 6199	int ret;
 6200
 6201	ret = tracing_check_open_get_tr(NULL);
 6202	if (ret)
 6203		return ret;
 6204
 6205	return seq_open(filp, &tracing_eval_map_seq_ops);
 6206}
 6207
 6208static const struct file_operations tracing_eval_map_fops = {
 6209	.open		= tracing_eval_map_open,
 6210	.read		= seq_read,
 6211	.llseek		= seq_lseek,
 6212	.release	= seq_release,
 6213};
 6214
 6215static inline union trace_eval_map_item *
 6216trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
 6217{
 6218	/* Return tail of array given the head */
 6219	return ptr + ptr->head.length + 1;
 6220}
 6221
 6222static void
 6223trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
 6224			   int len)
 6225{
 6226	struct trace_eval_map **stop;
 6227	struct trace_eval_map **map;
 6228	union trace_eval_map_item *map_array;
 6229	union trace_eval_map_item *ptr;
 6230
 6231	stop = start + len;
 6232
 6233	/*
 6234	 * The trace_eval_maps contains the map plus a head and tail item,
 6235	 * where the head holds the module and length of array, and the
 6236	 * tail holds a pointer to the next list.
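	 *
	 * Roughly: [head][map 0][map 1] ... [map len - 1][tail]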
 6237	 */
 6238	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
 6239	if (!map_array) {
 6240		pr_warn("Unable to allocate trace eval mapping\n");
 6241		return;
 6242	}
 6243
 6244	mutex_lock(&trace_eval_mutex);
 6245
 6246	if (!trace_eval_maps)
 6247		trace_eval_maps = map_array;
 6248	else {
 6249		ptr = trace_eval_maps;
 6250		for (;;) {
 6251			ptr = trace_eval_jmp_to_tail(ptr);
 6252			if (!ptr->tail.next)
 6253				break;
 6254			ptr = ptr->tail.next;
 6255
 6256		}
 6257		ptr->tail.next = map_array;
 6258	}
 6259	map_array->head.mod = mod;
 6260	map_array->head.length = len;
 6261	map_array++;
 6262
 6263	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
 6264		map_array->map = **map;
 6265		map_array++;
 6266	}
 6267	memset(map_array, 0, sizeof(*map_array));
 6268
 6269	mutex_unlock(&trace_eval_mutex);
 6270}
 6271
 6272static void trace_create_eval_file(struct dentry *d_tracer)
 6273{
 6274	trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
 6275			  NULL, &tracing_eval_map_fops);
 6276}
 6277
 6278#else /* CONFIG_TRACE_EVAL_MAP_FILE */
 6279static inline void trace_create_eval_file(struct dentry *d_tracer) { }
 6280static inline void trace_insert_eval_map_file(struct module *mod,
 6281			      struct trace_eval_map **start, int len) { }
 6282#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
 6283
 6284static void trace_insert_eval_map(struct module *mod,
 6285				  struct trace_eval_map **start, int len)
 6286{
 6287	struct trace_eval_map **map;
 6288
 6289	if (len <= 0)
 6290		return;
 6291
 6292	map = start;
 6293
 6294	trace_event_eval_update(map, len);
 6295
 6296	trace_insert_eval_map_file(mod, start, len);
 6297}
 6298
 6299static ssize_t
 6300tracing_set_trace_read(struct file *filp, char __user *ubuf,
 6301		       size_t cnt, loff_t *ppos)
 6302{
 6303	struct trace_array *tr = filp->private_data;
 6304	char buf[MAX_TRACER_SIZE+2];
 6305	int r;
 6306
 6307	mutex_lock(&trace_types_lock);
 6308	r = sprintf(buf, "%s\n", tr->current_trace->name);
 6309	mutex_unlock(&trace_types_lock);
 6310
 6311	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 6312}
 6313
 6314int tracer_init(struct tracer *t, struct trace_array *tr)
 6315{
 6316	tracing_reset_online_cpus(&tr->array_buffer);
 6317	return t->init(tr);
 6318}
 6319
 6320static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
 6321{
 6322	int cpu;
 6323
 6324	for_each_tracing_cpu(cpu)
 6325		per_cpu_ptr(buf->data, cpu)->entries = val;
 6326}
 6327
 6328static void update_buffer_entries(struct array_buffer *buf, int cpu)
 6329{
 6330	if (cpu == RING_BUFFER_ALL_CPUS) {
 6331		set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
 6332	} else {
 6333		per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
 6334	}
 6335}
 6336
 6337#ifdef CONFIG_TRACER_MAX_TRACE
 6338/* resize @tr's buffer to the size of @size_tr's entries */
 6339static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
 6340					struct array_buffer *size_buf, int cpu_id)
 6341{
 6342	int cpu, ret = 0;
 6343
 6344	if (cpu_id == RING_BUFFER_ALL_CPUS) {
 6345		for_each_tracing_cpu(cpu) {
 6346			ret = ring_buffer_resize(trace_buf->buffer,
 6347				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
 6348			if (ret < 0)
 6349				break;
 6350			per_cpu_ptr(trace_buf->data, cpu)->entries =
 6351				per_cpu_ptr(size_buf->data, cpu)->entries;
 6352		}
 6353	} else {
 6354		ret = ring_buffer_resize(trace_buf->buffer,
 6355				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
 6356		if (ret == 0)
 6357			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
 6358				per_cpu_ptr(size_buf->data, cpu_id)->entries;
 6359	}
 6360
 6361	return ret;
 6362}
 6363#endif /* CONFIG_TRACER_MAX_TRACE */
 6364
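/*
 * Resize @tr's trace buffer (and, when one is allocated, its snapshot
 * buffer) for @cpu, or for every CPU with RING_BUFFER_ALL_CPUS.
 * Tracing is stopped for the duration of the resize, and the snapshot
 * buffer is kept the same size as the main buffer so the two can still
 * be swapped.
 */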
 6365static int __tracing_resize_ring_buffer(struct trace_array *tr,
 6366					unsigned long size, int cpu)
 6367{
 6368	int ret;
 6369
 6370	/*
 6371	 * If kernel or user changes the size of the ring buffer
 6372	 * we use the size that was given, and we can forget about
 6373	 * expanding it later.
 6374	 */
 6375	trace_set_ring_buffer_expanded(tr);
 6376
 6377	/* May be called before buffers are initialized */
 6378	if (!tr->array_buffer.buffer)
 6379		return 0;
 6380
 6381	/* Do not allow tracing while resizing ring buffer */
 6382	tracing_stop_tr(tr);
 6383
 6384	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
 6385	if (ret < 0)
 6386		goto out_start;
 6387
 6388#ifdef CONFIG_TRACER_MAX_TRACE
 6389	if (!tr->allocated_snapshot)
 6390		goto out;
 6391
 6392	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
 6393	if (ret < 0) {
 6394		int r = resize_buffer_duplicate_size(&tr->array_buffer,
 6395						     &tr->array_buffer, cpu);
 6396		if (r < 0) {
 6397			/*
 6398			 * AARGH! We are left with different
 6399			 * size max buffer!!!!
 6400			 * The max buffer is our "snapshot" buffer.
 6401			 * When a tracer needs a snapshot (one of the
 6402			 * latency tracers), it swaps the max buffer
6403			 * with the saved snapshot. We succeeded in updating
6404			 * the size of the main buffer, but failed to
 6405			 * update the size of the max buffer. But when we tried
 6406			 * to reset the main buffer to the original size, we
 6407			 * failed there too. This is very unlikely to
 6408			 * happen, but if it does, warn and kill all
 6409			 * tracing.
 6410			 */
 6411			WARN_ON(1);
 6412			tracing_disabled = 1;
 6413		}
 6414		goto out_start;
 6415	}
 6416
 6417	update_buffer_entries(&tr->max_buffer, cpu);
 6418
 6419 out:
 6420#endif /* CONFIG_TRACER_MAX_TRACE */
 6421
 6422	update_buffer_entries(&tr->array_buffer, cpu);
 6423 out_start:
 6424	tracing_start_tr(tr);
 6425	return ret;
 6426}
 6427
 6428ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
 6429				  unsigned long size, int cpu_id)
 6430{
 6431	int ret;
 6432
 6433	mutex_lock(&trace_types_lock);
 6434
 6435	if (cpu_id != RING_BUFFER_ALL_CPUS) {
 6436		/* make sure, this cpu is enabled in the mask */
 6437		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
 6438			ret = -EINVAL;
 6439			goto out;
 6440		}
 6441	}
 6442
 6443	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
 6444	if (ret < 0)
 6445		ret = -ENOMEM;
 6446
 6447out:
 6448	mutex_unlock(&trace_types_lock);
 6449
 6450	return ret;
 6451}
 6452
 6453
 6454/**
 6455 * tracing_update_buffers - used by tracing facility to expand ring buffers
 6456 * @tr: The tracing instance
 6457 *
6458 * To save memory when tracing is never used on a system that has it
6459 * configured in, the ring buffers are initially set to a minimum size.
6460 * Once a user starts to use the tracing facility, they need to grow
6461 * to their default size.
 6462 *
 6463 * This function is to be called when a tracer is about to be used.
 6464 */
 6465int tracing_update_buffers(struct trace_array *tr)
 6466{
 6467	int ret = 0;
 6468
 6469	mutex_lock(&trace_types_lock);
 6470	if (!tr->ring_buffer_expanded)
 6471		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
 6472						RING_BUFFER_ALL_CPUS);
 6473	mutex_unlock(&trace_types_lock);
 6474
 6475	return ret;
 6476}
 6477
 6478struct trace_option_dentry;
 6479
 6480static void
 6481create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
 6482
 6483/*
 6484 * Used to clear out the tracer before deletion of an instance.
 6485 * Must have trace_types_lock held.
 6486 */
 6487static void tracing_set_nop(struct trace_array *tr)
 6488{
 6489	if (tr->current_trace == &nop_trace)
 6490		return;
6491
 6492	tr->current_trace->enabled--;
 6493
 6494	if (tr->current_trace->reset)
 6495		tr->current_trace->reset(tr);
 6496
 6497	tr->current_trace = &nop_trace;
 6498}
 6499
 6500static bool tracer_options_updated;
 6501
 6502static void add_tracer_options(struct trace_array *tr, struct tracer *t)
 6503{
 6504	/* Only enable if the directory has been created already. */
 6505	if (!tr->dir)
 6506		return;
 6507
 6508	/* Only create trace option files after update_tracer_options finish */
 6509	if (!tracer_options_updated)
 6510		return;
 6511
 6512	create_trace_option_files(tr, t);
 6513}
 6514
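/*
 * Switch @tr to the tracer named @buf, e.g. "echo function > current_tracer".
 * The ring buffer is expanded to its full size first, the old tracer is
 * torn down, and (when max-trace support is built in) the snapshot buffer
 * is allocated or freed to match what the new tracer needs.
 */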
 6515int tracing_set_tracer(struct trace_array *tr, const char *buf)
 6516{
 6517	struct tracer *t;
 6518#ifdef CONFIG_TRACER_MAX_TRACE
 6519	bool had_max_tr;
 6520#endif
 6521	int ret = 0;
 6522
 6523	mutex_lock(&trace_types_lock);
 6524
 6525	if (!tr->ring_buffer_expanded) {
 6526		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
 6527						RING_BUFFER_ALL_CPUS);
 6528		if (ret < 0)
 6529			goto out;
 6530		ret = 0;
 6531	}
 6532
 6533	for (t = trace_types; t; t = t->next) {
 6534		if (strcmp(t->name, buf) == 0)
 6535			break;
 6536	}
 6537	if (!t) {
 6538		ret = -EINVAL;
 6539		goto out;
 6540	}
 6541	if (t == tr->current_trace)
 6542		goto out;
 6543
 6544#ifdef CONFIG_TRACER_SNAPSHOT
 6545	if (t->use_max_tr) {
 6546		local_irq_disable();
 6547		arch_spin_lock(&tr->max_lock);
 6548		if (tr->cond_snapshot)
 6549			ret = -EBUSY;
 6550		arch_spin_unlock(&tr->max_lock);
 6551		local_irq_enable();
 6552		if (ret)
 6553			goto out;
 6554	}
 6555#endif
 6556	/* Some tracers won't work on kernel command line */
 6557	if (system_state < SYSTEM_RUNNING && t->noboot) {
 6558		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
 6559			t->name);
 6560		goto out;
 6561	}
 6562
 6563	/* Some tracers are only allowed for the top level buffer */
 6564	if (!trace_ok_for_array(t, tr)) {
 6565		ret = -EINVAL;
 6566		goto out;
 6567	}
 6568
 6569	/* If trace pipe files are being read, we can't change the tracer */
 6570	if (tr->trace_ref) {
 6571		ret = -EBUSY;
 6572		goto out;
 6573	}
 6574
 6575	trace_branch_disable();
 6576
 6577	tr->current_trace->enabled--;
 6578
 6579	if (tr->current_trace->reset)
 6580		tr->current_trace->reset(tr);
 6581
 6582#ifdef CONFIG_TRACER_MAX_TRACE
 6583	had_max_tr = tr->current_trace->use_max_tr;
 6584
 6585	/* Current trace needs to be nop_trace before synchronize_rcu */
 6586	tr->current_trace = &nop_trace;
 6587
 6588	if (had_max_tr && !t->use_max_tr) {
 6589		/*
 6590		 * We need to make sure that the update_max_tr sees that
 6591		 * current_trace changed to nop_trace to keep it from
 6592		 * swapping the buffers after we resize it.
 6593		 * The update_max_tr is called from interrupts disabled
6594		 * so a synchronize_rcu() is sufficient.
 6595		 */
 6596		synchronize_rcu();
 6597		free_snapshot(tr);
 6598	}
 6599
 6600	if (t->use_max_tr && !tr->allocated_snapshot) {
 6601		ret = tracing_alloc_snapshot_instance(tr);
 6602		if (ret < 0)
 6603			goto out;
 6604	}
 6605#else
 6606	tr->current_trace = &nop_trace;
 6607#endif
 6608
 6609	if (t->init) {
 6610		ret = tracer_init(t, tr);
 6611		if (ret)
 6612			goto out;
 6613	}
 6614
 6615	tr->current_trace = t;
 6616	tr->current_trace->enabled++;
 6617	trace_branch_enable(tr);
 6618 out:
 6619	mutex_unlock(&trace_types_lock);
 6620
 6621	return ret;
 6622}
 6623
 6624static ssize_t
 6625tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 6626			size_t cnt, loff_t *ppos)
 6627{
 6628	struct trace_array *tr = filp->private_data;
 6629	char buf[MAX_TRACER_SIZE+1];
 6630	char *name;
 6631	size_t ret;
 6632	int err;
 6633
 6634	ret = cnt;
 6635
 6636	if (cnt > MAX_TRACER_SIZE)
 6637		cnt = MAX_TRACER_SIZE;
 6638
 6639	if (copy_from_user(buf, ubuf, cnt))
 6640		return -EFAULT;
 6641
 6642	buf[cnt] = 0;
 6643
 6644	name = strim(buf);
 6645
 6646	err = tracing_set_tracer(tr, name);
 6647	if (err)
 6648		return err;
 6649
 6650	*ppos += ret;
 6651
 6652	return ret;
 6653}
 6654
 6655static ssize_t
 6656tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
 6657		   size_t cnt, loff_t *ppos)
 6658{
 6659	char buf[64];
 6660	int r;
 6661
 6662	r = snprintf(buf, sizeof(buf), "%ld\n",
 6663		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
 6664	if (r > sizeof(buf))
 6665		r = sizeof(buf);
 6666	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 6667}
 6668
 6669static ssize_t
 6670tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
 6671		    size_t cnt, loff_t *ppos)
 6672{
 6673	unsigned long val;
 6674	int ret;
 6675
 6676	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 6677	if (ret)
 6678		return ret;
 6679
 6680	*ptr = val * 1000;
 6681
 6682	return cnt;
 6683}
 6684
 6685static ssize_t
 6686tracing_thresh_read(struct file *filp, char __user *ubuf,
 6687		    size_t cnt, loff_t *ppos)
 6688{
 6689	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
 6690}
 6691
 6692static ssize_t
 6693tracing_thresh_write(struct file *filp, const char __user *ubuf,
 6694		     size_t cnt, loff_t *ppos)
 6695{
 6696	struct trace_array *tr = filp->private_data;
 6697	int ret;
 6698
 6699	mutex_lock(&trace_types_lock);
 6700	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
 6701	if (ret < 0)
 6702		goto out;
 6703
 6704	if (tr->current_trace->update_thresh) {
 6705		ret = tr->current_trace->update_thresh(tr);
 6706		if (ret < 0)
 6707			goto out;
 6708	}
 6709
 6710	ret = cnt;
 6711out:
 6712	mutex_unlock(&trace_types_lock);
 6713
 6714	return ret;
 6715}
 6716
 6717#ifdef CONFIG_TRACER_MAX_TRACE
 6718
 6719static ssize_t
 6720tracing_max_lat_read(struct file *filp, char __user *ubuf,
 6721		     size_t cnt, loff_t *ppos)
 6722{
 6723	struct trace_array *tr = filp->private_data;
 6724
 6725	return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
 6726}
 6727
 6728static ssize_t
 6729tracing_max_lat_write(struct file *filp, const char __user *ubuf,
 6730		      size_t cnt, loff_t *ppos)
 6731{
 6732	struct trace_array *tr = filp->private_data;
 6733
 6734	return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
 6735}
 6736
 6737#endif
 6738
 6739static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
 6740{
 6741	if (cpu == RING_BUFFER_ALL_CPUS) {
 6742		if (cpumask_empty(tr->pipe_cpumask)) {
 6743			cpumask_setall(tr->pipe_cpumask);
 6744			return 0;
 6745		}
 6746	} else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
 6747		cpumask_set_cpu(cpu, tr->pipe_cpumask);
 6748		return 0;
 6749	}
 6750	return -EBUSY;
 6751}
 6752
 6753static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
 6754{
 6755	if (cpu == RING_BUFFER_ALL_CPUS) {
 6756		WARN_ON(!cpumask_full(tr->pipe_cpumask));
 6757		cpumask_clear(tr->pipe_cpumask);
 6758	} else {
 6759		WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
 6760		cpumask_clear_cpu(cpu, tr->pipe_cpumask);
 6761	}
 6762}
 6763
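/*
 * Open a trace_pipe reader.  Only one reader may consume a given CPU at
 * a time; open_pipe_on_cpu()/close_pipe_on_cpu() track this in
 * tr->pipe_cpumask and a conflicting open fails with -EBUSY.
 */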
 6764static int tracing_open_pipe(struct inode *inode, struct file *filp)
 6765{
 6766	struct trace_array *tr = inode->i_private;
 6767	struct trace_iterator *iter;
 6768	int cpu;
 6769	int ret;
 6770
 6771	ret = tracing_check_open_get_tr(tr);
 6772	if (ret)
 6773		return ret;
 6774
 6775	mutex_lock(&trace_types_lock);
 6776	cpu = tracing_get_cpu(inode);
 6777	ret = open_pipe_on_cpu(tr, cpu);
 6778	if (ret)
 6779		goto fail_pipe_on_cpu;
 6780
 6781	/* create a buffer to store the information to pass to userspace */
 6782	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 6783	if (!iter) {
 6784		ret = -ENOMEM;
 6785		goto fail_alloc_iter;
 6786	}
 6787
 6788	trace_seq_init(&iter->seq);
 6789	iter->trace = tr->current_trace;
 6790
 6791	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
 6792		ret = -ENOMEM;
 6793		goto fail;
 6794	}
 6795
 6796	/* trace pipe does not show start of buffer */
 6797	cpumask_setall(iter->started);
 6798
 6799	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 6800		iter->iter_flags |= TRACE_FILE_LAT_FMT;
 6801
 6802	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
 6803	if (trace_clocks[tr->clock_id].in_ns)
 6804		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 6805
 6806	iter->tr = tr;
 6807	iter->array_buffer = &tr->array_buffer;
 6808	iter->cpu_file = cpu;
 6809	mutex_init(&iter->mutex);
 6810	filp->private_data = iter;
 6811
 6812	if (iter->trace->pipe_open)
 6813		iter->trace->pipe_open(iter);
 6814
 6815	nonseekable_open(inode, filp);
 6816
 6817	tr->trace_ref++;
 6818
 6819	mutex_unlock(&trace_types_lock);
 6820	return ret;
 6821
 6822fail:
 6823	kfree(iter);
 6824fail_alloc_iter:
 6825	close_pipe_on_cpu(tr, cpu);
 6826fail_pipe_on_cpu:
 6827	__trace_array_put(tr);
 6828	mutex_unlock(&trace_types_lock);
 6829	return ret;
 6830}
 6831
 6832static int tracing_release_pipe(struct inode *inode, struct file *file)
 6833{
 6834	struct trace_iterator *iter = file->private_data;
 6835	struct trace_array *tr = inode->i_private;
 6836
 6837	mutex_lock(&trace_types_lock);
 6838
 6839	tr->trace_ref--;
 6840
 6841	if (iter->trace->pipe_close)
 6842		iter->trace->pipe_close(iter);
 6843	close_pipe_on_cpu(tr, iter->cpu_file);
 6844	mutex_unlock(&trace_types_lock);
 6845
 6846	free_trace_iter_content(iter);
 6847	kfree(iter);
 6848
 6849	trace_array_put(tr);
 6850
 6851	return 0;
 6852}
 6853
 6854static __poll_t
 6855trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
 6856{
 6857	struct trace_array *tr = iter->tr;
 6858
 6859	/* Iterators are static, they should be filled or empty */
 6860	if (trace_buffer_iter(iter, iter->cpu_file))
 6861		return EPOLLIN | EPOLLRDNORM;
 6862
 6863	if (tr->trace_flags & TRACE_ITER_BLOCK)
 6864		/*
 6865		 * Always select as readable when in blocking mode
 6866		 */
 6867		return EPOLLIN | EPOLLRDNORM;
 6868	else
 6869		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
 6870					     filp, poll_table, iter->tr->buffer_percent);
 6871}
 6872
 6873static __poll_t
 6874tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 6875{
 6876	struct trace_iterator *iter = filp->private_data;
 6877
 6878	return trace_poll(iter, filp, poll_table);
 6879}
 6880
 6881/* Must be called with iter->mutex held. */
 6882static int tracing_wait_pipe(struct file *filp)
 6883{
 6884	struct trace_iterator *iter = filp->private_data;
 6885	int ret;
 6886
 6887	while (trace_empty(iter)) {
 6888
 6889		if ((filp->f_flags & O_NONBLOCK)) {
 6890			return -EAGAIN;
 6891		}
 6892
 6893		/*
 6894		 * We block until we read something and tracing is disabled.
 6895		 * We still block if tracing is disabled, but we have never
 6896		 * read anything. This allows a user to cat this file, and
 6897		 * then enable tracing. But after we have read something,
 6898		 * we give an EOF when tracing is again disabled.
 6899		 *
 6900		 * iter->pos will be 0 if we haven't read anything.
 6901		 */
 6902		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
 6903			break;
 6904
 6905		mutex_unlock(&iter->mutex);
 6906
 6907		ret = wait_on_pipe(iter, 0);
 6908
 6909		mutex_lock(&iter->mutex);
 6910
 6911		if (ret)
 6912			return ret;
 6913	}
 6914
 6915	return 1;
 6916}
 6917
 6918/*
 6919 * Consumer reader.
 6920 */
 6921static ssize_t
 6922tracing_read_pipe(struct file *filp, char __user *ubuf,
 6923		  size_t cnt, loff_t *ppos)
 6924{
 6925	struct trace_iterator *iter = filp->private_data;
 6926	ssize_t sret;
 6927
 6928	/*
6929	 * Avoid more than one consumer on a single file descriptor.
6930	 * This is just a matter of trace coherency; the ring buffer itself
 6931	 * is protected.
 6932	 */
 6933	mutex_lock(&iter->mutex);
 6934
 6935	/* return any leftover data */
 6936	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
 6937	if (sret != -EBUSY)
 6938		goto out;
 6939
 6940	trace_seq_init(&iter->seq);
 6941
 6942	if (iter->trace->read) {
 6943		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
 6944		if (sret)
 6945			goto out;
 6946	}
 6947
 6948waitagain:
 6949	sret = tracing_wait_pipe(filp);
 6950	if (sret <= 0)
 6951		goto out;
 6952
 6953	/* stop when tracing is finished */
 6954	if (trace_empty(iter)) {
 6955		sret = 0;
 6956		goto out;
 6957	}
 6958
 6959	if (cnt >= TRACE_SEQ_BUFFER_SIZE)
 6960		cnt = TRACE_SEQ_BUFFER_SIZE - 1;
 6961
 6962	/* reset all but tr, trace, and overruns */
 6963	trace_iterator_reset(iter);
 6964	cpumask_clear(iter->started);
 6965	trace_seq_init(&iter->seq);
 6966
 6967	trace_event_read_lock();
 6968	trace_access_lock(iter->cpu_file);
 6969	while (trace_find_next_entry_inc(iter) != NULL) {
 6970		enum print_line_t ret;
 6971		int save_len = iter->seq.seq.len;
 6972
 6973		ret = print_trace_line(iter);
 6974		if (ret == TRACE_TYPE_PARTIAL_LINE) {
 6975			/*
6976			 * If one print_trace_line() fills the entire trace_seq in one shot,
6977			 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6978			 * In this case, we need to consume it; otherwise, the loop will peek
 6979			 * this event next time, resulting in an infinite loop.
 6980			 */
 6981			if (save_len == 0) {
 6982				iter->seq.full = 0;
 6983				trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
 6984				trace_consume(iter);
 6985				break;
 6986			}
 6987
 6988			/* In other cases, don't print partial lines */
 6989			iter->seq.seq.len = save_len;
 6990			break;
 6991		}
 6992		if (ret != TRACE_TYPE_NO_CONSUME)
 6993			trace_consume(iter);
 6994
 6995		if (trace_seq_used(&iter->seq) >= cnt)
 6996			break;
 6997
 6998		/*
 6999		 * Setting the full flag means we reached the trace_seq buffer
7000		 * size and we should have left via the partial output condition above.
 7001		 * One of the trace_seq_* functions is not used properly.
 7002		 */
 7003		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
 7004			  iter->ent->type);
 7005	}
 7006	trace_access_unlock(iter->cpu_file);
 7007	trace_event_read_unlock();
 7008
 7009	/* Now copy what we have to the user */
 7010	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
 7011	if (iter->seq.readpos >= trace_seq_used(&iter->seq))
 7012		trace_seq_init(&iter->seq);
 7013
 7014	/*
 7015	 * If there was nothing to send to user, in spite of consuming trace
 7016	 * entries, go back to wait for more entries.
 7017	 */
 7018	if (sret == -EBUSY)
 7019		goto waitagain;
 7020
 7021out:
 7022	mutex_unlock(&iter->mutex);
 7023
 7024	return sret;
 7025}
 7026
 7027static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 7028				     unsigned int idx)
 7029{
 7030	__free_page(spd->pages[idx]);
 7031}
 7032
 7033static size_t
 7034tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 7035{
 7036	size_t count;
 7037	int save_len;
 7038	int ret;
 7039
 7040	/* Seq buffer is page-sized, exactly what we need. */
 7041	for (;;) {
 7042		save_len = iter->seq.seq.len;
 7043		ret = print_trace_line(iter);
 7044
 7045		if (trace_seq_has_overflowed(&iter->seq)) {
 7046			iter->seq.seq.len = save_len;
 7047			break;
 7048		}
 7049
 7050		/*
 7051		 * This should not be hit, because it should only
 7052		 * be set if the iter->seq overflowed. But check it
 7053		 * anyway to be safe.
 7054		 */
 7055		if (ret == TRACE_TYPE_PARTIAL_LINE) {
 7056			iter->seq.seq.len = save_len;
 7057			break;
 7058		}
 7059
 7060		count = trace_seq_used(&iter->seq) - save_len;
 7061		if (rem < count) {
 7062			rem = 0;
 7063			iter->seq.seq.len = save_len;
 7064			break;
 7065		}
 7066
 7067		if (ret != TRACE_TYPE_NO_CONSUME)
 7068			trace_consume(iter);
 7069		rem -= count;
 7070		if (!trace_find_next_entry_inc(iter))	{
 7071			rem = 0;
 7072			iter->ent = NULL;
 7073			break;
 7074		}
 7075	}
 7076
 7077	return rem;
 7078}
 7079
 7080static ssize_t tracing_splice_read_pipe(struct file *filp,
 7081					loff_t *ppos,
 7082					struct pipe_inode_info *pipe,
 7083					size_t len,
 7084					unsigned int flags)
 7085{
 7086	struct page *pages_def[PIPE_DEF_BUFFERS];
 7087	struct partial_page partial_def[PIPE_DEF_BUFFERS];
 7088	struct trace_iterator *iter = filp->private_data;
 7089	struct splice_pipe_desc spd = {
 7090		.pages		= pages_def,
 7091		.partial	= partial_def,
 7092		.nr_pages	= 0, /* This gets updated below. */
 7093		.nr_pages_max	= PIPE_DEF_BUFFERS,
 7094		.ops		= &default_pipe_buf_ops,
 7095		.spd_release	= tracing_spd_release_pipe,
 7096	};
 7097	ssize_t ret;
 7098	size_t rem;
 7099	unsigned int i;
 7100
 7101	if (splice_grow_spd(pipe, &spd))
 7102		return -ENOMEM;
 7103
 7104	mutex_lock(&iter->mutex);
 7105
 7106	if (iter->trace->splice_read) {
 7107		ret = iter->trace->splice_read(iter, filp,
 7108					       ppos, pipe, len, flags);
 7109		if (ret)
 7110			goto out_err;
 7111	}
 7112
 7113	ret = tracing_wait_pipe(filp);
 7114	if (ret <= 0)
 7115		goto out_err;
 7116
 7117	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
 7118		ret = -EFAULT;
 7119		goto out_err;
 7120	}
 7121
 7122	trace_event_read_lock();
 7123	trace_access_lock(iter->cpu_file);
 7124
 7125	/* Fill as many pages as possible. */
 7126	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
 7127		spd.pages[i] = alloc_page(GFP_KERNEL);
 7128		if (!spd.pages[i])
 7129			break;
 7130
 7131		rem = tracing_fill_pipe_page(rem, iter);
 7132
 7133		/* Copy the data into the page, so we can start over. */
 7134		ret = trace_seq_to_buffer(&iter->seq,
 7135					  page_address(spd.pages[i]),
 7136					  trace_seq_used(&iter->seq));
 7137		if (ret < 0) {
 7138			__free_page(spd.pages[i]);
 7139			break;
 7140		}
 7141		spd.partial[i].offset = 0;
 7142		spd.partial[i].len = trace_seq_used(&iter->seq);
 7143
 7144		trace_seq_init(&iter->seq);
 7145	}
 7146
 7147	trace_access_unlock(iter->cpu_file);
 7148	trace_event_read_unlock();
 7149	mutex_unlock(&iter->mutex);
 7150
 7151	spd.nr_pages = i;
 7152
 7153	if (i)
 7154		ret = splice_to_pipe(pipe, &spd);
 7155	else
 7156		ret = 0;
 7157out:
 7158	splice_shrink_spd(&spd);
 7159	return ret;
 7160
 7161out_err:
 7162	mutex_unlock(&iter->mutex);
 7163	goto out;
 7164}
 7165
 7166static ssize_t
 7167tracing_entries_read(struct file *filp, char __user *ubuf,
 7168		     size_t cnt, loff_t *ppos)
 7169{
 7170	struct inode *inode = file_inode(filp);
 7171	struct trace_array *tr = inode->i_private;
 7172	int cpu = tracing_get_cpu(inode);
 7173	char buf[64];
 7174	int r = 0;
 7175	ssize_t ret;
 7176
 7177	mutex_lock(&trace_types_lock);
 7178
 7179	if (cpu == RING_BUFFER_ALL_CPUS) {
 7180		int cpu, buf_size_same;
 7181		unsigned long size;
 7182
 7183		size = 0;
 7184		buf_size_same = 1;
 7185		/* check if all cpu sizes are same */
 7186		for_each_tracing_cpu(cpu) {
 7187			/* fill in the size from first enabled cpu */
 7188			if (size == 0)
 7189				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
 7190			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
 7191				buf_size_same = 0;
 7192				break;
 7193			}
 7194		}
 7195
 7196		if (buf_size_same) {
 7197			if (!tr->ring_buffer_expanded)
 7198				r = sprintf(buf, "%lu (expanded: %lu)\n",
 7199					    size >> 10,
 7200					    trace_buf_size >> 10);
 7201			else
 7202				r = sprintf(buf, "%lu\n", size >> 10);
 7203		} else
 7204			r = sprintf(buf, "X\n");
 7205	} else
 7206		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
 7207
 7208	mutex_unlock(&trace_types_lock);
 7209
 7210	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 7211	return ret;
 7212}
 7213
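/*
 * Write handler for buffer_size_kb.  The value is given in kilobytes,
 * e.g. "echo 4096 > buffer_size_kb", and converted to bytes before
 * resizing the ring buffer.
 */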
 7214static ssize_t
 7215tracing_entries_write(struct file *filp, const char __user *ubuf,
 7216		      size_t cnt, loff_t *ppos)
 7217{
 7218	struct inode *inode = file_inode(filp);
 7219	struct trace_array *tr = inode->i_private;
 7220	unsigned long val;
 7221	int ret;
 7222
 7223	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 7224	if (ret)
 7225		return ret;
 7226
 7227	/* must have at least 1 entry */
 7228	if (!val)
 7229		return -EINVAL;
 7230
 7231	/* value is in KB */
 7232	val <<= 10;
 7233	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
 7234	if (ret < 0)
 7235		return ret;
 7236
 7237	*ppos += cnt;
 7238
 7239	return cnt;
 7240}
 7241
 7242static ssize_t
 7243tracing_total_entries_read(struct file *filp, char __user *ubuf,
 7244				size_t cnt, loff_t *ppos)
 7245{
 7246	struct trace_array *tr = filp->private_data;
 7247	char buf[64];
 7248	int r, cpu;
 7249	unsigned long size = 0, expanded_size = 0;
 7250
 7251	mutex_lock(&trace_types_lock);
 7252	for_each_tracing_cpu(cpu) {
 7253		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
 7254		if (!tr->ring_buffer_expanded)
 7255			expanded_size += trace_buf_size >> 10;
 7256	}
 7257	if (tr->ring_buffer_expanded)
 7258		r = sprintf(buf, "%lu\n", size);
 7259	else
 7260		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
 7261	mutex_unlock(&trace_types_lock);
 7262
 7263	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 7264}
 7265
 7266static ssize_t
 7267tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
 7268			  size_t cnt, loff_t *ppos)
 7269{
 7270	/*
7271	 * There is no need to read what the user has written; this function
7272	 * is just to make sure that there is no error when "echo" is used.
 7273	 */
 7274
 7275	*ppos += cnt;
 7276
 7277	return cnt;
 7278}
 7279
 7280static int
 7281tracing_free_buffer_release(struct inode *inode, struct file *filp)
 7282{
 7283	struct trace_array *tr = inode->i_private;
 7284
 7285	/* disable tracing ? */
 7286	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
 7287		tracer_tracing_off(tr);
 7288	/* resize the ring buffer to 0 */
 7289	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
 7290
 7291	trace_array_put(tr);
 7292
 7293	return 0;
 7294}
 7295
 7296#define TRACE_MARKER_MAX_SIZE		4096
 7297
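/*
 * Write handler for trace_marker, e.g. "echo hello > trace_marker".
 * The user string is copied into a TRACE_PRINT event; if the copy
 * faults, "<faulted>" is recorded instead.  Any triggers attached to
 * the trace_marker event are run on the resulting entry.
 */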
 7298static ssize_t
 7299tracing_mark_write(struct file *filp, const char __user *ubuf,
 7300					size_t cnt, loff_t *fpos)
 7301{
 7302	struct trace_array *tr = filp->private_data;
 7303	struct ring_buffer_event *event;
 7304	enum event_trigger_type tt = ETT_NONE;
 7305	struct trace_buffer *buffer;
 7306	struct print_entry *entry;
 7307	int meta_size;
 7308	ssize_t written;
 7309	size_t size;
 7310	int len;
 7311
 7312/* Used in tracing_mark_raw_write() as well */
 7313#define FAULTED_STR "<faulted>"
 7314#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
 7315
 7316	if (tracing_disabled)
 7317		return -EINVAL;
 7318
 7319	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
 7320		return -EINVAL;
 7321
 7322	if ((ssize_t)cnt < 0)
 7323		return -EINVAL;
 7324
 7325	if (cnt > TRACE_MARKER_MAX_SIZE)
 7326		cnt = TRACE_MARKER_MAX_SIZE;
 7327
 7328	meta_size = sizeof(*entry) + 2;  /* add '\0' and possible '\n' */
 7329 again:
 7330	size = cnt + meta_size;
 7331
 7332	/* If less than "<faulted>", then make sure we can still add that */
 7333	if (cnt < FAULTED_SIZE)
 7334		size += FAULTED_SIZE - cnt;
 7335
 7336	buffer = tr->array_buffer.buffer;
 7337	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 7338					    tracing_gen_ctx());
 7339	if (unlikely(!event)) {
 7340		/*
 7341		 * If the size was greater than what was allowed, then
 7342		 * make it smaller and try again.
 7343		 */
 7344		if (size > ring_buffer_max_event_size(buffer)) {
 7345			/* cnt < FAULTED size should never be bigger than max */
 7346			if (WARN_ON_ONCE(cnt < FAULTED_SIZE))
 7347				return -EBADF;
 7348			cnt = ring_buffer_max_event_size(buffer) - meta_size;
 7349			/* The above should only happen once */
 7350			if (WARN_ON_ONCE(cnt + meta_size == size))
 7351				return -EBADF;
 7352			goto again;
 7353		}
 7354
 7355		/* Ring buffer disabled, return as if not open for write */
 7356		return -EBADF;
 7357	}
 7358
 7359	entry = ring_buffer_event_data(event);
 7360	entry->ip = _THIS_IP_;
 7361
 7362	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
 7363	if (len) {
 7364		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
 7365		cnt = FAULTED_SIZE;
 7366		written = -EFAULT;
 7367	} else
 7368		written = cnt;
 7369
 7370	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
 7371		/* do not add \n before testing triggers, but add \0 */
 7372		entry->buf[cnt] = '\0';
 7373		tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
 7374	}
 7375
 7376	if (entry->buf[cnt - 1] != '\n') {
 7377		entry->buf[cnt] = '\n';
 7378		entry->buf[cnt + 1] = '\0';
 7379	} else
 7380		entry->buf[cnt] = '\0';
 7381
 7382	if (static_branch_unlikely(&trace_marker_exports_enabled))
 7383		ftrace_exports(event, TRACE_EXPORT_MARKER);
 7384	__buffer_unlock_commit(buffer, event);
 7385
 7386	if (tt)
 7387		event_triggers_post_call(tr->trace_marker_file, tt);
 7388
 7389	return written;
 7390}
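
/*
 * Illustrative use of the trace_marker file served by the handler
 * above (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo "hit checkpoint A" > /sys/kernel/tracing/trace_marker
 *
 * Writes larger than TRACE_MARKER_MAX_SIZE are truncated, and a
 * trailing newline is appended if the string does not end with one.
 */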
 7391
 7392static ssize_t
 7393tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
 7394					size_t cnt, loff_t *fpos)
 7395{
 7396	struct trace_array *tr = filp->private_data;
 7397	struct ring_buffer_event *event;
 7398	struct trace_buffer *buffer;
 7399	struct raw_data_entry *entry;
 7400	ssize_t written;
 7401	int size;
 7402	int len;
 7403
 7404#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
 7405
 7406	if (tracing_disabled)
 7407		return -EINVAL;
 7408
 7409	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
 7410		return -EINVAL;
 7411
 7412	/* The marker must at least have a tag id */
 7413	if (cnt < sizeof(unsigned int))
 7414		return -EINVAL;
 7415
 7416	size = sizeof(*entry) + cnt;
 7417	if (cnt < FAULT_SIZE_ID)
 7418		size += FAULT_SIZE_ID - cnt;
 7419
 7420	buffer = tr->array_buffer.buffer;
 7421
 7422	if (size > ring_buffer_max_event_size(buffer))
 7423		return -EINVAL;
 7424
 7425	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
 7426					    tracing_gen_ctx());
 7427	if (!event)
 7428		/* Ring buffer disabled, return as if not open for write */
 7429		return -EBADF;
 7430
 7431	entry = ring_buffer_event_data(event);
 7432
 7433	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
 7434	if (len) {
 7435		entry->id = -1;
 7436		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
 7437		written = -EFAULT;
 7438	} else
 7439		written = cnt;
 7440
 7441	__buffer_unlock_commit(buffer, event);
 7442
 7443	return written;
 7444}
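
/*
 * Illustrative user-space sketch for trace_marker_raw (the struct and
 * values below are hypothetical): the payload must begin with an
 * unsigned int tag id, as checked above.
 *
 *	struct { unsigned int id; char payload[8]; } rec = { 42, "rawdata" };
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *
 *	write(fd, &rec, sizeof(rec));
 */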
 7445
 7446static int tracing_clock_show(struct seq_file *m, void *v)
 7447{
 7448	struct trace_array *tr = m->private;
 7449	int i;
 7450
 7451	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
 7452		seq_printf(m,
 7453			"%s%s%s%s", i ? " " : "",
 7454			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
 7455			i == tr->clock_id ? "]" : "");
 7456	seq_putc(m, '\n');
 7457
 7458	return 0;
 7459}
 7460
 7461int tracing_set_clock(struct trace_array *tr, const char *clockstr)
 7462{
 7463	int i;
 7464
 7465	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
 7466		if (strcmp(trace_clocks[i].name, clockstr) == 0)
 7467			break;
 7468	}
 7469	if (i == ARRAY_SIZE(trace_clocks))
 7470		return -EINVAL;
 7471
 7472	mutex_lock(&trace_types_lock);
 7473
 7474	tr->clock_id = i;
 7475
 7476	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
 7477
 7478	/*
 7479	 * New clock may not be consistent with the previous clock.
 7480	 * Reset the buffer so that it doesn't have incomparable timestamps.
 7481	 */
 7482	tracing_reset_online_cpus(&tr->array_buffer);
 7483
 7484#ifdef CONFIG_TRACER_MAX_TRACE
 7485	if (tr->max_buffer.buffer)
 7486		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
 7487	tracing_reset_online_cpus(&tr->max_buffer);
 7488#endif
 7489
 7490	mutex_unlock(&trace_types_lock);
 7491
 7492	return 0;
 7493}
 7494
 7495static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 7496				   size_t cnt, loff_t *fpos)
 7497{
 7498	struct seq_file *m = filp->private_data;
 7499	struct trace_array *tr = m->private;
 7500	char buf[64];
 7501	const char *clockstr;
 7502	int ret;
 7503
 7504	if (cnt >= sizeof(buf))
 7505		return -EINVAL;
 7506
 7507	if (copy_from_user(buf, ubuf, cnt))
 7508		return -EFAULT;
 7509
 7510	buf[cnt] = 0;
 7511
 7512	clockstr = strstrip(buf);
 7513
 7514	ret = tracing_set_clock(tr, clockstr);
 7515	if (ret)
 7516		return ret;
 7517
 7518	*fpos += cnt;
 7519
 7520	return cnt;
 7521}
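
/*
 * Example: the available clocks and the current selection can be seen
 * and changed through the trace_clock file, which lands in
 * tracing_set_clock() above:
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	echo mono > /sys/kernel/tracing/trace_clock
 */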
 7522
 7523static int tracing_clock_open(struct inode *inode, struct file *file)
 7524{
 7525	struct trace_array *tr = inode->i_private;
 7526	int ret;
 7527
 7528	ret = tracing_check_open_get_tr(tr);
 7529	if (ret)
 7530		return ret;
 7531
 7532	ret = single_open(file, tracing_clock_show, inode->i_private);
 7533	if (ret < 0)
 7534		trace_array_put(tr);
 7535
 7536	return ret;
 7537}
 7538
 7539static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
 7540{
 7541	struct trace_array *tr = m->private;
 7542
 7543	mutex_lock(&trace_types_lock);
 7544
 7545	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
 7546		seq_puts(m, "delta [absolute]\n");
 7547	else
 7548		seq_puts(m, "[delta] absolute\n");
 7549
 7550	mutex_unlock(&trace_types_lock);
 7551
 7552	return 0;
 7553}
 7554
 7555static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
 7556{
 7557	struct trace_array *tr = inode->i_private;
 7558	int ret;
 7559
 7560	ret = tracing_check_open_get_tr(tr);
 7561	if (ret)
 7562		return ret;
 7563
 7564	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
 7565	if (ret < 0)
 7566		trace_array_put(tr);
 7567
 7568	return ret;
 7569}
 7570
 7571u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
 7572{
 7573	if (rbe == this_cpu_read(trace_buffered_event))
 7574		return ring_buffer_time_stamp(buffer);
 7575
 7576	return ring_buffer_event_time_stamp(buffer, rbe);
 7577}
 7578
 7579/*
  7580 * Set or disable using the per CPU trace_buffered_event when possible.
 7581 */
 7582int tracing_set_filter_buffering(struct trace_array *tr, bool set)
 7583{
 7584	int ret = 0;
 7585
 7586	mutex_lock(&trace_types_lock);
 7587
 7588	if (set && tr->no_filter_buffering_ref++)
 7589		goto out;
 7590
 7591	if (!set) {
 7592		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
 7593			ret = -EINVAL;
 7594			goto out;
 7595		}
 7596
 7597		--tr->no_filter_buffering_ref;
 7598	}
 7599 out:
 7600	mutex_unlock(&trace_types_lock);
 7601
 7602	return ret;
 7603}
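
/*
 * Callers are expected to pair these calls so that
 * tr->no_filter_buffering_ref stays balanced (illustrative sketch):
 *
 *	tracing_set_filter_buffering(tr, true);
 *	...
 *	tracing_set_filter_buffering(tr, false);
 */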
 7604
 7605struct ftrace_buffer_info {
 7606	struct trace_iterator	iter;
 7607	void			*spare;
 7608	unsigned int		spare_cpu;
 7609	unsigned int		spare_size;
 7610	unsigned int		read;
 7611};
 7612
 7613#ifdef CONFIG_TRACER_SNAPSHOT
 7614static int tracing_snapshot_open(struct inode *inode, struct file *file)
 7615{
 7616	struct trace_array *tr = inode->i_private;
 7617	struct trace_iterator *iter;
 7618	struct seq_file *m;
 7619	int ret;
 7620
 7621	ret = tracing_check_open_get_tr(tr);
 7622	if (ret)
 7623		return ret;
 7624
 7625	if (file->f_mode & FMODE_READ) {
 7626		iter = __tracing_open(inode, file, true);
 7627		if (IS_ERR(iter))
 7628			ret = PTR_ERR(iter);
 7629	} else {
 7630		/* Writes still need the seq_file to hold the private data */
 7631		ret = -ENOMEM;
 7632		m = kzalloc(sizeof(*m), GFP_KERNEL);
 7633		if (!m)
 7634			goto out;
 7635		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 7636		if (!iter) {
 7637			kfree(m);
 7638			goto out;
 7639		}
 7640		ret = 0;
 7641
 7642		iter->tr = tr;
 7643		iter->array_buffer = &tr->max_buffer;
 7644		iter->cpu_file = tracing_get_cpu(inode);
 7645		m->private = iter;
 7646		file->private_data = m;
 7647	}
 7648out:
 7649	if (ret < 0)
 7650		trace_array_put(tr);
 7651
 7652	return ret;
 7653}
 7654
 7655static void tracing_swap_cpu_buffer(void *tr)
 7656{
 7657	update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
 7658}
 7659
 7660static ssize_t
 7661tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 7662		       loff_t *ppos)
 7663{
 7664	struct seq_file *m = filp->private_data;
 7665	struct trace_iterator *iter = m->private;
 7666	struct trace_array *tr = iter->tr;
 7667	unsigned long val;
 7668	int ret;
 7669
 7670	ret = tracing_update_buffers(tr);
 7671	if (ret < 0)
 7672		return ret;
 7673
 7674	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 7675	if (ret)
 7676		return ret;
 7677
 7678	mutex_lock(&trace_types_lock);
 7679
 7680	if (tr->current_trace->use_max_tr) {
 7681		ret = -EBUSY;
 7682		goto out;
 7683	}
 7684
 7685	local_irq_disable();
 7686	arch_spin_lock(&tr->max_lock);
 7687	if (tr->cond_snapshot)
 7688		ret = -EBUSY;
 7689	arch_spin_unlock(&tr->max_lock);
 7690	local_irq_enable();
 7691	if (ret)
 7692		goto out;
 7693
 7694	switch (val) {
 7695	case 0:
 7696		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
 7697			ret = -EINVAL;
 7698			break;
 7699		}
 7700		if (tr->allocated_snapshot)
 7701			free_snapshot(tr);
 7702		break;
 7703	case 1:
 7704/* Only allow per-cpu swap if the ring buffer supports it */
 7705#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
 7706		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
 7707			ret = -EINVAL;
 7708			break;
 7709		}
 7710#endif
 7711		if (tr->allocated_snapshot)
 7712			ret = resize_buffer_duplicate_size(&tr->max_buffer,
 7713					&tr->array_buffer, iter->cpu_file);
 7714		else
 7715			ret = tracing_alloc_snapshot_instance(tr);
 7716		if (ret < 0)
 7717			break;
 7718		/* Now, we're going to swap */
 7719		if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 7720			local_irq_disable();
 7721			update_max_tr(tr, current, smp_processor_id(), NULL);
 7722			local_irq_enable();
 7723		} else {
 7724			smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
 7725						 (void *)tr, 1);
 7726		}
 7727		break;
 7728	default:
 7729		if (tr->allocated_snapshot) {
 7730			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
 7731				tracing_reset_online_cpus(&tr->max_buffer);
 7732			else
 7733				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
 7734		}
 7735		break;
 7736	}
 7737
 7738	if (ret >= 0) {
 7739		*ppos += cnt;
 7740		ret = cnt;
 7741	}
 7742out:
 7743	mutex_unlock(&trace_types_lock);
 7744	return ret;
 7745}
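
/*
 * The value written to the "snapshot" file selects the action taken in
 * the switch above, e.g.
 *
 *	echo 1 > /sys/kernel/tracing/snapshot
 *	echo 0 > /sys/kernel/tracing/snapshot
 *	echo 2 > /sys/kernel/tracing/snapshot
 *
 * 1 allocates the snapshot buffer if needed and swaps it with the live
 * buffer, 0 frees it, and any other value clears its contents if it is
 * allocated.
 */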
 7746
 7747static int tracing_snapshot_release(struct inode *inode, struct file *file)
 7748{
 7749	struct seq_file *m = file->private_data;
 7750	int ret;
 7751
 7752	ret = tracing_release(inode, file);
 7753
 7754	if (file->f_mode & FMODE_READ)
 7755		return ret;
 7756
 7757	/* If write only, the seq_file is just a stub */
 7758	if (m)
 7759		kfree(m->private);
 7760	kfree(m);
 7761
 7762	return 0;
 7763}
 7764
 7765static int tracing_buffers_open(struct inode *inode, struct file *filp);
 7766static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
 7767				    size_t count, loff_t *ppos);
 7768static int tracing_buffers_release(struct inode *inode, struct file *file);
 7769static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 7770		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
 7771
 7772static int snapshot_raw_open(struct inode *inode, struct file *filp)
 7773{
 7774	struct ftrace_buffer_info *info;
 7775	int ret;
 7776
 7777	/* The following checks for tracefs lockdown */
 7778	ret = tracing_buffers_open(inode, filp);
 7779	if (ret < 0)
 7780		return ret;
 7781
 7782	info = filp->private_data;
 7783
 7784	if (info->iter.trace->use_max_tr) {
 7785		tracing_buffers_release(inode, filp);
 7786		return -EBUSY;
 7787	}
 7788
 7789	info->iter.snapshot = true;
 7790	info->iter.array_buffer = &info->iter.tr->max_buffer;
 7791
 7792	return ret;
 7793}
 7794
 7795#endif /* CONFIG_TRACER_SNAPSHOT */
 7796
 7797
 7798static const struct file_operations tracing_thresh_fops = {
 7799	.open		= tracing_open_generic,
 7800	.read		= tracing_thresh_read,
 7801	.write		= tracing_thresh_write,
 7802	.llseek		= generic_file_llseek,
 7803};
 7804
 7805#ifdef CONFIG_TRACER_MAX_TRACE
 7806static const struct file_operations tracing_max_lat_fops = {
 7807	.open		= tracing_open_generic_tr,
 7808	.read		= tracing_max_lat_read,
 7809	.write		= tracing_max_lat_write,
 7810	.llseek		= generic_file_llseek,
 7811	.release	= tracing_release_generic_tr,
 7812};
 7813#endif
 7814
 7815static const struct file_operations set_tracer_fops = {
 7816	.open		= tracing_open_generic_tr,
 7817	.read		= tracing_set_trace_read,
 7818	.write		= tracing_set_trace_write,
 7819	.llseek		= generic_file_llseek,
 7820	.release	= tracing_release_generic_tr,
 7821};
 7822
 7823static const struct file_operations tracing_pipe_fops = {
 7824	.open		= tracing_open_pipe,
 7825	.poll		= tracing_poll_pipe,
 7826	.read		= tracing_read_pipe,
 7827	.splice_read	= tracing_splice_read_pipe,
 7828	.release	= tracing_release_pipe,
 7829	.llseek		= no_llseek,
 7830};
 7831
 7832static const struct file_operations tracing_entries_fops = {
 7833	.open		= tracing_open_generic_tr,
 7834	.read		= tracing_entries_read,
 7835	.write		= tracing_entries_write,
 7836	.llseek		= generic_file_llseek,
 7837	.release	= tracing_release_generic_tr,
 7838};
 7839
 7840static const struct file_operations tracing_total_entries_fops = {
 7841	.open		= tracing_open_generic_tr,
 7842	.read		= tracing_total_entries_read,
 7843	.llseek		= generic_file_llseek,
 7844	.release	= tracing_release_generic_tr,
 7845};
 7846
 7847static const struct file_operations tracing_free_buffer_fops = {
 7848	.open		= tracing_open_generic_tr,
 7849	.write		= tracing_free_buffer_write,
 7850	.release	= tracing_free_buffer_release,
 7851};
 7852
 7853static const struct file_operations tracing_mark_fops = {
 7854	.open		= tracing_mark_open,
 7855	.write		= tracing_mark_write,
 7856	.release	= tracing_release_generic_tr,
 7857};
 7858
 7859static const struct file_operations tracing_mark_raw_fops = {
 7860	.open		= tracing_mark_open,
 7861	.write		= tracing_mark_raw_write,
 7862	.release	= tracing_release_generic_tr,
 7863};
 7864
 7865static const struct file_operations trace_clock_fops = {
 7866	.open		= tracing_clock_open,
 7867	.read		= seq_read,
 7868	.llseek		= seq_lseek,
 7869	.release	= tracing_single_release_tr,
 7870	.write		= tracing_clock_write,
 7871};
 7872
 7873static const struct file_operations trace_time_stamp_mode_fops = {
 7874	.open		= tracing_time_stamp_mode_open,
 7875	.read		= seq_read,
 7876	.llseek		= seq_lseek,
 7877	.release	= tracing_single_release_tr,
 7878};
 7879
 7880#ifdef CONFIG_TRACER_SNAPSHOT
 7881static const struct file_operations snapshot_fops = {
 7882	.open		= tracing_snapshot_open,
 7883	.read		= seq_read,
 7884	.write		= tracing_snapshot_write,
 7885	.llseek		= tracing_lseek,
 7886	.release	= tracing_snapshot_release,
 7887};
 7888
 7889static const struct file_operations snapshot_raw_fops = {
 7890	.open		= snapshot_raw_open,
 7891	.read		= tracing_buffers_read,
 7892	.release	= tracing_buffers_release,
 7893	.splice_read	= tracing_buffers_splice_read,
 7894	.llseek		= no_llseek,
 7895};
 7896
 7897#endif /* CONFIG_TRACER_SNAPSHOT */
 7898
 7899/*
 7900 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
 7901 * @filp: The active open file structure
  7902 * @ubuf: The userspace provided buffer to read the value from
 7903 * @cnt: The maximum number of bytes to read
 7904 * @ppos: The current "file" position
 7905 *
 7906 * This function implements the write interface for a struct trace_min_max_param.
 7907 * The filp->private_data must point to a trace_min_max_param structure that
 7908 * defines where to write the value, the min and the max acceptable values,
 7909 * and a lock to protect the write.
 7910 */
 7911static ssize_t
 7912trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
 7913{
 7914	struct trace_min_max_param *param = filp->private_data;
 7915	u64 val;
 7916	int err;
 7917
 7918	if (!param)
 7919		return -EFAULT;
 7920
 7921	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
 7922	if (err)
 7923		return err;
 7924
 7925	if (param->lock)
 7926		mutex_lock(param->lock);
 7927
 7928	if (param->min && val < *param->min)
 7929		err = -EINVAL;
 7930
 7931	if (param->max && val > *param->max)
 7932		err = -EINVAL;
 7933
 7934	if (!err)
 7935		*param->val = val;
 7936
 7937	if (param->lock)
 7938		mutex_unlock(param->lock);
 7939
 7940	if (err)
 7941		return err;
 7942
 7943	return cnt;
 7944}
 7945
 7946/*
 7947 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
 7948 * @filp: The active open file structure
 7949 * @ubuf: The userspace provided buffer to read value into
 7950 * @cnt: The maximum number of bytes to read
 7951 * @ppos: The current "file" position
 7952 *
 7953 * This function implements the read interface for a struct trace_min_max_param.
 7954 * The filp->private_data must point to a trace_min_max_param struct with valid
 7955 * data.
 7956 */
 7957static ssize_t
 7958trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 7959{
 7960	struct trace_min_max_param *param = filp->private_data;
 7961	char buf[U64_STR_SIZE];
 7962	int len;
 7963	u64 val;
 7964
 7965	if (!param)
 7966		return -EFAULT;
 7967
 7968	val = *param->val;
 7969
 7970	if (cnt > sizeof(buf))
 7971		cnt = sizeof(buf);
 7972
 7973	len = snprintf(buf, sizeof(buf), "%llu\n", val);
 7974
 7975	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
 7976}
 7977
 7978const struct file_operations trace_min_max_fops = {
 7979	.open		= tracing_open_generic,
 7980	.read		= trace_min_max_read,
 7981	.write		= trace_min_max_write,
 7982};
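
/*
 * Illustrative setup for a file served by trace_min_max_fops (the
 * names below are hypothetical): the file's i_private must point to a
 * struct trace_min_max_param describing the value and its bounds.
 *
 *	static u64 example_val, example_min = 1, example_max = 100;
 *	static struct trace_min_max_param example_param = {
 *		.lock	= NULL,
 *		.val	= &example_val,
 *		.min	= &example_min,
 *		.max	= &example_max,
 *	};
 *
 *	trace_create_file("example", TRACE_MODE_WRITE, parent,
 *			  &example_param, &trace_min_max_fops);
 */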
 7983
 7984#define TRACING_LOG_ERRS_MAX	8
 7985#define TRACING_LOG_LOC_MAX	128
 7986
 7987#define CMD_PREFIX "  Command: "
 7988
 7989struct err_info {
 7990	const char	**errs;	/* ptr to loc-specific array of err strings */
 7991	u8		type;	/* index into errs -> specific err string */
 7992	u16		pos;	/* caret position */
 7993	u64		ts;
 7994};
 7995
 7996struct tracing_log_err {
 7997	struct list_head	list;
 7998	struct err_info		info;
 7999	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
 8000	char			*cmd;                     /* what caused err */
 8001};
 8002
 8003static DEFINE_MUTEX(tracing_err_log_lock);
 8004
 8005static struct tracing_log_err *alloc_tracing_log_err(int len)
 8006{
 8007	struct tracing_log_err *err;
 8008
 8009	err = kzalloc(sizeof(*err), GFP_KERNEL);
 8010	if (!err)
 8011		return ERR_PTR(-ENOMEM);
 8012
 8013	err->cmd = kzalloc(len, GFP_KERNEL);
 8014	if (!err->cmd) {
 8015		kfree(err);
 8016		return ERR_PTR(-ENOMEM);
 8017	}
 8018
 8019	return err;
 8020}
 8021
 8022static void free_tracing_log_err(struct tracing_log_err *err)
 8023{
 8024	kfree(err->cmd);
 8025	kfree(err);
 8026}
 8027
 8028static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
 8029						   int len)
 8030{
 8031	struct tracing_log_err *err;
 8032	char *cmd;
 8033
 8034	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
 8035		err = alloc_tracing_log_err(len);
 8036		if (PTR_ERR(err) != -ENOMEM)
 8037			tr->n_err_log_entries++;
 8038
 8039		return err;
 8040	}
 8041	cmd = kzalloc(len, GFP_KERNEL);
 8042	if (!cmd)
 8043		return ERR_PTR(-ENOMEM);
 8044	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
 8045	kfree(err->cmd);
 8046	err->cmd = cmd;
 8047	list_del(&err->list);
 8048
 8049	return err;
 8050}
 8051
 8052/**
 8053 * err_pos - find the position of a string within a command for error careting
 8054 * @cmd: The tracing command that caused the error
 8055 * @str: The string to position the caret at within @cmd
 8056 *
 8057 * Finds the position of the first occurrence of @str within @cmd.  The
 8058 * return value can be passed to tracing_log_err() for caret placement
 8059 * within @cmd.
 8060 *
 8061 * Returns the index within @cmd of the first occurrence of @str or 0
 8062 * if @str was not found.
 8063 */
 8064unsigned int err_pos(char *cmd, const char *str)
 8065{
 8066	char *found;
 8067
 8068	if (WARN_ON(!strlen(cmd)))
 8069		return 0;
 8070
 8071	found = strstr(cmd, str);
 8072	if (found)
 8073		return found - cmd;
 8074
 8075	return 0;
 8076}
 8077
 8078/**
 8079 * tracing_log_err - write an error to the tracing error log
 8080 * @tr: The associated trace array for the error (NULL for top level array)
 8081 * @loc: A string describing where the error occurred
 8082 * @cmd: The tracing command that caused the error
 8083 * @errs: The array of loc-specific static error strings
 8084 * @type: The index into errs[], which produces the specific static err string
 8085 * @pos: The position the caret should be placed in the cmd
 8086 *
 8087 * Writes an error into tracing/error_log of the form:
 8088 *
 8089 * <loc>: error: <text>
 8090 *   Command: <cmd>
 8091 *              ^
 8092 *
 8093 * tracing/error_log is a small log file containing the last
 8094 * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
 8095 * unless there has been a tracing error, and the error log can be
 8096 * cleared and have its memory freed by writing the empty string in
 8097 * truncation mode to it i.e. echo > tracing/error_log.
 8098 *
 8099 * NOTE: the @errs array along with the @type param are used to
 8100 * produce a static error string - this string is not copied and saved
 8101 * when the error is logged - only a pointer to it is saved.  See
 8102 * existing callers for examples of how static strings are typically
 8103 * defined for use with tracing_log_err().
 8104 */
 8105void tracing_log_err(struct trace_array *tr,
 8106		     const char *loc, const char *cmd,
 8107		     const char **errs, u8 type, u16 pos)
 8108{
 8109	struct tracing_log_err *err;
 8110	int len = 0;
 8111
 8112	if (!tr)
 8113		tr = &global_trace;
 8114
 8115	len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
 8116
 8117	mutex_lock(&tracing_err_log_lock);
 8118	err = get_tracing_log_err(tr, len);
 8119	if (PTR_ERR(err) == -ENOMEM) {
 8120		mutex_unlock(&tracing_err_log_lock);
 8121		return;
 8122	}
 8123
 8124	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
 8125	snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
 8126
 8127	err->info.errs = errs;
 8128	err->info.type = type;
 8129	err->info.pos = pos;
 8130	err->info.ts = local_clock();
 8131
 8132	list_add_tail(&err->list, &tr->err_log);
 8133	mutex_unlock(&tracing_err_log_lock);
 8134}
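
/*
 * Illustrative caller sketch (the names below are hypothetical; see
 * real callers such as the histogram trigger code): the error strings
 * live in a static table indexed by @type, and @pos typically comes
 * from err_pos().
 *
 *	static const char *cmd_errs[] = { "Invalid field", "Missing value" };
 *
 *	tracing_log_err(tr, "hist:cmd", cmd, cmd_errs, 0,
 *			err_pos(cmd, field));
 */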
 8135
 8136static void clear_tracing_err_log(struct trace_array *tr)
 8137{
 8138	struct tracing_log_err *err, *next;
 8139
 8140	mutex_lock(&tracing_err_log_lock);
 8141	list_for_each_entry_safe(err, next, &tr->err_log, list) {
 8142		list_del(&err->list);
 8143		free_tracing_log_err(err);
 8144	}
 8145
 8146	tr->n_err_log_entries = 0;
 8147	mutex_unlock(&tracing_err_log_lock);
 8148}
 8149
 8150static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
 8151{
 8152	struct trace_array *tr = m->private;
 8153
 8154	mutex_lock(&tracing_err_log_lock);
 8155
 8156	return seq_list_start(&tr->err_log, *pos);
 8157}
 8158
 8159static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
 8160{
 8161	struct trace_array *tr = m->private;
 8162
 8163	return seq_list_next(v, &tr->err_log, pos);
 8164}
 8165
 8166static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
 8167{
 8168	mutex_unlock(&tracing_err_log_lock);
 8169}
 8170
 8171static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
 8172{
 8173	u16 i;
 8174
 8175	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
 8176		seq_putc(m, ' ');
 8177	for (i = 0; i < pos; i++)
 8178		seq_putc(m, ' ');
 8179	seq_puts(m, "^\n");
 8180}
 8181
 8182static int tracing_err_log_seq_show(struct seq_file *m, void *v)
 8183{
 8184	struct tracing_log_err *err = v;
 8185
 8186	if (err) {
 8187		const char *err_text = err->info.errs[err->info.type];
 8188		u64 sec = err->info.ts;
 8189		u32 nsec;
 8190
 8191		nsec = do_div(sec, NSEC_PER_SEC);
 8192		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
 8193			   err->loc, err_text);
 8194		seq_printf(m, "%s", err->cmd);
 8195		tracing_err_log_show_pos(m, err->info.pos);
 8196	}
 8197
 8198	return 0;
 8199}
 8200
 8201static const struct seq_operations tracing_err_log_seq_ops = {
 8202	.start  = tracing_err_log_seq_start,
 8203	.next   = tracing_err_log_seq_next,
 8204	.stop   = tracing_err_log_seq_stop,
 8205	.show   = tracing_err_log_seq_show
 8206};
 8207
 8208static int tracing_err_log_open(struct inode *inode, struct file *file)
 8209{
 8210	struct trace_array *tr = inode->i_private;
 8211	int ret = 0;
 8212
 8213	ret = tracing_check_open_get_tr(tr);
 8214	if (ret)
 8215		return ret;
 8216
 8217	/* If this file was opened for write, then erase contents */
 8218	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
 8219		clear_tracing_err_log(tr);
 8220
 8221	if (file->f_mode & FMODE_READ) {
 8222		ret = seq_open(file, &tracing_err_log_seq_ops);
 8223		if (!ret) {
 8224			struct seq_file *m = file->private_data;
 8225			m->private = tr;
 8226		} else {
 8227			trace_array_put(tr);
 8228		}
 8229	}
 8230	return ret;
 8231}
 8232
 8233static ssize_t tracing_err_log_write(struct file *file,
 8234				     const char __user *buffer,
 8235				     size_t count, loff_t *ppos)
 8236{
 8237	return count;
 8238}
 8239
 8240static int tracing_err_log_release(struct inode *inode, struct file *file)
 8241{
 8242	struct trace_array *tr = inode->i_private;
 8243
 8244	trace_array_put(tr);
 8245
 8246	if (file->f_mode & FMODE_READ)
 8247		seq_release(inode, file);
 8248
 8249	return 0;
 8250}
 8251
 8252static const struct file_operations tracing_err_log_fops = {
 8253	.open           = tracing_err_log_open,
 8254	.write		= tracing_err_log_write,
 8255	.read           = seq_read,
 8256	.llseek         = tracing_lseek,
 8257	.release        = tracing_err_log_release,
 8258};
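
/*
 * From user space the log can be read, and cleared via the O_TRUNC
 * handling above:
 *
 *	cat /sys/kernel/tracing/error_log
 *	echo > /sys/kernel/tracing/error_log
 */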
 8259
 8260static int tracing_buffers_open(struct inode *inode, struct file *filp)
 8261{
 8262	struct trace_array *tr = inode->i_private;
 8263	struct ftrace_buffer_info *info;
 8264	int ret;
 8265
 8266	ret = tracing_check_open_get_tr(tr);
 8267	if (ret)
 8268		return ret;
 8269
 8270	info = kvzalloc(sizeof(*info), GFP_KERNEL);
 8271	if (!info) {
 8272		trace_array_put(tr);
 8273		return -ENOMEM;
 8274	}
 8275
 8276	mutex_lock(&trace_types_lock);
 8277
 8278	info->iter.tr		= tr;
 8279	info->iter.cpu_file	= tracing_get_cpu(inode);
 8280	info->iter.trace	= tr->current_trace;
 8281	info->iter.array_buffer = &tr->array_buffer;
 8282	info->spare		= NULL;
 8283	/* Force reading ring buffer for first read */
 8284	info->read		= (unsigned int)-1;
 8285
 8286	filp->private_data = info;
 8287
 8288	tr->trace_ref++;
 8289
 8290	mutex_unlock(&trace_types_lock);
 8291
 8292	ret = nonseekable_open(inode, filp);
 8293	if (ret < 0)
 8294		trace_array_put(tr);
 8295
 8296	return ret;
 8297}
 8298
 8299static __poll_t
 8300tracing_buffers_poll(struct file *filp, poll_table *poll_table)
 8301{
 8302	struct ftrace_buffer_info *info = filp->private_data;
 8303	struct trace_iterator *iter = &info->iter;
 8304
 8305	return trace_poll(iter, filp, poll_table);
 8306}
 8307
 8308static ssize_t
 8309tracing_buffers_read(struct file *filp, char __user *ubuf,
 8310		     size_t count, loff_t *ppos)
 8311{
 8312	struct ftrace_buffer_info *info = filp->private_data;
 8313	struct trace_iterator *iter = &info->iter;
 8314	void *trace_data;
 8315	int page_size;
 8316	ssize_t ret = 0;
 8317	ssize_t size;
 8318
 8319	if (!count)
 8320		return 0;
 8321
 8322#ifdef CONFIG_TRACER_MAX_TRACE
 8323	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
 8324		return -EBUSY;
 8325#endif
 8326
 8327	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
 8328
 8329	/* Make sure the spare matches the current sub buffer size */
 8330	if (info->spare) {
 8331		if (page_size != info->spare_size) {
 8332			ring_buffer_free_read_page(iter->array_buffer->buffer,
 8333						   info->spare_cpu, info->spare);
 8334			info->spare = NULL;
 8335		}
 8336	}
 8337
 8338	if (!info->spare) {
 8339		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
 8340							  iter->cpu_file);
 8341		if (IS_ERR(info->spare)) {
 8342			ret = PTR_ERR(info->spare);
 8343			info->spare = NULL;
 8344		} else {
 8345			info->spare_cpu = iter->cpu_file;
 8346			info->spare_size = page_size;
 8347		}
 8348	}
 8349	if (!info->spare)
 8350		return ret;
 8351
 8352	/* Do we have previous read data to read? */
 8353	if (info->read < page_size)
 8354		goto read;
 8355
 8356 again:
 8357	trace_access_lock(iter->cpu_file);
 8358	ret = ring_buffer_read_page(iter->array_buffer->buffer,
 8359				    info->spare,
 8360				    count,
 8361				    iter->cpu_file, 0);
 8362	trace_access_unlock(iter->cpu_file);
 8363
 8364	if (ret < 0) {
 8365		if (trace_empty(iter)) {
 8366			if ((filp->f_flags & O_NONBLOCK))
 8367				return -EAGAIN;
 8368
 8369			ret = wait_on_pipe(iter, 0);
 8370			if (ret)
 8371				return ret;
 8372
 8373			goto again;
 8374		}
 8375		return 0;
 8376	}
 8377
 8378	info->read = 0;
 8379 read:
 8380	size = page_size - info->read;
 8381	if (size > count)
 8382		size = count;
 8383	trace_data = ring_buffer_read_page_data(info->spare);
 8384	ret = copy_to_user(ubuf, trace_data + info->read, size);
 8385	if (ret == size)
 8386		return -EFAULT;
 8387
 8388	size -= ret;
 8389
 8390	*ppos += size;
 8391	info->read += size;
 8392
 8393	return size;
 8394}
 8395
 8396static int tracing_buffers_flush(struct file *file, fl_owner_t id)
 8397{
 8398	struct ftrace_buffer_info *info = file->private_data;
 8399	struct trace_iterator *iter = &info->iter;
 8400
 8401	iter->wait_index++;
 8402	/* Make sure the waiters see the new wait_index */
 8403	smp_wmb();
 8404
 8405	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
 8406
 8407	return 0;
 8408}
 8409
 8410static int tracing_buffers_release(struct inode *inode, struct file *file)
 8411{
 8412	struct ftrace_buffer_info *info = file->private_data;
 8413	struct trace_iterator *iter = &info->iter;
 8414
 8415	mutex_lock(&trace_types_lock);
 8416
 8417	iter->tr->trace_ref--;
 8418
 8419	__trace_array_put(iter->tr);
 8420
 8421	if (info->spare)
 8422		ring_buffer_free_read_page(iter->array_buffer->buffer,
 8423					   info->spare_cpu, info->spare);
 8424	kvfree(info);
 8425
 8426	mutex_unlock(&trace_types_lock);
 8427
 8428	return 0;
 8429}
 8430
 8431struct buffer_ref {
 8432	struct trace_buffer	*buffer;
 8433	void			*page;
 8434	int			cpu;
 8435	refcount_t		refcount;
 8436};
 8437
 8438static void buffer_ref_release(struct buffer_ref *ref)
 8439{
 8440	if (!refcount_dec_and_test(&ref->refcount))
 8441		return;
 8442	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
 8443	kfree(ref);
 8444}
 8445
 8446static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
 8447				    struct pipe_buffer *buf)
 8448{
 8449	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 8450
 8451	buffer_ref_release(ref);
 8452	buf->private = 0;
 8453}
 8454
 8455static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 8456				struct pipe_buffer *buf)
 8457{
 8458	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 8459
 8460	if (refcount_read(&ref->refcount) > INT_MAX/2)
 8461		return false;
 8462
 8463	refcount_inc(&ref->refcount);
 8464	return true;
 8465}
 8466
 8467/* Pipe buffer operations for a buffer. */
 8468static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 8469	.release		= buffer_pipe_buf_release,
 8470	.get			= buffer_pipe_buf_get,
 8471};
 8472
 8473/*
 8474 * Callback from splice_to_pipe(), if we need to release some pages
  8475 * at the end of the spd in case we errored out in filling the pipe.
 8476 */
 8477static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 8478{
 8479	struct buffer_ref *ref =
 8480		(struct buffer_ref *)spd->partial[i].private;
 8481
 8482	buffer_ref_release(ref);
 8483	spd->partial[i].private = 0;
 8484}
 8485
 8486static ssize_t
 8487tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 8488			    struct pipe_inode_info *pipe, size_t len,
 8489			    unsigned int flags)
 8490{
 8491	struct ftrace_buffer_info *info = file->private_data;
 8492	struct trace_iterator *iter = &info->iter;
 8493	struct partial_page partial_def[PIPE_DEF_BUFFERS];
 8494	struct page *pages_def[PIPE_DEF_BUFFERS];
 8495	struct splice_pipe_desc spd = {
 8496		.pages		= pages_def,
 8497		.partial	= partial_def,
 8498		.nr_pages_max	= PIPE_DEF_BUFFERS,
 8499		.ops		= &buffer_pipe_buf_ops,
 8500		.spd_release	= buffer_spd_release,
 8501	};
 8502	struct buffer_ref *ref;
 8503	int page_size;
 8504	int entries, i;
 8505	ssize_t ret = 0;
 8506
 8507#ifdef CONFIG_TRACER_MAX_TRACE
 8508	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
 8509		return -EBUSY;
 8510#endif
 8511
 8512	page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer);
 8513	if (*ppos & (page_size - 1))
 8514		return -EINVAL;
 8515
 8516	if (len & (page_size - 1)) {
 8517		if (len < page_size)
 8518			return -EINVAL;
 8519		len &= (~(page_size - 1));
 8520	}
 8521
 8522	if (splice_grow_spd(pipe, &spd))
 8523		return -ENOMEM;
 8524
 8525 again:
 8526	trace_access_lock(iter->cpu_file);
 8527	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
 8528
 8529	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) {
 8530		struct page *page;
 8531		int r;
 8532
 8533		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
 8534		if (!ref) {
 8535			ret = -ENOMEM;
 8536			break;
 8537		}
 8538
 8539		refcount_set(&ref->refcount, 1);
 8540		ref->buffer = iter->array_buffer->buffer;
 8541		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
 8542		if (IS_ERR(ref->page)) {
 8543			ret = PTR_ERR(ref->page);
 8544			ref->page = NULL;
 8545			kfree(ref);
 8546			break;
 8547		}
 8548		ref->cpu = iter->cpu_file;
 8549
 8550		r = ring_buffer_read_page(ref->buffer, ref->page,
 8551					  len, iter->cpu_file, 1);
 8552		if (r < 0) {
 8553			ring_buffer_free_read_page(ref->buffer, ref->cpu,
 8554						   ref->page);
 8555			kfree(ref);
 8556			break;
 8557		}
 8558
 8559		page = virt_to_page(ring_buffer_read_page_data(ref->page));
 8560
 8561		spd.pages[i] = page;
 8562		spd.partial[i].len = page_size;
 8563		spd.partial[i].offset = 0;
 8564		spd.partial[i].private = (unsigned long)ref;
 8565		spd.nr_pages++;
 8566		*ppos += page_size;
 8567
 8568		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
 8569	}
 8570
 8571	trace_access_unlock(iter->cpu_file);
 8572	spd.nr_pages = i;
 8573
 8574	/* did we read anything? */
 8575	if (!spd.nr_pages) {
 8576		long wait_index;
 8577
 8578		if (ret)
 8579			goto out;
 8580
 8581		ret = -EAGAIN;
 8582		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
 8583			goto out;
 8584
 8585		wait_index = READ_ONCE(iter->wait_index);
 8586
 8587		ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
 8588		if (ret)
 8589			goto out;
 8590
 8591		/* No need to wait after waking up when tracing is off */
 8592		if (!tracer_tracing_is_on(iter->tr))
 8593			goto out;
 8594
 8595		/* Make sure we see the new wait_index */
 8596		smp_rmb();
 8597		if (wait_index != iter->wait_index)
 8598			goto out;
 8599
 8600		goto again;
 8601	}
 8602
 8603	ret = splice_to_pipe(pipe, &spd);
 8604out:
 8605	splice_shrink_spd(&spd);
 8606
 8607	return ret;
 8608}
 8609
 8610/* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
 8611static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 8612{
 8613	struct ftrace_buffer_info *info = file->private_data;
 8614	struct trace_iterator *iter = &info->iter;
 8615
 8616	if (cmd)
 8617		return -ENOIOCTLCMD;
 8618
 8619	mutex_lock(&trace_types_lock);
 8620
 8621	iter->wait_index++;
 8622	/* Make sure the waiters see the new wait_index */
 8623	smp_wmb();
 8624
 8625	ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
 8626
 8627	mutex_unlock(&trace_types_lock);
 8628	return 0;
 8629}
 8630
 8631static const struct file_operations tracing_buffers_fops = {
 8632	.open		= tracing_buffers_open,
 8633	.read		= tracing_buffers_read,
 8634	.poll		= tracing_buffers_poll,
 8635	.release	= tracing_buffers_release,
 8636	.flush		= tracing_buffers_flush,
 8637	.splice_read	= tracing_buffers_splice_read,
 8638	.unlocked_ioctl = tracing_buffers_ioctl,
 8639	.llseek		= no_llseek,
 8640};
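
/*
 * These operations back the per-cpu trace_pipe_raw files, which hand
 * out ring-buffer sub-buffers in binary form, e.g.
 *
 *	dd if=/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw of=cpu0.bin
 */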
 8641
 8642static ssize_t
 8643tracing_stats_read(struct file *filp, char __user *ubuf,
 8644		   size_t count, loff_t *ppos)
 8645{
 8646	struct inode *inode = file_inode(filp);
 8647	struct trace_array *tr = inode->i_private;
 8648	struct array_buffer *trace_buf = &tr->array_buffer;
 8649	int cpu = tracing_get_cpu(inode);
 8650	struct trace_seq *s;
 8651	unsigned long cnt;
 8652	unsigned long long t;
 8653	unsigned long usec_rem;
 8654
 8655	s = kmalloc(sizeof(*s), GFP_KERNEL);
 8656	if (!s)
 8657		return -ENOMEM;
 8658
 8659	trace_seq_init(s);
 8660
 8661	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
 8662	trace_seq_printf(s, "entries: %ld\n", cnt);
 8663
 8664	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
 8665	trace_seq_printf(s, "overrun: %ld\n", cnt);
 8666
 8667	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
 8668	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
 8669
 8670	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
 8671	trace_seq_printf(s, "bytes: %ld\n", cnt);
 8672
 8673	if (trace_clocks[tr->clock_id].in_ns) {
 8674		/* local or global for trace_clock */
 8675		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
 8676		usec_rem = do_div(t, USEC_PER_SEC);
 8677		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
 8678								t, usec_rem);
 8679
 8680		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
 8681		usec_rem = do_div(t, USEC_PER_SEC);
 8682		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
 8683	} else {
 8684		/* counter or tsc mode for trace_clock */
 8685		trace_seq_printf(s, "oldest event ts: %llu\n",
 8686				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
 8687
 8688		trace_seq_printf(s, "now ts: %llu\n",
 8689				ring_buffer_time_stamp(trace_buf->buffer));
 8690	}
 8691
 8692	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
 8693	trace_seq_printf(s, "dropped events: %ld\n", cnt);
 8694
 8695	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
 8696	trace_seq_printf(s, "read events: %ld\n", cnt);
 8697
 8698	count = simple_read_from_buffer(ubuf, count, ppos,
 8699					s->buffer, trace_seq_used(s));
 8700
 8701	kfree(s);
 8702
 8703	return count;
 8704}
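
/*
 * Example of the per_cpu/cpuN/stats output built above (the values are
 * illustrative):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 5216
 *	oldest event ts:  1983.932456
 *	now ts:  1984.823262
 *	dropped events: 0
 *	read events: 129
 */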
 8705
 8706static const struct file_operations tracing_stats_fops = {
 8707	.open		= tracing_open_generic_tr,
 8708	.read		= tracing_stats_read,
 8709	.llseek		= generic_file_llseek,
 8710	.release	= tracing_release_generic_tr,
 8711};
 8712
 8713#ifdef CONFIG_DYNAMIC_FTRACE
 8714
 8715static ssize_t
 8716tracing_read_dyn_info(struct file *filp, char __user *ubuf,
 8717		  size_t cnt, loff_t *ppos)
 8718{
 8719	ssize_t ret;
 8720	char *buf;
 8721	int r;
 8722
 8723	/* 256 should be plenty to hold the amount needed */
 8724	buf = kmalloc(256, GFP_KERNEL);
 8725	if (!buf)
 8726		return -ENOMEM;
 8727
 8728	r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
 8729		      ftrace_update_tot_cnt,
 8730		      ftrace_number_of_pages,
 8731		      ftrace_number_of_groups);
 8732
 8733	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 8734	kfree(buf);
 8735	return ret;
 8736}
 8737
 8738static const struct file_operations tracing_dyn_info_fops = {
 8739	.open		= tracing_open_generic,
 8740	.read		= tracing_read_dyn_info,
 8741	.llseek		= generic_file_llseek,
 8742};
 8743#endif /* CONFIG_DYNAMIC_FTRACE */
 8744
 8745#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
 8746static void
 8747ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
 8748		struct trace_array *tr, struct ftrace_probe_ops *ops,
 8749		void *data)
 8750{
 8751	tracing_snapshot_instance(tr);
 8752}
 8753
 8754static void
 8755ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
 8756		      struct trace_array *tr, struct ftrace_probe_ops *ops,
 8757		      void *data)
 8758{
 8759	struct ftrace_func_mapper *mapper = data;
 8760	long *count = NULL;
 8761
 8762	if (mapper)
 8763		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 8764
 8765	if (count) {
 8766
 8767		if (*count <= 0)
 8768			return;
 8769
 8770		(*count)--;
 8771	}
 8772
 8773	tracing_snapshot_instance(tr);
 8774}
 8775
 8776static int
 8777ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
 8778		      struct ftrace_probe_ops *ops, void *data)
 8779{
 8780	struct ftrace_func_mapper *mapper = data;
 8781	long *count = NULL;
 8782
 8783	seq_printf(m, "%ps:", (void *)ip);
 8784
 8785	seq_puts(m, "snapshot");
 8786
 8787	if (mapper)
 8788		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 8789
 8790	if (count)
 8791		seq_printf(m, ":count=%ld\n", *count);
 8792	else
 8793		seq_puts(m, ":unlimited\n");
 8794
 8795	return 0;
 8796}
 8797
 8798static int
 8799ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
 8800		     unsigned long ip, void *init_data, void **data)
 8801{
 8802	struct ftrace_func_mapper *mapper = *data;
 8803
 8804	if (!mapper) {
 8805		mapper = allocate_ftrace_func_mapper();
 8806		if (!mapper)
 8807			return -ENOMEM;
 8808		*data = mapper;
 8809	}
 8810
 8811	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
 8812}
 8813
 8814static void
 8815ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
 8816		     unsigned long ip, void *data)
 8817{
 8818	struct ftrace_func_mapper *mapper = data;
 8819
 8820	if (!ip) {
 8821		if (!mapper)
 8822			return;
 8823		free_ftrace_func_mapper(mapper, NULL);
 8824		return;
 8825	}
 8826
 8827	ftrace_func_mapper_remove_ip(mapper, ip);
 8828}
 8829
 8830static struct ftrace_probe_ops snapshot_probe_ops = {
 8831	.func			= ftrace_snapshot,
 8832	.print			= ftrace_snapshot_print,
 8833};
 8834
 8835static struct ftrace_probe_ops snapshot_count_probe_ops = {
 8836	.func			= ftrace_count_snapshot,
 8837	.print			= ftrace_snapshot_print,
 8838	.init			= ftrace_snapshot_init,
 8839	.free			= ftrace_snapshot_free,
 8840};
 8841
 8842static int
 8843ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
 8844			       char *glob, char *cmd, char *param, int enable)
 8845{
 8846	struct ftrace_probe_ops *ops;
 8847	void *count = (void *)-1;
 8848	char *number;
 8849	int ret;
 8850
 8851	if (!tr)
 8852		return -ENODEV;
 8853
 8854	/* hash funcs only work with set_ftrace_filter */
 8855	if (!enable)
 8856		return -EINVAL;
 8857
 8858	ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
 8859
 8860	if (glob[0] == '!')
 8861		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
 8862
 8863	if (!param)
 8864		goto out_reg;
 8865
 8866	number = strsep(&param, ":");
 8867
 8868	if (!strlen(number))
 8869		goto out_reg;
 8870
 8871	/*
 8872	 * We use the callback data field (which is a pointer)
 8873	 * as our counter.
 8874	 */
 8875	ret = kstrtoul(number, 0, (unsigned long *)&count);
 8876	if (ret)
 8877		return ret;
 8878
 8879 out_reg:
 8880	ret = tracing_alloc_snapshot_instance(tr);
 8881	if (ret < 0)
 8882		goto out;
 8883
 8884	ret = register_ftrace_function_probe(glob, tr, ops, count);
 8885
 8886 out:
 8887	return ret < 0 ? ret : 0;
 8888}
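
/*
 * Example of the "snapshot" function command parsed above, written to
 * set_ftrace_filter:
 *
 *	echo 'schedule:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *	echo 'schedule:snapshot:3' > /sys/kernel/tracing/set_ftrace_filter
 *	echo '!schedule:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *
 * The optional count limits how many snapshots are taken, and the '!'
 * prefix removes the probe, matching the glob[0] == '!' case above.
 */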
 8889
 8890static struct ftrace_func_command ftrace_snapshot_cmd = {
 8891	.name			= "snapshot",
 8892	.func			= ftrace_trace_snapshot_callback,
 8893};
 8894
 8895static __init int register_snapshot_cmd(void)
 8896{
 8897	return register_ftrace_command(&ftrace_snapshot_cmd);
 8898}
 8899#else
 8900static inline __init int register_snapshot_cmd(void) { return 0; }
 8901#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
 8902
 8903static struct dentry *tracing_get_dentry(struct trace_array *tr)
 8904{
 8905	if (WARN_ON(!tr->dir))
 8906		return ERR_PTR(-ENODEV);
 8907
 8908	/* Top directory uses NULL as the parent */
 8909	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 8910		return NULL;
 8911
 8912	/* All sub buffers have a descriptor */
 8913	return tr->dir;
 8914}
 8915
 8916static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 8917{
 8918	struct dentry *d_tracer;
 8919
 8920	if (tr->percpu_dir)
 8921		return tr->percpu_dir;
 8922
 8923	d_tracer = tracing_get_dentry(tr);
 8924	if (IS_ERR(d_tracer))
 8925		return NULL;
 8926
 8927	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
 8928
 8929	MEM_FAIL(!tr->percpu_dir,
 8930		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
 8931
 8932	return tr->percpu_dir;
 8933}
 8934
 8935static struct dentry *
 8936trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
 8937		      void *data, long cpu, const struct file_operations *fops)
 8938{
 8939	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
 8940
 8941	if (ret) /* See tracing_get_cpu() */
 8942		d_inode(ret)->i_cdev = (void *)(cpu + 1);
 8943	return ret;
 8944}
 8945
 8946static void
 8947tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
 8948{
 8949	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
 8950	struct dentry *d_cpu;
 8951	char cpu_dir[30]; /* 30 characters should be more than enough */
 8952
 8953	if (!d_percpu)
 8954		return;
 8955
 8956	snprintf(cpu_dir, 30, "cpu%ld", cpu);
 8957	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
 8958	if (!d_cpu) {
 8959		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
 8960		return;
 8961	}
 8962
 8963	/* per cpu trace_pipe */
 8964	trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
 8965				tr, cpu, &tracing_pipe_fops);
 8966
 8967	/* per cpu trace */
 8968	trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
 8969				tr, cpu, &tracing_fops);
 8970
 8971	trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
 8972				tr, cpu, &tracing_buffers_fops);
 8973
 8974	trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
 8975				tr, cpu, &tracing_stats_fops);
 8976
 8977	trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
 8978				tr, cpu, &tracing_entries_fops);
 8979
 8980#ifdef CONFIG_TRACER_SNAPSHOT
 8981	trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
 8982				tr, cpu, &snapshot_fops);
 8983
 8984	trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
 8985				tr, cpu, &snapshot_raw_fops);
 8986#endif
 8987}
 8988
 8989#ifdef CONFIG_FTRACE_SELFTEST
 8990/* Let selftest have access to static functions in this file */
 8991#include "trace_selftest.c"
 8992#endif
 8993
 8994static ssize_t
 8995trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
 8996			loff_t *ppos)
 8997{
 8998	struct trace_option_dentry *topt = filp->private_data;
 8999	char *buf;
 9000
 9001	if (topt->flags->val & topt->opt->bit)
 9002		buf = "1\n";
 9003	else
 9004		buf = "0\n";
 9005
 9006	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
 9007}
 9008
 9009static ssize_t
 9010trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
 9011			 loff_t *ppos)
 9012{
 9013	struct trace_option_dentry *topt = filp->private_data;
 9014	unsigned long val;
 9015	int ret;
 9016
 9017	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 9018	if (ret)
 9019		return ret;
 9020
 9021	if (val != 0 && val != 1)
 9022		return -EINVAL;
 9023
 9024	if (!!(topt->flags->val & topt->opt->bit) != val) {
 9025		mutex_lock(&trace_types_lock);
 9026		ret = __set_tracer_option(topt->tr, topt->flags,
 9027					  topt->opt, !val);
 9028		mutex_unlock(&trace_types_lock);
 9029		if (ret)
 9030			return ret;
 9031	}
 9032
 9033	*ppos += cnt;
 9034
 9035	return cnt;
 9036}
 9037
 9038static int tracing_open_options(struct inode *inode, struct file *filp)
 9039{
 9040	struct trace_option_dentry *topt = inode->i_private;
 9041	int ret;
 9042
 9043	ret = tracing_check_open_get_tr(topt->tr);
 9044	if (ret)
 9045		return ret;
 9046
 9047	filp->private_data = inode->i_private;
 9048	return 0;
 9049}
 9050
 9051static int tracing_release_options(struct inode *inode, struct file *file)
 9052{
 9053	struct trace_option_dentry *topt = file->private_data;
 9054
 9055	trace_array_put(topt->tr);
 9056	return 0;
 9057}
 9058
 9059static const struct file_operations trace_options_fops = {
 9060	.open = tracing_open_options,
 9061	.read = trace_options_read,
 9062	.write = trace_options_write,
 9063	.llseek	= generic_file_llseek,
 9064	.release = tracing_release_options,
 9065};
 9066
 9067/*
 9068 * In order to pass in both the trace_array descriptor as well as the index
 9069 * to the flag that the trace option file represents, the trace_array
 9070 * has a character array of trace_flags_index[], which holds the index
 9071 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 9072 * The address of this character array is passed to the flag option file
 9073 * read/write callbacks.
 9074 *
 9075 * In order to extract both the index and the trace_array descriptor,
 9076 * get_tr_index() uses the following algorithm.
 9077 *
 9078 *   idx = *ptr;
 9079 *
 9080 * As the pointer itself contains the address of the index (remember
 9081 * index[1] == 1).
 9082 *
 9083 * Then to get the trace_array descriptor, by subtracting that index
 9084 * from the ptr, we get to the start of the index itself.
 9085 *
 9086 *   ptr - idx == &index[0]
 9087 *
 9088 * Then a simple container_of() from that pointer gets us to the
 9089 * trace_array descriptor.
 9090 */
 9091static void get_tr_index(void *data, struct trace_array **ptr,
 9092			 unsigned int *pindex)
 9093{
 9094	*pindex = *(unsigned char *)data;
 9095
 9096	*ptr = container_of(data - *pindex, struct trace_array,
 9097			    trace_flags_index);
 9098}
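
/*
 * Worked example of the scheme described above: if data points at
 * tr->trace_flags_index[3], then *(unsigned char *)data == 3 and
 * data - 3 == &tr->trace_flags_index[0], from which container_of()
 * recovers the enclosing trace_array.
 */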
 9099
 9100static ssize_t
 9101trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
 9102			loff_t *ppos)
 9103{
 9104	void *tr_index = filp->private_data;
 9105	struct trace_array *tr;
 9106	unsigned int index;
 9107	char *buf;
 9108
 9109	get_tr_index(tr_index, &tr, &index);
 9110
 9111	if (tr->trace_flags & (1 << index))
 9112		buf = "1\n";
 9113	else
 9114		buf = "0\n";
 9115
 9116	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
 9117}
 9118
 9119static ssize_t
 9120trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 9121			 loff_t *ppos)
 9122{
 9123	void *tr_index = filp->private_data;
 9124	struct trace_array *tr;
 9125	unsigned int index;
 9126	unsigned long val;
 9127	int ret;
 9128
 9129	get_tr_index(tr_index, &tr, &index);
 9130
 9131	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 9132	if (ret)
 9133		return ret;
 9134
 9135	if (val != 0 && val != 1)
 9136		return -EINVAL;
 9137
 9138	mutex_lock(&event_mutex);
 9139	mutex_lock(&trace_types_lock);
 9140	ret = set_tracer_flag(tr, 1 << index, val);
 9141	mutex_unlock(&trace_types_lock);
 9142	mutex_unlock(&event_mutex);
 9143
 9144	if (ret < 0)
 9145		return ret;
 9146
 9147	*ppos += cnt;
 9148
 9149	return cnt;
 9150}
 9151
 9152static const struct file_operations trace_options_core_fops = {
 9153	.open = tracing_open_generic,
 9154	.read = trace_options_core_read,
 9155	.write = trace_options_core_write,
 9156	.llseek = generic_file_llseek,
 9157};
 9158
 9159struct dentry *trace_create_file(const char *name,
 9160				 umode_t mode,
 9161				 struct dentry *parent,
 9162				 void *data,
 9163				 const struct file_operations *fops)
 9164{
 9165	struct dentry *ret;
 9166
 9167	ret = tracefs_create_file(name, mode, parent, data, fops);
 9168	if (!ret)
 9169		pr_warn("Could not create tracefs '%s' entry\n", name);
 9170
 9171	return ret;
 9172}
 9173
 9174
 9175static struct dentry *trace_options_init_dentry(struct trace_array *tr)
 9176{
 9177	struct dentry *d_tracer;
 9178
 9179	if (tr->options)
 9180		return tr->options;
 9181
 9182	d_tracer = tracing_get_dentry(tr);
 9183	if (IS_ERR(d_tracer))
 9184		return NULL;
 9185
 9186	tr->options = tracefs_create_dir("options", d_tracer);
 9187	if (!tr->options) {
 9188		pr_warn("Could not create tracefs directory 'options'\n");
 9189		return NULL;
 9190	}
 9191
 9192	return tr->options;
 9193}
 9194
 9195static void
 9196create_trace_option_file(struct trace_array *tr,
 9197			 struct trace_option_dentry *topt,
 9198			 struct tracer_flags *flags,
 9199			 struct tracer_opt *opt)
 9200{
 9201	struct dentry *t_options;
 9202
 9203	t_options = trace_options_init_dentry(tr);
 9204	if (!t_options)
 9205		return;
 9206
 9207	topt->flags = flags;
 9208	topt->opt = opt;
 9209	topt->tr = tr;
 9210
 9211	topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
 9212					t_options, topt, &trace_options_fops);
 9213
 9214}
 9215
 9216static void
 9217create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
 9218{
 9219	struct trace_option_dentry *topts;
 9220	struct trace_options *tr_topts;
 9221	struct tracer_flags *flags;
 9222	struct tracer_opt *opts;
 9223	int cnt;
 9224	int i;
 9225
 9226	if (!tracer)
 9227		return;
 9228
 9229	flags = tracer->flags;
 9230
 9231	if (!flags || !flags->opts)
 9232		return;
 9233
 9234	/*
 9235	 * If this is an instance, only create flags for tracers
 9236	 * the instance may have.
 9237	 */
 9238	if (!trace_ok_for_array(tracer, tr))
 9239		return;
 9240
 9241	for (i = 0; i < tr->nr_topts; i++) {
  9242		/* Make sure there are no duplicate flags. */
 9243		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
 9244			return;
 9245	}
 9246
 9247	opts = flags->opts;
 9248
 9249	for (cnt = 0; opts[cnt].name; cnt++)
 9250		;
 9251
 9252	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
 9253	if (!topts)
 9254		return;
 9255
 9256	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
 9257			    GFP_KERNEL);
 9258	if (!tr_topts) {
 9259		kfree(topts);
 9260		return;
 9261	}
 9262
 9263	tr->topts = tr_topts;
 9264	tr->topts[tr->nr_topts].tracer = tracer;
 9265	tr->topts[tr->nr_topts].topts = topts;
 9266	tr->nr_topts++;
 9267
 9268	for (cnt = 0; opts[cnt].name; cnt++) {
 9269		create_trace_option_file(tr, &topts[cnt], flags,
 9270					 &opts[cnt]);
 9271		MEM_FAIL(topts[cnt].entry == NULL,
 9272			  "Failed to create trace option: %s",
 9273			  opts[cnt].name);
 9274	}
 9275}
 9276
 9277static struct dentry *
 9278create_trace_option_core_file(struct trace_array *tr,
 9279			      const char *option, long index)
 9280{
 9281	struct dentry *t_options;
 9282
 9283	t_options = trace_options_init_dentry(tr);
 9284	if (!t_options)
 9285		return NULL;
 9286
 9287	return trace_create_file(option, TRACE_MODE_WRITE, t_options,
 9288				 (void *)&tr->trace_flags_index[index],
 9289				 &trace_options_core_fops);
 9290}
 9291
 9292static void create_trace_options_dir(struct trace_array *tr)
 9293{
 9294	struct dentry *t_options;
 9295	bool top_level = tr == &global_trace;
 9296	int i;
 9297
 9298	t_options = trace_options_init_dentry(tr);
 9299	if (!t_options)
 9300		return;
 9301
 9302	for (i = 0; trace_options[i]; i++) {
 9303		if (top_level ||
 9304		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
 9305			create_trace_option_core_file(tr, trace_options[i], i);
 9306	}
 9307}
 9308
 9309static ssize_t
 9310rb_simple_read(struct file *filp, char __user *ubuf,
 9311	       size_t cnt, loff_t *ppos)
 9312{
 9313	struct trace_array *tr = filp->private_data;
 9314	char buf[64];
 9315	int r;
 9316
 9317	r = tracer_tracing_is_on(tr);
 9318	r = sprintf(buf, "%d\n", r);
 9319
 9320	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 9321}
 9322
 9323static ssize_t
 9324rb_simple_write(struct file *filp, const char __user *ubuf,
 9325		size_t cnt, loff_t *ppos)
 9326{
 9327	struct trace_array *tr = filp->private_data;
 9328	struct trace_buffer *buffer = tr->array_buffer.buffer;
 9329	unsigned long val;
 9330	int ret;
 9331
 9332	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 9333	if (ret)
 9334		return ret;
 9335
 9336	if (buffer) {
 9337		mutex_lock(&trace_types_lock);
 9338		if (!!val == tracer_tracing_is_on(tr)) {
 9339			val = 0; /* do nothing */
 9340		} else if (val) {
 9341			tracer_tracing_on(tr);
 9342			if (tr->current_trace->start)
 9343				tr->current_trace->start(tr);
 9344		} else {
 9345			tracer_tracing_off(tr);
 9346			if (tr->current_trace->stop)
 9347				tr->current_trace->stop(tr);
 9348			/* Wake up any waiters */
 9349			ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
 9350		}
 9351		mutex_unlock(&trace_types_lock);
 9352	}
 9353
 9354	(*ppos)++;
 9355
 9356	return cnt;
 9357}
 9358
 9359static const struct file_operations rb_simple_fops = {
 9360	.open		= tracing_open_generic_tr,
 9361	.read		= rb_simple_read,
 9362	.write		= rb_simple_write,
 9363	.release	= tracing_release_generic_tr,
 9364	.llseek		= default_llseek,
 9365};
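
/*
 * These handlers back the "tracing_on" file, e.g.
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on
 *	echo 1 > /sys/kernel/tracing/tracing_on
 */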
 9366
 9367static ssize_t
 9368buffer_percent_read(struct file *filp, char __user *ubuf,
 9369		    size_t cnt, loff_t *ppos)
 9370{
 9371	struct trace_array *tr = filp->private_data;
 9372	char buf[64];
 9373	int r;
 9374
 9375	r = tr->buffer_percent;
 9376	r = sprintf(buf, "%d\n", r);
 9377
 9378	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 9379}
 9380
 9381static ssize_t
 9382buffer_percent_write(struct file *filp, const char __user *ubuf,
 9383		     size_t cnt, loff_t *ppos)
 9384{
 9385	struct trace_array *tr = filp->private_data;
 9386	unsigned long val;
 9387	int ret;
 9388
 9389	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 9390	if (ret)
 9391		return ret;
 9392
 9393	if (val > 100)
 9394		return -EINVAL;
 9395
 9396	tr->buffer_percent = val;
 9397
 9398	(*ppos)++;
 9399
 9400	return cnt;
 9401}
 9402
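/*
 * These ops back the per-instance "buffer_percent" file.  The value
 * (0-100) is the watermark of how full the ring buffer must be before
 * blocked readers are woken up: 0 wakes on any data, 100 waits until
 * the buffer is full.  The default of 50 is set in
 * init_tracer_tracefs() below.
 */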
 9403static const struct file_operations buffer_percent_fops = {
 9404	.open		= tracing_open_generic_tr,
 9405	.read		= buffer_percent_read,
 9406	.write		= buffer_percent_write,
 9407	.release	= tracing_release_generic_tr,
 9408	.llseek		= default_llseek,
 9409};
 9410
 9411static ssize_t
 9412buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 9413{
 9414	struct trace_array *tr = filp->private_data;
 9415	size_t size;
 9416	char buf[64];
 9417	int order;
 9418	int r;
 9419
 9420	order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
 9421	size = (PAGE_SIZE << order) / 1024;
 9422
 9423	r = sprintf(buf, "%zd\n", size);
 9424
 9425	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 9426}
 9427
 9428static ssize_t
 9429buffer_subbuf_size_write(struct file *filp, const char __user *ubuf,
 9430			 size_t cnt, loff_t *ppos)
 9431{
 9432	struct trace_array *tr = filp->private_data;
 9433	unsigned long val;
 9434	int old_order;
 9435	int order;
 9436	int pages;
 9437	int ret;
 9438
 9439	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 9440	if (ret)
 9441		return ret;
 9442
 9443	val *= 1024; /* value passed in is in KB */
 9444
 9445	pages = DIV_ROUND_UP(val, PAGE_SIZE);
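	/*
	 * fls(pages - 1) gives the smallest order for which (1 << order)
	 * pages cover the request, e.g. a 12 KB request with 4 KB pages
	 * needs 3 pages and therefore order 2 (4 pages, 16 KB).
	 */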
 9446	order = fls(pages - 1);
 9447
 9448	/* limit between 1 and 128 system pages */
 9449	if (order < 0 || order > 7)
 9450		return -EINVAL;
 9451
 9452	/* Do not allow tracing while changing the order of the ring buffer */
 9453	tracing_stop_tr(tr);
 9454
 9455	old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer);
 9456	if (old_order == order)
 9457		goto out;
 9458
 9459	ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order);
 9460	if (ret)
 9461		goto out;
 9462
 9463#ifdef CONFIG_TRACER_MAX_TRACE
 9464
 9465	if (!tr->allocated_snapshot)
 9466		goto out_max;
 9467
 9468	ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order);
 9469	if (ret) {
 9470		/* Put back the old order */
 9471		cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order);
 9472		if (WARN_ON_ONCE(cnt)) {
9473			/*
9474			 * AARGH! We are left with different orders!
9475			 * The max buffer is our "snapshot" buffer.
9476			 * When a tracer needs a snapshot (one of the
9477			 * latency tracers), it swaps the max buffer
9478			 * with the saved snapshot. We succeeded in
9479			 * updating the order of the main buffer, but failed
9480			 * to update the order of the max buffer. And when we
9481			 * tried to reset the main buffer to its original
9482			 * order, that failed too. This is very unlikely to
9483			 * happen, but if it does, warn and kill all
9484			 * tracing.
9485			 */
 9486			tracing_disabled = 1;
 9487		}
 9488		goto out;
 9489	}
 9490 out_max:
 9491#endif
 9492	(*ppos)++;
 9493 out:
 9494	if (ret)
 9495		cnt = ret;
 9496	tracing_start_tr(tr);
 9497	return cnt;
 9498}
 9499
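/*
 * These ops back the per-instance "buffer_subbuf_size_kb" file.
 * Illustrative use, assuming 4 KB pages:
 *
 *	echo 64 > buffer_subbuf_size_kb
 *
 * requests 64 KB sub-buffers, i.e. order 4 (16 pages).
 */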
 9500static const struct file_operations buffer_subbuf_size_fops = {
 9501	.open		= tracing_open_generic_tr,
 9502	.read		= buffer_subbuf_size_read,
 9503	.write		= buffer_subbuf_size_write,
 9504	.release	= tracing_release_generic_tr,
 9505	.llseek		= default_llseek,
 9506};
 9507
 9508static struct dentry *trace_instance_dir;
 9509
 9510static void
 9511init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 9512
 9513static int
 9514allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
 9515{
 9516	enum ring_buffer_flags rb_flags;
 9517
 9518	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
 9519
 9520	buf->tr = tr;
 9521
 9522	buf->buffer = ring_buffer_alloc(size, rb_flags);
 9523	if (!buf->buffer)
 9524		return -ENOMEM;
 9525
 9526	buf->data = alloc_percpu(struct trace_array_cpu);
 9527	if (!buf->data) {
 9528		ring_buffer_free(buf->buffer);
 9529		buf->buffer = NULL;
 9530		return -ENOMEM;
 9531	}
 9532
 9533	/* Allocate the first page for all buffers */
 9534	set_buffer_entries(&tr->array_buffer,
 9535			   ring_buffer_size(tr->array_buffer.buffer, 0));
 9536
 9537	return 0;
 9538}
 9539
 9540static void free_trace_buffer(struct array_buffer *buf)
 9541{
 9542	if (buf->buffer) {
 9543		ring_buffer_free(buf->buffer);
 9544		buf->buffer = NULL;
 9545		free_percpu(buf->data);
 9546		buf->data = NULL;
 9547	}
 9548}
 9549
 9550static int allocate_trace_buffers(struct trace_array *tr, int size)
 9551{
 9552	int ret;
 9553
 9554	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
 9555	if (ret)
 9556		return ret;
 9557
 9558#ifdef CONFIG_TRACER_MAX_TRACE
 9559	ret = allocate_trace_buffer(tr, &tr->max_buffer,
 9560				    allocate_snapshot ? size : 1);
 9561	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
 9562		free_trace_buffer(&tr->array_buffer);
 9563		return -ENOMEM;
 9564	}
 9565	tr->allocated_snapshot = allocate_snapshot;
 9566
 9567	allocate_snapshot = false;
 9568#endif
 9569
 9570	return 0;
 9571}
 9572
 9573static void free_trace_buffers(struct trace_array *tr)
 9574{
 9575	if (!tr)
 9576		return;
 9577
 9578	free_trace_buffer(&tr->array_buffer);
 9579
 9580#ifdef CONFIG_TRACER_MAX_TRACE
 9581	free_trace_buffer(&tr->max_buffer);
 9582#endif
 9583}
 9584
 9585static void init_trace_flags_index(struct trace_array *tr)
 9586{
 9587	int i;
 9588
 9589	/* Used by the trace options files */
 9590	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
 9591		tr->trace_flags_index[i] = i;
 9592}
 9593
 9594static void __update_tracer_options(struct trace_array *tr)
 9595{
 9596	struct tracer *t;
 9597
 9598	for (t = trace_types; t; t = t->next)
 9599		add_tracer_options(tr, t);
 9600}
 9601
 9602static void update_tracer_options(struct trace_array *tr)
 9603{
 9604	mutex_lock(&trace_types_lock);
 9605	tracer_options_updated = true;
 9606	__update_tracer_options(tr);
 9607	mutex_unlock(&trace_types_lock);
 9608}
 9609
 9610/* Must have trace_types_lock held */
 9611struct trace_array *trace_array_find(const char *instance)
 9612{
 9613	struct trace_array *tr, *found = NULL;
 9614
 9615	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 9616		if (tr->name && strcmp(tr->name, instance) == 0) {
 9617			found = tr;
 9618			break;
 9619		}
 9620	}
 9621
 9622	return found;
 9623}
 9624
 9625struct trace_array *trace_array_find_get(const char *instance)
 9626{
 9627	struct trace_array *tr;
 9628
 9629	mutex_lock(&trace_types_lock);
 9630	tr = trace_array_find(instance);
 9631	if (tr)
 9632		tr->ref++;
 9633	mutex_unlock(&trace_types_lock);
 9634
 9635	return tr;
 9636}
 9637
 9638static int trace_array_create_dir(struct trace_array *tr)
 9639{
 9640	int ret;
 9641
 9642	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
 9643	if (!tr->dir)
 9644		return -EINVAL;
 9645
 9646	ret = event_trace_add_tracer(tr->dir, tr);
 9647	if (ret) {
 9648		tracefs_remove(tr->dir);
 9649		return ret;
 9650	}
 9651
 9652	init_tracer_tracefs(tr, tr->dir);
 9653	__update_tracer_options(tr);
 9654
 9655	return ret;
 9656}
 9657
 9658static struct trace_array *
 9659trace_array_create_systems(const char *name, const char *systems)
 9660{
 9661	struct trace_array *tr;
 9662	int ret;
 9663
 9664	ret = -ENOMEM;
 9665	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
 9666	if (!tr)
 9667		return ERR_PTR(ret);
 9668
 9669	tr->name = kstrdup(name, GFP_KERNEL);
 9670	if (!tr->name)
 9671		goto out_free_tr;
 9672
 9673	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
 9674		goto out_free_tr;
 9675
 9676	if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
 9677		goto out_free_tr;
 9678
 9679	if (systems) {
 9680		tr->system_names = kstrdup_const(systems, GFP_KERNEL);
 9681		if (!tr->system_names)
 9682			goto out_free_tr;
 9683	}
 9684
 9685	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
 9686
 9687	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
 9688
 9689	raw_spin_lock_init(&tr->start_lock);
 9690
 9691	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 9692
 9693	tr->current_trace = &nop_trace;
 9694
 9695	INIT_LIST_HEAD(&tr->systems);
 9696	INIT_LIST_HEAD(&tr->events);
 9697	INIT_LIST_HEAD(&tr->hist_vars);
 9698	INIT_LIST_HEAD(&tr->err_log);
 9699
 9700	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
 9701		goto out_free_tr;
 9702
9703	/* The ring buffer is expanded by default */
 9704	trace_set_ring_buffer_expanded(tr);
 9705
 9706	if (ftrace_allocate_ftrace_ops(tr) < 0)
 9707		goto out_free_tr;
 9708
 9709	ftrace_init_trace_array(tr);
 9710
 9711	init_trace_flags_index(tr);
 9712
 9713	if (trace_instance_dir) {
 9714		ret = trace_array_create_dir(tr);
 9715		if (ret)
 9716			goto out_free_tr;
 9717	} else
 9718		__trace_early_add_events(tr);
 9719
 9720	list_add(&tr->list, &ftrace_trace_arrays);
 9721
 9722	tr->ref++;
 9723
 9724	return tr;
 9725
 9726 out_free_tr:
 9727	ftrace_free_ftrace_ops(tr);
 9728	free_trace_buffers(tr);
 9729	free_cpumask_var(tr->pipe_cpumask);
 9730	free_cpumask_var(tr->tracing_cpumask);
 9731	kfree_const(tr->system_names);
 9732	kfree(tr->name);
 9733	kfree(tr);
 9734
 9735	return ERR_PTR(ret);
 9736}
 9737
 9738static struct trace_array *trace_array_create(const char *name)
 9739{
 9740	return trace_array_create_systems(name, NULL);
 9741}
 9742
 9743static int instance_mkdir(const char *name)
 9744{
 9745	struct trace_array *tr;
 9746	int ret;
 9747
 9748	mutex_lock(&event_mutex);
 9749	mutex_lock(&trace_types_lock);
 9750
 9751	ret = -EEXIST;
 9752	if (trace_array_find(name))
 9753		goto out_unlock;
 9754
 9755	tr = trace_array_create(name);
 9756
 9757	ret = PTR_ERR_OR_ZERO(tr);
 9758
 9759out_unlock:
 9760	mutex_unlock(&trace_types_lock);
 9761	mutex_unlock(&event_mutex);
 9762	return ret;
 9763}
 9764
 9765/**
 9766 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
 9767 * @name: The name of the trace array to be looked up/created.
 9768 * @systems: A list of systems to create event directories for (NULL for all)
 9769 *
9770 * Returns a pointer to the trace array with the given name,
9771 * or NULL if it cannot be created.
 9772 *
 9773 * NOTE: This function increments the reference counter associated with the
 9774 * trace array returned. This makes sure it cannot be freed while in use.
 9775 * Use trace_array_put() once the trace array is no longer needed.
 9776 * If the trace_array is to be freed, trace_array_destroy() needs to
 9777 * be called after the trace_array_put(), or simply let user space delete
 9778 * it from the tracefs instances directory. But until the
 9779 * trace_array_put() is called, user space can not delete it.
 9780 *
 9781 */
 9782struct trace_array *trace_array_get_by_name(const char *name, const char *systems)
 9783{
 9784	struct trace_array *tr;
 9785
 9786	mutex_lock(&event_mutex);
 9787	mutex_lock(&trace_types_lock);
 9788
 9789	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 9790		if (tr->name && strcmp(tr->name, name) == 0)
 9791			goto out_unlock;
 9792	}
 9793
 9794	tr = trace_array_create_systems(name, systems);
 9795
 9796	if (IS_ERR(tr))
 9797		tr = NULL;
 9798out_unlock:
 9799	if (tr)
 9800		tr->ref++;
 9801
 9802	mutex_unlock(&trace_types_lock);
 9803	mutex_unlock(&event_mutex);
 9804	return tr;
 9805}
 9806EXPORT_SYMBOL_GPL(trace_array_get_by_name);
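/*
 * Illustrative sketch (not part of this file): a module can create and
 * later remove its own ring-buffer instance with the exported helpers
 * above, e.g.:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance", NULL);
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);
 *	trace_array_destroy(tr);
 *
 * As noted above, trace_array_destroy() may only be called once the
 * reference taken by trace_array_get_by_name() has been dropped.
 */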
 9807
 9808static int __remove_instance(struct trace_array *tr)
 9809{
 9810	int i;
 9811
 9812	/* Reference counter for a newly created trace array = 1. */
 9813	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
 9814		return -EBUSY;
 9815
 9816	list_del(&tr->list);
 9817
 9818	/* Disable all the flags that were enabled coming in */
 9819	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
 9820		if ((1 << i) & ZEROED_TRACE_FLAGS)
 9821			set_tracer_flag(tr, 1 << i, 0);
 9822	}
 9823
 9824	tracing_set_nop(tr);
 9825	clear_ftrace_function_probes(tr);
 9826	event_trace_del_tracer(tr);
 9827	ftrace_clear_pids(tr);
 9828	ftrace_destroy_function_files(tr);
 9829	tracefs_remove(tr->dir);
 9830	free_percpu(tr->last_func_repeats);
 9831	free_trace_buffers(tr);
 9832	clear_tracing_err_log(tr);
 9833
 9834	for (i = 0; i < tr->nr_topts; i++) {
 9835		kfree(tr->topts[i].topts);
 9836	}
 9837	kfree(tr->topts);
 9838
 9839	free_cpumask_var(tr->pipe_cpumask);
 9840	free_cpumask_var(tr->tracing_cpumask);
 9841	kfree_const(tr->system_names);
 9842	kfree(tr->name);
 9843	kfree(tr);
 9844
 9845	return 0;
 9846}
 9847
 9848int trace_array_destroy(struct trace_array *this_tr)
 9849{
 9850	struct trace_array *tr;
 9851	int ret;
 9852
 9853	if (!this_tr)
 9854		return -EINVAL;
 9855
 9856	mutex_lock(&event_mutex);
 9857	mutex_lock(&trace_types_lock);
 9858
 9859	ret = -ENODEV;
 9860
 9861	/* Making sure trace array exists before destroying it. */
 9862	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 9863		if (tr == this_tr) {
 9864			ret = __remove_instance(tr);
 9865			break;
 9866		}
 9867	}
 9868
 9869	mutex_unlock(&trace_types_lock);
 9870	mutex_unlock(&event_mutex);
 9871
 9872	return ret;
 9873}
 9874EXPORT_SYMBOL_GPL(trace_array_destroy);
 9875
 9876static int instance_rmdir(const char *name)
 9877{
 9878	struct trace_array *tr;
 9879	int ret;
 9880
 9881	mutex_lock(&event_mutex);
 9882	mutex_lock(&trace_types_lock);
 9883
 9884	ret = -ENODEV;
 9885	tr = trace_array_find(name);
 9886	if (tr)
 9887		ret = __remove_instance(tr);
 9888
 9889	mutex_unlock(&trace_types_lock);
 9890	mutex_unlock(&event_mutex);
 9891
 9892	return ret;
 9893}
 9894
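/*
 * create_trace_instances() registers the "instances" directory whose
 * mkdir/rmdir callbacks let user space create and remove ring-buffer
 * instances directly, e.g. (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	rmdir /sys/kernel/tracing/instances/foo
 */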
 9895static __init void create_trace_instances(struct dentry *d_tracer)
 9896{
 9897	struct trace_array *tr;
 9898
 9899	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
 9900							 instance_mkdir,
 9901							 instance_rmdir);
 9902	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
 9903		return;
 9904
 9905	mutex_lock(&event_mutex);
 9906	mutex_lock(&trace_types_lock);
 9907
 9908	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 9909		if (!tr->name)
 9910			continue;
 9911		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
 9912			     "Failed to create instance directory\n"))
 9913			break;
 9914	}
 9915
 9916	mutex_unlock(&trace_types_lock);
 9917	mutex_unlock(&event_mutex);
 9918}
 9919
 9920static void
 9921init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 9922{
 9923	int cpu;
 9924
 9925	trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
 9926			tr, &show_traces_fops);
 9927
 9928	trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
 9929			tr, &set_tracer_fops);
 9930
 9931	trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
 9932			  tr, &tracing_cpumask_fops);
 9933
 9934	trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
 9935			  tr, &tracing_iter_fops);
 9936
 9937	trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
 9938			  tr, &tracing_fops);
 9939
 9940	trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
 9941			  tr, &tracing_pipe_fops);
 9942
 9943	trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
 9944			  tr, &tracing_entries_fops);
 9945
 9946	trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
 9947			  tr, &tracing_total_entries_fops);
 9948
 9949	trace_create_file("free_buffer", 0200, d_tracer,
 9950			  tr, &tracing_free_buffer_fops);
 9951
 9952	trace_create_file("trace_marker", 0220, d_tracer,
 9953			  tr, &tracing_mark_fops);
 9954
 9955	tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
 9956
 9957	trace_create_file("trace_marker_raw", 0220, d_tracer,
 9958			  tr, &tracing_mark_raw_fops);
 9959
 9960	trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
 9961			  &trace_clock_fops);
 9962
 9963	trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
 9964			  tr, &rb_simple_fops);
 9965
 9966	trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
 9967			  &trace_time_stamp_mode_fops);
 9968
 9969	tr->buffer_percent = 50;
 9970
 9971	trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
 9972			tr, &buffer_percent_fops);
 9973
 9974	trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer,
 9975			  tr, &buffer_subbuf_size_fops);
 9976
 9977	create_trace_options_dir(tr);
 9978
 9979#ifdef CONFIG_TRACER_MAX_TRACE
 9980	trace_create_maxlat_file(tr, d_tracer);
 9981#endif
 9982
 9983	if (ftrace_create_function_files(tr, d_tracer))
 9984		MEM_FAIL(1, "Could not allocate function filter files");
 9985
 9986#ifdef CONFIG_TRACER_SNAPSHOT
 9987	trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
 9988			  tr, &snapshot_fops);
 9989#endif
 9990
 9991	trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
 9992			  tr, &tracing_err_log_fops);
 9993
 9994	for_each_tracing_cpu(cpu)
 9995		tracing_init_tracefs_percpu(tr, cpu);
 9996
 9997	ftrace_init_tracefs(tr, d_tracer);
 9998}
 9999
10000static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
10001{
10002	struct vfsmount *mnt;
10003	struct file_system_type *type;
10004
10005	/*
10006	 * To maintain backward compatibility for tools that mount
10007	 * debugfs to get to the tracing facility, tracefs is automatically
10008	 * mounted to the debugfs/tracing directory.
10009	 */
10010	type = get_fs_type("tracefs");
10011	if (!type)
10012		return NULL;
10013	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
10014	put_filesystem(type);
10015	if (IS_ERR(mnt))
10016		return NULL;
10017	mntget(mnt);
10018
10019	return mnt;
10020}
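/*
 * With this automount in place the same files are reachable both at
 * /sys/kernel/tracing (the native tracefs mount point) and, for older
 * tools, under /sys/kernel/debug/tracing, assuming debugfs is mounted
 * at its usual location.
 */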
10021
10022/**
10023 * tracing_init_dentry - initialize top level trace array
10024 *
10025 * This is called when creating files or directories in the tracing
10026 * directory. It is called via fs_initcall() by any of the boot up code
10027 * and expects to return the dentry of the top level tracing directory.
10028 */
10029int tracing_init_dentry(void)
10030{
10031	struct trace_array *tr = &global_trace;
10032
10033	if (security_locked_down(LOCKDOWN_TRACEFS)) {
10034		pr_warn("Tracing disabled due to lockdown\n");
10035		return -EPERM;
10036	}
10037
10038	/* The top level trace array uses NULL as parent */
10039	if (tr->dir)
10040		return 0;
10041
10042	if (WARN_ON(!tracefs_initialized()))
10043		return -ENODEV;
10044
10045	/*
10046	 * As there may still be users that expect the tracing
10047	 * files to exist in debugfs/tracing, we must automount
10048	 * the tracefs file system there, so older tools still
10049	 * work with the newer kernel.
10050	 */
10051	tr->dir = debugfs_create_automount("tracing", NULL,
10052					   trace_automount, NULL);
10053
10054	return 0;
10055}
10056
10057extern struct trace_eval_map *__start_ftrace_eval_maps[];
10058extern struct trace_eval_map *__stop_ftrace_eval_maps[];
10059
10060static struct workqueue_struct *eval_map_wq __initdata;
10061static struct work_struct eval_map_work __initdata;
10062static struct work_struct tracerfs_init_work __initdata;
10063
10064static void __init eval_map_work_func(struct work_struct *work)
10065{
10066	int len;
10067
10068	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
10069	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
10070}
10071
10072static int __init trace_eval_init(void)
10073{
10074	INIT_WORK(&eval_map_work, eval_map_work_func);
10075
10076	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
10077	if (!eval_map_wq) {
10078		pr_err("Unable to allocate eval_map_wq\n");
10079		/* Do work here */
10080		eval_map_work_func(&eval_map_work);
10081		return -ENOMEM;
10082	}
10083
10084	queue_work(eval_map_wq, &eval_map_work);
10085	return 0;
10086}
10087
10088subsys_initcall(trace_eval_init);
10089
10090static int __init trace_eval_sync(void)
10091{
10092	/* Make sure the eval map updates are finished */
10093	if (eval_map_wq)
10094		destroy_workqueue(eval_map_wq);
10095	return 0;
10096}
10097
10098late_initcall_sync(trace_eval_sync);
10099
10100
10101#ifdef CONFIG_MODULES
10102static void trace_module_add_evals(struct module *mod)
10103{
10104	if (!mod->num_trace_evals)
10105		return;
10106
10107	/*
10108	 * Modules with bad taint do not have events created, so do
10109	 * not bother with enums either.
10110	 */
10111	if (trace_module_has_bad_taint(mod))
10112		return;
10113
10114	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
10115}
10116
10117#ifdef CONFIG_TRACE_EVAL_MAP_FILE
10118static void trace_module_remove_evals(struct module *mod)
10119{
10120	union trace_eval_map_item *map;
10121	union trace_eval_map_item **last = &trace_eval_maps;
10122
10123	if (!mod->num_trace_evals)
10124		return;
10125
10126	mutex_lock(&trace_eval_mutex);
10127
10128	map = trace_eval_maps;
10129
10130	while (map) {
10131		if (map->head.mod == mod)
10132			break;
10133		map = trace_eval_jmp_to_tail(map);
10134		last = &map->tail.next;
10135		map = map->tail.next;
10136	}
10137	if (!map)
10138		goto out;
10139
10140	*last = trace_eval_jmp_to_tail(map)->tail.next;
10141	kfree(map);
10142 out:
10143	mutex_unlock(&trace_eval_mutex);
10144}
10145#else
10146static inline void trace_module_remove_evals(struct module *mod) { }
10147#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
10148
10149static int trace_module_notify(struct notifier_block *self,
10150			       unsigned long val, void *data)
10151{
10152	struct module *mod = data;
10153
10154	switch (val) {
10155	case MODULE_STATE_COMING:
10156		trace_module_add_evals(mod);
10157		break;
10158	case MODULE_STATE_GOING:
10159		trace_module_remove_evals(mod);
10160		break;
10161	}
10162
10163	return NOTIFY_OK;
10164}
10165
10166static struct notifier_block trace_module_nb = {
10167	.notifier_call = trace_module_notify,
10168	.priority = 0,
10169};
10170#endif /* CONFIG_MODULES */
10171
10172static __init void tracer_init_tracefs_work_func(struct work_struct *work)
10173{
10174
10175	event_trace_init();
10176
10177	init_tracer_tracefs(&global_trace, NULL);
10178	ftrace_init_tracefs_toplevel(&global_trace, NULL);
10179
10180	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
10181			&global_trace, &tracing_thresh_fops);
10182
10183	trace_create_file("README", TRACE_MODE_READ, NULL,
10184			NULL, &tracing_readme_fops);
10185
10186	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
10187			NULL, &tracing_saved_cmdlines_fops);
10188
10189	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
10190			  NULL, &tracing_saved_cmdlines_size_fops);
10191
10192	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
10193			NULL, &tracing_saved_tgids_fops);
10194
10195	trace_create_eval_file(NULL);
10196
10197#ifdef CONFIG_MODULES
10198	register_module_notifier(&trace_module_nb);
10199#endif
10200
10201#ifdef CONFIG_DYNAMIC_FTRACE
10202	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
10203			NULL, &tracing_dyn_info_fops);
10204#endif
10205
10206	create_trace_instances(NULL);
10207
10208	update_tracer_options(&global_trace);
10209}
10210
10211static __init int tracer_init_tracefs(void)
10212{
10213	int ret;
10214
10215	trace_access_lock_init();
10216
10217	ret = tracing_init_dentry();
10218	if (ret)
10219		return 0;
10220
10221	if (eval_map_wq) {
10222		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
10223		queue_work(eval_map_wq, &tracerfs_init_work);
10224	} else {
10225		tracer_init_tracefs_work_func(NULL);
10226	}
10227
10228	rv_init_interface();
10229
10230	return 0;
10231}
10232
10233fs_initcall(tracer_init_tracefs);
10234
10235static int trace_die_panic_handler(struct notifier_block *self,
10236				unsigned long ev, void *unused);
10237
10238static struct notifier_block trace_panic_notifier = {
10239	.notifier_call = trace_die_panic_handler,
10240	.priority = INT_MAX - 1,
10241};
10242
10243static struct notifier_block trace_die_notifier = {
10244	.notifier_call = trace_die_panic_handler,
10245	.priority = INT_MAX - 1,
10246};
10247
10248/*
10249 * The idea is to execute the following die/panic callback early, in order
10250 * to avoid showing irrelevant information in the trace (like other panic
10251 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
10252 * warnings get disabled (to prevent potential log flooding).
10253 */
10254static int trace_die_panic_handler(struct notifier_block *self,
10255				unsigned long ev, void *unused)
10256{
10257	if (!ftrace_dump_on_oops)
10258		return NOTIFY_DONE;
10259
10260	/* The die notifier requires DIE_OOPS to trigger */
10261	if (self == &trace_die_notifier && ev != DIE_OOPS)
10262		return NOTIFY_DONE;
10263
10264	ftrace_dump(ftrace_dump_on_oops);
10265
10266	return NOTIFY_DONE;
10267}
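/*
 * The dump is gated on ftrace_dump_on_oops, which can be enabled with
 * the "ftrace_dump_on_oops[=orig_cpu]" kernel command line option or,
 * for example, at run time via the sysctl:
 *
 *	sysctl kernel.ftrace_dump_on_oops=1
 */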
10268
10269/*
10270 * printk is limited to a max of 1024; we really don't need it that big.
10271 * Nothing should be printing 1000 characters anyway.
10272 */
10273#define TRACE_MAX_PRINT		1000
10274
10275/*
10276 * Define here KERN_TRACE so that we have one place to modify
10277 * it if we decide to change what log level the ftrace dump
10278 * should be at.
10279 */
10280#define KERN_TRACE		KERN_EMERG
10281
10282void
10283trace_printk_seq(struct trace_seq *s)
10284{
10285	/* Probably should print a warning here. */
10286	if (s->seq.len >= TRACE_MAX_PRINT)
10287		s->seq.len = TRACE_MAX_PRINT;
10288
10289	/*
10290	 * More paranoid code. Although the buffer size is set to
10291	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
10292	 * an extra layer of protection.
10293	 */
10294	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
10295		s->seq.len = s->seq.size - 1;
10296
10297	/* Should be zero terminated, but we are paranoid. */
10298	s->buffer[s->seq.len] = 0;
10299
10300	printk(KERN_TRACE "%s", s->buffer);
10301
10302	trace_seq_init(s);
10303}
10304
10305void trace_init_global_iter(struct trace_iterator *iter)
10306{
10307	iter->tr = &global_trace;
10308	iter->trace = iter->tr->current_trace;
10309	iter->cpu_file = RING_BUFFER_ALL_CPUS;
10310	iter->array_buffer = &global_trace.array_buffer;
10311
10312	if (iter->trace && iter->trace->open)
10313		iter->trace->open(iter);
10314
10315	/* Annotate start of buffers if we had overruns */
10316	if (ring_buffer_overruns(iter->array_buffer->buffer))
10317		iter->iter_flags |= TRACE_FILE_ANNOTATE;
10318
10319	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
10320	if (trace_clocks[iter->tr->clock_id].in_ns)
10321		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
10322
10323	/* Can not use kmalloc for iter.temp and iter.fmt */
10324	iter->temp = static_temp_buf;
10325	iter->temp_size = STATIC_TEMP_BUF_SIZE;
10326	iter->fmt = static_fmt_buf;
10327	iter->fmt_size = STATIC_FMT_BUF_SIZE;
10328}
10329
10330void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
10331{
10332	/* use static because iter can be a bit big for the stack */
10333	static struct trace_iterator iter;
10334	static atomic_t dump_running;
10335	struct trace_array *tr = &global_trace;
10336	unsigned int old_userobj;
10337	unsigned long flags;
10338	int cnt = 0, cpu;
10339
10340	/* Only allow one dump user at a time. */
10341	if (atomic_inc_return(&dump_running) != 1) {
10342		atomic_dec(&dump_running);
10343		return;
10344	}
10345
10346	/*
10347	 * Always turn off tracing when we dump.
10348	 * We don't need to show trace output of what happens
10349	 * between multiple crashes.
10350	 *
10351	 * If the user does a sysrq-z, then they can re-enable
10352	 * tracing with echo 1 > tracing_on.
10353	 */
10354	tracing_off();
10355
10356	local_irq_save(flags);
10357
10358	/* Simulate the iterator */
10359	trace_init_global_iter(&iter);
10360
10361	for_each_tracing_cpu(cpu) {
10362		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10363	}
10364
10365	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
10366
10367	/* don't look at user memory in panic mode */
10368	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
10369
10370	switch (oops_dump_mode) {
10371	case DUMP_ALL:
10372		iter.cpu_file = RING_BUFFER_ALL_CPUS;
10373		break;
10374	case DUMP_ORIG:
10375		iter.cpu_file = raw_smp_processor_id();
10376		break;
10377	case DUMP_NONE:
10378		goto out_enable;
10379	default:
10380		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
10381		iter.cpu_file = RING_BUFFER_ALL_CPUS;
10382	}
10383
10384	printk(KERN_TRACE "Dumping ftrace buffer:\n");
10385
10386	/* Did function tracer already get disabled? */
10387	if (ftrace_is_dead()) {
10388		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
10389		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
10390	}
10391
10392	/*
10393	 * We need to stop all tracing on all CPUs to read
10394	 * the next buffer. This is a bit expensive, but is
10395	 * not done often. We read everything we can,
10396	 * and then release the locks again.
10397	 */
10398
10399	while (!trace_empty(&iter)) {
10400
10401		if (!cnt)
10402			printk(KERN_TRACE "---------------------------------\n");
10403
10404		cnt++;
10405
10406		trace_iterator_reset(&iter);
10407		iter.iter_flags |= TRACE_FILE_LAT_FMT;
10408
10409		if (trace_find_next_entry_inc(&iter) != NULL) {
10410			int ret;
10411
10412			ret = print_trace_line(&iter);
10413			if (ret != TRACE_TYPE_NO_CONSUME)
10414				trace_consume(&iter);
10415		}
10416		touch_nmi_watchdog();
10417
10418		trace_printk_seq(&iter.seq);
10419	}
10420
10421	if (!cnt)
10422		printk(KERN_TRACE "   (ftrace buffer empty)\n");
10423	else
10424		printk(KERN_TRACE "---------------------------------\n");
10425
10426 out_enable:
10427	tr->trace_flags |= old_userobj;
10428
10429	for_each_tracing_cpu(cpu) {
10430		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
10431	}
10432	atomic_dec(&dump_running);
10433	local_irq_restore(flags);
10434}
10435EXPORT_SYMBOL_GPL(ftrace_dump);
10436
10437#define WRITE_BUFSIZE  4096
10438
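/*
 * trace_parse_run_command - parse a user-space write into commands
 *
 * Copies the user buffer in WRITE_BUFSIZE chunks, splits it on
 * newlines, strips everything after a '#' (comments), and invokes
 * @createfn once per complete command line.  Used by probe-style
 * files such as kprobe_events.
 */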
10439ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
10440				size_t count, loff_t *ppos,
10441				int (*createfn)(const char *))
10442{
10443	char *kbuf, *buf, *tmp;
10444	int ret = 0;
10445	size_t done = 0;
10446	size_t size;
10447
10448	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
10449	if (!kbuf)
10450		return -ENOMEM;
10451
10452	while (done < count) {
10453		size = count - done;
10454
10455		if (size >= WRITE_BUFSIZE)
10456			size = WRITE_BUFSIZE - 1;
10457
10458		if (copy_from_user(kbuf, buffer + done, size)) {
10459			ret = -EFAULT;
10460			goto out;
10461		}
10462		kbuf[size] = '\0';
10463		buf = kbuf;
10464		do {
10465			tmp = strchr(buf, '\n');
10466			if (tmp) {
10467				*tmp = '\0';
10468				size = tmp - buf + 1;
10469			} else {
10470				size = strlen(buf);
10471				if (done + size < count) {
10472					if (buf != kbuf)
10473						break;
10474					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
10475					pr_warn("Line length is too long: Should be less than %d\n",
10476						WRITE_BUFSIZE - 2);
10477					ret = -EINVAL;
10478					goto out;
10479				}
10480			}
10481			done += size;
10482
10483			/* Remove comments */
10484			tmp = strchr(buf, '#');
10485
10486			if (tmp)
10487				*tmp = '\0';
10488
10489			ret = createfn(buf);
10490			if (ret)
10491				goto out;
10492			buf += size;
10493
10494		} while (done < count);
10495	}
10496	ret = done;
10497
10498out:
10499	kfree(kbuf);
10500
10501	return ret;
10502}
10503
10504#ifdef CONFIG_TRACER_MAX_TRACE
10505__init static bool tr_needs_alloc_snapshot(const char *name)
10506{
10507	char *test;
10508	int len = strlen(name);
10509	bool ret;
10510
10511	if (!boot_snapshot_index)
10512		return false;
10513
10514	if (strncmp(name, boot_snapshot_info, len) == 0 &&
10515	    boot_snapshot_info[len] == '\t')
10516		return true;
10517
10518	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
10519	if (!test)
10520		return false;
10521
10522	sprintf(test, "\t%s\t", name);
10523	ret = strstr(boot_snapshot_info, test) == NULL;
10524	kfree(test);
10525	return ret;
10526}
10527
10528__init static void do_allocate_snapshot(const char *name)
10529{
10530	if (!tr_needs_alloc_snapshot(name))
10531		return;
10532
10533	/*
10534	 * When allocate_snapshot is set, the next call to
10535	 * allocate_trace_buffers() (called by trace_array_get_by_name())
10536	 * will allocate the snapshot buffer. That will also clear
10537	 * this flag.
10538	 */
10539	allocate_snapshot = true;
10540}
10541#else
10542static inline void do_allocate_snapshot(const char *name) { }
10543#endif
10544
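/*
 * boot_instance_info is filled from the "trace_instance=" kernel
 * command line option.  Instance descriptions are separated by tabs;
 * within each one the first comma-separated token is the instance
 * name and any remaining tokens are events to enable, e.g.
 * (illustrative):
 *
 *	trace_instance=foo,sched:sched_switch
 */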
10545__init static void enable_instances(void)
10546{
10547	struct trace_array *tr;
10548	char *curr_str;
10549	char *str;
10550	char *tok;
10551
10552	/* A tab is always appended */
10553	boot_instance_info[boot_instance_index - 1] = '\0';
10554	str = boot_instance_info;
10555
10556	while ((curr_str = strsep(&str, "\t"))) {
10557
10558		tok = strsep(&curr_str, ",");
10559
10560		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
10561			do_allocate_snapshot(tok);
10562
10563		tr = trace_array_get_by_name(tok, NULL);
10564		if (!tr) {
10565			pr_warn("Failed to create instance buffer %s\n", curr_str);
10566			continue;
10567		}
10568		/* Allow user space to delete it */
10569		trace_array_put(tr);
10570
10571		while ((tok = strsep(&curr_str, ","))) {
10572			early_enable_events(tr, tok, true);
10573		}
10574	}
10575}
10576
10577__init static int tracer_alloc_buffers(void)
10578{
10579	int ring_buf_size;
10580	int ret = -ENOMEM;
10581
10582
10583	if (security_locked_down(LOCKDOWN_TRACEFS)) {
10584		pr_warn("Tracing disabled due to lockdown\n");
10585		return -EPERM;
10586	}
10587
10588	/*
10589	 * Make sure we don't accidentally add more trace options
10590	 * than we have bits for.
10591	 */
10592	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
10593
10594	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
10595		goto out;
10596
10597	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
10598		goto out_free_buffer_mask;
10599
10600	/* Only allocate trace_printk buffers if a trace_printk exists */
10601	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
10602		/* Must be called before global_trace.buffer is allocated */
10603		trace_printk_init_buffers();
10604
10605	/* To save memory, keep the ring buffer size to its minimum */
10606	if (global_trace.ring_buffer_expanded)
10607		ring_buf_size = trace_buf_size;
10608	else
10609		ring_buf_size = 1;
10610
10611	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
10612	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
10613
10614	raw_spin_lock_init(&global_trace.start_lock);
10615
10616	/*
10617	 * The prepare callback allocates some memory for the ring buffer. We
10618	 * don't free the buffer if the CPU goes down. If we were to free
10619	 * the buffer, then the user would lose any trace that was in the
10620	 * buffer. The memory will be removed once the "instance" is removed.
10621	 */
10622	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10623				      "trace/RB:prepare", trace_rb_cpu_prepare,
10624				      NULL);
10625	if (ret < 0)
10626		goto out_free_cpumask;
10627	/* Used for event triggers */
10628	ret = -ENOMEM;
10629	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
10630	if (!temp_buffer)
10631		goto out_rm_hp_state;
10632
10633	if (trace_create_savedcmd() < 0)
10634		goto out_free_temp_buffer;
10635
10636	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
10637		goto out_free_savedcmd;
10638
10639	/* TODO: make the number of buffers hot pluggable with CPUS */
10640	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
10641		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
10642		goto out_free_pipe_cpumask;
10643	}
10644	if (global_trace.buffer_disabled)
10645		tracing_off();
10646
10647	if (trace_boot_clock) {
10648		ret = tracing_set_clock(&global_trace, trace_boot_clock);
10649		if (ret < 0)
10650			pr_warn("Trace clock %s not defined, going back to default\n",
10651				trace_boot_clock);
10652	}
10653
10654	/*
10655	 * register_tracer() might reference current_trace, so it
10656	 * needs to be set before we register anything. This is
10657	 * just a bootstrap of current_trace anyway.
10658	 */
10659	global_trace.current_trace = &nop_trace;
10660
10661	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10662
10663	ftrace_init_global_array_ops(&global_trace);
10664
10665	init_trace_flags_index(&global_trace);
10666
10667	register_tracer(&nop_trace);
10668
10669	/* Function tracing may start here (via kernel command line) */
10670	init_function_trace();
10671
10672	/* All seems OK, enable tracing */
10673	tracing_disabled = 0;
10674
10675	atomic_notifier_chain_register(&panic_notifier_list,
10676				       &trace_panic_notifier);
10677
10678	register_die_notifier(&trace_die_notifier);
10679
10680	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10681
10682	INIT_LIST_HEAD(&global_trace.systems);
10683	INIT_LIST_HEAD(&global_trace.events);
10684	INIT_LIST_HEAD(&global_trace.hist_vars);
10685	INIT_LIST_HEAD(&global_trace.err_log);
10686	list_add(&global_trace.list, &ftrace_trace_arrays);
10687
10688	apply_trace_boot_options();
10689
10690	register_snapshot_cmd();
10691
10692	test_can_verify();
10693
10694	return 0;
10695
10696out_free_pipe_cpumask:
10697	free_cpumask_var(global_trace.pipe_cpumask);
10698out_free_savedcmd:
10699	free_saved_cmdlines_buffer(savedcmd);
10700out_free_temp_buffer:
10701	ring_buffer_free(temp_buffer);
10702out_rm_hp_state:
10703	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
10704out_free_cpumask:
10705	free_cpumask_var(global_trace.tracing_cpumask);
10706out_free_buffer_mask:
10707	free_cpumask_var(tracing_buffer_mask);
10708out:
10709	return ret;
10710}
10711
10712void __init ftrace_boot_snapshot(void)
10713{
10714#ifdef CONFIG_TRACER_MAX_TRACE
10715	struct trace_array *tr;
10716
10717	if (!snapshot_at_boot)
10718		return;
10719
10720	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
10721		if (!tr->allocated_snapshot)
10722			continue;
10723
10724		tracing_snapshot_instance(tr);
10725		trace_array_puts(tr, "** Boot snapshot taken **\n");
10726	}
10727#endif
10728}
10729
10730void __init early_trace_init(void)
10731{
10732	if (tracepoint_printk) {
10733		tracepoint_print_iter =
10734			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
10735		if (MEM_FAIL(!tracepoint_print_iter,
10736			     "Failed to allocate trace iterator\n"))
10737			tracepoint_printk = 0;
10738		else
10739			static_key_enable(&tracepoint_printk_key.key);
10740	}
10741	tracer_alloc_buffers();
10742
10743	init_events();
10744}
10745
10746void __init trace_init(void)
10747{
10748	trace_event_init();
10749
10750	if (boot_instance_index)
10751		enable_instances();
10752}
10753
10754__init static void clear_boot_tracer(void)
10755{
10756	/*
10757	 * The default bootup tracer name points into an init section.
10758	 * This function is called from a late initcall. If we did not
10759	 * find the boot tracer, then clear it out, to prevent
10760	 * later registration from accessing the buffer that is
10761	 * about to be freed.
10762	 */
10763	if (!default_bootup_tracer)
10764		return;
10765
10766	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10767	       default_bootup_tracer);
10768	default_bootup_tracer = NULL;
10769}
10770
10771#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
10772__init static void tracing_set_default_clock(void)
10773{
10774	/* sched_clock_stable() is determined in late_initcall */
10775	if (!trace_boot_clock && !sched_clock_stable()) {
10776		if (security_locked_down(LOCKDOWN_TRACEFS)) {
10777			pr_warn("Can not set tracing clock due to lockdown\n");
10778			return;
10779		}
10780
10781		printk(KERN_WARNING
10782		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
10783		       "If you want to keep using the local clock, then add:\n"
10784		       "  \"trace_clock=local\"\n"
10785		       "on the kernel command line\n");
10786		tracing_set_clock(&global_trace, "global");
10787	}
10788}
10789#else
10790static inline void tracing_set_default_clock(void) { }
10791#endif
10792
10793__init static int late_trace_init(void)
10794{
10795	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10796		static_key_disable(&tracepoint_printk_key.key);
10797		tracepoint_printk = 0;
10798	}
10799
10800	tracing_set_default_clock();
10801	clear_boot_tracer();
10802	return 0;
10803}
10804
10805late_initcall_sync(late_trace_init);
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * ring buffer based function tracer
   4 *
   5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
   6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
   7 *
   8 * Originally taken from the RT patch by:
   9 *    Arnaldo Carvalho de Melo <acme@redhat.com>
  10 *
  11 * Based on code from the latency_tracer, that is:
  12 *  Copyright (C) 2004-2006 Ingo Molnar
  13 *  Copyright (C) 2004 Nadia Yvette Chambers
  14 */
  15#include <linux/ring_buffer.h>
  16#include <generated/utsrelease.h>
  17#include <linux/stacktrace.h>
  18#include <linux/writeback.h>
  19#include <linux/kallsyms.h>
  20#include <linux/security.h>
  21#include <linux/seq_file.h>
  22#include <linux/notifier.h>
  23#include <linux/irqflags.h>
  24#include <linux/debugfs.h>
  25#include <linux/tracefs.h>
  26#include <linux/pagemap.h>
  27#include <linux/hardirq.h>
  28#include <linux/linkage.h>
  29#include <linux/uaccess.h>
  30#include <linux/vmalloc.h>
  31#include <linux/ftrace.h>
  32#include <linux/module.h>
  33#include <linux/percpu.h>
  34#include <linux/splice.h>
  35#include <linux/kdebug.h>
  36#include <linux/string.h>
  37#include <linux/mount.h>
  38#include <linux/rwsem.h>
  39#include <linux/slab.h>
  40#include <linux/ctype.h>
  41#include <linux/init.h>
 
 
  42#include <linux/poll.h>
  43#include <linux/nmi.h>
  44#include <linux/fs.h>
  45#include <linux/trace.h>
  46#include <linux/sched/clock.h>
  47#include <linux/sched/rt.h>
 
 
 
 
 
  48
  49#include "trace.h"
  50#include "trace_output.h"
  51
  52/*
  53 * On boot up, the ring buffer is set to the minimum size, so that
  54 * we do not waste memory on systems that are not using tracing.
  55 */
  56bool ring_buffer_expanded;
  57
  58/*
  59 * We need to change this state when a selftest is running.
  60 * A selftest will lurk into the ring-buffer to count the
  61 * entries inserted during the selftest although some concurrent
  62 * insertions into the ring-buffer such as trace_printk could occurred
  63 * at the same time, giving false positive or negative results.
  64 */
  65static bool __read_mostly tracing_selftest_running;
  66
  67/*
  68 * If a tracer is running, we do not want to run SELFTEST.
 
  69 */
  70bool __read_mostly tracing_selftest_disabled;
  71
 
 
 
 
 
 
 
 
 
 
 
 
  72/* Pipe tracepoints to printk */
  73struct trace_iterator *tracepoint_print_iter;
  74int tracepoint_printk;
 
  75static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
  76
  77/* For tracers that don't implement custom flags */
  78static struct tracer_opt dummy_tracer_opt[] = {
  79	{ }
  80};
  81
  82static int
  83dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
  84{
  85	return 0;
  86}
  87
  88/*
  89 * To prevent the comm cache from being overwritten when no
  90 * tracing is active, only save the comm when a trace event
  91 * occurred.
  92 */
  93static DEFINE_PER_CPU(bool, trace_taskinfo_save);
  94
  95/*
  96 * Kill all tracing for good (never come back).
  97 * It is initialized to 1 but will turn to zero if the initialization
  98 * of the tracer is successful. But that is the only place that sets
  99 * this back to zero.
 100 */
 101static int tracing_disabled = 1;
 102
 103cpumask_var_t __read_mostly	tracing_buffer_mask;
 104
 105/*
 106 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 107 *
 108 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 109 * is set, then ftrace_dump is called. This will output the contents
 110 * of the ftrace buffers to the console.  This is very useful for
 111 * capturing traces that lead to crashes and outputing it to a
 112 * serial console.
 113 *
 114 * It is default off, but you can enable it with either specifying
 115 * "ftrace_dump_on_oops" in the kernel command line, or setting
 116 * /proc/sys/kernel/ftrace_dump_on_oops
 117 * Set 1 if you want to dump buffers of all CPUs
 118 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 119 */
 120
 121enum ftrace_dump_mode ftrace_dump_on_oops;
 122
 123/* When set, tracing will stop when a WARN*() is hit */
 124int __disable_trace_on_warning;
 125
 126#ifdef CONFIG_TRACE_EVAL_MAP_FILE
 127/* Map of enums to their values, for "eval_map" file */
 128struct trace_eval_map_head {
 129	struct module			*mod;
 130	unsigned long			length;
 131};
 132
 133union trace_eval_map_item;
 134
 135struct trace_eval_map_tail {
 136	/*
 137	 * "end" is first and points to NULL as it must be different
 138	 * than "mod" or "eval_string"
 139	 */
 140	union trace_eval_map_item	*next;
 141	const char			*end;	/* points to NULL */
 142};
 143
 144static DEFINE_MUTEX(trace_eval_mutex);
 145
 146/*
 147 * The trace_eval_maps are saved in an array with two extra elements,
 148 * one at the beginning, and one at the end. The beginning item contains
 149 * the count of the saved maps (head.length), and the module they
 150 * belong to if not built in (head.mod). The ending item contains a
 151 * pointer to the next array of saved eval_map items.
 152 */
 153union trace_eval_map_item {
 154	struct trace_eval_map		map;
 155	struct trace_eval_map_head	head;
 156	struct trace_eval_map_tail	tail;
 157};
 158
 159static union trace_eval_map_item *trace_eval_maps;
 160#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
 161
 162static int tracing_set_tracer(struct trace_array *tr, const char *buf);
 163static void ftrace_trace_userstack(struct ring_buffer *buffer,
 164				   unsigned long flags, int pc);
 
 165
 166#define MAX_TRACER_SIZE		100
 167static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 168static char *default_bootup_tracer;
 169
 170static bool allocate_snapshot;
 
 
 
 
 
 
 
 171
 172static int __init set_cmdline_ftrace(char *str)
 173{
 174	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 175	default_bootup_tracer = bootup_tracer_buf;
 176	/* We are using ftrace early, expand it */
 177	ring_buffer_expanded = true;
 178	return 1;
 179}
 180__setup("ftrace=", set_cmdline_ftrace);
 181
 182static int __init set_ftrace_dump_on_oops(char *str)
 183{
 184	if (*str++ != '=' || !*str) {
 185		ftrace_dump_on_oops = DUMP_ALL;
 186		return 1;
 187	}
 188
 189	if (!strcmp("orig_cpu", str)) {
 190		ftrace_dump_on_oops = DUMP_ORIG;
 191                return 1;
 192        }
 193
 194        return 0;
 195}
 196__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 197
 198static int __init stop_trace_on_warning(char *str)
 199{
 200	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
 201		__disable_trace_on_warning = 1;
 202	return 1;
 203}
 204__setup("traceoff_on_warning", stop_trace_on_warning);
 205
 206static int __init boot_alloc_snapshot(char *str)
 207{
 208	allocate_snapshot = true;
 209	/* We also need the main ring buffer expanded */
 210	ring_buffer_expanded = true;
 
 
 
 
 
 
 
 
 
 
 
 
 
 211	return 1;
 212}
 213__setup("alloc_snapshot", boot_alloc_snapshot);
 214
 215
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 216static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
 217
 218static int __init set_trace_boot_options(char *str)
 219{
 220	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
 221	return 0;
 222}
 223__setup("trace_options=", set_trace_boot_options);
 224
 225static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
 226static char *trace_boot_clock __initdata;
 227
 228static int __init set_trace_boot_clock(char *str)
 229{
 230	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
 231	trace_boot_clock = trace_boot_clock_buf;
 232	return 0;
 233}
 234__setup("trace_clock=", set_trace_boot_clock);
 235
 236static int __init set_tracepoint_printk(char *str)
 237{
 
 
 
 
 238	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
 239		tracepoint_printk = 1;
 240	return 1;
 241}
 242__setup("tp_printk", set_tracepoint_printk);
 243
 
 
 
 
 
 
 
 244unsigned long long ns2usecs(u64 nsec)
 245{
 246	nsec += 500;
 247	do_div(nsec, 1000);
 248	return nsec;
 249}
 250
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 251/* trace_flags holds trace_options default values */
 252#define TRACE_DEFAULT_FLAGS						\
 253	(FUNCTION_DEFAULT_FLAGS |					\
 254	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
 255	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
 256	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
 257	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
 
 258
 259/* trace_options that are only supported by global_trace */
 260#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
 261	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
 262
 263/* trace_flags that are default zero for instances */
 264#define ZEROED_TRACE_FLAGS \
 265	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
 266
 267/*
 268 * The global_trace is the descriptor that holds the top-level tracing
 269 * buffers for the live tracing.
 270 */
 271static struct trace_array global_trace = {
 272	.trace_flags = TRACE_DEFAULT_FLAGS,
 273};
 274
 
 
 
 
 
 
 
 275LIST_HEAD(ftrace_trace_arrays);
 276
 277int trace_array_get(struct trace_array *this_tr)
 278{
 279	struct trace_array *tr;
 280	int ret = -ENODEV;
 281
 282	mutex_lock(&trace_types_lock);
 283	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 284		if (tr == this_tr) {
 285			tr->ref++;
 286			ret = 0;
 287			break;
 288		}
 289	}
 290	mutex_unlock(&trace_types_lock);
 291
 292	return ret;
 293}
 294
 295static void __trace_array_put(struct trace_array *this_tr)
 296{
 297	WARN_ON(!this_tr->ref);
 298	this_tr->ref--;
 299}
 300
 
 
 
 
 
 
 
 
 
 301void trace_array_put(struct trace_array *this_tr)
 302{
 
 
 
 303	mutex_lock(&trace_types_lock);
 304	__trace_array_put(this_tr);
 305	mutex_unlock(&trace_types_lock);
 306}
 
 307
 308int tracing_check_open_get_tr(struct trace_array *tr)
 309{
 310	int ret;
 311
 312	ret = security_locked_down(LOCKDOWN_TRACEFS);
 313	if (ret)
 314		return ret;
 315
 316	if (tracing_disabled)
 317		return -ENODEV;
 318
 319	if (tr && trace_array_get(tr) < 0)
 320		return -ENODEV;
 321
 322	return 0;
 323}
 324
 325int call_filter_check_discard(struct trace_event_call *call, void *rec,
 326			      struct ring_buffer *buffer,
 327			      struct ring_buffer_event *event)
 328{
 329	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
 330	    !filter_match_preds(call->filter, rec)) {
 331		__trace_event_discard_commit(buffer, event);
 332		return 1;
 333	}
 334
 335	return 0;
 336}
 337
 338void trace_free_pid_list(struct trace_pid_list *pid_list)
 339{
 340	vfree(pid_list->pids);
 341	kfree(pid_list);
 342}
 343
 344/**
 345 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 346 * @filtered_pids: The list of pids to check
 347 * @search_pid: The PID to find in @filtered_pids
 348 *
 349 * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis.
 350 */
 351bool
 352trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
 353{
 354	/*
 355	 * If pid_max changed after filtered_pids was created, we
 356	 * by default ignore all pids greater than the previous pid_max.
 357	 */
 358	if (search_pid >= filtered_pids->pid_max)
 359		return false;
 360
 361	return test_bit(search_pid, filtered_pids->pids);
 362}
 363
 364/**
 365 * trace_ignore_this_task - should a task be ignored for tracing
 366 * @filtered_pids: The list of pids to check
 
 367 * @task: The task that should be ignored if not filtered
 368 *
 369 * Checks if @task should be traced or not from @filtered_pids.
 370 * Returns true if @task should *NOT* be traced.
 371 * Returns false if @task should be traced.
 372 */
 373bool
 374trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
 375{
 376	/*
 377	 * Return false, because if filtered_pids does not exist,
 378	 * all pids are good to trace.
 379	 */
 380	if (!filtered_pids)
 381		return false;
 382
 383	return !trace_find_filtered_pid(filtered_pids, task->pid);
 
 
 
 
 
 
 384}
 385
 386/**
 387 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 388 * @pid_list: The list to modify
 389 * @self: The current task for fork or NULL for exit
 390 * @task: The task to add or remove
 391 *
 392 * If adding a task, if @self is defined, the task is only added if @self
 393 * is also included in @pid_list. This happens on fork and tasks should
 394 * only be added when the parent is listed. If @self is NULL, then the
 395 * @task pid will be removed from the list, which would happen on exit
 396 * of a task.
 397 */
 398void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
 399				  struct task_struct *self,
 400				  struct task_struct *task)
 401{
 402	if (!pid_list)
 403		return;
 404
 405	/* For forks, we only add if the forking task is listed */
 406	if (self) {
 407		if (!trace_find_filtered_pid(pid_list, self->pid))
 408			return;
 409	}
 410
 411	/* Sorry, but we don't support pid_max changing after setting */
 412	if (task->pid >= pid_list->pid_max)
 413		return;
 414
 415	/* "self" is set for forks, and NULL for exits */
 416	if (self)
 417		set_bit(task->pid, pid_list->pids);
 418	else
 419		clear_bit(task->pid, pid_list->pids);
 420}
 421
 422/**
 423 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 424 * @pid_list: The pid list to show
 425 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 426 * @pos: The position of the file
 427 *
 428 * This is used by the seq_file "next" operation to iterate the pids
 429 * listed in a trace_pid_list structure.
 430 *
 431 * Returns the pid+1 as we want to display pid of zero, but NULL would
 432 * stop the iteration.
 433 */
 434void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
 435{
 436	unsigned long pid = (unsigned long)v;
 
 437
 438	(*pos)++;
 439
 440	/* pid already is +1 of the actual prevous bit */
 441	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
 
 
 
 442
 443	/* Return pid + 1 to allow zero to be represented */
 444	if (pid < pid_list->pid_max)
 445		return (void *)(pid + 1);
 446
 447	return NULL;
 448}
 449
 450/**
 451 * trace_pid_start - Used for seq_file to start reading pid lists
 452 * @pid_list: The pid list to show
 453 * @pos: The position of the file
 454 *
 455 * This is used by seq_file "start" operation to start the iteration
 456 * of listing pids.
 457 *
 458 * Returns the pid+1 as we want to display pid of zero, but NULL would
 459 * stop the iteration.
 460 */
 461void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
 462{
 463	unsigned long pid;
 
 464	loff_t l = 0;
 465
 466	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
 467	if (pid >= pid_list->pid_max)
 468		return NULL;
 469
 
 
 470	/* Return pid + 1 so that zero can be the exit value */
 471	for (pid++; pid && l < *pos;
 472	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
 473		;
 474	return (void *)pid;
 475}
 476
 477/**
 478 * trace_pid_show - show the current pid in seq_file processing
 479 * @m: The seq_file structure to write into
 480 * @v: A void pointer of the pid (+1) value to display
 481 *
 482 * Can be directly used by seq_file operations to display the current
 483 * pid value.
 484 */
 485int trace_pid_show(struct seq_file *m, void *v)
 486{
 487	unsigned long pid = (unsigned long)v - 1;
 488
 489	seq_printf(m, "%lu\n", pid);
 490	return 0;
 491}
 492
 493/* 128 should be much more than enough */
 494#define PID_BUF_SIZE		127
 495
 496int trace_pid_write(struct trace_pid_list *filtered_pids,
 497		    struct trace_pid_list **new_pid_list,
 498		    const char __user *ubuf, size_t cnt)
 499{
 500	struct trace_pid_list *pid_list;
 501	struct trace_parser parser;
 502	unsigned long val;
 503	int nr_pids = 0;
 504	ssize_t read = 0;
 505	ssize_t ret = 0;
 506	loff_t pos;
 507	pid_t pid;
 508
 509	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
 510		return -ENOMEM;
 511
 512	/*
 513	 * The write is an all or nothing operation: a new array is
 514	 * always created when the user adds new pids, rather than
 515	 * modifying the existing one. If the operation fails, then
 516	 * the current list is not modified.
 517	 */
 518	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
 519	if (!pid_list) {
 520		trace_parser_put(&parser);
 521		return -ENOMEM;
 522	}
 523
 524	pid_list->pid_max = READ_ONCE(pid_max);
 525
 526	/* Only truncating will shrink pid_max */
 527	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
 528		pid_list->pid_max = filtered_pids->pid_max;
 529
 530	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
 531	if (!pid_list->pids) {
 532		trace_parser_put(&parser);
 533		kfree(pid_list);
 534		return -ENOMEM;
 535	}
 536
 537	if (filtered_pids) {
 538		/* copy the current bits to the new max */
 539		for_each_set_bit(pid, filtered_pids->pids,
 540				 filtered_pids->pid_max) {
 541			set_bit(pid, pid_list->pids);
 
 542			nr_pids++;
 543		}
 544	}
 545
 
 546	while (cnt > 0) {
 547
 548		pos = 0;
 549
 550		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 551		if (ret < 0 || !trace_parser_loaded(&parser))
 552			break;
 553
 554		read += ret;
 555		ubuf += ret;
 556		cnt -= ret;
 557
 558		ret = -EINVAL;
 559		if (kstrtoul(parser.buffer, 0, &val))
 560			break;
 561		if (val >= pid_list->pid_max)
 562			break;
 563
 564		pid = (pid_t)val;
 565
 566		set_bit(pid, pid_list->pids);
 567		nr_pids++;
 568
 569		trace_parser_clear(&parser);
 570		ret = 0;
 571	}
 572	trace_parser_put(&parser);
 573
 574	if (ret < 0) {
 575		trace_free_pid_list(pid_list);
 576		return ret;
 577	}
 578
 579	if (!nr_pids) {
 580		/* Cleared the list of pids */
 581		trace_free_pid_list(pid_list);
 582		read = ret;
 583		pid_list = NULL;
 584	}
 585
 586	*new_pid_list = pid_list;
 587
 588	return read;
 589}
 590
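/*
 * Example (illustrative sketch, not part of the original source): a write
 * handler passes the old list and the user buffer to trace_pid_write() and
 * then publishes the result.  The example_filtered pointer is hypothetical,
 * and real users also hold the appropriate mutex around the swap.
 */
static struct trace_pid_list *example_filtered;

static ssize_t example_pid_write_file(struct file *filp, const char __user *ubuf,
				      size_t cnt, loff_t *ppos)
{
	struct trace_pid_list *old_list = example_filtered;
	struct trace_pid_list *new_list = NULL;
	int ret;

	ret = trace_pid_write(old_list, &new_list, ubuf, cnt);
	if (ret < 0)
		return ret;

	/* Publish the new list, then free the old one once readers are gone */
	rcu_assign_pointer(example_filtered, new_list);
	synchronize_rcu();
	if (old_list)
		trace_free_pid_list(old_list);

	*ppos += ret;
	return ret;
}
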
 591static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 592{
 593	u64 ts;
 594
 595	/* Early boot up does not have a buffer yet */
 596	if (!buf->buffer)
 597		return trace_clock_local();
 598
 599	ts = ring_buffer_time_stamp(buf->buffer, cpu);
 600	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
 601
 602	return ts;
 603}
 604
 605u64 ftrace_now(int cpu)
 606{
 607	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
 608}
 609
 610/**
 611 * tracing_is_enabled - Show if global_trace has been disabled
 612 *
 613 * Shows if the global trace has been enabled or not. It uses the
 614 * mirror flag "buffer_disabled" so that it can be used in fast paths,
 615 * such as by the irqsoff tracer. But it may be inaccurate due to races. If you
 616 * need to know the accurate state, use tracing_is_on() which is a little
 617 * slower, but accurate.
 618 */
 619int tracing_is_enabled(void)
 620{
 621	/*
 622	 * For quick access (irqsoff uses this in fast path), just
 623	 * return the mirror variable of the state of the ring buffer.
 624	 * It's a little racy, but we don't really care.
 625	 */
 626	smp_rmb();
 627	return !global_trace.buffer_disabled;
 628}
 629
 630/*
 631 * trace_buf_size is the size in bytes that is allocated
 632 * for a buffer. Note, the number of bytes is always rounded
 633 * to page size.
 634 *
 635 * This number is purposely set to a low value of 16384.
 636 * If a dump on oops happens, it is much nicer not to have
 637 * to wait for all that output. In any case, this is both
 638 * boot time and run time configurable.
 639 */
 640#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
 641
 642static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 643
 644/* trace_types holds a link list of available tracers. */
 645static struct tracer		*trace_types __read_mostly;
 646
 647/*
 648 * trace_types_lock is used to protect the trace_types list.
 649 */
 650DEFINE_MUTEX(trace_types_lock);
 651
 652/*
 653 * serialize the access of the ring buffer
 654 *
 655 * The ring buffer serializes readers, but that is only low level protection.
 656 * The validity of the events (returned by ring_buffer_peek() etc.)
 657 * is not protected by the ring buffer.
 658 *
 659 * The content of events may become garbage if we allow other processes to
 660 * consume these events concurrently:
 661 *   A) the page of the consumed events may become a normal page
 662 *      (not a reader page) in the ring buffer, and this page will be rewritten
 663 *      by the events producer.
 664 *   B) The page of the consumed events may become a page for splice_read,
 665 *      and this page will be returned to the system.
 666 *
 667 * These primitives allow multiple processes to access different cpu ring
 668 * buffers concurrently.
 669 *
 670 * These primitives don't distinguish between read-only and read-consume access.
 671 * Multiple read-only accesses are also serialized.
 672 */
 673
 674#ifdef CONFIG_SMP
 675static DECLARE_RWSEM(all_cpu_access_lock);
 676static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
 677
 678static inline void trace_access_lock(int cpu)
 679{
 680	if (cpu == RING_BUFFER_ALL_CPUS) {
 681		/* gain it for accessing the whole ring buffer. */
 682		down_write(&all_cpu_access_lock);
 683	} else {
 684		/* gain it for accessing a cpu ring buffer. */
 685
 686		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
 687		down_read(&all_cpu_access_lock);
 688
 689		/* Secondly block other access to this @cpu ring buffer. */
 690		mutex_lock(&per_cpu(cpu_access_lock, cpu));
 691	}
 692}
 693
 694static inline void trace_access_unlock(int cpu)
 695{
 696	if (cpu == RING_BUFFER_ALL_CPUS) {
 697		up_write(&all_cpu_access_lock);
 698	} else {
 699		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
 700		up_read(&all_cpu_access_lock);
 701	}
 702}
 703
 704static inline void trace_access_lock_init(void)
 705{
 706	int cpu;
 707
 708	for_each_possible_cpu(cpu)
 709		mutex_init(&per_cpu(cpu_access_lock, cpu));
 710}
 711
 712#else
 713
 714static DEFINE_MUTEX(access_lock);
 715
 716static inline void trace_access_lock(int cpu)
 717{
 718	(void)cpu;
 719	mutex_lock(&access_lock);
 720}
 721
 722static inline void trace_access_unlock(int cpu)
 723{
 724	(void)cpu;
 725	mutex_unlock(&access_lock);
 726}
 727
 728static inline void trace_access_lock_init(void)
 729{
 730}
 731
 732#endif
 733
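/*
 * Example (illustrative sketch): readers bracket their buffer access with
 * the helpers above.  A single cpu takes the shared path; on SMP, passing
 * RING_BUFFER_ALL_CPUS takes the rwsem exclusively for whole-buffer work.
 */
static void example_consume_cpu(int cpu)
{
	trace_access_lock(cpu);
	/* ... peek at or consume events for this cpu here ... */
	trace_access_unlock(cpu);
}
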
 734#ifdef CONFIG_STACKTRACE
 735static void __ftrace_trace_stack(struct ring_buffer *buffer,
 736				 unsigned long flags,
 737				 int skip, int pc, struct pt_regs *regs);
 738static inline void ftrace_trace_stack(struct trace_array *tr,
 739				      struct ring_buffer *buffer,
 740				      unsigned long flags,
 741				      int skip, int pc, struct pt_regs *regs);
 742
 743#else
 744static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
 745					unsigned long flags,
 746					int skip, int pc, struct pt_regs *regs)
 747{
 748}
 749static inline void ftrace_trace_stack(struct trace_array *tr,
 750				      struct ring_buffer *buffer,
 751				      unsigned long flags,
 752				      int skip, int pc, struct pt_regs *regs)
 753{
 754}
 755
 756#endif
 757
 758static __always_inline void
 759trace_event_setup(struct ring_buffer_event *event,
 760		  int type, unsigned long flags, int pc)
 761{
 762	struct trace_entry *ent = ring_buffer_event_data(event);
 763
 764	tracing_generic_entry_update(ent, type, flags, pc);
 765}
 766
 767static __always_inline struct ring_buffer_event *
 768__trace_buffer_lock_reserve(struct ring_buffer *buffer,
 769			  int type,
 770			  unsigned long len,
 771			  unsigned long flags, int pc)
 772{
 773	struct ring_buffer_event *event;
 774
 775	event = ring_buffer_lock_reserve(buffer, len);
 776	if (event != NULL)
 777		trace_event_setup(event, type, flags, pc);
 778
 779	return event;
 780}
 781
 782void tracer_tracing_on(struct trace_array *tr)
 783{
 784	if (tr->trace_buffer.buffer)
 785		ring_buffer_record_on(tr->trace_buffer.buffer);
 786	/*
 787	 * This flag is looked at when buffers haven't been allocated
 788	 * yet, or by some tracers (like irqsoff), that just want to
 789	 * know if the ring buffer has been disabled, but it can handle
 790	 * races where it gets disabled but we still do a record.
 791	 * As the check is in the fast path of the tracers, it is more
 792	 * important to be fast than accurate.
 793	 */
 794	tr->buffer_disabled = 0;
 795	/* Make the flag seen by readers */
 796	smp_wmb();
 797}
 798
 799/**
 800 * tracing_on - enable tracing buffers
 801 *
 802 * This function enables tracing buffers that may have been
 803 * disabled with tracing_off.
 804 */
 805void tracing_on(void)
 806{
 807	tracer_tracing_on(&global_trace);
 808}
 809EXPORT_SYMBOL_GPL(tracing_on);
 810
 811
 812static __always_inline void
 813__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
 814{
 815	__this_cpu_write(trace_taskinfo_save, true);
 816
 817	/* If this is the temp buffer, we need to commit fully */
 818	if (this_cpu_read(trace_buffered_event) == event) {
 819		/* Length is in event->array[0] */
 820		ring_buffer_write(buffer, event->array[0], &event->array[1]);
 821		/* Release the temp buffer */
 822		this_cpu_dec(trace_buffered_event_cnt);
 823	} else
 824		ring_buffer_unlock_commit(buffer, event);
 825}
 826
 827/**
 828 * __trace_puts - write a constant string into the trace buffer.
 829 * @ip:	   The address of the caller
 830 * @str:   The constant string to write
 831 * @size:  The size of the string.
 832 */
 833int __trace_puts(unsigned long ip, const char *str, int size)
 834{
 835	struct ring_buffer_event *event;
 836	struct ring_buffer *buffer;
 837	struct print_entry *entry;
 838	unsigned long irq_flags;
 839	int alloc;
 840	int pc;
 841
 842	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
 843		return 0;
 844
 845	pc = preempt_count();
 
 846
 847	if (unlikely(tracing_selftest_running || tracing_disabled))
 848		return 0;
 849
 850	alloc = sizeof(*entry) + size + 2; /* possible \n added */
 851
 852	local_save_flags(irq_flags);
 853	buffer = global_trace.trace_buffer.buffer;
 854	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
 855					    irq_flags, pc);
 856	if (!event)
 857		return 0;
 858
 859	entry = ring_buffer_event_data(event);
 860	entry->ip = ip;
 861
 862	memcpy(&entry->buf, str, size);
 863
 864	/* Add a newline if necessary */
 865	if (entry->buf[size - 1] != '\n') {
 866		entry->buf[size] = '\n';
 867		entry->buf[size + 1] = '\0';
 868	} else
 869		entry->buf[size] = '\0';
 870
 871	__buffer_unlock_commit(buffer, event);
 872	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 873
 874	return size;
 875}
 876EXPORT_SYMBOL_GPL(__trace_puts);
 877
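/*
 * Example (illustrative sketch): callers normally go through the
 * trace_puts() macro rather than calling __trace_puts() directly; the
 * macro supplies the caller address and the string size, and picks
 * __trace_puts() or __trace_bputs() depending on the string.
 */
static void example_mark_path(void)
{
	trace_puts("example: reached the slow path\n");
}
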
 878/**
 879 * __trace_bputs - write the pointer to a constant string into trace buffer
 880 * @ip:	   The address of the caller
 881 * @str:   The constant string to write to the buffer to
 882 */
 883int __trace_bputs(unsigned long ip, const char *str)
 884{
 885	struct ring_buffer_event *event;
 886	struct ring_buffer *buffer;
 887	struct bputs_entry *entry;
 888	unsigned long irq_flags;
 889	int size = sizeof(struct bputs_entry);
 890	int pc;
 891
 892	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
 893		return 0;
 894
 895	pc = preempt_count();
 896
 897	if (unlikely(tracing_selftest_running || tracing_disabled))
 898		return 0;
 899
 900	local_save_flags(irq_flags);
 901	buffer = global_trace.trace_buffer.buffer;
 902	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
 903					    irq_flags, pc);
 904	if (!event)
 905		return 0;
 906
 907	entry = ring_buffer_event_data(event);
 908	entry->ip			= ip;
 909	entry->str			= str;
 910
 911	__buffer_unlock_commit(buffer, event);
 912	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 913
 914	return 1;
 915}
 916EXPORT_SYMBOL_GPL(__trace_bputs);
 917
 918#ifdef CONFIG_TRACER_SNAPSHOT
 919void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
 
 920{
 921	struct tracer *tracer = tr->current_trace;
 922	unsigned long flags;
 923
 924	if (in_nmi()) {
 925		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
 926		internal_trace_puts("*** snapshot is being ignored        ***\n");
 927		return;
 928	}
 929
 930	if (!tr->allocated_snapshot) {
 931		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
 932		internal_trace_puts("*** stopping trace here!   ***\n");
 933		tracing_off();
 934		return;
 935	}
 936
 937	/* Note, snapshot can not be used when the tracer uses it */
 938	if (tracer->use_max_tr) {
 939		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
 940		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
 941		return;
 942	}
 943
 944	local_irq_save(flags);
 945	update_max_tr(tr, current, smp_processor_id(), cond_data);
 946	local_irq_restore(flags);
 947}
 948
 949void tracing_snapshot_instance(struct trace_array *tr)
 950{
 951	tracing_snapshot_instance_cond(tr, NULL);
 952}
 953
 954/**
 955 * tracing_snapshot - take a snapshot of the current buffer.
 956 *
 957 * This causes a swap between the snapshot buffer and the current live
 958 * tracing buffer. You can use this to take snapshots of the live
 959 * trace when some condition is triggered, but continue to trace.
 960 *
 961 * Note, make sure to allocate the snapshot with either
 962 * a tracing_snapshot_alloc(), or by doing it manually
 963 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 964 *
 965 * If the snapshot buffer is not allocated, it will stop tracing.
 966 * Basically making a permanent snapshot.
 967 */
 968void tracing_snapshot(void)
 969{
 970	struct trace_array *tr = &global_trace;
 971
 972	tracing_snapshot_instance(tr);
 973}
 974EXPORT_SYMBOL_GPL(tracing_snapshot);
 975
 976/**
 977 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 978 * @tr:		The tracing instance to snapshot
 979 * @cond_data:	The data to be tested conditionally, and possibly saved
 980 *
 981 * This is the same as tracing_snapshot() except that the snapshot is
 982 * conditional - the snapshot will only happen if the
 983 * cond_snapshot.update() implementation receiving the cond_data
 984 * returns true, which means that the trace array's cond_snapshot
 985 * update() operation used the cond_data to determine whether the
 986 * snapshot should be taken, and if it was, presumably saved it along
 987 * with the snapshot.
 988 */
 989void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
 990{
 991	tracing_snapshot_instance_cond(tr, cond_data);
 992}
 993EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
 994
 995/**
 996 * tracing_snapshot_cond_data - get the user data associated with a snapshot
 997 * @tr:		The tracing instance
 998 *
 999 * When the user enables a conditional snapshot using
1000 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1001 * with the snapshot.  This accessor is used to retrieve it.
1002 *
1003 * Should not be called from cond_snapshot.update(), since it takes
1004 * the tr->max_lock lock, which the code calling
1005 * cond_snapshot.update() has already done.
1006 *
1007 * Returns the cond_data associated with the trace array's snapshot.
1008 */
1009void *tracing_cond_snapshot_data(struct trace_array *tr)
1010{
1011	void *cond_data = NULL;
1012
 
1013	arch_spin_lock(&tr->max_lock);
1014
1015	if (tr->cond_snapshot)
1016		cond_data = tr->cond_snapshot->cond_data;
1017
1018	arch_spin_unlock(&tr->max_lock);
 
1019
1020	return cond_data;
1021}
1022EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1023
1024static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
1025					struct trace_buffer *size_buf, int cpu_id);
1026static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
1027
1028int tracing_alloc_snapshot_instance(struct trace_array *tr)
1029{
 
1030	int ret;
1031
1032	if (!tr->allocated_snapshot) {
1033
1034		/* allocate spare buffer */
1035		ret = resize_buffer_duplicate_size(&tr->max_buffer,
1036				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
1037		if (ret < 0)
1038			return ret;
1039
1040		tr->allocated_snapshot = true;
1041	}
1042
1043	return 0;
1044}
1045
1046static void free_snapshot(struct trace_array *tr)
1047{
1048	/*
1049	 * We don't free the ring buffer; instead, we resize it because
1050	 * the max_tr ring buffer has some state (e.g. ring->clock) and
1051	 * we want to preserve it.
1052	 */
 
1053	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1054	set_buffer_entries(&tr->max_buffer, 1);
1055	tracing_reset_online_cpus(&tr->max_buffer);
1056	tr->allocated_snapshot = false;
1057}
1058
1059/**
1060 * tracing_alloc_snapshot - allocate snapshot buffer.
1061 *
1062 * This only allocates the snapshot buffer if it isn't already
1063 * allocated - it doesn't also take a snapshot.
1064 *
1065 * This is meant to be used in cases where the snapshot buffer needs
1066 * to be set up for events that can't sleep but need to be able to
1067 * trigger a snapshot.
1068 */
1069int tracing_alloc_snapshot(void)
1070{
1071	struct trace_array *tr = &global_trace;
1072	int ret;
1073
1074	ret = tracing_alloc_snapshot_instance(tr);
1075	WARN_ON(ret < 0);
1076
1077	return ret;
1078}
1079EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1080
1081/**
1082 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1083 *
1084 * This is similar to tracing_snapshot(), but it will allocate the
1085 * snapshot buffer if it isn't already allocated. Use this only
1086 * where it is safe to sleep, as the allocation may sleep.
1087 *
1088 * This causes a swap between the snapshot buffer and the current live
1089 * tracing buffer. You can use this to take snapshots of the live
1090 * trace when some condition is triggered, but continue to trace.
1091 */
1092void tracing_snapshot_alloc(void)
1093{
1094	int ret;
1095
1096	ret = tracing_alloc_snapshot();
1097	if (ret < 0)
1098		return;
1099
1100	tracing_snapshot();
1101}
1102EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1103
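/*
 * Example (illustrative sketch): tracing_snapshot_alloc() is the sleepable
 * one-shot form.  From atomic context, allocate the spare buffer ahead of
 * time and only call tracing_snapshot() at the point of interest.  The
 * condition below is a hypothetical stand-in.
 */
static void example_hit_condition(bool bug_detected)
{
	if (bug_detected)
		tracing_snapshot();	/* assumes tracing_alloc_snapshot() already ran */
}
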
1104/**
1105 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1106 * @tr:		The tracing instance
1107 * @cond_data:	User data to associate with the snapshot
1108 * @update:	Implementation of the cond_snapshot update function
1109 *
1110 * Check whether the conditional snapshot for the given instance has
1111 * already been enabled, or if the current tracer is already using a
1112 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1113 * save the cond_data and update function inside.
1114 *
1115 * Returns 0 if successful, error otherwise.
1116 */
1117int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1118				 cond_update_fn_t update)
1119{
1120	struct cond_snapshot *cond_snapshot;
1121	int ret = 0;
1122
1123	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1124	if (!cond_snapshot)
1125		return -ENOMEM;
1126
1127	cond_snapshot->cond_data = cond_data;
1128	cond_snapshot->update = update;
1129
1130	mutex_lock(&trace_types_lock);
1131
1132	ret = tracing_alloc_snapshot_instance(tr);
1133	if (ret)
1134		goto fail_unlock;
1135
1136	if (tr->current_trace->use_max_tr) {
1137		ret = -EBUSY;
1138		goto fail_unlock;
1139	}
1140
1141	/*
1142	 * The cond_snapshot can only change to NULL without the
1143	 * trace_types_lock. We don't care if we race with it going
1144	 * to NULL, but we want to make sure that it's not set to
1145	 * something other than NULL when we get here, which we can
1146	 * do safely with only holding the trace_types_lock and not
1147	 * having to take the max_lock.
1148	 */
1149	if (tr->cond_snapshot) {
1150		ret = -EBUSY;
1151		goto fail_unlock;
1152	}
1153
 
1154	arch_spin_lock(&tr->max_lock);
1155	tr->cond_snapshot = cond_snapshot;
1156	arch_spin_unlock(&tr->max_lock);
 
1157
1158	mutex_unlock(&trace_types_lock);
1159
1160	return ret;
1161
1162 fail_unlock:
1163	mutex_unlock(&trace_types_lock);
1164	kfree(cond_snapshot);
1165	return ret;
1166}
1167EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1168
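/*
 * Example (illustrative sketch): a conditional snapshot pairs user data
 * with an update callback.  The threshold value and the measurement
 * helper below are hypothetical stand-ins.
 */
static unsigned long example_measured_latency(void)
{
	return 0;	/* hypothetical measurement source */
}

static bool example_snapshot_update(struct trace_array *tr, void *cond_data)
{
	unsigned long *threshold = cond_data;

	/* Only let the snapshot happen when the measurement is too large */
	return example_measured_latency() > *threshold;
}

static int example_snapshot_setup(struct trace_array *tr)
{
	static unsigned long threshold = 500;	/* hypothetical, in usecs */

	return tracing_snapshot_cond_enable(tr, &threshold, example_snapshot_update);
}
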
1169/**
1170 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1171 * @tr:		The tracing instance
1172 *
1173 * Check whether the conditional snapshot for the given instance is
1174 * enabled; if so, free the cond_snapshot associated with it,
1175 * otherwise return -EINVAL.
1176 *
1177 * Returns 0 if successful, error otherwise.
1178 */
1179int tracing_snapshot_cond_disable(struct trace_array *tr)
1180{
1181	int ret = 0;
1182
 
1183	arch_spin_lock(&tr->max_lock);
1184
1185	if (!tr->cond_snapshot)
1186		ret = -EINVAL;
1187	else {
1188		kfree(tr->cond_snapshot);
1189		tr->cond_snapshot = NULL;
1190	}
1191
1192	arch_spin_unlock(&tr->max_lock);
 
1193
1194	return ret;
1195}
1196EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1197#else
1198void tracing_snapshot(void)
1199{
1200	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1201}
1202EXPORT_SYMBOL_GPL(tracing_snapshot);
1203void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1204{
1205	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1206}
1207EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1208int tracing_alloc_snapshot(void)
1209{
1210	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1211	return -ENODEV;
1212}
1213EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1214void tracing_snapshot_alloc(void)
1215{
1216	/* Give warning */
1217	tracing_snapshot();
1218}
1219EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1220void *tracing_cond_snapshot_data(struct trace_array *tr)
1221{
1222	return NULL;
1223}
1224EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1225int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1226{
1227	return -ENODEV;
1228}
1229EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1230int tracing_snapshot_cond_disable(struct trace_array *tr)
1231{
1232	return false;
1233}
1234EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
 
1235#endif /* CONFIG_TRACER_SNAPSHOT */
1236
1237void tracer_tracing_off(struct trace_array *tr)
1238{
1239	if (tr->trace_buffer.buffer)
1240		ring_buffer_record_off(tr->trace_buffer.buffer);
1241	/*
1242	 * This flag is looked at when buffers haven't been allocated
1243	 * yet, or by some tracers (like irqsoff), that just want to
1244	 * know if the ring buffer has been disabled, but it can handle
1245	 * races where it gets disabled but we still do a record.
1246	 * As the check is in the fast path of the tracers, it is more
1247	 * important to be fast than accurate.
1248	 */
1249	tr->buffer_disabled = 1;
1250	/* Make the flag seen by readers */
1251	smp_wmb();
1252}
1253
1254/**
1255 * tracing_off - turn off tracing buffers
1256 *
1257 * This function stops the tracing buffers from recording data.
1258 * It does not disable any overhead the tracers themselves may
1259 * be causing. This function simply causes all recording to
1260 * the ring buffers to fail.
1261 */
1262void tracing_off(void)
1263{
1264	tracer_tracing_off(&global_trace);
1265}
1266EXPORT_SYMBOL_GPL(tracing_off);
1267
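/*
 * Example (illustrative sketch): a classic debugging pattern is to stop
 * the ring buffer the moment a bad state is detected, so the events
 * leading up to it are preserved for a later read of the trace file.
 */
static void example_validate(int status)
{
	if (WARN_ON_ONCE(status < 0))
		tracing_off();
}
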
1268void disable_trace_on_warning(void)
1269{
1270	if (__disable_trace_on_warning)
1271		tracing_off();
1272}
1273
1274/**
1275 * tracer_tracing_is_on - show real state of ring buffer enabled
1276 * @tr : the trace array to know if ring buffer is enabled
1277 *
1278 * Shows real state of the ring buffer if it is enabled or not.
1279 */
1280bool tracer_tracing_is_on(struct trace_array *tr)
1281{
1282	if (tr->trace_buffer.buffer)
1283		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1284	return !tr->buffer_disabled;
1285}
1286
1287/**
1288 * tracing_is_on - show state of ring buffers enabled
1289 */
1290int tracing_is_on(void)
1291{
1292	return tracer_tracing_is_on(&global_trace);
1293}
1294EXPORT_SYMBOL_GPL(tracing_is_on);
1295
1296static int __init set_buf_size(char *str)
1297{
1298	unsigned long buf_size;
1299
1300	if (!str)
1301		return 0;
1302	buf_size = memparse(str, &str);
1303	/* nr_entries can not be zero */
1304	if (buf_size == 0)
1305		return 0;
1306	trace_buf_size = buf_size;
1307	return 1;
1308}
1309__setup("trace_buf_size=", set_buf_size);
1310
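/*
 * Example (illustrative): booting with "trace_buf_size=10M" on the kernel
 * command line sets the tracing buffer size per cpu; memparse() above
 * accepts the usual K/M/G suffixes.
 */
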
1311static int __init set_tracing_thresh(char *str)
1312{
1313	unsigned long threshold;
1314	int ret;
1315
1316	if (!str)
1317		return 0;
1318	ret = kstrtoul(str, 0, &threshold);
1319	if (ret < 0)
1320		return 0;
1321	tracing_thresh = threshold * 1000;
1322	return 1;
1323}
1324__setup("tracing_thresh=", set_tracing_thresh);
1325
1326unsigned long nsecs_to_usecs(unsigned long nsecs)
1327{
1328	return nsecs / 1000;
1329}
1330
1331/*
1332 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1333 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1334 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1335 * of strings in the order that the evals (enum) were defined.
1336 */
1337#undef C
1338#define C(a, b) b
1339
1340/* These must match the bit positions in trace_iterator_flags */
1341static const char *trace_options[] = {
1342	TRACE_FLAGS
1343	NULL
1344};
1345
1346static struct {
1347	u64 (*func)(void);
1348	const char *name;
1349	int in_ns;		/* is this clock in nanoseconds? */
1350} trace_clocks[] = {
1351	{ trace_clock_local,		"local",	1 },
1352	{ trace_clock_global,		"global",	1 },
1353	{ trace_clock_counter,		"counter",	0 },
1354	{ trace_clock_jiffies,		"uptime",	0 },
1355	{ trace_clock,			"perf",		1 },
1356	{ ktime_get_mono_fast_ns,	"mono",		1 },
1357	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
1358	{ ktime_get_boot_fast_ns,	"boot",		1 },
 
1359	ARCH_TRACE_CLOCKS
1360};
1361
1362bool trace_clock_in_ns(struct trace_array *tr)
1363{
1364	if (trace_clocks[tr->clock_id].in_ns)
1365		return true;
1366
1367	return false;
1368}
1369
1370/*
1371 * trace_parser_get_init - gets the buffer for trace parser
1372 */
1373int trace_parser_get_init(struct trace_parser *parser, int size)
1374{
1375	memset(parser, 0, sizeof(*parser));
1376
1377	parser->buffer = kmalloc(size, GFP_KERNEL);
1378	if (!parser->buffer)
1379		return 1;
1380
1381	parser->size = size;
1382	return 0;
1383}
1384
1385/*
1386 * trace_parser_put - frees the buffer for trace parser
1387 */
1388void trace_parser_put(struct trace_parser *parser)
1389{
1390	kfree(parser->buffer);
1391	parser->buffer = NULL;
1392}
1393
1394/*
1395 * trace_get_user - reads the user input string separated by space
1396 * (matched by isspace(ch))
1397 *
1398 * For each string found the 'struct trace_parser' is updated,
1399 * and the function returns.
1400 *
1401 * Returns number of bytes read.
1402 *
1403 * See kernel/trace/trace.h for 'struct trace_parser' details.
1404 */
1405int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1406	size_t cnt, loff_t *ppos)
1407{
1408	char ch;
1409	size_t read = 0;
1410	ssize_t ret;
1411
1412	if (!*ppos)
1413		trace_parser_clear(parser);
1414
1415	ret = get_user(ch, ubuf++);
1416	if (ret)
1417		goto out;
1418
1419	read++;
1420	cnt--;
1421
1422	/*
1423	 * The parser is not finished with the last write,
1424	 * continue reading the user input without skipping spaces.
1425	 */
1426	if (!parser->cont) {
1427		/* skip white space */
1428		while (cnt && isspace(ch)) {
1429			ret = get_user(ch, ubuf++);
1430			if (ret)
1431				goto out;
1432			read++;
1433			cnt--;
1434		}
1435
1436		parser->idx = 0;
1437
1438		/* only spaces were written */
1439		if (isspace(ch) || !ch) {
1440			*ppos += read;
1441			ret = read;
1442			goto out;
1443		}
1444	}
1445
1446	/* read the non-space input */
1447	while (cnt && !isspace(ch) && ch) {
1448		if (parser->idx < parser->size - 1)
1449			parser->buffer[parser->idx++] = ch;
1450		else {
1451			ret = -EINVAL;
1452			goto out;
1453		}
1454		ret = get_user(ch, ubuf++);
1455		if (ret)
1456			goto out;
1457		read++;
1458		cnt--;
1459	}
1460
1461	/* We either got finished input or we have to wait for another call. */
1462	if (isspace(ch) || !ch) {
1463		parser->buffer[parser->idx] = 0;
1464		parser->cont = false;
1465	} else if (parser->idx < parser->size - 1) {
1466		parser->cont = true;
1467		parser->buffer[parser->idx++] = ch;
1468		/* Make sure the parsed string always terminates with '\0'. */
1469		parser->buffer[parser->idx] = 0;
1470	} else {
1471		ret = -EINVAL;
1472		goto out;
1473	}
1474
1475	*ppos += read;
1476	ret = read;
1477
1478out:
1479	return ret;
1480}
1481
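/*
 * Example (illustrative sketch): the usual parser loop pairs
 * trace_parser_get_init() with repeated trace_get_user() calls until the
 * user buffer is drained (see trace_pid_write() above for a full
 * instance).  The token handling here is left as a comment.
 */
static ssize_t example_parse_tokens(const char __user *ubuf, size_t cnt)
{
	struct trace_parser parser;
	loff_t pos;
	ssize_t ret = 0;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	while (cnt > 0) {
		pos = 0;
		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		ubuf += ret;
		cnt -= ret;

		/* parser.buffer now holds one whitespace-separated token */
		trace_parser_clear(&parser);
	}

	trace_parser_put(&parser);
	return ret;
}
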
1482/* TODO add a seq_buf_to_buffer() */
1483static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1484{
1485	int len;
1486
1487	if (trace_seq_used(s) <= s->seq.readpos)
1488		return -EBUSY;
1489
1490	len = trace_seq_used(s) - s->seq.readpos;
1491	if (cnt > len)
1492		cnt = len;
1493	memcpy(buf, s->buffer + s->seq.readpos, cnt);
1494
1495	s->seq.readpos += cnt;
1496	return cnt;
1497}
1498
1499unsigned long __read_mostly	tracing_thresh;
1500
1501#ifdef CONFIG_TRACER_MAX_TRACE
1502/*
1503 * Copy the new maximum trace into the separate maximum-trace
1504 * structure. (this way the maximum trace is permanently saved,
1505 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1506 */
1507static void
1508__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1509{
1510	struct trace_buffer *trace_buf = &tr->trace_buffer;
1511	struct trace_buffer *max_buf = &tr->max_buffer;
1512	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1513	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1514
1515	max_buf->cpu = cpu;
1516	max_buf->time_start = data->preempt_timestamp;
1517
1518	max_data->saved_latency = tr->max_latency;
1519	max_data->critical_start = data->critical_start;
1520	max_data->critical_end = data->critical_end;
1521
1522	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1523	max_data->pid = tsk->pid;
1524	/*
1525	 * If tsk == current, then use current_uid(), as that does not use
1526	 * RCU. The irq tracer can be called out of RCU scope.
1527	 */
1528	if (tsk == current)
1529		max_data->uid = current_uid();
1530	else
1531		max_data->uid = task_uid(tsk);
1532
1533	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1534	max_data->policy = tsk->policy;
1535	max_data->rt_priority = tsk->rt_priority;
1536
1537	/* record this tasks comm */
1538	tracing_record_cmdline(tsk);
 
1539}
1540
1541/**
1542 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1543 * @tr: tracer
1544 * @tsk: the task with the latency
1545 * @cpu: The cpu that initiated the trace.
1546 * @cond_data: User data associated with a conditional snapshot
1547 *
1548 * Flip the buffers between the @tr and the max_tr and record information
1549 * about which task was the cause of this latency.
1550 */
1551void
1552update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1553	      void *cond_data)
1554{
1555	if (tr->stop_count)
1556		return;
1557
1558	WARN_ON_ONCE(!irqs_disabled());
1559
1560	if (!tr->allocated_snapshot) {
1561		/* Only the nop tracer should hit this when disabling */
1562		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1563		return;
1564	}
1565
1566	arch_spin_lock(&tr->max_lock);
1567
1568	/* Inherit the recordable setting from trace_buffer */
1569	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1570		ring_buffer_record_on(tr->max_buffer.buffer);
1571	else
1572		ring_buffer_record_off(tr->max_buffer.buffer);
1573
1574#ifdef CONFIG_TRACER_SNAPSHOT
1575	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1576		goto out_unlock;
1577#endif
1578	swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
1579
1580	__update_max_tr(tr, tsk, cpu);
1581
1582 out_unlock:
1583	arch_spin_unlock(&tr->max_lock);
1584}
1585
1586/**
1587 * update_max_tr_single - only copy one trace over, and reset the rest
1588 * @tr: tracer
1589 * @tsk: task with the latency
1590 * @cpu: the cpu of the buffer to copy.
1591 *
1592 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1593 */
1594void
1595update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1596{
1597	int ret;
1598
1599	if (tr->stop_count)
1600		return;
1601
1602	WARN_ON_ONCE(!irqs_disabled());
1603	if (!tr->allocated_snapshot) {
1604		/* Only the nop tracer should hit this when disabling */
1605		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1606		return;
1607	}
1608
1609	arch_spin_lock(&tr->max_lock);
1610
1611	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1612
1613	if (ret == -EBUSY) {
1614		/*
1615		 * We failed to swap the buffer due to a commit taking
1616		 * place on this CPU. We fail to record, but we reset
1617		 * the max trace buffer (no one writes directly to it)
1618		 * and flag that it failed.
1619		 */
1620		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1621			"Failed to swap buffers due to commit in progress\n");
1622	}
1623
1624	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1625
1626	__update_max_tr(tr, tsk, cpu);
1627	arch_spin_unlock(&tr->max_lock);
1628}
 
1629#endif /* CONFIG_TRACER_MAX_TRACE */
1630
1631static int wait_on_pipe(struct trace_iterator *iter, int full)
1632{
1633	/* Iterators are static, they should be filled or empty */
1634	if (trace_buffer_iter(iter, iter->cpu_file))
1635		return 0;
1636
1637	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1638				full);
1639}
1640
1641#ifdef CONFIG_FTRACE_STARTUP_TEST
1642static bool selftests_can_run;
1643
1644struct trace_selftests {
1645	struct list_head		list;
1646	struct tracer			*type;
1647};
1648
1649static LIST_HEAD(postponed_selftests);
1650
1651static int save_selftest(struct tracer *type)
1652{
1653	struct trace_selftests *selftest;
1654
1655	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1656	if (!selftest)
1657		return -ENOMEM;
1658
1659	selftest->type = type;
1660	list_add(&selftest->list, &postponed_selftests);
1661	return 0;
1662}
1663
1664static int run_tracer_selftest(struct tracer *type)
1665{
1666	struct trace_array *tr = &global_trace;
1667	struct tracer *saved_tracer = tr->current_trace;
1668	int ret;
1669
1670	if (!type->selftest || tracing_selftest_disabled)
1671		return 0;
1672
1673	/*
1674	 * If a tracer registers early in boot up (before scheduling is
1675	 * initialized and such), then do not run its selftests yet.
1676	 * Instead, run it a little later in the boot process.
1677	 */
1678	if (!selftests_can_run)
1679		return save_selftest(type);
1680
1681	/*
1682	 * Run a selftest on this tracer.
1683	 * Here we reset the trace buffer, and set the current
1684	 * tracer to be this tracer. The tracer can then run some
1685	 * internal tracing to verify that everything is in order.
1686	 * If we fail, we do not register this tracer.
1687	 */
1688	tracing_reset_online_cpus(&tr->trace_buffer);
1689
1690	tr->current_trace = type;
1691
1692#ifdef CONFIG_TRACER_MAX_TRACE
1693	if (type->use_max_tr) {
1694		/* If we expanded the buffers, make sure the max is expanded too */
1695		if (ring_buffer_expanded)
1696			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1697					   RING_BUFFER_ALL_CPUS);
1698		tr->allocated_snapshot = true;
1699	}
1700#endif
1701
1702	/* the test is responsible for initializing and enabling */
1703	pr_info("Testing tracer %s: ", type->name);
1704	ret = type->selftest(type, tr);
1705	/* the test is responsible for resetting too */
1706	tr->current_trace = saved_tracer;
1707	if (ret) {
1708		printk(KERN_CONT "FAILED!\n");
1709		/* Add the warning after printing 'FAILED' */
1710		WARN_ON(1);
1711		return -1;
1712	}
1713	/* Only reset on passing, to avoid touching corrupted buffers */
1714	tracing_reset_online_cpus(&tr->trace_buffer);
1715
1716#ifdef CONFIG_TRACER_MAX_TRACE
1717	if (type->use_max_tr) {
1718		tr->allocated_snapshot = false;
1719
1720		/* Shrink the max buffer again */
1721		if (ring_buffer_expanded)
1722			ring_buffer_resize(tr->max_buffer.buffer, 1,
1723					   RING_BUFFER_ALL_CPUS);
1724	}
1725#endif
1726
1727	printk(KERN_CONT "PASSED\n");
1728	return 0;
1729}
1730
1731static __init int init_trace_selftests(void)
1732{
1733	struct trace_selftests *p, *n;
1734	struct tracer *t, **last;
1735	int ret;
1736
1737	selftests_can_run = true;
1738
1739	mutex_lock(&trace_types_lock);
1740
1741	if (list_empty(&postponed_selftests))
1742		goto out;
1743
1744	pr_info("Running postponed tracer tests:\n");
1745
 
1746	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1747		/* This loop can take minutes when sanitizers are enabled, so
1748		 * let's make sure we allow RCU processing.
1749		 */
1750		cond_resched();
1751		ret = run_tracer_selftest(p->type);
1752		/* If the test fails, then warn and remove from available_tracers */
1753		if (ret < 0) {
1754			WARN(1, "tracer: %s failed selftest, disabling\n",
1755			     p->type->name);
1756			last = &trace_types;
1757			for (t = trace_types; t; t = t->next) {
1758				if (t == p->type) {
1759					*last = t->next;
1760					break;
1761				}
1762				last = &t->next;
1763			}
1764		}
1765		list_del(&p->list);
1766		kfree(p);
1767	}
 
1768
1769 out:
1770	mutex_unlock(&trace_types_lock);
1771
1772	return 0;
1773}
1774core_initcall(init_trace_selftests);
1775#else
1776static inline int run_tracer_selftest(struct tracer *type)
1777{
1778	return 0;
1779}
1780#endif /* CONFIG_FTRACE_STARTUP_TEST */
1781
1782static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1783
1784static void __init apply_trace_boot_options(void);
1785
1786/**
1787 * register_tracer - register a tracer with the ftrace system.
1788 * @type: the plugin for the tracer
1789 *
1790 * Register a new plugin tracer.
1791 */
1792int __init register_tracer(struct tracer *type)
1793{
1794	struct tracer *t;
1795	int ret = 0;
1796
1797	if (!type->name) {
1798		pr_info("Tracer must have a name\n");
1799		return -1;
1800	}
1801
1802	if (strlen(type->name) >= MAX_TRACER_SIZE) {
1803		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1804		return -1;
1805	}
1806
1807	mutex_lock(&trace_types_lock);
1808
1809	tracing_selftest_running = true;
1810
1811	for (t = trace_types; t; t = t->next) {
1812		if (strcmp(type->name, t->name) == 0) {
1813			/* already found */
1814			pr_info("Tracer %s already registered\n",
1815				type->name);
1816			ret = -1;
1817			goto out;
1818		}
1819	}
1820
1821	if (!type->set_flag)
1822		type->set_flag = &dummy_set_flag;
1823	if (!type->flags) {
1824		/*allocate a dummy tracer_flags*/
1825		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1826		if (!type->flags) {
1827			ret = -ENOMEM;
1828			goto out;
1829		}
1830		type->flags->val = 0;
1831		type->flags->opts = dummy_tracer_opt;
1832	} else
1833		if (!type->flags->opts)
1834			type->flags->opts = dummy_tracer_opt;
1835
1836	/* store the tracer for __set_tracer_option */
1837	type->flags->trace = type;
1838
1839	ret = run_tracer_selftest(type);
1840	if (ret < 0)
1841		goto out;
1842
1843	type->next = trace_types;
1844	trace_types = type;
1845	add_tracer_options(&global_trace, type);
1846
1847 out:
1848	tracing_selftest_running = false;
1849	mutex_unlock(&trace_types_lock);
1850
1851	if (ret || !default_bootup_tracer)
1852		goto out_unlock;
1853
1854	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1855		goto out_unlock;
1856
1857	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1858	/* Do we want this tracer to start on bootup? */
1859	tracing_set_tracer(&global_trace, type->name);
1860	default_bootup_tracer = NULL;
1861
1862	apply_trace_boot_options();
1863
1864	/* disable other selftests, since this will break it. */
1865	tracing_selftest_disabled = true;
1866#ifdef CONFIG_FTRACE_STARTUP_TEST
1867	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1868	       type->name);
1869#endif
1870
1871 out_unlock:
1872	return ret;
1873}
1874
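/*
 * Example (illustrative sketch): a minimal tracer only needs a name plus
 * init and reset callbacks; flags, selftest and the rest are optional.
 * The "example" tracer below is hypothetical.
 */
static int example_tracer_init(struct trace_array *tr)
{
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(example_tracer_register);
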
1875static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
1876{
1877	struct ring_buffer *buffer = buf->buffer;
1878
1879	if (!buffer)
1880		return;
1881
1882	ring_buffer_record_disable(buffer);
1883
1884	/* Make sure all commits have finished */
1885	synchronize_rcu();
1886	ring_buffer_reset_cpu(buffer, cpu);
1887
1888	ring_buffer_record_enable(buffer);
1889}
1890
1891void tracing_reset_online_cpus(struct trace_buffer *buf)
1892{
1893	struct ring_buffer *buffer = buf->buffer;
1894	int cpu;
1895
1896	if (!buffer)
1897		return;
1898
1899	ring_buffer_record_disable(buffer);
1900
1901	/* Make sure all commits have finished */
1902	synchronize_rcu();
1903
1904	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1905
1906	for_each_online_cpu(cpu)
1907		ring_buffer_reset_cpu(buffer, cpu);
1908
1909	ring_buffer_record_enable(buffer);
1910}
1911
1912/* Must have trace_types_lock held */
1913void tracing_reset_all_online_cpus(void)
1914{
1915	struct trace_array *tr;
1916
1917	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1918		if (!tr->clear_trace)
1919			continue;
1920		tr->clear_trace = false;
1921		tracing_reset_online_cpus(&tr->trace_buffer);
1922#ifdef CONFIG_TRACER_MAX_TRACE
1923		tracing_reset_online_cpus(&tr->max_buffer);
1924#endif
1925	}
1926}
1927
1928static int *tgid_map;
1929
1930#define SAVED_CMDLINES_DEFAULT 128
1931#define NO_CMDLINE_MAP UINT_MAX
1932static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1933struct saved_cmdlines_buffer {
1934	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1935	unsigned *map_cmdline_to_pid;
1936	unsigned cmdline_num;
1937	int cmdline_idx;
1938	char *saved_cmdlines;
1939};
1940static struct saved_cmdlines_buffer *savedcmd;
1941
1942/* temporary disable recording */
1943static atomic_t trace_record_taskinfo_disabled __read_mostly;
1944
1945static inline char *get_saved_cmdlines(int idx)
1946{
1947	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1948}
1949
1950static inline void set_cmdline(int idx, const char *cmdline)
1951{
1952	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1953}
1954
1955static int allocate_cmdlines_buffer(unsigned int val,
1956				    struct saved_cmdlines_buffer *s)
1957{
1958	s->map_cmdline_to_pid = kmalloc_array(val,
1959					      sizeof(*s->map_cmdline_to_pid),
1960					      GFP_KERNEL);
1961	if (!s->map_cmdline_to_pid)
1962		return -ENOMEM;
1963
1964	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
1965	if (!s->saved_cmdlines) {
1966		kfree(s->map_cmdline_to_pid);
1967		return -ENOMEM;
1968	}
1969
1970	s->cmdline_idx = 0;
1971	s->cmdline_num = val;
1972	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1973	       sizeof(s->map_pid_to_cmdline));
1974	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1975	       val * sizeof(*s->map_cmdline_to_pid));
1976
1977	return 0;
1978}
1979
1980static int trace_create_savedcmd(void)
1981{
1982	int ret;
1983
1984	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1985	if (!savedcmd)
1986		return -ENOMEM;
1987
1988	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1989	if (ret < 0) {
1990		kfree(savedcmd);
1991		savedcmd = NULL;
1992		return -ENOMEM;
1993	}
1994
1995	return 0;
1996}
1997
1998int is_tracing_stopped(void)
1999{
2000	return global_trace.stop_count;
2001}
2002
2003/**
2004 * tracing_start - quick start of the tracer
2005 *
2006 * If tracing is enabled but was stopped by tracing_stop,
2007 * this will start the tracer back up.
2008 */
2009void tracing_start(void)
2010{
2011	struct ring_buffer *buffer;
2012	unsigned long flags;
2013
2014	if (tracing_disabled)
2015		return;
2016
2017	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2018	if (--global_trace.stop_count) {
2019		if (global_trace.stop_count < 0) {
2020			/* Someone screwed up their debugging */
2021			WARN_ON_ONCE(1);
2022			global_trace.stop_count = 0;
2023		}
2024		goto out;
2025	}
2026
2027	/* Prevent the buffers from switching */
2028	arch_spin_lock(&global_trace.max_lock);
2029
2030	buffer = global_trace.trace_buffer.buffer;
2031	if (buffer)
2032		ring_buffer_record_enable(buffer);
2033
2034#ifdef CONFIG_TRACER_MAX_TRACE
2035	buffer = global_trace.max_buffer.buffer;
2036	if (buffer)
2037		ring_buffer_record_enable(buffer);
2038#endif
2039
2040	arch_spin_unlock(&global_trace.max_lock);
2041
2042 out:
2043	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2044}
2045
2046static void tracing_start_tr(struct trace_array *tr)
2047{
2048	struct ring_buffer *buffer;
2049	unsigned long flags;
2050
2051	if (tracing_disabled)
2052		return;
2053
2054	/* If global, we need to also start the max tracer */
2055	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2056		return tracing_start();
2057
2058	raw_spin_lock_irqsave(&tr->start_lock, flags);
2059
2060	if (--tr->stop_count) {
2061		if (tr->stop_count < 0) {
2062			/* Someone screwed up their debugging */
2063			WARN_ON_ONCE(1);
2064			tr->stop_count = 0;
2065		}
2066		goto out;
2067	}
2068
2069	buffer = tr->trace_buffer.buffer;
2070	if (buffer)
2071		ring_buffer_record_enable(buffer);
2072
2073 out:
2074	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2075}
2076
2077/**
2078 * tracing_stop - quick stop of the tracer
2079 *
2080 * Light weight way to stop tracing. Use in conjunction with
2081 * tracing_start.
2082 */
2083void tracing_stop(void)
2084{
2085	struct ring_buffer *buffer;
2086	unsigned long flags;
2087
2088	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2089	if (global_trace.stop_count++)
2090		goto out;
2091
2092	/* Prevent the buffers from switching */
2093	arch_spin_lock(&global_trace.max_lock);
2094
2095	buffer = global_trace.trace_buffer.buffer;
2096	if (buffer)
2097		ring_buffer_record_disable(buffer);
2098
2099#ifdef CONFIG_TRACER_MAX_TRACE
2100	buffer = global_trace.max_buffer.buffer;
2101	if (buffer)
2102		ring_buffer_record_disable(buffer);
2103#endif
2104
2105	arch_spin_unlock(&global_trace.max_lock);
2106
2107 out:
2108	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2109}
2110
2111static void tracing_stop_tr(struct trace_array *tr)
2112{
2113	struct ring_buffer *buffer;
2114	unsigned long flags;
2115
2116	/* If global, we need to also stop the max tracer */
2117	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2118		return tracing_stop();
2119
2120	raw_spin_lock_irqsave(&tr->start_lock, flags);
2121	if (tr->stop_count++)
2122		goto out;
2123
2124	buffer = tr->trace_buffer.buffer;
2125	if (buffer)
2126		ring_buffer_record_disable(buffer);
2127
2128 out:
2129	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2130}
2131
2132static int trace_save_cmdline(struct task_struct *tsk)
2133{
2134	unsigned pid, idx;
2135
2136	/* treat recording of idle task as a success */
2137	if (!tsk->pid)
2138		return 1;
2139
2140	if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2141		return 0;
2142
2143	/*
2144	 * It's not the end of the world if we don't get
2145	 * the lock, but we also don't want to spin
2146	 * nor do we want to disable interrupts,
2147	 * so if we miss here, then better luck next time.
2148	 */
2149	if (!arch_spin_trylock(&trace_cmdline_lock))
2150		return 0;
2151
2152	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2153	if (idx == NO_CMDLINE_MAP) {
2154		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2155
2156		/*
2157		 * Check whether the cmdline buffer at idx has a pid
2158		 * mapped. We are going to overwrite that entry so we
2159		 * need to clear the map_pid_to_cmdline. Otherwise we
2160		 * would read the new comm for the old pid.
2161		 */
2162		pid = savedcmd->map_cmdline_to_pid[idx];
2163		if (pid != NO_CMDLINE_MAP)
2164			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2165
2166		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2167		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2168
2169		savedcmd->cmdline_idx = idx;
2170	}
2171
 
2172	set_cmdline(idx, tsk->comm);
2173
2174	arch_spin_unlock(&trace_cmdline_lock);
2175
2176	return 1;
2177}
2178
2179static void __trace_find_cmdline(int pid, char comm[])
2180{
2181	unsigned map;
 
2182
2183	if (!pid) {
2184		strcpy(comm, "<idle>");
2185		return;
2186	}
2187
2188	if (WARN_ON_ONCE(pid < 0)) {
2189		strcpy(comm, "<XXX>");
2190		return;
2191	}
2192
2193	if (pid > PID_MAX_DEFAULT) {
2194		strcpy(comm, "<...>");
2195		return;
2196	}
2197
2198	map = savedcmd->map_pid_to_cmdline[pid];
2199	if (map != NO_CMDLINE_MAP)
2200		strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2201	else
2202		strcpy(comm, "<...>");
2203}
2204
2205void trace_find_cmdline(int pid, char comm[])
2206{
2207	preempt_disable();
2208	arch_spin_lock(&trace_cmdline_lock);
2209
2210	__trace_find_cmdline(pid, comm);
2211
2212	arch_spin_unlock(&trace_cmdline_lock);
2213	preempt_enable();
2214}
2215
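/*
 * Example (illustrative sketch): resolving a recorded pid back to a comm
 * when formatting output; the seq_file parameter is hypothetical.
 */
static void example_print_comm(struct seq_file *m, int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	seq_printf(m, "%s-%d\n", comm, pid);
}
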
2216int trace_find_tgid(int pid)
2217{
2218	if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2219		return 0;
2220
2221	return tgid_map[pid];
2222}
2223
2224static int trace_save_tgid(struct task_struct *tsk)
2225{
2226	/* treat recording of idle task as a success */
2227	if (!tsk->pid)
2228		return 1;
2229
2230	if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2231		return 0;
2232
2233	tgid_map[tsk->pid] = tsk->tgid;
2234	return 1;
2235}
2236
2237static bool tracing_record_taskinfo_skip(int flags)
2238{
2239	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2240		return true;
2241	if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2242		return true;
2243	if (!__this_cpu_read(trace_taskinfo_save))
2244		return true;
2245	return false;
2246}
2247
2248/**
2249 * tracing_record_taskinfo - record the task info of a task
2250 *
2251 * @task:  task to record
2252 * @flags: TRACE_RECORD_CMDLINE for recording comm
2253 *         TRACE_RECORD_TGID for recording tgid
2254 */
2255void tracing_record_taskinfo(struct task_struct *task, int flags)
2256{
2257	bool done;
2258
2259	if (tracing_record_taskinfo_skip(flags))
2260		return;
2261
2262	/*
2263	 * Record as much task information as possible. If some fail, continue
2264	 * to try to record the others.
2265	 */
2266	done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2267	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2268
2269	/* If recording any information failed, retry again soon. */
2270	if (!done)
2271		return;
2272
2273	__this_cpu_write(trace_taskinfo_save, false);
2274}
2275
2276/**
2277 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2278 *
2279 * @prev: previous task during sched_switch
2280 * @next: next task during sched_switch
2281 * @flags: TRACE_RECORD_CMDLINE for recording comm
2282 *         TRACE_RECORD_TGID for recording tgid
2283 */
2284void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2285					  struct task_struct *next, int flags)
2286{
2287	bool done;
2288
2289	if (tracing_record_taskinfo_skip(flags))
2290		return;
2291
2292	/*
2293	 * Record as much task information as possible. If some fail, continue
2294	 * to try to record the others.
2295	 */
2296	done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2297	done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2298	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2299	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2300
2301	/* If recording any information failed, retry again soon. */
2302	if (!done)
2303		return;
2304
2305	__this_cpu_write(trace_taskinfo_save, false);
2306}
2307
2308/* Helpers to record a specific task information */
2309void tracing_record_cmdline(struct task_struct *task)
2310{
2311	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2312}
2313
2314void tracing_record_tgid(struct task_struct *task)
2315{
2316	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2317}
2318
2319/*
2320 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2321 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2322 * simplifies those functions and keeps them in sync.
2323 */
2324enum print_line_t trace_handle_return(struct trace_seq *s)
2325{
2326	return trace_seq_has_overflowed(s) ?
2327		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2328}
2329EXPORT_SYMBOL_GPL(trace_handle_return);
2330
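/*
 * Example (illustrative sketch): event output callbacks build their text
 * in the iterator's trace_seq and finish with trace_handle_return() so
 * that overflow is reported consistently.  Real callbacks also take the
 * flags and trace_event arguments; they are omitted here for brevity.
 */
static enum print_line_t example_event_print(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	trace_seq_printf(s, "example event\n");

	return trace_handle_return(s);
}
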
2331void
2332tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2333			     unsigned long flags, int pc)
2334{
2335	struct task_struct *tsk = current;
2336
2337	entry->preempt_count		= pc & 0xff;
2338	entry->pid			= (tsk) ? tsk->pid : 0;
2339	entry->type			= type;
2340	entry->flags =
2341#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2342		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2343#else
2344		TRACE_FLAG_IRQS_NOSUPPORT |
2345#endif
2346		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
2347		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2348		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2349		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2350		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2351}
2352EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2353
2354struct ring_buffer_event *
2355trace_buffer_lock_reserve(struct ring_buffer *buffer,
2356			  int type,
2357			  unsigned long len,
2358			  unsigned long flags, int pc)
2359{
2360	return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2361}
2362
2363DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2364DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2365static int trace_buffered_event_ref;
2366
2367/**
2368 * trace_buffered_event_enable - enable buffering events
2369 *
2370 * When events are being filtered, it is quicker to use a temporary
2371 * buffer to write the event data into if there's a likely chance
2372 * that it will not be committed. The discard of the ring buffer
2373 * is not as fast as committing, and is much slower than copying
2374 * a commit.
2375 *
2376 * When an event is to be filtered, allocate per cpu buffers to
2377 * write the event data into, and if the event is filtered and discarded
2378 * it is simply dropped, otherwise, the entire data is to be committed
2379 * in one shot.
2380 */
2381void trace_buffered_event_enable(void)
2382{
2383	struct ring_buffer_event *event;
2384	struct page *page;
2385	int cpu;
2386
2387	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2388
2389	if (trace_buffered_event_ref++)
2390		return;
2391
2392	for_each_tracing_cpu(cpu) {
2393		page = alloc_pages_node(cpu_to_node(cpu),
2394					GFP_KERNEL | __GFP_NORETRY, 0);
2395		if (!page)
2396			goto failed;
2397
2398		event = page_address(page);
2399		memset(event, 0, sizeof(*event));
2400
2401		per_cpu(trace_buffered_event, cpu) = event;
2402
2403		preempt_disable();
2404		if (cpu == smp_processor_id() &&
2405		    this_cpu_read(trace_buffered_event) !=
2406		    per_cpu(trace_buffered_event, cpu))
2407			WARN_ON_ONCE(1);
2408		preempt_enable();
2409	}
2410
2411	return;
2412 failed:
2413	trace_buffered_event_disable();
2414}
2415
2416static void enable_trace_buffered_event(void *data)
2417{
2418	/* Probably not needed, but do it anyway */
2419	smp_rmb();
2420	this_cpu_dec(trace_buffered_event_cnt);
2421}
2422
2423static void disable_trace_buffered_event(void *data)
2424{
2425	this_cpu_inc(trace_buffered_event_cnt);
2426}
2427
2428/**
2429 * trace_buffered_event_disable - disable buffering events
2430 *
2431 * When a filter is removed, it is faster to not use the buffered
2432 * events, and to commit directly into the ring buffer. Free up
2433 * the temp buffers when there are no more users. This requires
2434 * special synchronization with current events.
2435 */
2436void trace_buffered_event_disable(void)
2437{
2438	int cpu;
2439
2440	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2441
2442	if (WARN_ON_ONCE(!trace_buffered_event_ref))
2443		return;
2444
2445	if (--trace_buffered_event_ref)
2446		return;
2447
2448	preempt_disable();
2449	/* For each CPU, set the buffer as used. */
2450	smp_call_function_many(tracing_buffer_mask,
2451			       disable_trace_buffered_event, NULL, 1);
2452	preempt_enable();
2453
2454	/* Wait for all current users to finish */
2455	synchronize_rcu();
2456
2457	for_each_tracing_cpu(cpu) {
2458		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2459		per_cpu(trace_buffered_event, cpu) = NULL;
2460	}
 
2461	/*
2462	 * Make sure trace_buffered_event is NULL before clearing
2463	 * trace_buffered_event_cnt.
2464	 */
2465	smp_wmb();
2466
2467	preempt_disable();
2468	/* Do the work on each cpu */
2469	smp_call_function_many(tracing_buffer_mask,
2470			       enable_trace_buffered_event, NULL, 1);
2471	preempt_enable();
2472}
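/*
 * Illustrative sketch (not part of this file): the intended pairing of
 * trace_buffered_event_enable()/_disable(). Both WARN if event_mutex is
 * not held, so a caller (in practice the event filter code) wraps them
 * like below. The function name is made up for the example.
 */
#if 0	/* example only */
static void example_toggle_filter_buffering(bool on)
{
	mutex_lock(&event_mutex);
	if (on)
		trace_buffered_event_enable();
	else
		trace_buffered_event_disable();
	mutex_unlock(&event_mutex);
}
#endif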
2473
2474static struct ring_buffer *temp_buffer;
2475
2476struct ring_buffer_event *
2477trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2478			  struct trace_event_file *trace_file,
2479			  int type, unsigned long len,
2480			  unsigned long flags, int pc)
2481{
2482	struct ring_buffer_event *entry;
2483	int val;
2484
2485	*current_rb = trace_file->tr->trace_buffer.buffer;
2486
2487	if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2488	     (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2489	    (entry = this_cpu_read(trace_buffered_event))) {
2490		/* Try to use the per cpu buffer first */
2491		val = this_cpu_inc_return(trace_buffered_event_cnt);
2492		if (val == 1) {
2493			trace_event_setup(entry, type, flags, pc);
2494			entry->array[0] = len;
2495			return entry;
2496		}
2497		this_cpu_dec(trace_buffered_event_cnt);
2498	}
2499
2500	entry = __trace_buffer_lock_reserve(*current_rb,
2501					    type, len, flags, pc);
2502	/*
2503	 * If tracing is off, but we have triggers enabled,
2504	 * we still need to look at the event data. Use the temp_buffer
2505	 * to store the trace event for the trigger to use. It's recursion
2506	 * safe and will not be recorded anywhere.
2507	 */
2508	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2509		*current_rb = temp_buffer;
2510		entry = __trace_buffer_lock_reserve(*current_rb,
2511						    type, len, flags, pc);
2512	}
2513	return entry;
2514}
2515EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2516
2517static DEFINE_SPINLOCK(tracepoint_iter_lock);
2518static DEFINE_MUTEX(tracepoint_printk_mutex);
2519
2520static void output_printk(struct trace_event_buffer *fbuffer)
2521{
2522	struct trace_event_call *event_call;
2523	struct trace_event *event;
2524	unsigned long flags;
2525	struct trace_iterator *iter = tracepoint_print_iter;
2526
2527	/* We should never get here if iter is NULL */
2528	if (WARN_ON_ONCE(!iter))
2529		return;
2530
2531	event_call = fbuffer->trace_file->event_call;
2532	if (!event_call || !event_call->event.funcs ||
2533	    !event_call->event.funcs->trace)
2534		return;
2535
2536	event = &fbuffer->trace_file->event_call->event;
2537
2538	spin_lock_irqsave(&tracepoint_iter_lock, flags);
2539	trace_seq_init(&iter->seq);
2540	iter->ent = fbuffer->entry;
2541	event_call->event.funcs->trace(iter, 0, event);
2542	trace_seq_putc(&iter->seq, 0);
2543	printk("%s", iter->seq.buffer);
2544
2545	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2546}
2547
2548int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2549			     void __user *buffer, size_t *lenp,
2550			     loff_t *ppos)
2551{
2552	int save_tracepoint_printk;
2553	int ret;
2554
2555	mutex_lock(&tracepoint_printk_mutex);
2556	save_tracepoint_printk = tracepoint_printk;
2557
2558	ret = proc_dointvec(table, write, buffer, lenp, ppos);
2559
2560	/*
2561	 * This will force an early exit, as tracepoint_printk
2562	 * is always zero when tracepoint_print_iter is not allocated.
2563	 */
2564	if (!tracepoint_print_iter)
2565		tracepoint_printk = 0;
2566
2567	if (save_tracepoint_printk == tracepoint_printk)
2568		goto out;
2569
2570	if (tracepoint_printk)
2571		static_key_enable(&tracepoint_printk_key.key);
2572	else
2573		static_key_disable(&tracepoint_printk_key.key);
2574
2575 out:
2576	mutex_unlock(&tracepoint_printk_mutex);
2577
2578	return ret;
2579}
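/*
 * Illustrative sketch (not part of this file): the handler above backs the
 * "kernel.tracepoint_printk" sysctl. As the comment in the code notes, it
 * only has an effect when tracepoint_print_iter was allocated at boot
 * (the "tp_printk" command line option). A minimal userspace toggle is
 * shown below; the helper name and the lack of error handling are just
 * for the example.
 */
#if 0	/* example only, userspace */
#include <fcntl.h>
#include <unistd.h>

static int set_tracepoint_printk(int on)
{
	int fd = open("/proc/sys/kernel/tracepoint_printk", O_WRONLY);

	if (fd < 0)
		return -1;
	write(fd, on ? "1" : "0", 1);
	close(fd);
	return 0;
}
#endif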
2580
2581void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2582{
2583	if (static_key_false(&tracepoint_printk_key.key))
2584		output_printk(fbuffer);
2585
2586	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2587				    fbuffer->event, fbuffer->entry,
2588				    fbuffer->flags, fbuffer->pc);
2589}
2590EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2591
2592/*
2593 * Skip 3:
2594 *
2595 *   trace_buffer_unlock_commit_regs()
2596 *   trace_event_buffer_commit()
2597 *   trace_event_raw_event_xxx()
2598 */
2599# define STACK_SKIP 3
2600
2601void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2602				     struct ring_buffer *buffer,
2603				     struct ring_buffer_event *event,
2604				     unsigned long flags, int pc,
2605				     struct pt_regs *regs)
2606{
2607	__buffer_unlock_commit(buffer, event);
2608
2609	/*
2610	 * If regs is not set, then skip the necessary functions.
2611	 * Note, we can still get here via blktrace, wakeup tracer
2612	 * and mmiotrace, but that's ok if they lose a function or
2613	 * two. They are not that meaningful.
2614	 */
2615	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2616	ftrace_trace_userstack(buffer, flags, pc);
2617}
2618
2619/*
2620 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2621 */
2622void
2623trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2624				   struct ring_buffer_event *event)
2625{
2626	__buffer_unlock_commit(buffer, event);
2627}
2628
2629static void
2630trace_process_export(struct trace_export *export,
2631	       struct ring_buffer_event *event)
2632{
2633	struct trace_entry *entry;
2634	unsigned int size = 0;
2635
2636	entry = ring_buffer_event_data(event);
2637	size = ring_buffer_event_length(event);
2638	export->write(export, entry, size);
2639}
2640
2641static DEFINE_MUTEX(ftrace_export_lock);
2642
2643static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2644
2645static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2646
2647static inline void ftrace_exports_enable(void)
2648{
2649	static_branch_enable(&ftrace_exports_enabled);
2650}
2651
2652static inline void ftrace_exports_disable(void)
2653{
2654	static_branch_disable(&ftrace_exports_enabled);
2655}
2656
2657static void ftrace_exports(struct ring_buffer_event *event)
2658{
2659	struct trace_export *export;
2660
2661	preempt_disable_notrace();
2662
2663	export = rcu_dereference_raw_check(ftrace_exports_list);
2664	while (export) {
2665		trace_process_export(export, event);
2666		export = rcu_dereference_raw_check(export->next);
2667	}
2668
2669	preempt_enable_notrace();
2670}
2671
2672static inline void
2673add_trace_export(struct trace_export **list, struct trace_export *export)
2674{
2675	rcu_assign_pointer(export->next, *list);
2676	/*
2677	 * We are entering export into the list but another
2678	 * CPU might be walking that list. We need to make sure
2679	 * the export->next pointer is valid before another CPU sees
2680	 * the export pointer included into the list.
2681	 */
2682	rcu_assign_pointer(*list, export);
2683}
2684
2685static inline int
2686rm_trace_export(struct trace_export **list, struct trace_export *export)
2687{
2688	struct trace_export **p;
2689
2690	for (p = list; *p != NULL; p = &(*p)->next)
2691		if (*p == export)
2692			break;
2693
2694	if (*p != export)
2695		return -1;
2696
2697	rcu_assign_pointer(*p, (*p)->next);
2698
2699	return 0;
2700}
2701
2702static inline void
2703add_ftrace_export(struct trace_export **list, struct trace_export *export)
2704{
2705	if (*list == NULL)
2706		ftrace_exports_enable();
2707
2708	add_trace_export(list, export);
2709}
2710
2711static inline int
2712rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2713{
2714	int ret;
2715
2716	ret = rm_trace_export(list, export);
2717	if (*list == NULL)
2718		ftrace_exports_disable();
2719
2720	return ret;
2721}
2722
2723int register_ftrace_export(struct trace_export *export)
2724{
2725	if (WARN_ON_ONCE(!export->write))
2726		return -1;
2727
2728	mutex_lock(&ftrace_export_lock);
2729
2730	add_ftrace_export(&ftrace_exports_list, export);
2731
2732	mutex_unlock(&ftrace_export_lock);
2733
2734	return 0;
2735}
2736EXPORT_SYMBOL_GPL(register_ftrace_export);
2737
2738int unregister_ftrace_export(struct trace_export *export)
2739{
2740	int ret;
2741
2742	mutex_lock(&ftrace_export_lock);
2743
2744	ret = rm_ftrace_export(&ftrace_exports_list, export);
2745
2746	mutex_unlock(&ftrace_export_lock);
2747
2748	return ret;
2749}
2750EXPORT_SYMBOL_GPL(unregister_ftrace_export);
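/*
 * Illustrative sketch (not part of this file): how a module might use the
 * register_ftrace_export()/unregister_ftrace_export() interface above.
 * The callback signature follows struct trace_export in <linux/trace.h>
 * as used by trace_process_export(); the module and function names are
 * made up for the example.
 */
#if 0	/* example only */
#include <linux/module.h>
#include <linux/trace.h>

/* Called for each exported trace event while the export is registered. */
static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* Forward the raw trace_entry bytes somewhere, e.g. a device. */
	pr_info("exported %u bytes of trace data\n", size);
}

static struct trace_export example_export = {
	.write	= example_export_write,
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}

static void __exit example_export_exit(void)
{
	unregister_ftrace_export(&example_export);
}

module_init(example_export_init);
module_exit(example_export_exit);
MODULE_LICENSE("GPL");
#endif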
2751
2752void
2753trace_function(struct trace_array *tr,
2754	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
2755	       int pc)
2756{
2757	struct trace_event_call *call = &event_function;
2758	struct ring_buffer *buffer = tr->trace_buffer.buffer;
2759	struct ring_buffer_event *event;
2760	struct ftrace_entry *entry;
2761
2762	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2763					    flags, pc);
2764	if (!event)
2765		return;
2766	entry	= ring_buffer_event_data(event);
2767	entry->ip			= ip;
2768	entry->parent_ip		= parent_ip;
2769
2770	if (!call_filter_check_discard(call, entry, buffer, event)) {
2771		if (static_branch_unlikely(&ftrace_exports_enabled))
2772			ftrace_exports(event);
2773		__buffer_unlock_commit(buffer, event);
2774	}
2775}
2776
2777#ifdef CONFIG_STACKTRACE
2778
2779/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2780#define FTRACE_KSTACK_NESTING	4
2781
2782#define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)
2783
2784struct ftrace_stack {
2785	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
2786};
2787
2788
2789struct ftrace_stacks {
2790	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
2791};
2792
2793static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2794static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2795
2796static void __ftrace_trace_stack(struct ring_buffer *buffer,
2797				 unsigned long flags,
2798				 int skip, int pc, struct pt_regs *regs)
2799{
2800	struct trace_event_call *call = &event_kernel_stack;
2801	struct ring_buffer_event *event;
2802	unsigned int size, nr_entries;
2803	struct ftrace_stack *fstack;
2804	struct stack_entry *entry;
2805	int stackidx;
2806
2807	/*
2808	 * Add one, for this function and the call to stack_trace_save().
2809	 * If regs is set, then these functions will not be in the way.
2810	 */
2811#ifndef CONFIG_UNWINDER_ORC
2812	if (!regs)
2813		skip++;
2814#endif
2815
2816	/*
2817	 * Since events can happen in NMIs there's no safe way to
2818	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2819	 * or NMI comes in, it will just have to use the default
2820	 * FTRACE_STACK_SIZE.
2821	 */
2822	preempt_disable_notrace();
2823
2824	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2825
2826	/* This should never happen. If it does, yell once and skip */
2827	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2828		goto out;
2829
2830	/*
2831	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2832	 * interrupt will either see the value pre increment or post
2833	 * increment. If the interrupt happens pre increment it will have
2834	 * restored the counter when it returns.  We just need a barrier to
2835	 * keep gcc from moving things around.
2836	 */
2837	barrier();
2838
2839	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2840	size = ARRAY_SIZE(fstack->calls);
2841
2842	if (regs) {
2843		nr_entries = stack_trace_save_regs(regs, fstack->calls,
2844						   size, skip);
2845	} else {
2846		nr_entries = stack_trace_save(fstack->calls, size, skip);
2847	}
2848
2849	size = nr_entries * sizeof(unsigned long);
2850	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2851					    sizeof(*entry) + size, flags, pc);
2852	if (!event)
2853		goto out;
2854	entry = ring_buffer_event_data(event);
2855
2856	memcpy(&entry->caller, fstack->calls, size);
2857	entry->size = nr_entries;
2858
2859	if (!call_filter_check_discard(call, entry, buffer, event))
2860		__buffer_unlock_commit(buffer, event);
2861
2862 out:
2863	/* Again, don't let gcc optimize things here */
2864	barrier();
2865	__this_cpu_dec(ftrace_stack_reserve);
2866	preempt_enable_notrace();
2867
2868}
2869
2870static inline void ftrace_trace_stack(struct trace_array *tr,
2871				      struct ring_buffer *buffer,
2872				      unsigned long flags,
2873				      int skip, int pc, struct pt_regs *regs)
2874{
2875	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2876		return;
2877
2878	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
2879}
2880
2881void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2882		   int pc)
2883{
2884	struct ring_buffer *buffer = tr->trace_buffer.buffer;
2885
2886	if (rcu_is_watching()) {
2887		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2888		return;
2889	}
2890
2891	/*
2892	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2893	 * but if the above rcu_is_watching() failed, then the NMI
2894	 * triggered someplace critical, and rcu_irq_enter() should
2895	 * not be called from NMI.
2896	 */
2897	if (unlikely(in_nmi()))
2898		return;
2899
2900	rcu_irq_enter_irqson();
2901	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2902	rcu_irq_exit_irqson();
2903}
2904
2905/**
2906 * trace_dump_stack - record a stack back trace in the trace buffer
2907 * @skip: Number of functions to skip (helper handlers)
2908 */
2909void trace_dump_stack(int skip)
2910{
2911	unsigned long flags;
2912
2913	if (tracing_disabled || tracing_selftest_running)
2914		return;
2915
2916	local_save_flags(flags);
2917
2918#ifndef CONFIG_UNWINDER_ORC
2919	/* Skip 1 to skip this function. */
2920	skip++;
2921#endif
2922	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
2923			     flags, skip, preempt_count(), NULL);
2924}
2925EXPORT_SYMBOL_GPL(trace_dump_stack);
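/*
 * Illustrative sketch (not part of this file): trace_dump_stack() can be
 * called from kernel code to record the current stack into the trace
 * buffer, which helps answer "who reached this path?". The function name
 * below is made up for the example.
 */
#if 0	/* example only */
static void example_suspicious_path(void)
{
	/* Record how we got here; skip no extra frames. */
	trace_dump_stack(0);
}
#endif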
2926
2927#ifdef CONFIG_USER_STACKTRACE_SUPPORT
2928static DEFINE_PER_CPU(int, user_stack_count);
2929
2930static void
2931ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2932{
2933	struct trace_event_call *call = &event_user_stack;
2934	struct ring_buffer_event *event;
2935	struct userstack_entry *entry;
2936
2937	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2938		return;
2939
2940	/*
2941	 * NMIs can not handle page faults, even with fixups.
2942	 * Saving the user stack can (and often does) fault.
2943	 */
2944	if (unlikely(in_nmi()))
2945		return;
2946
2947	/*
2948	 * prevent recursion, since the user stack tracing may
2949	 * trigger other kernel events.
2950	 */
2951	preempt_disable();
2952	if (__this_cpu_read(user_stack_count))
2953		goto out;
2954
2955	__this_cpu_inc(user_stack_count);
2956
2957	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2958					    sizeof(*entry), flags, pc);
2959	if (!event)
2960		goto out_drop_count;
2961	entry	= ring_buffer_event_data(event);
2962
2963	entry->tgid		= current->tgid;
2964	memset(&entry->caller, 0, sizeof(entry->caller));
2965
2966	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
2967	if (!call_filter_check_discard(call, entry, buffer, event))
2968		__buffer_unlock_commit(buffer, event);
2969
2970 out_drop_count:
2971	__this_cpu_dec(user_stack_count);
2972 out:
2973	preempt_enable();
2974}
2975#else /* CONFIG_USER_STACKTRACE_SUPPORT */
2976static void ftrace_trace_userstack(struct ring_buffer *buffer,
2977				   unsigned long flags, int pc)
2978{
2979}
2980#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
2981
2982#endif /* CONFIG_STACKTRACE */
2983
2984/* created for use with alloc_percpu */
2985struct trace_buffer_struct {
2986	int nesting;
2987	char buffer[4][TRACE_BUF_SIZE];
2988};
2989
2990static struct trace_buffer_struct *trace_percpu_buffer;
2991
2992/*
2993 * This allows for lockless recording.  If we're nested too deeply, then
2994 * this returns NULL.
2995 */
2996static char *get_trace_buf(void)
2997{
2998	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2999
3000	if (!buffer || buffer->nesting >= 4)
3001		return NULL;
3002
3003	buffer->nesting++;
3004
3005	/* Interrupts must see nesting incremented before we use the buffer */
3006	barrier();
3007	return &buffer->buffer[buffer->nesting][0];
3008}
3009
3010static void put_trace_buf(void)
3011{
3012	/* Don't let the decrement of nesting leak before this */
3013	barrier();
3014	this_cpu_dec(trace_percpu_buffer->nesting);
3015}
3016
3017static int alloc_percpu_trace_buffer(void)
3018{
3019	struct trace_buffer_struct *buffers;
3020
3021	buffers = alloc_percpu(struct trace_buffer_struct);
3022	if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
3023		return -ENOMEM;
3024
3025	trace_percpu_buffer = buffers;
3026	return 0;
3027}
3028
3029static int buffers_allocated;
3030
3031void trace_printk_init_buffers(void)
3032{
3033	if (buffers_allocated)
3034		return;
3035
3036	if (alloc_percpu_trace_buffer())
3037		return;
3038
3039	/* trace_printk() is for debug use only. Don't use it in production. */
3040
3041	pr_warn("\n");
3042	pr_warn("**********************************************************\n");
3043	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3044	pr_warn("**                                                      **\n");
3045	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
3046	pr_warn("**                                                      **\n");
3047	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
3048	pr_warn("** unsafe for production use.                           **\n");
3049	pr_warn("**                                                      **\n");
3050	pr_warn("** If you see this message and you are not debugging    **\n");
3051	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
3052	pr_warn("**                                                      **\n");
3053	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
3054	pr_warn("**********************************************************\n");
3055
3056	/* Expand the buffers to set size */
3057	tracing_update_buffers();
3058
3059	buffers_allocated = 1;
3060
3061	/*
3062	 * trace_printk_init_buffers() can be called by modules.
3063	 * If that happens, then we need to start cmdline recording
3064	 * directly here. If the global_trace.trace_buffer.buffer is already
3065	 * allocated, then this was called by module code.
3066	 */
3067	if (global_trace.trace_buffer.buffer)
3068		tracing_start_cmdline_record();
3069}
3070EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3071
3072void trace_printk_start_comm(void)
3073{
3074	/* Start tracing comms if trace printk is set */
3075	if (!buffers_allocated)
3076		return;
3077	tracing_start_cmdline_record();
3078}
3079
3080static void trace_printk_start_stop_comm(int enabled)
3081{
3082	if (!buffers_allocated)
3083		return;
3084
3085	if (enabled)
3086		tracing_start_cmdline_record();
3087	else
3088		tracing_stop_cmdline_record();
3089}
3090
3091/**
3092 * trace_vbprintk - write binary msg to tracing buffer
3093 * @ip:    The address of the caller
3094 * @fmt:   The string format to write to the buffer
3095 * @args:  Arguments for @fmt
3096 */
3097int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3098{
3099	struct trace_event_call *call = &event_bprint;
3100	struct ring_buffer_event *event;
3101	struct ring_buffer *buffer;
3102	struct trace_array *tr = &global_trace;
3103	struct bprint_entry *entry;
3104	unsigned long flags;
3105	char *tbuffer;
3106	int len = 0, size, pc;
3107
3108	if (unlikely(tracing_selftest_running || tracing_disabled))
3109		return 0;
3110
3111	/* Don't pollute graph traces with trace_vprintk internals */
3112	pause_graph_tracing();
3113
3114	pc = preempt_count();
3115	preempt_disable_notrace();
3116
3117	tbuffer = get_trace_buf();
3118	if (!tbuffer) {
3119		len = 0;
3120		goto out_nobuffer;
3121	}
3122
3123	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3124
3125	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3126		goto out;
3127
3128	local_save_flags(flags);
3129	size = sizeof(*entry) + sizeof(u32) * len;
3130	buffer = tr->trace_buffer.buffer;
3131	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3132					    flags, pc);
3133	if (!event)
3134		goto out;
3135	entry = ring_buffer_event_data(event);
3136	entry->ip			= ip;
3137	entry->fmt			= fmt;
3138
3139	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3140	if (!call_filter_check_discard(call, entry, buffer, event)) {
3141		__buffer_unlock_commit(buffer, event);
3142		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3143	}
3144
3145out:
3146	put_trace_buf();
3147
3148out_nobuffer:
3149	preempt_enable_notrace();
3150	unpause_graph_tracing();
3151
3152	return len;
3153}
3154EXPORT_SYMBOL_GPL(trace_vbprintk);
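/*
 * Illustrative sketch (not part of this file): trace_vbprintk() is the
 * backend of the trace_printk() binary fast path, so the usual way to
 * reach it is trace_printk() in kernel code. The function and variable
 * names below are made up for the example.
 */
#if 0	/* example only */
static void example_debug_point(int budget, u64 ts)
{
	/* Lands in the ring buffer as a bprint event, not in dmesg. */
	trace_printk("budget=%d ts=%llu\n", budget, ts);
}
#endif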
3155
3156__printf(3, 0)
3157static int
3158__trace_array_vprintk(struct ring_buffer *buffer,
3159		      unsigned long ip, const char *fmt, va_list args)
3160{
3161	struct trace_event_call *call = &event_print;
3162	struct ring_buffer_event *event;
3163	int len = 0, size, pc;
3164	struct print_entry *entry;
3165	unsigned long flags;
3166	char *tbuffer;
3167
3168	if (tracing_disabled || tracing_selftest_running)
3169		return 0;
3170
3171	/* Don't pollute graph traces with trace_vprintk internals */
3172	pause_graph_tracing();
3173
3174	pc = preempt_count();
3175	preempt_disable_notrace();
3176
3177
3178	tbuffer = get_trace_buf();
3179	if (!tbuffer) {
3180		len = 0;
3181		goto out_nobuffer;
3182	}
3183
3184	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3185
3186	local_save_flags(flags);
3187	size = sizeof(*entry) + len + 1;
3188	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3189					    flags, pc);
3190	if (!event)
3191		goto out;
3192	entry = ring_buffer_event_data(event);
3193	entry->ip = ip;
3194
3195	memcpy(&entry->buf, tbuffer, len + 1);
3196	if (!call_filter_check_discard(call, entry, buffer, event)) {
3197		__buffer_unlock_commit(buffer, event);
3198		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3199	}
3200
3201out:
3202	put_trace_buf();
3203
3204out_nobuffer:
3205	preempt_enable_notrace();
3206	unpause_graph_tracing();
3207
3208	return len;
3209}
3210
3211__printf(3, 0)
3212int trace_array_vprintk(struct trace_array *tr,
3213			unsigned long ip, const char *fmt, va_list args)
3214{
3215	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3216}
3217
3218__printf(3, 0)
3219int trace_array_printk(struct trace_array *tr,
3220		       unsigned long ip, const char *fmt, ...)
3221{
3222	int ret;
3223	va_list ap;
3224
3225	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3226		return 0;
3227
3228	va_start(ap, fmt);
3229	ret = trace_array_vprintk(tr, ip, fmt, ap);
3230	va_end(ap);
3231	return ret;
3232}
3233EXPORT_SYMBOL_GPL(trace_array_printk);
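/*
 * Illustrative sketch (not part of this file): trace_array_printk() writes
 * into the buffer of a specific trace instance rather than the global one.
 * It assumes the caller already holds a struct trace_array pointer for
 * that instance; how the pointer is obtained is outside this example, and
 * the function name below is made up.
 */
#if 0	/* example only */
static void example_instance_note(struct trace_array *tr, int err)
{
	trace_array_printk(tr, _THIS_IP_, "device error %d\n", err);
}
#endif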
3234
3235__printf(3, 4)
3236int trace_array_printk_buf(struct ring_buffer *buffer,
3237			   unsigned long ip, const char *fmt, ...)
3238{
3239	int ret;
3240	va_list ap;
3241
3242	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3243		return 0;
3244
3245	va_start(ap, fmt);
3246	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3247	va_end(ap);
3248	return ret;
3249}
3250
3251__printf(2, 0)
3252int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3253{
3254	return trace_array_vprintk(&global_trace, ip, fmt, args);
3255}
3256EXPORT_SYMBOL_GPL(trace_vprintk);
3257
3258static void trace_iterator_increment(struct trace_iterator *iter)
3259{
3260	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3261
3262	iter->idx++;
3263	if (buf_iter)
3264		ring_buffer_read(buf_iter, NULL);
3265}
3266
3267static struct trace_entry *
3268peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3269		unsigned long *lost_events)
3270{
3271	struct ring_buffer_event *event;
3272	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3273
3274	if (buf_iter)
3275		event = ring_buffer_iter_peek(buf_iter, ts);
3276	else
3277		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3278					 lost_events);
3279
3280	if (event) {
3281		iter->ent_size = ring_buffer_event_length(event);
3282		return ring_buffer_event_data(event);
3283	}
3284	iter->ent_size = 0;
3285	return NULL;
3286}
3287
3288static struct trace_entry *
3289__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3290		  unsigned long *missing_events, u64 *ent_ts)
3291{
3292	struct ring_buffer *buffer = iter->trace_buffer->buffer;
3293	struct trace_entry *ent, *next = NULL;
3294	unsigned long lost_events = 0, next_lost = 0;
3295	int cpu_file = iter->cpu_file;
3296	u64 next_ts = 0, ts;
3297	int next_cpu = -1;
3298	int next_size = 0;
3299	int cpu;
3300
3301	/*
3302	 * If we are in a per_cpu trace file, don't bother iterating over
3303	 * all CPUs; peek at that one directly.
3304	 */
3305	if (cpu_file > RING_BUFFER_ALL_CPUS) {
3306		if (ring_buffer_empty_cpu(buffer, cpu_file))
3307			return NULL;
3308		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3309		if (ent_cpu)
3310			*ent_cpu = cpu_file;
3311
3312		return ent;
3313	}
3314
3315	for_each_tracing_cpu(cpu) {
3316
3317		if (ring_buffer_empty_cpu(buffer, cpu))
3318			continue;
3319
3320		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3321
3322		/*
3323		 * Pick the entry with the smallest timestamp:
3324		 */
3325		if (ent && (!next || ts < next_ts)) {
3326			next = ent;
3327			next_cpu = cpu;
3328			next_ts = ts;
3329			next_lost = lost_events;
3330			next_size = iter->ent_size;
3331		}
3332	}
3333
3334	iter->ent_size = next_size;
3335
3336	if (ent_cpu)
3337		*ent_cpu = next_cpu;
3338
3339	if (ent_ts)
3340		*ent_ts = next_ts;
3341
3342	if (missing_events)
3343		*missing_events = next_lost;
3344
3345	return next;
3346}
3347
3348/* Find the next real entry, without updating the iterator itself */
3349struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3350					  int *ent_cpu, u64 *ent_ts)
3351{
3352	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3353}
3354
3355/* Find the next real entry, and increment the iterator to the next entry */
3356void *trace_find_next_entry_inc(struct trace_iterator *iter)
3357{
3358	iter->ent = __find_next_entry(iter, &iter->cpu,
3359				      &iter->lost_events, &iter->ts);
3360
3361	if (iter->ent)
3362		trace_iterator_increment(iter);
3363
3364	return iter->ent ? iter : NULL;
3365}
3366
3367static void trace_consume(struct trace_iterator *iter)
3368{
3369	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3370			    &iter->lost_events);
3371}
3372
3373static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3374{
3375	struct trace_iterator *iter = m->private;
3376	int i = (int)*pos;
3377	void *ent;
3378
3379	WARN_ON_ONCE(iter->leftover);
3380
3381	(*pos)++;
3382
3383	/* can't go backwards */
3384	if (iter->idx > i)
3385		return NULL;
3386
3387	if (iter->idx < 0)
3388		ent = trace_find_next_entry_inc(iter);
3389	else
3390		ent = iter;
3391
3392	while (ent && iter->idx < i)
3393		ent = trace_find_next_entry_inc(iter);
3394
3395	iter->pos = *pos;
3396
3397	return ent;
3398}
3399
3400void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3401{
3402	struct ring_buffer_event *event;
3403	struct ring_buffer_iter *buf_iter;
3404	unsigned long entries = 0;
3405	u64 ts;
3406
3407	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3408
3409	buf_iter = trace_buffer_iter(iter, cpu);
3410	if (!buf_iter)
3411		return;
3412
3413	ring_buffer_iter_reset(buf_iter);
3414
3415	/*
3416	 * With the max latency tracers, a reset may never have taken
3417	 * place on a cpu. This is evident by the timestamp being
3418	 * before the start of the buffer.
3419	 */
3420	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3421		if (ts >= iter->trace_buffer->time_start)
3422			break;
3423		entries++;
3424		ring_buffer_read(buf_iter, NULL);
3425	}
3426
3427	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3428}
3429
3430/*
3431 * The current tracer is copied to avoid holding a global lock
3432 * all around.
3433 */
3434static void *s_start(struct seq_file *m, loff_t *pos)
3435{
3436	struct trace_iterator *iter = m->private;
3437	struct trace_array *tr = iter->tr;
3438	int cpu_file = iter->cpu_file;
3439	void *p = NULL;
3440	loff_t l = 0;
3441	int cpu;
3442
3443	/*
3444	 * copy the tracer to avoid using a global lock all around.
3445	 * iter->trace is a copy of current_trace; the name pointer
3446	 * may be compared instead of using strcmp(), as iter->trace->name
3447	 * will point to the same string as current_trace->name.
3448	 */
3449	mutex_lock(&trace_types_lock);
3450	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3451		*iter->trace = *tr->current_trace;
3452	mutex_unlock(&trace_types_lock);
3453
3454#ifdef CONFIG_TRACER_MAX_TRACE
3455	if (iter->snapshot && iter->trace->use_max_tr)
3456		return ERR_PTR(-EBUSY);
3457#endif
3458
3459	if (!iter->snapshot)
3460		atomic_inc(&trace_record_taskinfo_disabled);
3461
3462	if (*pos != iter->pos) {
3463		iter->ent = NULL;
3464		iter->cpu = 0;
3465		iter->idx = -1;
3466
3467		if (cpu_file == RING_BUFFER_ALL_CPUS) {
3468			for_each_tracing_cpu(cpu)
3469				tracing_iter_reset(iter, cpu);
3470		} else
3471			tracing_iter_reset(iter, cpu_file);
3472
3473		iter->leftover = 0;
3474		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3475			;
3476
3477	} else {
3478		/*
3479		 * If we overflowed the seq_file before, then we want
3480		 * to just reuse the trace_seq buffer again.
3481		 */
3482		if (iter->leftover)
3483			p = iter;
3484		else {
3485			l = *pos - 1;
3486			p = s_next(m, p, &l);
3487		}
3488	}
3489
3490	trace_event_read_lock();
3491	trace_access_lock(cpu_file);
3492	return p;
3493}
3494
3495static void s_stop(struct seq_file *m, void *p)
3496{
3497	struct trace_iterator *iter = m->private;
3498
3499#ifdef CONFIG_TRACER_MAX_TRACE
3500	if (iter->snapshot && iter->trace->use_max_tr)
3501		return;
3502#endif
3503
3504	if (!iter->snapshot)
3505		atomic_dec(&trace_record_taskinfo_disabled);
3506
3507	trace_access_unlock(iter->cpu_file);
3508	trace_event_read_unlock();
3509}
3510
3511static void
3512get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
3513		      unsigned long *entries, int cpu)
3514{
3515	unsigned long count;
3516
3517	count = ring_buffer_entries_cpu(buf->buffer, cpu);
3518	/*
3519	 * If this buffer has skipped entries, then we hold all
3520	 * entries for the trace and we need to ignore the
3521	 * ones before the time stamp.
3522	 */
3523	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3524		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3525		/* total is the same as the entries */
3526		*total = count;
3527	} else
3528		*total = count +
3529			ring_buffer_overrun_cpu(buf->buffer, cpu);
3530	*entries = count;
3531}
3532
3533static void
3534get_total_entries(struct trace_buffer *buf,
3535		  unsigned long *total, unsigned long *entries)
3536{
3537	unsigned long t, e;
3538	int cpu;
3539
3540	*total = 0;
3541	*entries = 0;
3542
3543	for_each_tracing_cpu(cpu) {
3544		get_total_entries_cpu(buf, &t, &e, cpu);
3545		*total += t;
3546		*entries += e;
3547	}
3548}
3549
3550unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3551{
3552	unsigned long total, entries;
3553
3554	if (!tr)
3555		tr = &global_trace;
3556
3557	get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
3558
3559	return entries;
3560}
3561
3562unsigned long trace_total_entries(struct trace_array *tr)
3563{
3564	unsigned long total, entries;
3565
3566	if (!tr)
3567		tr = &global_trace;
3568
3569	get_total_entries(&tr->trace_buffer, &total, &entries);
3570
3571	return entries;
3572}
3573
3574static void print_lat_help_header(struct seq_file *m)
3575{
3576	seq_puts(m, "#                  _------=> CPU#            \n"
3577		    "#                 / _-----=> irqs-off        \n"
3578		    "#                | / _----=> need-resched    \n"
3579		    "#                || / _---=> hardirq/softirq \n"
3580		    "#                ||| / _--=> preempt-depth   \n"
3581		    "#                |||| /     delay            \n"
3582		    "#  cmd     pid   ||||| time  |   caller      \n"
3583		    "#     \\   /      |||||  \\    |   /         \n");
3584}
3585
3586static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3587{
3588	unsigned long total;
3589	unsigned long entries;
3590
3591	get_total_entries(buf, &total, &entries);
3592	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
3593		   entries, total, num_online_cpus());
3594	seq_puts(m, "#\n");
3595}
3596
3597static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3598				   unsigned int flags)
3599{
3600	bool tgid = flags & TRACE_ITER_RECORD_TGID;
3601
3602	print_event_info(buf, m);
3603
3604	seq_printf(m, "#           TASK-PID   %s  CPU#   TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
3605	seq_printf(m, "#              | |     %s    |       |         |\n",	 tgid ? "  |      " : "");
3606}
3607
3608static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3609				       unsigned int flags)
3610{
3611	bool tgid = flags & TRACE_ITER_RECORD_TGID;
3612	const char *space = "          ";
3613	int prec = tgid ? 10 : 2;
3614
3615	print_event_info(buf, m);
3616
3617	seq_printf(m, "#                          %.*s  _-----=> irqs-off\n", prec, space);
3618	seq_printf(m, "#                          %.*s / _----=> need-resched\n", prec, space);
3619	seq_printf(m, "#                          %.*s| / _---=> hardirq/softirq\n", prec, space);
3620	seq_printf(m, "#                          %.*s|| / _--=> preempt-depth\n", prec, space);
3621	seq_printf(m, "#                          %.*s||| /     delay\n", prec, space);
3622	seq_printf(m, "#           TASK-PID %.*sCPU#  ||||    TIMESTAMP  FUNCTION\n", prec, "   TGID   ");
3623	seq_printf(m, "#              | |   %.*s  |   ||||       |         |\n", prec, "     |    ");
3624}
3625
3626void
3627print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3628{
3629	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3630	struct trace_buffer *buf = iter->trace_buffer;
3631	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3632	struct tracer *type = iter->trace;
3633	unsigned long entries;
3634	unsigned long total;
3635	const char *name = "preemption";
3636
3637	name = type->name;
3638
3639	get_total_entries(buf, &total, &entries);
3640
3641	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3642		   name, UTS_RELEASE);
3643	seq_puts(m, "# -----------------------------------"
3644		 "---------------------------------\n");
3645	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3646		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3647		   nsecs_to_usecs(data->saved_latency),
3648		   entries,
3649		   total,
3650		   buf->cpu,
3651#if defined(CONFIG_PREEMPT_NONE)
3652		   "server",
3653#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3654		   "desktop",
3655#elif defined(CONFIG_PREEMPT)
3656		   "preempt",
3657#else
3658		   "unknown",
3659#endif
3660		   /* These are reserved for later use */
3661		   0, 0, 0, 0);
3662#ifdef CONFIG_SMP
3663	seq_printf(m, " #P:%d)\n", num_online_cpus());
3664#else
3665	seq_puts(m, ")\n");
3666#endif
3667	seq_puts(m, "#    -----------------\n");
3668	seq_printf(m, "#    | task: %.16s-%d "
3669		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3670		   data->comm, data->pid,
3671		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3672		   data->policy, data->rt_priority);
3673	seq_puts(m, "#    -----------------\n");
3674
3675	if (data->critical_start) {
3676		seq_puts(m, "#  => started at: ");
3677		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3678		trace_print_seq(m, &iter->seq);
3679		seq_puts(m, "\n#  => ended at:   ");
3680		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3681		trace_print_seq(m, &iter->seq);
3682		seq_puts(m, "\n#\n");
3683	}
3684
3685	seq_puts(m, "#\n");
3686}
3687
3688static void test_cpu_buff_start(struct trace_iterator *iter)
3689{
3690	struct trace_seq *s = &iter->seq;
3691	struct trace_array *tr = iter->tr;
3692
3693	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3694		return;
3695
3696	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3697		return;
3698
3699	if (cpumask_available(iter->started) &&
3700	    cpumask_test_cpu(iter->cpu, iter->started))
3701		return;
3702
3703	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3704		return;
3705
3706	if (cpumask_available(iter->started))
3707		cpumask_set_cpu(iter->cpu, iter->started);
3708
3709	/* Don't print started cpu buffer for the first entry of the trace */
3710	if (iter->idx > 1)
3711		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3712				iter->cpu);
3713}
3714
3715static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3716{
3717	struct trace_array *tr = iter->tr;
3718	struct trace_seq *s = &iter->seq;
3719	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3720	struct trace_entry *entry;
3721	struct trace_event *event;
3722
3723	entry = iter->ent;
3724
3725	test_cpu_buff_start(iter);
3726
3727	event = ftrace_find_event(entry->type);
3728
3729	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3730		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3731			trace_print_lat_context(iter);
3732		else
3733			trace_print_context(iter);
3734	}
3735
3736	if (trace_seq_has_overflowed(s))
3737		return TRACE_TYPE_PARTIAL_LINE;
3738
3739	if (event)
3740		return event->funcs->trace(iter, sym_flags, event);
3741
3742	trace_seq_printf(s, "Unknown type %d\n", entry->type);
3743
3744	return trace_handle_return(s);
3745}
3746
3747static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3748{
3749	struct trace_array *tr = iter->tr;
3750	struct trace_seq *s = &iter->seq;
3751	struct trace_entry *entry;
3752	struct trace_event *event;
3753
3754	entry = iter->ent;
3755
3756	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3757		trace_seq_printf(s, "%d %d %llu ",
3758				 entry->pid, iter->cpu, iter->ts);
3759
3760	if (trace_seq_has_overflowed(s))
3761		return TRACE_TYPE_PARTIAL_LINE;
3762
3763	event = ftrace_find_event(entry->type);
3764	if (event)
3765		return event->funcs->raw(iter, 0, event);
3766
3767	trace_seq_printf(s, "%d ?\n", entry->type);
3768
3769	return trace_handle_return(s);
3770}
3771
3772static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3773{
3774	struct trace_array *tr = iter->tr;
3775	struct trace_seq *s = &iter->seq;
3776	unsigned char newline = '\n';
3777	struct trace_entry *entry;
3778	struct trace_event *event;
3779
3780	entry = iter->ent;
3781
3782	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3783		SEQ_PUT_HEX_FIELD(s, entry->pid);
3784		SEQ_PUT_HEX_FIELD(s, iter->cpu);
3785		SEQ_PUT_HEX_FIELD(s, iter->ts);
3786		if (trace_seq_has_overflowed(s))
3787			return TRACE_TYPE_PARTIAL_LINE;
3788	}
3789
3790	event = ftrace_find_event(entry->type);
3791	if (event) {
3792		enum print_line_t ret = event->funcs->hex(iter, 0, event);
3793		if (ret != TRACE_TYPE_HANDLED)
3794			return ret;
3795	}
3796
3797	SEQ_PUT_FIELD(s, newline);
3798
3799	return trace_handle_return(s);
3800}
3801
3802static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3803{
3804	struct trace_array *tr = iter->tr;
3805	struct trace_seq *s = &iter->seq;
3806	struct trace_entry *entry;
3807	struct trace_event *event;
3808
3809	entry = iter->ent;
3810
3811	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3812		SEQ_PUT_FIELD(s, entry->pid);
3813		SEQ_PUT_FIELD(s, iter->cpu);
3814		SEQ_PUT_FIELD(s, iter->ts);
3815		if (trace_seq_has_overflowed(s))
3816			return TRACE_TYPE_PARTIAL_LINE;
3817	}
3818
3819	event = ftrace_find_event(entry->type);
3820	return event ? event->funcs->binary(iter, 0, event) :
3821		TRACE_TYPE_HANDLED;
3822}
3823
3824int trace_empty(struct trace_iterator *iter)
3825{
3826	struct ring_buffer_iter *buf_iter;
3827	int cpu;
3828
3829	/* If we are looking at one CPU buffer, only check that one */
3830	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3831		cpu = iter->cpu_file;
3832		buf_iter = trace_buffer_iter(iter, cpu);
3833		if (buf_iter) {
3834			if (!ring_buffer_iter_empty(buf_iter))
3835				return 0;
3836		} else {
3837			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3838				return 0;
3839		}
3840		return 1;
3841	}
3842
3843	for_each_tracing_cpu(cpu) {
3844		buf_iter = trace_buffer_iter(iter, cpu);
3845		if (buf_iter) {
3846			if (!ring_buffer_iter_empty(buf_iter))
3847				return 0;
3848		} else {
3849			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3850				return 0;
3851		}
3852	}
3853
3854	return 1;
3855}
3856
3857/*  Called with trace_event_read_lock() held. */
3858enum print_line_t print_trace_line(struct trace_iterator *iter)
3859{
3860	struct trace_array *tr = iter->tr;
3861	unsigned long trace_flags = tr->trace_flags;
3862	enum print_line_t ret;
3863
3864	if (iter->lost_events) {
3865		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3866				 iter->cpu, iter->lost_events);
3867		if (trace_seq_has_overflowed(&iter->seq))
3868			return TRACE_TYPE_PARTIAL_LINE;
3869	}
3870
3871	if (iter->trace && iter->trace->print_line) {
3872		ret = iter->trace->print_line(iter);
3873		if (ret != TRACE_TYPE_UNHANDLED)
3874			return ret;
3875	}
3876
3877	if (iter->ent->type == TRACE_BPUTS &&
3878			trace_flags & TRACE_ITER_PRINTK &&
3879			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3880		return trace_print_bputs_msg_only(iter);
3881
3882	if (iter->ent->type == TRACE_BPRINT &&
3883			trace_flags & TRACE_ITER_PRINTK &&
3884			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3885		return trace_print_bprintk_msg_only(iter);
3886
3887	if (iter->ent->type == TRACE_PRINT &&
3888			trace_flags & TRACE_ITER_PRINTK &&
3889			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3890		return trace_print_printk_msg_only(iter);
3891
3892	if (trace_flags & TRACE_ITER_BIN)
3893		return print_bin_fmt(iter);
3894
3895	if (trace_flags & TRACE_ITER_HEX)
3896		return print_hex_fmt(iter);
3897
3898	if (trace_flags & TRACE_ITER_RAW)
3899		return print_raw_fmt(iter);
3900
3901	return print_trace_fmt(iter);
3902}
3903
3904void trace_latency_header(struct seq_file *m)
3905{
3906	struct trace_iterator *iter = m->private;
3907	struct trace_array *tr = iter->tr;
3908
3909	/* print nothing if the buffers are empty */
3910	if (trace_empty(iter))
3911		return;
3912
3913	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3914		print_trace_header(m, iter);
3915
3916	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3917		print_lat_help_header(m);
3918}
3919
3920void trace_default_header(struct seq_file *m)
3921{
3922	struct trace_iterator *iter = m->private;
3923	struct trace_array *tr = iter->tr;
3924	unsigned long trace_flags = tr->trace_flags;
3925
3926	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3927		return;
3928
3929	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3930		/* print nothing if the buffers are empty */
3931		if (trace_empty(iter))
3932			return;
3933		print_trace_header(m, iter);
3934		if (!(trace_flags & TRACE_ITER_VERBOSE))
3935			print_lat_help_header(m);
3936	} else {
3937		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3938			if (trace_flags & TRACE_ITER_IRQ_INFO)
3939				print_func_help_header_irq(iter->trace_buffer,
3940							   m, trace_flags);
3941			else
3942				print_func_help_header(iter->trace_buffer, m,
3943						       trace_flags);
3944		}
3945	}
3946}
3947
3948static void test_ftrace_alive(struct seq_file *m)
3949{
3950	if (!ftrace_is_dead())
3951		return;
3952	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3953		    "#          MAY BE MISSING FUNCTION EVENTS\n");
3954}
3955
3956#ifdef CONFIG_TRACER_MAX_TRACE
3957static void show_snapshot_main_help(struct seq_file *m)
3958{
3959	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3960		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3961		    "#                      Takes a snapshot of the main buffer.\n"
3962		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3963		    "#                      (Doesn't have to be '2'; works with any number that\n"
3964		    "#                       is not a '0' or '1')\n");
3965}
3966
3967static void show_snapshot_percpu_help(struct seq_file *m)
3968{
3969	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3970#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3971	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3972		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
3973#else
3974	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3975		    "#                     Must use main snapshot file to allocate.\n");
3976#endif
3977	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3978		    "#                      (Doesn't have to be '2'; works with any number that\n"
3979		    "#                       is not a '0' or '1')\n");
3980}
3981
3982static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3983{
3984	if (iter->tr->allocated_snapshot)
3985		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3986	else
3987		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3988
3989	seq_puts(m, "# Snapshot commands:\n");
3990	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3991		show_snapshot_main_help(m);
3992	else
3993		show_snapshot_percpu_help(m);
3994}
3995#else
3996/* Should never be called */
3997static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3998#endif
3999
4000static int s_show(struct seq_file *m, void *v)
4001{
4002	struct trace_iterator *iter = v;
4003	int ret;
4004
4005	if (iter->ent == NULL) {
4006		if (iter->tr) {
4007			seq_printf(m, "# tracer: %s\n", iter->trace->name);
4008			seq_puts(m, "#\n");
4009			test_ftrace_alive(m);
4010		}
4011		if (iter->snapshot && trace_empty(iter))
4012			print_snapshot_help(m, iter);
4013		else if (iter->trace && iter->trace->print_header)
4014			iter->trace->print_header(m);
4015		else
4016			trace_default_header(m);
4017
4018	} else if (iter->leftover) {
4019		/*
4020		 * If we filled the seq_file buffer earlier, we
4021		 * want to just show it now.
4022		 */
4023		ret = trace_print_seq(m, &iter->seq);
4024
4025		/* ret should this time be zero, but you never know */
4026		iter->leftover = ret;
4027
4028	} else {
4029		print_trace_line(iter);
4030		ret = trace_print_seq(m, &iter->seq);
4031		/*
4032		 * If we overflow the seq_file buffer, then it will
4033		 * ask us for this data again at start up.
4034		 * Use that instead.
4035		 *  ret is 0 if seq_file write succeeded.
4036		 *        -1 otherwise.
4037		 */
4038		iter->leftover = ret;
4039	}
4040
4041	return 0;
4042}
4043
4044/*
4045 * Should be used after trace_array_get(); trace_types_lock
4046 * ensures that i_cdev was already initialized.
4047 */
4048static inline int tracing_get_cpu(struct inode *inode)
4049{
4050	if (inode->i_cdev) /* See trace_create_cpu_file() */
4051		return (long)inode->i_cdev - 1;
4052	return RING_BUFFER_ALL_CPUS;
4053}
4054
4055static const struct seq_operations tracer_seq_ops = {
4056	.start		= s_start,
4057	.next		= s_next,
4058	.stop		= s_stop,
4059	.show		= s_show,
4060};
4061
4062static struct trace_iterator *
4063__tracing_open(struct inode *inode, struct file *file, bool snapshot)
4064{
4065	struct trace_array *tr = inode->i_private;
4066	struct trace_iterator *iter;
4067	int cpu;
4068
4069	if (tracing_disabled)
4070		return ERR_PTR(-ENODEV);
4071
4072	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4073	if (!iter)
4074		return ERR_PTR(-ENOMEM);
4075
4076	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4077				    GFP_KERNEL);
4078	if (!iter->buffer_iter)
4079		goto release;
4080
4081	/*
4082	 * We make a copy of the current tracer to avoid concurrent
4083	 * changes to it while we are reading.
4084	 */
4085	mutex_lock(&trace_types_lock);
4086	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4087	if (!iter->trace)
4088		goto fail;
4089
4090	*iter->trace = *tr->current_trace;
4091
4092	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4093		goto fail;
4094
4095	iter->tr = tr;
4096
4097#ifdef CONFIG_TRACER_MAX_TRACE
4098	/* Currently only the top directory has a snapshot */
4099	if (tr->current_trace->print_max || snapshot)
4100		iter->trace_buffer = &tr->max_buffer;
4101	else
4102#endif
4103		iter->trace_buffer = &tr->trace_buffer;
4104	iter->snapshot = snapshot;
4105	iter->pos = -1;
4106	iter->cpu_file = tracing_get_cpu(inode);
4107	mutex_init(&iter->mutex);
4108
4109	/* Notify the tracer early; before we stop tracing. */
4110	if (iter->trace && iter->trace->open)
4111		iter->trace->open(iter);
4112
4113	/* Annotate start of buffers if we had overruns */
4114	if (ring_buffer_overruns(iter->trace_buffer->buffer))
4115		iter->iter_flags |= TRACE_FILE_ANNOTATE;
4116
4117	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4118	if (trace_clocks[tr->clock_id].in_ns)
4119		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4120
4121	/* stop the trace while dumping if we are not opening "snapshot" */
4122	if (!iter->snapshot)
4123		tracing_stop_tr(tr);
4124
4125	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4126		for_each_tracing_cpu(cpu) {
4127			iter->buffer_iter[cpu] =
4128				ring_buffer_read_prepare(iter->trace_buffer->buffer,
4129							 cpu, GFP_KERNEL);
4130		}
4131		ring_buffer_read_prepare_sync();
4132		for_each_tracing_cpu(cpu) {
4133			ring_buffer_read_start(iter->buffer_iter[cpu]);
4134			tracing_iter_reset(iter, cpu);
4135		}
4136	} else {
4137		cpu = iter->cpu_file;
4138		iter->buffer_iter[cpu] =
4139			ring_buffer_read_prepare(iter->trace_buffer->buffer,
4140						 cpu, GFP_KERNEL);
4141		ring_buffer_read_prepare_sync();
4142		ring_buffer_read_start(iter->buffer_iter[cpu]);
4143		tracing_iter_reset(iter, cpu);
4144	}
4145
4146	mutex_unlock(&trace_types_lock);
4147
4148	return iter;
4149
4150 fail:
4151	mutex_unlock(&trace_types_lock);
4152	kfree(iter->trace);
4153	kfree(iter->buffer_iter);
4154release:
4155	seq_release_private(inode, file);
4156	return ERR_PTR(-ENOMEM);
4157}
4158
4159int tracing_open_generic(struct inode *inode, struct file *filp)
4160{
4161	int ret;
4162
4163	ret = tracing_check_open_get_tr(NULL);
4164	if (ret)
4165		return ret;
4166
4167	filp->private_data = inode->i_private;
4168	return 0;
4169}
4170
4171bool tracing_is_disabled(void)
4172{
4173	return (tracing_disabled) ? true: false;
4174}
4175
4176/*
4177 * Open and update trace_array ref count.
4178 * Must have the current trace_array passed to it.
4179 */
4180int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4181{
4182	struct trace_array *tr = inode->i_private;
4183	int ret;
4184
4185	ret = tracing_check_open_get_tr(tr);
4186	if (ret)
4187		return ret;
4188
4189	filp->private_data = inode->i_private;
4190
4191	return 0;
4192}
4193
4194static int tracing_release(struct inode *inode, struct file *file)
4195{
4196	struct trace_array *tr = inode->i_private;
4197	struct seq_file *m = file->private_data;
4198	struct trace_iterator *iter;
4199	int cpu;
4200
4201	if (!(file->f_mode & FMODE_READ)) {
4202		trace_array_put(tr);
4203		return 0;
4204	}
4205
4206	/* Writes do not use seq_file */
4207	iter = m->private;
4208	mutex_lock(&trace_types_lock);
4209
4210	for_each_tracing_cpu(cpu) {
4211		if (iter->buffer_iter[cpu])
4212			ring_buffer_read_finish(iter->buffer_iter[cpu]);
4213	}
4214
4215	if (iter->trace && iter->trace->close)
4216		iter->trace->close(iter);
4217
4218	if (!iter->snapshot)
4219		/* reenable tracing if it was previously enabled */
4220		tracing_start_tr(tr);
4221
4222	__trace_array_put(tr);
4223
4224	mutex_unlock(&trace_types_lock);
4225
4226	mutex_destroy(&iter->mutex);
4227	free_cpumask_var(iter->started);
4228	kfree(iter->trace);
4229	kfree(iter->buffer_iter);
4230	seq_release_private(inode, file);
4231
4232	return 0;
4233}
4234
4235static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4236{
4237	struct trace_array *tr = inode->i_private;
4238
4239	trace_array_put(tr);
4240	return 0;
4241}
4242
4243static int tracing_single_release_tr(struct inode *inode, struct file *file)
4244{
4245	struct trace_array *tr = inode->i_private;
4246
4247	trace_array_put(tr);
4248
4249	return single_release(inode, file);
4250}
4251
4252static int tracing_open(struct inode *inode, struct file *file)
4253{
4254	struct trace_array *tr = inode->i_private;
4255	struct trace_iterator *iter;
4256	int ret;
4257
4258	ret = tracing_check_open_get_tr(tr);
4259	if (ret)
4260		return ret;
4261
4262	/* If this file was open for write, then erase contents */
4263	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4264		int cpu = tracing_get_cpu(inode);
4265		struct trace_buffer *trace_buf = &tr->trace_buffer;
4266
4267#ifdef CONFIG_TRACER_MAX_TRACE
4268		if (tr->current_trace->print_max)
4269			trace_buf = &tr->max_buffer;
4270#endif
4271
4272		if (cpu == RING_BUFFER_ALL_CPUS)
4273			tracing_reset_online_cpus(trace_buf);
4274		else
4275			tracing_reset_cpu(trace_buf, cpu);
4276	}
4277
4278	if (file->f_mode & FMODE_READ) {
4279		iter = __tracing_open(inode, file, false);
4280		if (IS_ERR(iter))
4281			ret = PTR_ERR(iter);
4282		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4283			iter->iter_flags |= TRACE_FILE_LAT_FMT;
4284	}
4285
4286	if (ret < 0)
4287		trace_array_put(tr);
4288
4289	return ret;
4290}
4291
4292/*
4293 * Some tracers are not suitable for instance buffers.
4294 * A tracer is always available for the global array (toplevel)
4295 * or if it explicitly states that it is.
4296 */
4297static bool
4298trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4299{
4300	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4301}
4302
4303/* Find the next tracer that this trace array may use */
4304static struct tracer *
4305get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4306{
4307	while (t && !trace_ok_for_array(t, tr))
4308		t = t->next;
4309
4310	return t;
4311}
4312
4313static void *
4314t_next(struct seq_file *m, void *v, loff_t *pos)
4315{
4316	struct trace_array *tr = m->private;
4317	struct tracer *t = v;
4318
4319	(*pos)++;
4320
4321	if (t)
4322		t = get_tracer_for_array(tr, t->next);
4323
4324	return t;
4325}
4326
4327static void *t_start(struct seq_file *m, loff_t *pos)
4328{
4329	struct trace_array *tr = m->private;
4330	struct tracer *t;
4331	loff_t l = 0;
4332
4333	mutex_lock(&trace_types_lock);
4334
4335	t = get_tracer_for_array(tr, trace_types);
4336	for (; t && l < *pos; t = t_next(m, t, &l))
4337			;
4338
4339	return t;
4340}
4341
4342static void t_stop(struct seq_file *m, void *p)
4343{
4344	mutex_unlock(&trace_types_lock);
4345}
4346
4347static int t_show(struct seq_file *m, void *v)
4348{
4349	struct tracer *t = v;
4350
4351	if (!t)
4352		return 0;
4353
4354	seq_puts(m, t->name);
4355	if (t->next)
4356		seq_putc(m, ' ');
4357	else
4358		seq_putc(m, '\n');
4359
4360	return 0;
4361}
4362
4363static const struct seq_operations show_traces_seq_ops = {
4364	.start		= t_start,
4365	.next		= t_next,
4366	.stop		= t_stop,
4367	.show		= t_show,
4368};
4369
4370static int show_traces_open(struct inode *inode, struct file *file)
4371{
4372	struct trace_array *tr = inode->i_private;
4373	struct seq_file *m;
4374	int ret;
4375
4376	ret = tracing_check_open_get_tr(tr);
4377	if (ret)
4378		return ret;
4379
4380	ret = seq_open(file, &show_traces_seq_ops);
4381	if (ret) {
4382		trace_array_put(tr);
4383		return ret;
4384	}
4385
4386	m = file->private_data;
4387	m->private = tr;
4388
4389	return 0;
4390}
4391
4392static int show_traces_release(struct inode *inode, struct file *file)
4393{
4394	struct trace_array *tr = inode->i_private;
4395
4396	trace_array_put(tr);
4397	return seq_release(inode, file);
4398}
4399
4400static ssize_t
4401tracing_write_stub(struct file *filp, const char __user *ubuf,
4402		   size_t count, loff_t *ppos)
4403{
4404	return count;
4405}
4406
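/*
 * Readers go through seq_lseek(); a file opened write-only (e.g. just to
 * truncate the buffer above) has nothing to seek over, so reset the offset.
 */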
4407loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4408{
4409	int ret;
4410
4411	if (file->f_mode & FMODE_READ)
4412		ret = seq_lseek(file, offset, whence);
4413	else
4414		file->f_pos = ret = 0;
4415
4416	return ret;
4417}
4418
4419static const struct file_operations tracing_fops = {
4420	.open		= tracing_open,
4421	.read		= seq_read,
4422	.write		= tracing_write_stub,
4423	.llseek		= tracing_lseek,
4424	.release	= tracing_release,
4425};
4426
4427static const struct file_operations show_traces_fops = {
4428	.open		= show_traces_open,
4429	.read		= seq_read,
4430	.llseek		= seq_lseek,
4431	.release	= show_traces_release,
4432};
4433
4434static ssize_t
4435tracing_cpumask_read(struct file *filp, char __user *ubuf,
4436		     size_t count, loff_t *ppos)
4437{
4438	struct trace_array *tr = file_inode(filp)->i_private;
4439	char *mask_str;
4440	int len;
4441
4442	len = snprintf(NULL, 0, "%*pb\n",
4443		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
4444	mask_str = kmalloc(len, GFP_KERNEL);
4445	if (!mask_str)
4446		return -ENOMEM;
4447
4448	len = snprintf(mask_str, len, "%*pb\n",
4449		       cpumask_pr_args(tr->tracing_cpumask));
4450	if (len >= count) {
4451		count = -EINVAL;
4452		goto out_err;
4453	}
4454	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4455
4456out_err:
4457	kfree(mask_str);
4458
4459	return count;
4460}
4461
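/*
 * Update tr->tracing_cpumask from a user supplied mask (parsed by
 * cpumask_parse_user(), e.g. "echo 3 > tracing_cpumask" to limit tracing
 * to CPUs 0 and 1).  Per-cpu recording is disabled/enabled as bits are
 * cleared/set, under max_lock so it cannot race with a max buffer swap.
 */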
4462static ssize_t
4463tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4464		      size_t count, loff_t *ppos)
4465{
4466	struct trace_array *tr = file_inode(filp)->i_private;
4467	cpumask_var_t tracing_cpumask_new;
4468	int err, cpu;
4469
4470	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4471		return -ENOMEM;
4472
4473	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4474	if (err)
4475		goto err_unlock;
4476
4477	local_irq_disable();
4478	arch_spin_lock(&tr->max_lock);
4479	for_each_tracing_cpu(cpu) {
4480		/*
4481		 * Increase/decrease the disabled counter if we are
4482		 * about to flip a bit in the cpumask:
4483		 */
4484		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4485				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4486			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4487			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4488		}
4489		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4490				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4491			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4492			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4493		}
4494	}
4495	arch_spin_unlock(&tr->max_lock);
4496	local_irq_enable();
4497
4498	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4499	free_cpumask_var(tracing_cpumask_new);
4500
4501	return count;
4502
4503err_unlock:
4504	free_cpumask_var(tracing_cpumask_new);
4505
4506	return err;
4507}
4508
4509static const struct file_operations tracing_cpumask_fops = {
4510	.open		= tracing_open_generic_tr,
4511	.read		= tracing_cpumask_read,
4512	.write		= tracing_cpumask_write,
4513	.release	= tracing_release_generic_tr,
4514	.llseek		= generic_file_llseek,
4515};
4516
4517static int tracing_trace_options_show(struct seq_file *m, void *v)
4518{
4519	struct tracer_opt *trace_opts;
4520	struct trace_array *tr = m->private;
4521	u32 tracer_flags;
4522	int i;
4523
4524	mutex_lock(&trace_types_lock);
4525	tracer_flags = tr->current_trace->flags->val;
4526	trace_opts = tr->current_trace->flags->opts;
4527
4528	for (i = 0; trace_options[i]; i++) {
4529		if (tr->trace_flags & (1 << i))
4530			seq_printf(m, "%s\n", trace_options[i]);
4531		else
4532			seq_printf(m, "no%s\n", trace_options[i]);
4533	}
4534
4535	for (i = 0; trace_opts[i].name; i++) {
4536		if (tracer_flags & trace_opts[i].bit)
4537			seq_printf(m, "%s\n", trace_opts[i].name);
4538		else
4539			seq_printf(m, "no%s\n", trace_opts[i].name);
4540	}
4541	mutex_unlock(&trace_types_lock);
4542
4543	return 0;
4544}
4545
4546static int __set_tracer_option(struct trace_array *tr,
4547			       struct tracer_flags *tracer_flags,
4548			       struct tracer_opt *opts, int neg)
4549{
4550	struct tracer *trace = tracer_flags->trace;
4551	int ret;
4552
4553	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4554	if (ret)
4555		return ret;
4556
4557	if (neg)
4558		tracer_flags->val &= ~opts->bit;
4559	else
4560		tracer_flags->val |= opts->bit;
4561	return 0;
4562}
4563
4564/* Try to assign a tracer specific option */
4565static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4566{
4567	struct tracer *trace = tr->current_trace;
4568	struct tracer_flags *tracer_flags = trace->flags;
4569	struct tracer_opt *opts = NULL;
4570	int i;
4571
4572	for (i = 0; tracer_flags->opts[i].name; i++) {
4573		opts = &tracer_flags->opts[i];
4574
4575		if (strcmp(cmp, opts->name) == 0)
4576			return __set_tracer_option(tr, trace->flags, opts, neg);
4577	}
4578
4579	return -EINVAL;
4580}
4581
4582/* Some tracers require overwrite to stay enabled */
4583int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4584{
4585	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4586		return -1;
4587
4588	return 0;
4589}
4590
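/*
 * Set or clear one TRACE_ITER_* flag on @tr and apply its side effects
 * (cmdline/tgid recording, fork following, overwrite mode, trace_printk).
 * The current tracer may veto the change through ->flag_changed(), e.g.
 * trace_keep_overwrite() above refuses to clear TRACE_ITER_OVERWRITE.
 */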
4591int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4592{
4593	/* do nothing if flag is already set */
4594	if (!!(tr->trace_flags & mask) == !!enabled)
4595		return 0;
4596
4597	/* Give the tracer a chance to approve the change */
4598	if (tr->current_trace->flag_changed)
4599		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4600			return -EINVAL;
4601
4602	if (enabled)
4603		tr->trace_flags |= mask;
4604	else
4605		tr->trace_flags &= ~mask;
4606
4607	if (mask == TRACE_ITER_RECORD_CMD)
4608		trace_event_enable_cmd_record(enabled);
4609
4610	if (mask == TRACE_ITER_RECORD_TGID) {
4611		if (!tgid_map)
4612			tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4613					   sizeof(*tgid_map),
4614					   GFP_KERNEL);
4615		if (!tgid_map) {
4616			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4617			return -ENOMEM;
4618		}
4619
4620		trace_event_enable_tgid_record(enabled);
4621	}
4622
4623	if (mask == TRACE_ITER_EVENT_FORK)
4624		trace_event_follow_fork(tr, enabled);
4625
4626	if (mask == TRACE_ITER_FUNC_FORK)
4627		ftrace_pid_follow_fork(tr, enabled);
4628
4629	if (mask == TRACE_ITER_OVERWRITE) {
4630		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4631#ifdef CONFIG_TRACER_MAX_TRACE
4632		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4633#endif
4634	}
4635
4636	if (mask == TRACE_ITER_PRINTK) {
4637		trace_printk_start_stop_comm(enabled);
4638		trace_printk_control(enabled);
4639	}
4640
4641	return 0;
4642}
4643
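/*
 * Handle one token written to the trace_options file.  A "no" prefix
 * clears the option (e.g. "echo no<option> > trace_options"); names not
 * found in trace_options[] are tried as tracer specific options before
 * failing with -EINVAL.
 */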
4644static int trace_set_options(struct trace_array *tr, char *option)
4645{
4646	char *cmp;
4647	int neg = 0;
4648	int ret;
4649	size_t orig_len = strlen(option);
4650	int len;
4651
4652	cmp = strstrip(option);
4653
4654	len = str_has_prefix(cmp, "no");
4655	if (len)
4656		neg = 1;
4657
4658	cmp += len;
4659
4660	mutex_lock(&trace_types_lock);
4661
4662	ret = match_string(trace_options, -1, cmp);
4663	/* If no option could be set, test the specific tracer options */
4664	if (ret < 0)
4665		ret = set_tracer_option(tr, cmp, neg);
4666	else
4667		ret = set_tracer_flag(tr, 1 << ret, !neg);
4668
4669	mutex_unlock(&trace_types_lock);
4670
4671	/*
4672	 * If the first trailing whitespace is replaced with '\0' by strstrip,
4673	 * turn it back into a space.
4674	 */
4675	if (orig_len > strlen(option))
4676		option[strlen(option)] = ' ';
4677
4678	return ret;
4679}
4680
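/*
 * Apply the comma separated option list saved from the kernel command
 * line (the trace_options= boot parameter) to the global trace array.
 */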
4681static void __init apply_trace_boot_options(void)
4682{
4683	char *buf = trace_boot_options_buf;
4684	char *option;
4685
4686	while (true) {
4687		option = strsep(&buf, ",");
4688
4689		if (!option)
4690			break;
4691
4692		if (*option)
4693			trace_set_options(&global_trace, option);
4694
4695		/* Put back the comma to allow this to be called again */
4696		if (buf)
4697			*(buf - 1) = ',';
4698	}
4699}
4700
4701static ssize_t
4702tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4703			size_t cnt, loff_t *ppos)
4704{
4705	struct seq_file *m = filp->private_data;
4706	struct trace_array *tr = m->private;
4707	char buf[64];
4708	int ret;
4709
4710	if (cnt >= sizeof(buf))
4711		return -EINVAL;
4712
4713	if (copy_from_user(buf, ubuf, cnt))
4714		return -EFAULT;
4715
4716	buf[cnt] = 0;
4717
4718	ret = trace_set_options(tr, buf);
4719	if (ret < 0)
4720		return ret;
4721
4722	*ppos += cnt;
4723
4724	return cnt;
4725}
4726
4727static int tracing_trace_options_open(struct inode *inode, struct file *file)
4728{
4729	struct trace_array *tr = inode->i_private;
4730	int ret;
4731
4732	ret = tracing_check_open_get_tr(tr);
4733	if (ret)
4734		return ret;
4735
4736	ret = single_open(file, tracing_trace_options_show, inode->i_private);
4737	if (ret < 0)
4738		trace_array_put(tr);
4739
4740	return ret;
4741}
4742
4743static const struct file_operations tracing_iter_fops = {
4744	.open		= tracing_trace_options_open,
4745	.read		= seq_read,
4746	.llseek		= seq_lseek,
4747	.release	= tracing_single_release_tr,
4748	.write		= tracing_trace_options_write,
4749};
4750
4751static const char readme_msg[] =
4752	"tracing mini-HOWTO:\n\n"
4753	"# echo 0 > tracing_on : quick way to disable tracing\n"
4754	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4755	" Important files:\n"
4756	"  trace\t\t\t- The static contents of the buffer\n"
4757	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
4758	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4759	"  current_tracer\t- function and latency tracers\n"
4760	"  available_tracers\t- list of configured tracers for current_tracer\n"
4761	"  error_log\t- error log for failed commands (that support it)\n"
4762	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
4763	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
4764	"  trace_clock\t\t- change the clock used to order events\n"
4765	"       local:   Per cpu clock but may not be synced across CPUs\n"
4766	"      global:   Synced across CPUs but slows tracing down.\n"
4767	"     counter:   Not a clock, but just an increment\n"
4768	"      uptime:   Jiffy counter from time of boot\n"
4769	"        perf:   Same clock that perf events use\n"
4770#ifdef CONFIG_X86_64
4771	"     x86-tsc:   TSC cycle counter\n"
4772#endif
4773	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
4774	"       delta:   Delta difference against a buffer-wide timestamp\n"
4775	"    absolute:   Absolute (standalone) timestamp\n"
4776	"\n  trace_marker\t\t- Writes to this file are inserted into the kernel buffer\n"
4777	"\n  trace_marker_raw\t\t- Writes to this file insert binary data into the kernel buffer\n"
4778	"  tracing_cpumask\t- Limit which CPUs to trace\n"
4779	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4780	"\t\t\t  Remove sub-buffer with rmdir\n"
4781	"  trace_options\t\t- Set format or modify how tracing happens\n"
4782	"\t\t\t  Disable an option by prefixing 'no' to the\n"
4783	"\t\t\t  option name\n"
4784	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4785#ifdef CONFIG_DYNAMIC_FTRACE
4786	"\n  available_filter_functions - list of functions that can be filtered on\n"
4787	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
4788	"\t\t\t  functions\n"
4789	"\t     accepts: func_full_name or glob-matching-pattern\n"
4790	"\t     modules: Can select a group via module\n"
4791	"\t      Format: :mod:<module-name>\n"
4792	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
4793	"\t    triggers: a command to perform when function is hit\n"
4794	"\t      Format: <function>:<trigger>[:count]\n"
4795	"\t     trigger: traceon, traceoff\n"
4796	"\t\t      enable_event:<system>:<event>\n"
4797	"\t\t      disable_event:<system>:<event>\n"
4798#ifdef CONFIG_STACKTRACE
4799	"\t\t      stacktrace\n"
4800#endif
4801#ifdef CONFIG_TRACER_SNAPSHOT
4802	"\t\t      snapshot\n"
4803#endif
4804	"\t\t      dump\n"
4805	"\t\t      cpudump\n"
4806	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
4807	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
4808	"\t     The first one will disable tracing every time do_fault is hit\n"
4809	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
4810	"\t       The first time do_trap is hit and it disables tracing, the\n"
4811	"\t       counter will decrement to 2. If tracing is already disabled,\n"
4812	"\t       the counter will not decrement. It only decrements when the\n"
4813	"\t       trigger did work\n"
4814	"\t     To remove trigger without count:\n"
4815	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
4816	"\t     To remove trigger with a count:\n"
4817	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4818	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
4819	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4820	"\t    modules: Can select a group via module command :mod:\n"
4821	"\t    Does not accept triggers\n"
4822#endif /* CONFIG_DYNAMIC_FTRACE */
4823#ifdef CONFIG_FUNCTION_TRACER
4824	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4825	"\t\t    (function)\n"
4826#endif
4827#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4828	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4829	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4830	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4831#endif
4832#ifdef CONFIG_TRACER_SNAPSHOT
4833	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
4834	"\t\t\t  snapshot buffer. Read the contents for more\n"
4835	"\t\t\t  information\n"
4836#endif
4837#ifdef CONFIG_STACK_TRACER
4838	"  stack_trace\t\t- Shows the max stack trace when active\n"
4839	"  stack_max_size\t- Shows current max stack size that was traced\n"
4840	"\t\t\t  Write into this file to reset the max size (trigger a\n"
4841	"\t\t\t  new trace)\n"
4842#ifdef CONFIG_DYNAMIC_FTRACE
4843	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4844	"\t\t\t  traces\n"
4845#endif
4846#endif /* CONFIG_STACK_TRACER */
4847#ifdef CONFIG_DYNAMIC_EVENTS
4848	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
4849	"\t\t\t  Write into this file to define/undefine new trace events.\n"
4850#endif
4851#ifdef CONFIG_KPROBE_EVENTS
4852	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
4853	"\t\t\t  Write into this file to define/undefine new trace events.\n"
4854#endif
4855#ifdef CONFIG_UPROBE_EVENTS
4856	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
4857	"\t\t\t  Write into this file to define/undefine new trace events.\n"
4858#endif
4859#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4860	"\t  accepts: event-definitions (one definition per line)\n"
4861	"\t   Format: p[:[<group>/]<event>] <place> [<args>]\n"
4862	"\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4863#ifdef CONFIG_HIST_TRIGGERS
4864	"\t           s:[synthetic/]<event> <field> [<field>]\n"
4865#endif
4866	"\t           -:[<group>/]<event>\n"
4867#ifdef CONFIG_KPROBE_EVENTS
4868	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4869	"place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4870#endif
4871#ifdef CONFIG_UPROBE_EVENTS
4872	"   place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
4873#endif
4874	"\t     args: <name>=fetcharg[:type]\n"
4875	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4876#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4877	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
4878#else
4879	"\t           $stack<index>, $stack, $retval, $comm,\n"
4880#endif
4881	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
4882	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
4883	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
4884	"\t           <type>\\[<array-size>\\]\n"
4885#ifdef CONFIG_HIST_TRIGGERS
4886	"\t    field: <stype> <name>;\n"
4887	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4888	"\t           [unsigned] char/int/long\n"
4889#endif
4890#endif
4891	"  events/\t\t- Directory containing all trace event subsystems:\n"
4892	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4893	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
4894	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4895	"\t\t\t  events\n"
4896	"      filter\t\t- If set, only events passing filter are traced\n"
4897	"  events/<system>/<event>/\t- Directory containing control files for\n"
4898	"\t\t\t  <event>:\n"
4899	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4900	"      filter\t\t- If set, only events passing filter are traced\n"
4901	"      trigger\t\t- If set, a command to perform when event is hit\n"
4902	"\t    Format: <trigger>[:count][if <filter>]\n"
4903	"\t   trigger: traceon, traceoff\n"
4904	"\t            enable_event:<system>:<event>\n"
4905	"\t            disable_event:<system>:<event>\n"
4906#ifdef CONFIG_HIST_TRIGGERS
4907	"\t            enable_hist:<system>:<event>\n"
4908	"\t            disable_hist:<system>:<event>\n"
4909#endif
4910#ifdef CONFIG_STACKTRACE
4911	"\t\t    stacktrace\n"
4912#endif
4913#ifdef CONFIG_TRACER_SNAPSHOT
4914	"\t\t    snapshot\n"
4915#endif
4916#ifdef CONFIG_HIST_TRIGGERS
4917	"\t\t    hist (see below)\n"
4918#endif
4919	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
4920	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
4921	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4922	"\t                  events/block/block_unplug/trigger\n"
4923	"\t   The first disables tracing every time block_unplug is hit.\n"
4924	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
4925	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
4926	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4927	"\t   Like function triggers, the counter is only decremented if it\n"
4928	"\t    enabled or disabled tracing.\n"
4929	"\t   To remove a trigger without a count:\n"
4930	"\t     echo '!<trigger> > <system>/<event>/trigger\n"
4931	"\t   To remove a trigger with a count:\n"
4932	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
4933	"\t   The filter portion may be omitted when removing a trigger.\n"
4934#ifdef CONFIG_HIST_TRIGGERS
4935	"      hist trigger\t- If set, event hits are aggregated into a hash table\n"
4936	"\t    Format: hist:keys=<field1[,field2,...]>\n"
4937	"\t            [:values=<field1[,field2,...]>]\n"
4938	"\t            [:sort=<field1[,field2,...]>]\n"
4939	"\t            [:size=#entries]\n"
4940	"\t            [:pause][:continue][:clear]\n"
4941	"\t            [:name=histname1]\n"
4942	"\t            [:<handler>.<action>]\n"
4943	"\t            [if <filter>]\n\n"
4944	"\t    When a matching event is hit, an entry is added to a hash\n"
4945	"\t    table using the key(s) and value(s) named, and the value of a\n"
4946	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
4947	"\t    correspond to fields in the event's format description.  Keys\n"
4948	"\t    can be any field, or the special string 'stacktrace'.\n"
4949	"\t    Compound keys consisting of up to two fields can be specified\n"
4950	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
4951	"\t    fields.  Sort keys consisting of up to two fields can be\n"
4952	"\t    specified using the 'sort' keyword.  The sort direction can\n"
4953	"\t    be modified by appending '.descending' or '.ascending' to a\n"
4954	"\t    sort field.  The 'size' parameter can be used to specify more\n"
4955	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
4956	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
4957	"\t    its histogram data will be shared with other triggers of the\n"
4958	"\t    same name, and trigger hits will update this common data.\n\n"
4959	"\t    Reading the 'hist' file for the event will dump the hash\n"
4960	"\t    table in its entirety to stdout.  If there are multiple hist\n"
4961	"\t    triggers attached to an event, there will be a table for each\n"
4962	"\t    trigger in the output.  The table displayed for a named\n"
4963	"\t    trigger will be the same as any other instance having the\n"
4964	"\t    same name.  The default format used to display a given field\n"
4965	"\t    can be modified by appending any of the following modifiers\n"
4966	"\t    to the field name, as applicable:\n\n"
4967	"\t            .hex        display a number as a hex value\n"
4968	"\t            .sym        display an address as a symbol\n"
4969	"\t            .sym-offset display an address as a symbol and offset\n"
4970	"\t            .execname   display a common_pid as a program name\n"
4971	"\t            .syscall    display a syscall id as a syscall name\n"
4972	"\t            .log2       display log2 value rather than raw number\n"
4973	"\t            .usecs      display a common_timestamp in microseconds\n\n"
4974	"\t    The 'pause' parameter can be used to pause an existing hist\n"
4975	"\t    trigger or to start a hist trigger but not log any events\n"
4976	"\t    until told to do so.  'continue' can be used to start or\n"
4977	"\t    restart a paused hist trigger.\n\n"
4978	"\t    The 'clear' parameter will clear the contents of a running\n"
4979	"\t    hist trigger and leave its current paused/active state\n"
4980	"\t    unchanged.\n\n"
4981	"\t    The enable_hist and disable_hist triggers can be used to\n"
4982	"\t    have one event conditionally start and stop another event's\n"
4983	"\t    already-attached hist trigger.  The syntax is analogous to\n"
4984	"\t    the enable_event and disable_event triggers.\n\n"
4985	"\t    Hist trigger handlers and actions are executed whenever a\n"
4986	"\t    histogram entry is added or updated.  They take the form:\n\n"
4987	"\t        <handler>.<action>\n\n"
4988	"\t    The available handlers are:\n\n"
4989	"\t        onmatch(matching.event)  - invoke on addition or update\n"
4990	"\t        onmax(var)               - invoke if var exceeds current max\n"
4991	"\t        onchange(var)            - invoke action if var changes\n\n"
4992	"\t    The available actions are:\n\n"
4993	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
4994	"\t        save(field,...)                      - save current event fields\n"
4995#ifdef CONFIG_TRACER_SNAPSHOT
4996	"\t        snapshot()                           - snapshot the trace buffer\n"
4997#endif
4998#endif
4999;
5000
5001static ssize_t
5002tracing_readme_read(struct file *filp, char __user *ubuf,
5003		       size_t cnt, loff_t *ppos)
5004{
5005	return simple_read_from_buffer(ubuf, cnt, ppos,
5006					readme_msg, strlen(readme_msg));
5007}
5008
5009static const struct file_operations tracing_readme_fops = {
5010	.open		= tracing_open_generic,
5011	.read		= tracing_readme_read,
5012	.llseek		= generic_file_llseek,
5013};
5014
5015static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5016{
5017	int *ptr = v;
5018
5019	if (*pos || m->count)
5020		ptr++;
5021
5022	(*pos)++;
5023
5024	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5025		if (trace_find_tgid(*ptr))
5026			return ptr;
5027	}
5028
5029	return NULL;
5030}
5031
5032static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5033{
5034	void *v;
5035	loff_t l = 0;
5036
5037	if (!tgid_map)
5038		return NULL;
5039
5040	v = &tgid_map[0];
5041	while (l <= *pos) {
5042		v = saved_tgids_next(m, v, &l);
5043		if (!v)
5044			return NULL;
5045	}
5046
5047	return v;
5048}
5049
5050static void saved_tgids_stop(struct seq_file *m, void *v)
5051{
5052}
5053
5054static int saved_tgids_show(struct seq_file *m, void *v)
5055{
5056	int pid = (int *)v - tgid_map;
5057
5058	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5059	return 0;
5060}
5061
5062static const struct seq_operations tracing_saved_tgids_seq_ops = {
5063	.start		= saved_tgids_start,
5064	.stop		= saved_tgids_stop,
5065	.next		= saved_tgids_next,
5066	.show		= saved_tgids_show,
5067};
5068
5069static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5070{
5071	int ret;
5072
5073	ret = tracing_check_open_get_tr(NULL);
5074	if (ret)
5075		return ret;
5076
5077	return seq_open(filp, &tracing_saved_tgids_seq_ops);
5078}
5079
5080
5081static const struct file_operations tracing_saved_tgids_fops = {
5082	.open		= tracing_saved_tgids_open,
5083	.read		= seq_read,
5084	.llseek		= seq_lseek,
5085	.release	= seq_release,
5086};
5087
5088static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5089{
5090	unsigned int *ptr = v;
5091
5092	if (*pos || m->count)
5093		ptr++;
5094
5095	(*pos)++;
5096
5097	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5098	     ptr++) {
5099		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5100			continue;
5101
5102		return ptr;
5103	}
5104
5105	return NULL;
5106}
5107
5108static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5109{
5110	void *v;
5111	loff_t l = 0;
5112
5113	preempt_disable();
5114	arch_spin_lock(&trace_cmdline_lock);
5115
5116	v = &savedcmd->map_cmdline_to_pid[0];
5117	while (l <= *pos) {
5118		v = saved_cmdlines_next(m, v, &l);
5119		if (!v)
5120			return NULL;
5121	}
5122
5123	return v;
5124}
5125
5126static void saved_cmdlines_stop(struct seq_file *m, void *v)
5127{
5128	arch_spin_unlock(&trace_cmdline_lock);
5129	preempt_enable();
5130}
5131
5132static int saved_cmdlines_show(struct seq_file *m, void *v)
5133{
5134	char buf[TASK_COMM_LEN];
5135	unsigned int *pid = v;
5136
5137	__trace_find_cmdline(*pid, buf);
5138	seq_printf(m, "%d %s\n", *pid, buf);
5139	return 0;
5140}
5141
5142static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5143	.start		= saved_cmdlines_start,
5144	.next		= saved_cmdlines_next,
5145	.stop		= saved_cmdlines_stop,
5146	.show		= saved_cmdlines_show,
5147};
5148
5149static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5150{
5151	int ret;
5152
5153	ret = tracing_check_open_get_tr(NULL);
5154	if (ret)
5155		return ret;
5156
5157	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5158}
5159
5160static const struct file_operations tracing_saved_cmdlines_fops = {
5161	.open		= tracing_saved_cmdlines_open,
5162	.read		= seq_read,
5163	.llseek		= seq_lseek,
5164	.release	= seq_release,
5165};
5166
5167static ssize_t
5168tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5169				 size_t cnt, loff_t *ppos)
5170{
5171	char buf[64];
5172	int r;
5173
5174	arch_spin_lock(&trace_cmdline_lock);
5175	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5176	arch_spin_unlock(&trace_cmdline_lock);
5177
5178	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5179}
5180
5181static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5182{
5183	kfree(s->saved_cmdlines);
5184	kfree(s->map_cmdline_to_pid);
5185	kfree(s);
5186}
5187
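/*
 * Resize the saved cmdlines cache: allocate a new buffer of @val entries,
 * swap it in under trace_cmdline_lock, then free the old buffer.
 */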
5188static int tracing_resize_saved_cmdlines(unsigned int val)
5189{
5190	struct saved_cmdlines_buffer *s, *savedcmd_temp;
5191
5192	s = kmalloc(sizeof(*s), GFP_KERNEL);
5193	if (!s)
5194		return -ENOMEM;
5195
5196	if (allocate_cmdlines_buffer(val, s) < 0) {
5197		kfree(s);
5198		return -ENOMEM;
5199	}
5200
5201	arch_spin_lock(&trace_cmdline_lock);
5202	savedcmd_temp = savedcmd;
5203	savedcmd = s;
5204	arch_spin_unlock(&trace_cmdline_lock);
5205	free_saved_cmdlines_buffer(savedcmd_temp);
5206
5207	return 0;
5208}
5209
5210static ssize_t
5211tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5212				  size_t cnt, loff_t *ppos)
5213{
5214	unsigned long val;
5215	int ret;
5216
5217	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5218	if (ret)
5219		return ret;
5220
5221	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5222	if (!val || val > PID_MAX_DEFAULT)
5223		return -EINVAL;
5224
5225	ret = tracing_resize_saved_cmdlines((unsigned int)val);
5226	if (ret < 0)
5227		return ret;
5228
5229	*ppos += cnt;
5230
5231	return cnt;
5232}
5233
5234static const struct file_operations tracing_saved_cmdlines_size_fops = {
5235	.open		= tracing_open_generic,
5236	.read		= tracing_saved_cmdlines_size_read,
5237	.write		= tracing_saved_cmdlines_size_write,
5238};
5239
5240#ifdef CONFIG_TRACE_EVAL_MAP_FILE
5241static union trace_eval_map_item *
5242update_eval_map(union trace_eval_map_item *ptr)
5243{
5244	if (!ptr->map.eval_string) {
5245		if (ptr->tail.next) {
5246			ptr = ptr->tail.next;
5247			/* Set ptr to the next real item (skip head) */
5248			ptr++;
5249		} else
5250			return NULL;
5251	}
5252	return ptr;
5253}
5254
5255static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5256{
5257	union trace_eval_map_item *ptr = v;
5258
5259	/*
5260	 * Paranoid! If ptr points to end, we don't want to increment past it.
5261	 * This really should never happen.
5262	 */
5263	ptr = update_eval_map(ptr);
5264	if (WARN_ON_ONCE(!ptr))
5265		return NULL;
5266
5267	ptr++;
5268
5269	(*pos)++;
5270
5271	ptr = update_eval_map(ptr);
5272
5273	return ptr;
5274}
5275
5276static void *eval_map_start(struct seq_file *m, loff_t *pos)
5277{
5278	union trace_eval_map_item *v;
5279	loff_t l = 0;
5280
5281	mutex_lock(&trace_eval_mutex);
5282
5283	v = trace_eval_maps;
5284	if (v)
5285		v++;
5286
5287	while (v && l < *pos) {
5288		v = eval_map_next(m, v, &l);
5289	}
5290
5291	return v;
5292}
5293
5294static void eval_map_stop(struct seq_file *m, void *v)
5295{
5296	mutex_unlock(&trace_eval_mutex);
5297}
5298
5299static int eval_map_show(struct seq_file *m, void *v)
5300{
5301	union trace_eval_map_item *ptr = v;
5302
5303	seq_printf(m, "%s %ld (%s)\n",
5304		   ptr->map.eval_string, ptr->map.eval_value,
5305		   ptr->map.system);
5306
5307	return 0;
5308}
5309
5310static const struct seq_operations tracing_eval_map_seq_ops = {
5311	.start		= eval_map_start,
5312	.next		= eval_map_next,
5313	.stop		= eval_map_stop,
5314	.show		= eval_map_show,
5315};
5316
5317static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5318{
5319	int ret;
5320
5321	ret = tracing_check_open_get_tr(NULL);
5322	if (ret)
5323		return ret;
5324
5325	return seq_open(filp, &tracing_eval_map_seq_ops);
5326}
5327
5328static const struct file_operations tracing_eval_map_fops = {
5329	.open		= tracing_eval_map_open,
5330	.read		= seq_read,
5331	.llseek		= seq_lseek,
5332	.release	= seq_release,
5333};
5334
5335static inline union trace_eval_map_item *
5336trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5337{
5338	/* Return tail of array given the head */
5339	return ptr + ptr->head.length + 1;
5340}
5341
5342static void
5343trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5344			   int len)
5345{
5346	struct trace_eval_map **stop;
5347	struct trace_eval_map **map;
5348	union trace_eval_map_item *map_array;
5349	union trace_eval_map_item *ptr;
5350
5351	stop = start + len;
5352
5353	/*
5354	 * The trace_eval_maps contains the map plus a head and tail item,
5355	 * where the head holds the module and length of array, and the
5356	 * tail holds a pointer to the next list.
5357	 */
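	/*
	 * Resulting layout of map_array (len + 2 items):
	 *   [0]        head: ->mod and ->length of this chunk
	 *   [1..len]   copies of the module's trace_eval_map entries
	 *   [len + 1]  tail: zeroed below, ->next links to the next chunk
	 */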
5358	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5359	if (!map_array) {
5360		pr_warn("Unable to allocate trace eval mapping\n");
5361		return;
5362	}
5363
5364	mutex_lock(&trace_eval_mutex);
5365
5366	if (!trace_eval_maps)
5367		trace_eval_maps = map_array;
5368	else {
5369		ptr = trace_eval_maps;
5370		for (;;) {
5371			ptr = trace_eval_jmp_to_tail(ptr);
5372			if (!ptr->tail.next)
5373				break;
5374			ptr = ptr->tail.next;
5375
5376		}
5377		ptr->tail.next = map_array;
5378	}
5379	map_array->head.mod = mod;
5380	map_array->head.length = len;
5381	map_array++;
5382
5383	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5384		map_array->map = **map;
5385		map_array++;
5386	}
5387	memset(map_array, 0, sizeof(*map_array));
5388
5389	mutex_unlock(&trace_eval_mutex);
5390}
5391
5392static void trace_create_eval_file(struct dentry *d_tracer)
5393{
5394	trace_create_file("eval_map", 0444, d_tracer,
5395			  NULL, &tracing_eval_map_fops);
5396}
5397
5398#else /* CONFIG_TRACE_EVAL_MAP_FILE */
5399static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5400static inline void trace_insert_eval_map_file(struct module *mod,
5401			      struct trace_eval_map **start, int len) { }
5402#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5403
5404static void trace_insert_eval_map(struct module *mod,
5405				  struct trace_eval_map **start, int len)
5406{
5407	struct trace_eval_map **map;
5408
5409	if (len <= 0)
5410		return;
5411
5412	map = start;
5413
5414	trace_event_eval_update(map, len);
5415
5416	trace_insert_eval_map_file(mod, start, len);
5417}
5418
5419static ssize_t
5420tracing_set_trace_read(struct file *filp, char __user *ubuf,
5421		       size_t cnt, loff_t *ppos)
5422{
5423	struct trace_array *tr = filp->private_data;
5424	char buf[MAX_TRACER_SIZE+2];
5425	int r;
5426
5427	mutex_lock(&trace_types_lock);
5428	r = sprintf(buf, "%s\n", tr->current_trace->name);
5429	mutex_unlock(&trace_types_lock);
5430
5431	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5432}
5433
5434int tracer_init(struct tracer *t, struct trace_array *tr)
5435{
5436	tracing_reset_online_cpus(&tr->trace_buffer);
5437	return t->init(tr);
5438}
5439
5440static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5441{
5442	int cpu;
5443
5444	for_each_tracing_cpu(cpu)
5445		per_cpu_ptr(buf->data, cpu)->entries = val;
5446}
5447
5448#ifdef CONFIG_TRACER_MAX_TRACE
5449/* resize @trace_buf's per-cpu entry counts to match @size_buf's entries */
5450static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5451					struct trace_buffer *size_buf, int cpu_id)
5452{
5453	int cpu, ret = 0;
5454
5455	if (cpu_id == RING_BUFFER_ALL_CPUS) {
5456		for_each_tracing_cpu(cpu) {
5457			ret = ring_buffer_resize(trace_buf->buffer,
5458				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5459			if (ret < 0)
5460				break;
5461			per_cpu_ptr(trace_buf->data, cpu)->entries =
5462				per_cpu_ptr(size_buf->data, cpu)->entries;
5463		}
5464	} else {
5465		ret = ring_buffer_resize(trace_buf->buffer,
5466				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5467		if (ret == 0)
5468			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5469				per_cpu_ptr(size_buf->data, cpu_id)->entries;
5470	}
5471
5472	return ret;
5473}
5474#endif /* CONFIG_TRACER_MAX_TRACE */
5475
5476static int __tracing_resize_ring_buffer(struct trace_array *tr,
5477					unsigned long size, int cpu)
5478{
5479	int ret;
5480
5481	/*
5482	 * If kernel or user changes the size of the ring buffer
5483	 * we use the size that was given, and we can forget about
5484	 * expanding it later.
5485	 */
5486	ring_buffer_expanded = true;
5487
5488	/* May be called before buffers are initialized */
5489	if (!tr->trace_buffer.buffer)
5490		return 0;
5491
5492	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5493	if (ret < 0)
5494		return ret;
5495
5496#ifdef CONFIG_TRACER_MAX_TRACE
5497	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5498	    !tr->current_trace->use_max_tr)
5499		goto out;
5500
5501	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5502	if (ret < 0) {
5503		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5504						     &tr->trace_buffer, cpu);
5505		if (r < 0) {
5506			/*
5507			 * AARGH! We are left with a max buffer of a
5508			 * different size!
5509			 * The max buffer is our "snapshot" buffer.
5510			 * When a tracer needs a snapshot (one of the
5511			 * latency tracers), it swaps the max buffer
5512			 * with the saved snapshot. We succeeded in
5513			 * updating the size of the main buffer, but failed
5514			 * to update the size of the max buffer. Then, when
5515			 * we tried to reset the main buffer to its original
5516			 * size, we failed there too. This is very unlikely
5517			 * to happen, but if it does, warn and kill all
5518			 * tracing.
5519			 */
5520			WARN_ON(1);
5521			tracing_disabled = 1;
5522		}
5523		return ret;
5524	}
5525
5526	if (cpu == RING_BUFFER_ALL_CPUS)
5527		set_buffer_entries(&tr->max_buffer, size);
5528	else
5529		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5530
5531 out:
5532#endif /* CONFIG_TRACER_MAX_TRACE */
5533
5534	if (cpu == RING_BUFFER_ALL_CPUS)
5535		set_buffer_entries(&tr->trace_buffer, size);
5536	else
5537		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5538
5539	return ret;
5540}
5541
5542static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5543					  unsigned long size, int cpu_id)
5544{
5545	int ret = size;
5546
5547	mutex_lock(&trace_types_lock);
5548
5549	if (cpu_id != RING_BUFFER_ALL_CPUS) {
5550		/* make sure this cpu is enabled in the mask */
5551		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5552			ret = -EINVAL;
5553			goto out;
5554		}
5555	}
5556
5557	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5558	if (ret < 0)
5559		ret = -ENOMEM;
5560
5561out:
5562	mutex_unlock(&trace_types_lock);
5563
5564	return ret;
5565}
5566
5567
5568/**
5569 * tracing_update_buffers - used by tracing facility to expand ring buffers
5570 *
5571 * To save memory when tracing is configured in but never used, the
5572 * ring buffers are set to a minimal size. But once a user starts to
5573 * use the tracing facility, then they need to grow to their
5574 * default size.
5575 *
5576 * This function is to be called when a tracer is about to be used.
5577 */
5578int tracing_update_buffers(void)
5579{
5580	int ret = 0;
5581
5582	mutex_lock(&trace_types_lock);
5583	if (!ring_buffer_expanded)
5584		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5585						RING_BUFFER_ALL_CPUS);
5586	mutex_unlock(&trace_types_lock);
5587
5588	return ret;
5589}
5590
5591struct trace_option_dentry;
5592
5593static void
5594create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5595
5596/*
5597 * Used to clear out the tracer before deletion of an instance.
5598 * Must have trace_types_lock held.
5599 */
5600static void tracing_set_nop(struct trace_array *tr)
5601{
5602	if (tr->current_trace == &nop_trace)
5603		return;
5604	
5605	tr->current_trace->enabled--;
5606
5607	if (tr->current_trace->reset)
5608		tr->current_trace->reset(tr);
5609
5610	tr->current_trace = &nop_trace;
5611}
5612
5613static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5614{
5615	/* Only enable if the directory has been created already. */
5616	if (!tr->dir)
5617		return;
5618
5619	create_trace_option_files(tr, t);
5620}
5621
5622static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5623{
5624	struct tracer *t;
5625#ifdef CONFIG_TRACER_MAX_TRACE
5626	bool had_max_tr;
5627#endif
5628	int ret = 0;
5629
5630	mutex_lock(&trace_types_lock);
5631
5632	if (!ring_buffer_expanded) {
5633		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5634						RING_BUFFER_ALL_CPUS);
5635		if (ret < 0)
5636			goto out;
5637		ret = 0;
5638	}
5639
5640	for (t = trace_types; t; t = t->next) {
5641		if (strcmp(t->name, buf) == 0)
5642			break;
5643	}
5644	if (!t) {
5645		ret = -EINVAL;
5646		goto out;
5647	}
5648	if (t == tr->current_trace)
5649		goto out;
5650
5651#ifdef CONFIG_TRACER_SNAPSHOT
5652	if (t->use_max_tr) {
5653		arch_spin_lock(&tr->max_lock);
5654		if (tr->cond_snapshot)
5655			ret = -EBUSY;
5656		arch_spin_unlock(&tr->max_lock);
5657		if (ret)
5658			goto out;
5659	}
5660#endif
5661	/* Some tracers won't work on kernel command line */
5662	if (system_state < SYSTEM_RUNNING && t->noboot) {
5663		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5664			t->name);
5665		goto out;
5666	}
5667
5668	/* Some tracers are only allowed for the top level buffer */
5669	if (!trace_ok_for_array(t, tr)) {
5670		ret = -EINVAL;
5671		goto out;
5672	}
5673
5674	/* If trace pipe files are being read, we can't change the tracer */
5675	if (tr->current_trace->ref) {
5676		ret = -EBUSY;
5677		goto out;
5678	}
5679
5680	trace_branch_disable();
5681
5682	tr->current_trace->enabled--;
5683
5684	if (tr->current_trace->reset)
5685		tr->current_trace->reset(tr);
5686
5687	/* Current trace needs to be nop_trace before synchronize_rcu */
5688	tr->current_trace = &nop_trace;
5689
5690#ifdef CONFIG_TRACER_MAX_TRACE
5691	had_max_tr = tr->allocated_snapshot;
5692
5693	if (had_max_tr && !t->use_max_tr) {
5694		/*
5695		 * We need to make sure that the update_max_tr sees that
5696		 * current_trace changed to nop_trace to keep it from
5697		 * swapping the buffers after we resize it.
5698		 * update_max_tr() is called with interrupts disabled,
5699		 * so a synchronize_rcu() is sufficient.
5700		 */
5701		synchronize_rcu();
5702		free_snapshot(tr);
5703	}
5704#endif
5705
5706#ifdef CONFIG_TRACER_MAX_TRACE
5707	if (t->use_max_tr && !had_max_tr) {
5708		ret = tracing_alloc_snapshot_instance(tr);
5709		if (ret < 0)
5710			goto out;
5711	}
5712#endif
5713
5714	if (t->init) {
5715		ret = tracer_init(t, tr);
5716		if (ret)
5717			goto out;
5718	}
5719
5720	tr->current_trace = t;
5721	tr->current_trace->enabled++;
5722	trace_branch_enable(tr);
5723 out:
5724	mutex_unlock(&trace_types_lock);
5725
5726	return ret;
5727}
5728
5729static ssize_t
5730tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5731			size_t cnt, loff_t *ppos)
5732{
5733	struct trace_array *tr = filp->private_data;
5734	char buf[MAX_TRACER_SIZE+1];
5735	int i;
5736	size_t ret;
5737	int err;
5738
5739	ret = cnt;
5740
5741	if (cnt > MAX_TRACER_SIZE)
5742		cnt = MAX_TRACER_SIZE;
5743
5744	if (copy_from_user(buf, ubuf, cnt))
5745		return -EFAULT;
5746
5747	buf[cnt] = 0;
5748
5749	/* strip ending whitespace. */
5750	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5751		buf[i] = 0;
5752
5753	err = tracing_set_tracer(tr, buf);
5754	if (err)
5755		return err;
5756
5757	*ppos += ret;
5758
5759	return ret;
5760}
5761
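/*
 * Helpers for tracing_thresh and the max latency files: values are kept
 * in nanoseconds internally but shown to user space in microseconds,
 * hence nsecs_to_usecs() on read and the "* 1000" on write.
 */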
5762static ssize_t
5763tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5764		   size_t cnt, loff_t *ppos)
5765{
5766	char buf[64];
5767	int r;
5768
5769	r = snprintf(buf, sizeof(buf), "%ld\n",
5770		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5771	if (r > sizeof(buf))
5772		r = sizeof(buf);
5773	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5774}
5775
5776static ssize_t
5777tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5778		    size_t cnt, loff_t *ppos)
5779{
5780	unsigned long val;
5781	int ret;
5782
5783	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5784	if (ret)
5785		return ret;
5786
5787	*ptr = val * 1000;
5788
5789	return cnt;
5790}
5791
5792static ssize_t
5793tracing_thresh_read(struct file *filp, char __user *ubuf,
5794		    size_t cnt, loff_t *ppos)
5795{
5796	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5797}
5798
5799static ssize_t
5800tracing_thresh_write(struct file *filp, const char __user *ubuf,
5801		     size_t cnt, loff_t *ppos)
5802{
5803	struct trace_array *tr = filp->private_data;
5804	int ret;
5805
5806	mutex_lock(&trace_types_lock);
5807	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5808	if (ret < 0)
5809		goto out;
5810
5811	if (tr->current_trace->update_thresh) {
5812		ret = tr->current_trace->update_thresh(tr);
5813		if (ret < 0)
5814			goto out;
5815	}
5816
5817	ret = cnt;
5818out:
5819	mutex_unlock(&trace_types_lock);
5820
5821	return ret;
5822}
5823
5824#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5825
5826static ssize_t
5827tracing_max_lat_read(struct file *filp, char __user *ubuf,
5828		     size_t cnt, loff_t *ppos)
5829{
5830	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5831}
5832
5833static ssize_t
5834tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5835		      size_t cnt, loff_t *ppos)
5836{
5837	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5838}
5839
5840#endif
5841
5842static int tracing_open_pipe(struct inode *inode, struct file *filp)
5843{
5844	struct trace_array *tr = inode->i_private;
5845	struct trace_iterator *iter;
5846	int ret;
5847
5848	ret = tracing_check_open_get_tr(tr);
5849	if (ret)
5850		return ret;
5851
5852	mutex_lock(&trace_types_lock);
5853
5854	/* create a buffer to store the information to pass to userspace */
5855	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5856	if (!iter) {
5857		ret = -ENOMEM;
5858		__trace_array_put(tr);
5859		goto out;
5860	}
5861
5862	trace_seq_init(&iter->seq);
5863	iter->trace = tr->current_trace;
5864
5865	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5866		ret = -ENOMEM;
5867		goto fail;
5868	}
5869
5870	/* trace pipe does not show start of buffer */
5871	cpumask_setall(iter->started);
5872
5873	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5874		iter->iter_flags |= TRACE_FILE_LAT_FMT;
5875
5876	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
5877	if (trace_clocks[tr->clock_id].in_ns)
5878		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5879
5880	iter->tr = tr;
5881	iter->trace_buffer = &tr->trace_buffer;
5882	iter->cpu_file = tracing_get_cpu(inode);
5883	mutex_init(&iter->mutex);
5884	filp->private_data = iter;
5885
5886	if (iter->trace->pipe_open)
5887		iter->trace->pipe_open(iter);
5888
5889	nonseekable_open(inode, filp);
5890
5891	tr->current_trace->ref++;
5892out:
5893	mutex_unlock(&trace_types_lock);
5894	return ret;
5895
5896fail:
5897	kfree(iter);
5898	__trace_array_put(tr);
5899	mutex_unlock(&trace_types_lock);
5900	return ret;
5901}
5902
5903static int tracing_release_pipe(struct inode *inode, struct file *file)
5904{
5905	struct trace_iterator *iter = file->private_data;
5906	struct trace_array *tr = inode->i_private;
5907
5908	mutex_lock(&trace_types_lock);
5909
5910	tr->current_trace->ref--;
5911
5912	if (iter->trace->pipe_close)
5913		iter->trace->pipe_close(iter);
5914
5915	mutex_unlock(&trace_types_lock);
5916
5917	free_cpumask_var(iter->started);
5918	mutex_destroy(&iter->mutex);
5919	kfree(iter);
5920
5921	trace_array_put(tr);
5922
5923	return 0;
5924}
5925
5926static __poll_t
5927trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5928{
5929	struct trace_array *tr = iter->tr;
5930
5931	/* Iterators are static, they should be filled or empty */
5932	if (trace_buffer_iter(iter, iter->cpu_file))
5933		return EPOLLIN | EPOLLRDNORM;
5934
5935	if (tr->trace_flags & TRACE_ITER_BLOCK)
5936		/*
5937		 * Always select as readable when in blocking mode
5938		 */
5939		return EPOLLIN | EPOLLRDNORM;
5940	else
5941		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5942					     filp, poll_table);
5943}
5944
5945static __poll_t
5946tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5947{
5948	struct trace_iterator *iter = filp->private_data;
5949
5950	return trace_poll(iter, filp, poll_table);
5951}
5952
5953/* Must be called with iter->mutex held. */
5954static int tracing_wait_pipe(struct file *filp)
5955{
5956	struct trace_iterator *iter = filp->private_data;
5957	int ret;
5958
5959	while (trace_empty(iter)) {
5960
5961		if ((filp->f_flags & O_NONBLOCK)) {
5962			return -EAGAIN;
5963		}
5964
5965		/*
5966		 * We block until we have read something and tracing has
5967		 * been disabled; if tracing is disabled but nothing has
5968		 * been read yet, keep blocking. This allows a user to cat
5969		 * this file, then enable tracing. But after we have read
5970		 * something, we give an EOF when tracing is again disabled.
5971		 *
5972		 * iter->pos will be 0 if we haven't read anything.
5973		 */
5974		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5975			break;
5976
5977		mutex_unlock(&iter->mutex);
5978
5979		ret = wait_on_pipe(iter, 0);
5980
5981		mutex_lock(&iter->mutex);
5982
5983		if (ret)
5984			return ret;
5985	}
5986
5987	return 1;
5988}
5989
5990/*
5991 * Consumer reader.
5992 */
5993static ssize_t
5994tracing_read_pipe(struct file *filp, char __user *ubuf,
5995		  size_t cnt, loff_t *ppos)
5996{
5997	struct trace_iterator *iter = filp->private_data;
5998	ssize_t sret;
5999
6000	/*
6001	 * Avoid more than one consumer on a single file descriptor.
6002	 * This is just a matter of trace coherency; the ring buffer itself
6003	 * is protected.
6004	 */
6005	mutex_lock(&iter->mutex);
6006
6007	/* return any leftover data */
6008	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6009	if (sret != -EBUSY)
6010		goto out;
6011
6012	trace_seq_init(&iter->seq);
6013
6014	if (iter->trace->read) {
6015		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6016		if (sret)
6017			goto out;
6018	}
6019
6020waitagain:
6021	sret = tracing_wait_pipe(filp);
6022	if (sret <= 0)
6023		goto out;
6024
6025	/* stop when tracing is finished */
6026	if (trace_empty(iter)) {
6027		sret = 0;
6028		goto out;
6029	}
6030
6031	if (cnt >= PAGE_SIZE)
6032		cnt = PAGE_SIZE - 1;
6033
6034	/* reset all but tr, trace, and overruns */
6035	memset(&iter->seq, 0,
6036	       sizeof(struct trace_iterator) -
6037	       offsetof(struct trace_iterator, seq));
6038	cpumask_clear(iter->started);
6039	trace_seq_init(&iter->seq);
6040	iter->pos = -1;
6041
6042	trace_event_read_lock();
6043	trace_access_lock(iter->cpu_file);
6044	while (trace_find_next_entry_inc(iter) != NULL) {
6045		enum print_line_t ret;
6046		int save_len = iter->seq.seq.len;
6047
6048		ret = print_trace_line(iter);
6049		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6050			/* don't print partial lines */
6051			iter->seq.seq.len = save_len;
6052			break;
6053		}
6054		if (ret != TRACE_TYPE_NO_CONSUME)
6055			trace_consume(iter);
6056
6057		if (trace_seq_used(&iter->seq) >= cnt)
6058			break;
6059
6060		/*
6061		 * Setting the full flag means we reached the trace_seq buffer
6062		 * size and we should leave by partial output condition above.
6063		 * One of the trace_seq_* functions is not used properly.
6064		 */
6065		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6066			  iter->ent->type);
6067	}
6068	trace_access_unlock(iter->cpu_file);
6069	trace_event_read_unlock();
6070
6071	/* Now copy what we have to the user */
6072	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6073	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6074		trace_seq_init(&iter->seq);
6075
6076	/*
6077	 * If there was nothing to send to user, in spite of consuming trace
6078	 * entries, go back to wait for more entries.
6079	 */
6080	if (sret == -EBUSY)
6081		goto waitagain;
6082
6083out:
6084	mutex_unlock(&iter->mutex);
6085
6086	return sret;
6087}
6088
6089static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6090				     unsigned int idx)
6091{
6092	__free_page(spd->pages[idx]);
6093}
6094
6095static const struct pipe_buf_operations tracing_pipe_buf_ops = {
6096	.confirm		= generic_pipe_buf_confirm,
6097	.release		= generic_pipe_buf_release,
6098	.steal			= generic_pipe_buf_steal,
6099	.get			= generic_pipe_buf_get,
6100};
6101
6102static size_t
6103tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6104{
6105	size_t count;
6106	int save_len;
6107	int ret;
6108
6109	/* Seq buffer is page-sized, exactly what we need. */
6110	for (;;) {
6111		save_len = iter->seq.seq.len;
6112		ret = print_trace_line(iter);
6113
6114		if (trace_seq_has_overflowed(&iter->seq)) {
6115			iter->seq.seq.len = save_len;
6116			break;
6117		}
6118
6119		/*
6120		 * This should not be hit, because it should only
6121		 * be set if the iter->seq overflowed. But check it
6122		 * anyway to be safe.
6123		 */
6124		if (ret == TRACE_TYPE_PARTIAL_LINE) {
6125			iter->seq.seq.len = save_len;
6126			break;
6127		}
6128
6129		count = trace_seq_used(&iter->seq) - save_len;
6130		if (rem < count) {
6131			rem = 0;
6132			iter->seq.seq.len = save_len;
6133			break;
6134		}
6135
6136		if (ret != TRACE_TYPE_NO_CONSUME)
6137			trace_consume(iter);
6138		rem -= count;
6139		if (!trace_find_next_entry_inc(iter))	{
6140			rem = 0;
6141			iter->ent = NULL;
6142			break;
6143		}
6144	}
6145
6146	return rem;
6147}
6148
6149static ssize_t tracing_splice_read_pipe(struct file *filp,
6150					loff_t *ppos,
6151					struct pipe_inode_info *pipe,
6152					size_t len,
6153					unsigned int flags)
6154{
6155	struct page *pages_def[PIPE_DEF_BUFFERS];
6156	struct partial_page partial_def[PIPE_DEF_BUFFERS];
6157	struct trace_iterator *iter = filp->private_data;
6158	struct splice_pipe_desc spd = {
6159		.pages		= pages_def,
6160		.partial	= partial_def,
6161		.nr_pages	= 0, /* This gets updated below. */
6162		.nr_pages_max	= PIPE_DEF_BUFFERS,
6163		.ops		= &tracing_pipe_buf_ops,
6164		.spd_release	= tracing_spd_release_pipe,
6165	};
6166	ssize_t ret;
6167	size_t rem;
6168	unsigned int i;
6169
6170	if (splice_grow_spd(pipe, &spd))
6171		return -ENOMEM;
6172
6173	mutex_lock(&iter->mutex);
6174
6175	if (iter->trace->splice_read) {
6176		ret = iter->trace->splice_read(iter, filp,
6177					       ppos, pipe, len, flags);
6178		if (ret)
6179			goto out_err;
6180	}
6181
6182	ret = tracing_wait_pipe(filp);
6183	if (ret <= 0)
6184		goto out_err;
6185
6186	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6187		ret = -EFAULT;
6188		goto out_err;
6189	}
6190
6191	trace_event_read_lock();
6192	trace_access_lock(iter->cpu_file);
6193
6194	/* Fill as many pages as possible. */
6195	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6196		spd.pages[i] = alloc_page(GFP_KERNEL);
6197		if (!spd.pages[i])
6198			break;
6199
6200		rem = tracing_fill_pipe_page(rem, iter);
6201
6202		/* Copy the data into the page, so we can start over. */
6203		ret = trace_seq_to_buffer(&iter->seq,
6204					  page_address(spd.pages[i]),
6205					  trace_seq_used(&iter->seq));
6206		if (ret < 0) {
6207			__free_page(spd.pages[i]);
6208			break;
6209		}
6210		spd.partial[i].offset = 0;
6211		spd.partial[i].len = trace_seq_used(&iter->seq);
6212
6213		trace_seq_init(&iter->seq);
6214	}
6215
6216	trace_access_unlock(iter->cpu_file);
6217	trace_event_read_unlock();
6218	mutex_unlock(&iter->mutex);
6219
6220	spd.nr_pages = i;
6221
6222	if (i)
6223		ret = splice_to_pipe(pipe, &spd);
6224	else
6225		ret = 0;
6226out:
6227	splice_shrink_spd(&spd);
6228	return ret;
6229
6230out_err:
6231	mutex_unlock(&iter->mutex);
6232	goto out;
6233}
6234
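/*
 * Report the ring buffer size in KB.  When all CPUs are selected, "X" is
 * printed if the per-cpu sizes differ, and an "(expanded: N)" suffix means
 * the buffer still has its minimal boot-time size and will grow to N KB
 * once tracing is actually used.
 */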
6235static ssize_t
6236tracing_entries_read(struct file *filp, char __user *ubuf,
6237		     size_t cnt, loff_t *ppos)
6238{
6239	struct inode *inode = file_inode(filp);
6240	struct trace_array *tr = inode->i_private;
6241	int cpu = tracing_get_cpu(inode);
6242	char buf[64];
6243	int r = 0;
6244	ssize_t ret;
6245
6246	mutex_lock(&trace_types_lock);
6247
6248	if (cpu == RING_BUFFER_ALL_CPUS) {
6249		int cpu, buf_size_same;
6250		unsigned long size;
6251
6252		size = 0;
6253		buf_size_same = 1;
6254		/* check if all cpu sizes are same */
6255		for_each_tracing_cpu(cpu) {
6256			/* fill in the size from first enabled cpu */
6257			if (size == 0)
6258				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6259			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
6260				buf_size_same = 0;
6261				break;
6262			}
6263		}
6264
6265		if (buf_size_same) {
6266			if (!ring_buffer_expanded)
6267				r = sprintf(buf, "%lu (expanded: %lu)\n",
6268					    size >> 10,
6269					    trace_buf_size >> 10);
6270			else
6271				r = sprintf(buf, "%lu\n", size >> 10);
6272		} else
6273			r = sprintf(buf, "X\n");
6274	} else
6275		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
6276
6277	mutex_unlock(&trace_types_lock);
6278
6279	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6280	return ret;
6281}
6282
6283static ssize_t
6284tracing_entries_write(struct file *filp, const char __user *ubuf,
6285		      size_t cnt, loff_t *ppos)
6286{
6287	struct inode *inode = file_inode(filp);
6288	struct trace_array *tr = inode->i_private;
6289	unsigned long val;
6290	int ret;
6291
6292	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6293	if (ret)
6294		return ret;
6295
6296	/* must have at least 1 entry */
6297	if (!val)
6298		return -EINVAL;
6299
6300	/* value is in KB */
6301	val <<= 10;
6302	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6303	if (ret < 0)
6304		return ret;
6305
6306	*ppos += cnt;
6307
6308	return cnt;
6309}
6310
6311static ssize_t
6312tracing_total_entries_read(struct file *filp, char __user *ubuf,
6313				size_t cnt, loff_t *ppos)
6314{
6315	struct trace_array *tr = filp->private_data;
6316	char buf[64];
6317	int r, cpu;
6318	unsigned long size = 0, expanded_size = 0;
6319
6320	mutex_lock(&trace_types_lock);
6321	for_each_tracing_cpu(cpu) {
6322		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6323		if (!ring_buffer_expanded)
6324			expanded_size += trace_buf_size >> 10;
6325	}
6326	if (ring_buffer_expanded)
6327		r = sprintf(buf, "%lu\n", size);
6328	else
6329		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6330	mutex_unlock(&trace_types_lock);
6331
6332	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6333}
6334
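/*
 * Handlers for the free_buffer file: any write is accepted (so that
 * "echo > free_buffer" works), and on release the ring buffer is
 * resized down to zero, after stopping tracing first if the
 * stop-on-free trace option is set.
 */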
6335static ssize_t
6336tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6337			  size_t cnt, loff_t *ppos)
6338{
6339	/*
6340	 * There is no need to read what the user has written, this function
6341	 * is just to make sure that there is no error when "echo" is used
6342	 */
6343
6344	*ppos += cnt;
6345
6346	return cnt;
6347}
6348
6349static int
6350tracing_free_buffer_release(struct inode *inode, struct file *filp)
6351{
6352	struct trace_array *tr = inode->i_private;
6353
6354	/* disable tracing ? */
6355	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6356		tracer_tracing_off(tr);
6357	/* resize the ring buffer to 0 */
6358	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6359
6360	trace_array_put(tr);
6361
6362	return 0;
6363}
6364
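/*
 * Handler for writes to the trace_marker file. A string written from
 * user space (e.g. "echo hello > trace_marker") is injected into the
 * ring buffer as a TRACE_PRINT event, which makes it easy to correlate
 * user-space activity with kernel trace data.
 */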
6365static ssize_t
6366tracing_mark_write(struct file *filp, const char __user *ubuf,
6367					size_t cnt, loff_t *fpos)
6368{
6369	struct trace_array *tr = filp->private_data;
6370	struct ring_buffer_event *event;
6371	enum event_trigger_type tt = ETT_NONE;
6372	struct ring_buffer *buffer;
6373	struct print_entry *entry;
6374	unsigned long irq_flags;
6375	ssize_t written;
6376	int size;
6377	int len;
6378
6379/* Used in tracing_mark_raw_write() as well */
6380#define FAULTED_STR "<faulted>"
6381#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6382
6383	if (tracing_disabled)
6384		return -EINVAL;
6385
6386	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6387		return -EINVAL;
6388
6389	if (cnt > TRACE_BUF_SIZE)
6390		cnt = TRACE_BUF_SIZE;
6391
6392	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6393
6394	local_save_flags(irq_flags);
6395	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6396
6397	/* If less than "<faulted>", then make sure we can still add that */
6398	if (cnt < FAULTED_SIZE)
6399		size += FAULTED_SIZE - cnt;
6400
6401	buffer = tr->trace_buffer.buffer;
6402	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6403					    irq_flags, preempt_count());
6404	if (unlikely(!event))
6405		/* Ring buffer disabled, return as if not open for write */
6406		return -EBADF;
6407
6408	entry = ring_buffer_event_data(event);
6409	entry->ip = _THIS_IP_;
6410
6411	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6412	if (len) {
6413		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6414		cnt = FAULTED_SIZE;
6415		written = -EFAULT;
6416	} else
6417		written = cnt;
6418	len = cnt;
6419
6420	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6421		/* do not add \n before testing triggers, but add \0 */
6422		entry->buf[cnt] = '\0';
6423		tt = event_triggers_call(tr->trace_marker_file, entry, event);
6424	}
6425
6426	if (entry->buf[cnt - 1] != '\n') {
6427		entry->buf[cnt] = '\n';
6428		entry->buf[cnt + 1] = '\0';
6429	} else
6430		entry->buf[cnt] = '\0';
6431
6432	__buffer_unlock_commit(buffer, event);
6433
6434	if (tt)
6435		event_triggers_post_call(tr->trace_marker_file, tt);
6436
6437	if (written > 0)
6438		*fpos += written;
6439
6440	return written;
6441}
6442
6443/* Limit it for now to 3K (including tag) */
6444#define RAW_DATA_MAX_SIZE (1024*3)
6445
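/*
 * Binary counterpart of tracing_mark_write(), backing the
 * trace_marker_raw file: a write must start with an unsigned int tag
 * id (enforced by the size check below) and is recorded as a
 * TRACE_RAW_DATA event.
 */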
6446static ssize_t
6447tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6448					size_t cnt, loff_t *fpos)
6449{
6450	struct trace_array *tr = filp->private_data;
6451	struct ring_buffer_event *event;
6452	struct ring_buffer *buffer;
6453	struct raw_data_entry *entry;
6454	unsigned long irq_flags;
6455	ssize_t written;
6456	int size;
6457	int len;
6458
6459#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6460
6461	if (tracing_disabled)
6462		return -EINVAL;
6463
6464	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6465		return -EINVAL;
6466
6467	/* The marker must at least have a tag id */
6468	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6469		return -EINVAL;
6470
6471	if (cnt > TRACE_BUF_SIZE)
6472		cnt = TRACE_BUF_SIZE;
6473
6474	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6475
6476	local_save_flags(irq_flags);
6477	size = sizeof(*entry) + cnt;
6478	if (cnt < FAULT_SIZE_ID)
6479		size += FAULT_SIZE_ID - cnt;
6480
6481	buffer = tr->trace_buffer.buffer;
6482	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6483					    irq_flags, preempt_count());
6484	if (!event)
6485		/* Ring buffer disabled, return as if not open for write */
6486		return -EBADF;
6487
6488	entry = ring_buffer_event_data(event);
6489
6490	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6491	if (len) {
6492		entry->id = -1;
6493		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6494		written = -EFAULT;
6495	} else
6496		written = cnt;
6497
6498	__buffer_unlock_commit(buffer, event);
6499
6500	if (written > 0)
6501		*fpos += written;
6502
6503	return written;
6504}
6505
6506static int tracing_clock_show(struct seq_file *m, void *v)
6507{
6508	struct trace_array *tr = m->private;
6509	int i;
6510
6511	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6512		seq_printf(m,
6513			"%s%s%s%s", i ? " " : "",
6514			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6515			i == tr->clock_id ? "]" : "");
6516	seq_putc(m, '\n');
6517
6518	return 0;
6519}
6520
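/*
 * Select the clock used to timestamp ring-buffer entries, e.g.
 * "echo global > trace_clock". Switching clocks resets the buffers,
 * since timestamps taken from different clocks are not comparable.
 */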
6521int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6522{
6523	int i;
6524
6525	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6526		if (strcmp(trace_clocks[i].name, clockstr) == 0)
6527			break;
6528	}
6529	if (i == ARRAY_SIZE(trace_clocks))
6530		return -EINVAL;
6531
6532	mutex_lock(&trace_types_lock);
6533
6534	tr->clock_id = i;
6535
6536	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6537
6538	/*
6539	 * New clock may not be consistent with the previous clock.
6540	 * Reset the buffer so that it doesn't have incomparable timestamps.
6541	 */
6542	tracing_reset_online_cpus(&tr->trace_buffer);
6543
6544#ifdef CONFIG_TRACER_MAX_TRACE
6545	if (tr->max_buffer.buffer)
6546		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6547	tracing_reset_online_cpus(&tr->max_buffer);
6548#endif
6549
6550	mutex_unlock(&trace_types_lock);
6551
6552	return 0;
6553}
6554
6555static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6556				   size_t cnt, loff_t *fpos)
6557{
6558	struct seq_file *m = filp->private_data;
6559	struct trace_array *tr = m->private;
6560	char buf[64];
6561	const char *clockstr;
6562	int ret;
6563
6564	if (cnt >= sizeof(buf))
6565		return -EINVAL;
6566
6567	if (copy_from_user(buf, ubuf, cnt))
6568		return -EFAULT;
6569
6570	buf[cnt] = 0;
6571
6572	clockstr = strstrip(buf);
6573
6574	ret = tracing_set_clock(tr, clockstr);
6575	if (ret)
6576		return ret;
6577
6578	*fpos += cnt;
6579
6580	return cnt;
6581}
6582
6583static int tracing_clock_open(struct inode *inode, struct file *file)
6584{
6585	struct trace_array *tr = inode->i_private;
6586	int ret;
6587
6588	ret = tracing_check_open_get_tr(tr);
6589	if (ret)
6590		return ret;
6591
6592	ret = single_open(file, tracing_clock_show, inode->i_private);
6593	if (ret < 0)
6594		trace_array_put(tr);
6595
6596	return ret;
6597}
6598
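/*
 * The timestamp_mode file reports whether events carry delta
 * timestamps (the default) or absolute ones; the current mode is the
 * one shown in brackets, e.g. "[delta] absolute".
 */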
6599static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6600{
6601	struct trace_array *tr = m->private;
6602
6603	mutex_lock(&trace_types_lock);
6604
6605	if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6606		seq_puts(m, "delta [absolute]\n");
6607	else
6608		seq_puts(m, "[delta] absolute\n");
6609
6610	mutex_unlock(&trace_types_lock);
6611
6612	return 0;
6613}
6614
6615static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6616{
6617	struct trace_array *tr = inode->i_private;
6618	int ret;
6619
6620	ret = tracing_check_open_get_tr(tr);
6621	if (ret)
6622		return ret;
6623
6624	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6625	if (ret < 0)
6626		trace_array_put(tr);
6627
6628	return ret;
6629}
6630
6631int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6632{
6633	int ret = 0;
6634
6635	mutex_lock(&trace_types_lock);
6636
6637	if (abs && tr->time_stamp_abs_ref++)
6638		goto out;
6639
6640	if (!abs) {
6641		if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6642			ret = -EINVAL;
6643			goto out;
6644		}
6645
6646		if (--tr->time_stamp_abs_ref)
6647			goto out;
6648	}
6649
6650	ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6651
6652#ifdef CONFIG_TRACER_MAX_TRACE
6653	if (tr->max_buffer.buffer)
6654		ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6655#endif
6656 out:
6657	mutex_unlock(&trace_types_lock);
6658
6659	return ret;
6660}
6661
6662struct ftrace_buffer_info {
6663	struct trace_iterator	iter;
6664	void			*spare;
6665	unsigned int		spare_cpu;
6666	unsigned int		read;
6667};
6668
6669#ifdef CONFIG_TRACER_SNAPSHOT
6670static int tracing_snapshot_open(struct inode *inode, struct file *file)
6671{
6672	struct trace_array *tr = inode->i_private;
6673	struct trace_iterator *iter;
6674	struct seq_file *m;
6675	int ret;
6676
6677	ret = tracing_check_open_get_tr(tr);
6678	if (ret)
6679		return ret;
6680
6681	if (file->f_mode & FMODE_READ) {
6682		iter = __tracing_open(inode, file, true);
6683		if (IS_ERR(iter))
6684			ret = PTR_ERR(iter);
6685	} else {
6686		/* Writes still need the seq_file to hold the private data */
6687		ret = -ENOMEM;
6688		m = kzalloc(sizeof(*m), GFP_KERNEL);
6689		if (!m)
6690			goto out;
6691		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6692		if (!iter) {
6693			kfree(m);
6694			goto out;
6695		}
6696		ret = 0;
6697
6698		iter->tr = tr;
6699		iter->trace_buffer = &tr->max_buffer;
6700		iter->cpu_file = tracing_get_cpu(inode);
6701		m->private = iter;
6702		file->private_data = m;
6703	}
6704out:
6705	if (ret < 0)
6706		trace_array_put(tr);
6707
6708	return ret;
6709}
6710
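/*
 * Writes to the snapshot file control the max/snapshot buffer:
 * 0 frees the snapshot buffer, 1 allocates it (if needed) and swaps it
 * with the live buffer, and any other value simply clears the snapshot
 * contents. See the switch statement below.
 */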
6711static ssize_t
6712tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6713		       loff_t *ppos)
6714{
6715	struct seq_file *m = filp->private_data;
6716	struct trace_iterator *iter = m->private;
6717	struct trace_array *tr = iter->tr;
6718	unsigned long val;
6719	int ret;
6720
6721	ret = tracing_update_buffers();
6722	if (ret < 0)
6723		return ret;
6724
6725	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6726	if (ret)
6727		return ret;
6728
6729	mutex_lock(&trace_types_lock);
6730
6731	if (tr->current_trace->use_max_tr) {
6732		ret = -EBUSY;
6733		goto out;
6734	}
6735
6736	arch_spin_lock(&tr->max_lock);
6737	if (tr->cond_snapshot)
6738		ret = -EBUSY;
6739	arch_spin_unlock(&tr->max_lock);
6740	if (ret)
6741		goto out;
6742
6743	switch (val) {
6744	case 0:
6745		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6746			ret = -EINVAL;
6747			break;
6748		}
6749		if (tr->allocated_snapshot)
6750			free_snapshot(tr);
6751		break;
6752	case 1:
6753/* Only allow per-cpu swap if the ring buffer supports it */
6754#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6755		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6756			ret = -EINVAL;
6757			break;
6758		}
6759#endif
6760		if (tr->allocated_snapshot)
6761			ret = resize_buffer_duplicate_size(&tr->max_buffer,
6762					&tr->trace_buffer, iter->cpu_file);
6763		else
6764			ret = tracing_alloc_snapshot_instance(tr);
6765		if (ret < 0)
6766			break;
6767		local_irq_disable();
6768		/* Now, we're going to swap */
6769		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6770			update_max_tr(tr, current, smp_processor_id(), NULL);
6771		else
6772			update_max_tr_single(tr, current, iter->cpu_file);
6773		local_irq_enable();
6774		break;
6775	default:
6776		if (tr->allocated_snapshot) {
6777			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6778				tracing_reset_online_cpus(&tr->max_buffer);
6779			else
6780				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
6781		}
6782		break;
6783	}
6784
6785	if (ret >= 0) {
6786		*ppos += cnt;
6787		ret = cnt;
6788	}
6789out:
6790	mutex_unlock(&trace_types_lock);
6791	return ret;
6792}
6793
6794static int tracing_snapshot_release(struct inode *inode, struct file *file)
6795{
6796	struct seq_file *m = file->private_data;
6797	int ret;
6798
6799	ret = tracing_release(inode, file);
6800
6801	if (file->f_mode & FMODE_READ)
6802		return ret;
6803
6804	/* If write only, the seq_file is just a stub */
6805	if (m)
6806		kfree(m->private);
6807	kfree(m);
6808
6809	return 0;
6810}
6811
6812static int tracing_buffers_open(struct inode *inode, struct file *filp);
6813static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6814				    size_t count, loff_t *ppos);
6815static int tracing_buffers_release(struct inode *inode, struct file *file);
6816static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6817		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6818
6819static int snapshot_raw_open(struct inode *inode, struct file *filp)
6820{
6821	struct ftrace_buffer_info *info;
6822	int ret;
6823
6824	/* The following checks for tracefs lockdown */
6825	ret = tracing_buffers_open(inode, filp);
6826	if (ret < 0)
6827		return ret;
6828
6829	info = filp->private_data;
6830
6831	if (info->iter.trace->use_max_tr) {
6832		tracing_buffers_release(inode, filp);
6833		return -EBUSY;
6834	}
6835
6836	info->iter.snapshot = true;
6837	info->iter.trace_buffer = &info->iter.tr->max_buffer;
6838
6839	return ret;
6840}
6841
6842#endif /* CONFIG_TRACER_SNAPSHOT */
6843
6844
6845static const struct file_operations tracing_thresh_fops = {
6846	.open		= tracing_open_generic,
6847	.read		= tracing_thresh_read,
6848	.write		= tracing_thresh_write,
6849	.llseek		= generic_file_llseek,
6850};
6851
6852#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6853static const struct file_operations tracing_max_lat_fops = {
6854	.open		= tracing_open_generic,
6855	.read		= tracing_max_lat_read,
6856	.write		= tracing_max_lat_write,
6857	.llseek		= generic_file_llseek,
6858};
6859#endif
6860
6861static const struct file_operations set_tracer_fops = {
6862	.open		= tracing_open_generic,
6863	.read		= tracing_set_trace_read,
6864	.write		= tracing_set_trace_write,
6865	.llseek		= generic_file_llseek,
6866};
6867
6868static const struct file_operations tracing_pipe_fops = {
6869	.open		= tracing_open_pipe,
6870	.poll		= tracing_poll_pipe,
6871	.read		= tracing_read_pipe,
6872	.splice_read	= tracing_splice_read_pipe,
6873	.release	= tracing_release_pipe,
6874	.llseek		= no_llseek,
6875};
6876
6877static const struct file_operations tracing_entries_fops = {
6878	.open		= tracing_open_generic_tr,
6879	.read		= tracing_entries_read,
6880	.write		= tracing_entries_write,
6881	.llseek		= generic_file_llseek,
6882	.release	= tracing_release_generic_tr,
6883};
6884
6885static const struct file_operations tracing_total_entries_fops = {
6886	.open		= tracing_open_generic_tr,
6887	.read		= tracing_total_entries_read,
6888	.llseek		= generic_file_llseek,
6889	.release	= tracing_release_generic_tr,
6890};
6891
6892static const struct file_operations tracing_free_buffer_fops = {
6893	.open		= tracing_open_generic_tr,
6894	.write		= tracing_free_buffer_write,
6895	.release	= tracing_free_buffer_release,
6896};
6897
6898static const struct file_operations tracing_mark_fops = {
6899	.open		= tracing_open_generic_tr,
6900	.write		= tracing_mark_write,
6901	.llseek		= generic_file_llseek,
6902	.release	= tracing_release_generic_tr,
6903};
6904
6905static const struct file_operations tracing_mark_raw_fops = {
6906	.open		= tracing_open_generic_tr,
6907	.write		= tracing_mark_raw_write,
6908	.llseek		= generic_file_llseek,
6909	.release	= tracing_release_generic_tr,
6910};
6911
6912static const struct file_operations trace_clock_fops = {
6913	.open		= tracing_clock_open,
6914	.read		= seq_read,
6915	.llseek		= seq_lseek,
6916	.release	= tracing_single_release_tr,
6917	.write		= tracing_clock_write,
6918};
6919
6920static const struct file_operations trace_time_stamp_mode_fops = {
6921	.open		= tracing_time_stamp_mode_open,
6922	.read		= seq_read,
6923	.llseek		= seq_lseek,
6924	.release	= tracing_single_release_tr,
6925};
6926
6927#ifdef CONFIG_TRACER_SNAPSHOT
6928static const struct file_operations snapshot_fops = {
6929	.open		= tracing_snapshot_open,
6930	.read		= seq_read,
6931	.write		= tracing_snapshot_write,
6932	.llseek		= tracing_lseek,
6933	.release	= tracing_snapshot_release,
6934};
6935
6936static const struct file_operations snapshot_raw_fops = {
6937	.open		= snapshot_raw_open,
6938	.read		= tracing_buffers_read,
6939	.release	= tracing_buffers_release,
6940	.splice_read	= tracing_buffers_splice_read,
6941	.llseek		= no_llseek,
6942};
6943
6944#endif /* CONFIG_TRACER_SNAPSHOT */
6945
6946#define TRACING_LOG_ERRS_MAX	8
6947#define TRACING_LOG_LOC_MAX	128
6948
6949#define CMD_PREFIX "  Command: "
6950
6951struct err_info {
6952	const char	**errs;	/* ptr to loc-specific array of err strings */
6953	u8		type;	/* index into errs -> specific err string */
6954	u8		pos;	/* MAX_FILTER_STR_VAL = 256 */
6955	u64		ts;
6956};
6957
6958struct tracing_log_err {
6959	struct list_head	list;
6960	struct err_info		info;
6961	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
6962	char			cmd[MAX_FILTER_STR_VAL]; /* what caused err */
6963};
6964
6965static DEFINE_MUTEX(tracing_err_log_lock);
6966
6967static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
6968{
6969	struct tracing_log_err *err;
6970
6971	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
6972		err = kzalloc(sizeof(*err), GFP_KERNEL);
6973		if (!err)
6974			err = ERR_PTR(-ENOMEM);
6975		tr->n_err_log_entries++;
6976
6977		return err;
6978	}
6979
6980	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
6981	list_del(&err->list);
6982
6983	return err;
6984}
6985
6986/**
6987 * err_pos - find the position of a string within a command for error careting
6988 * @cmd: The tracing command that caused the error
6989 * @str: The string to position the caret at within @cmd
6990 *
6991 * Finds the position of the first occurrence of @str within @cmd.  The
6992 * return value can be passed to tracing_log_err() for caret placement
6993 * within @cmd.
6994 *
6995 * Returns the index within @cmd of the first occurrence of @str or 0
6996 * if @str was not found.
6997 */
6998unsigned int err_pos(char *cmd, const char *str)
6999{
7000	char *found;
7001
7002	if (WARN_ON(!strlen(cmd)))
7003		return 0;
7004
7005	found = strstr(cmd, str);
7006	if (found)
7007		return found - cmd;
7008
7009	return 0;
7010}
7011
7012/**
7013 * tracing_log_err - write an error to the tracing error log
7014 * @tr: The associated trace array for the error (NULL for top level array)
7015 * @loc: A string describing where the error occurred
7016 * @cmd: The tracing command that caused the error
7017 * @errs: The array of loc-specific static error strings
7018 * @type: The index into errs[], which produces the specific static err string
7019 * @pos: The position the caret should be placed in the cmd
7020 *
7021 * Writes an error into tracing/error_log of the form:
7022 *
7023 * <loc>: error: <text>
7024 *   Command: <cmd>
7025 *              ^
7026 *
7027 * tracing/error_log is a small log file containing the last
7028 * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
7029 * unless there has been a tracing error, and the error log can be
7030 * cleared and its memory freed by writing the empty string to it in
7031 * truncation mode, i.e. echo > tracing/error_log.
7032 *
7033 * NOTE: the @errs array along with the @type param are used to
7034 * produce a static error string - this string is not copied and saved
7035 * when the error is logged - only a pointer to it is saved.  See
7036 * existing callers for examples of how static strings are typically
7037 * defined for use with tracing_log_err().
7038 */
7039void tracing_log_err(struct trace_array *tr,
7040		     const char *loc, const char *cmd,
7041		     const char **errs, u8 type, u8 pos)
7042{
7043	struct tracing_log_err *err;
7044
7045	if (!tr)
7046		tr = &global_trace;
7047
7048	mutex_lock(&tracing_err_log_lock);
7049	err = get_tracing_log_err(tr);
7050	if (PTR_ERR(err) == -ENOMEM) {
7051		mutex_unlock(&tracing_err_log_lock);
7052		return;
7053	}
7054
7055	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7056	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7057
7058	err->info.errs = errs;
7059	err->info.type = type;
7060	err->info.pos = pos;
7061	err->info.ts = local_clock();
7062
7063	list_add_tail(&err->list, &tr->err_log);
7064	mutex_unlock(&tracing_err_log_lock);
7065}
7066
7067static void clear_tracing_err_log(struct trace_array *tr)
7068{
7069	struct tracing_log_err *err, *next;
7070
7071	mutex_lock(&tracing_err_log_lock);
7072	list_for_each_entry_safe(err, next, &tr->err_log, list) {
7073		list_del(&err->list);
7074		kfree(err);
7075	}
7076
7077	tr->n_err_log_entries = 0;
7078	mutex_unlock(&tracing_err_log_lock);
7079}
7080
7081static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7082{
7083	struct trace_array *tr = m->private;
7084
7085	mutex_lock(&tracing_err_log_lock);
7086
7087	return seq_list_start(&tr->err_log, *pos);
7088}
7089
7090static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7091{
7092	struct trace_array *tr = m->private;
7093
7094	return seq_list_next(v, &tr->err_log, pos);
7095}
7096
7097static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7098{
7099	mutex_unlock(&tracing_err_log_lock);
7100}
7101
7102static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7103{
7104	u8 i;
7105
7106	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7107		seq_putc(m, ' ');
7108	for (i = 0; i < pos; i++)
7109		seq_putc(m, ' ');
7110	seq_puts(m, "^\n");
7111}
7112
7113static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7114{
7115	struct tracing_log_err *err = v;
7116
7117	if (err) {
7118		const char *err_text = err->info.errs[err->info.type];
7119		u64 sec = err->info.ts;
7120		u32 nsec;
7121
7122		nsec = do_div(sec, NSEC_PER_SEC);
7123		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7124			   err->loc, err_text);
7125		seq_printf(m, "%s", err->cmd);
7126		tracing_err_log_show_pos(m, err->info.pos);
7127	}
7128
7129	return 0;
7130}
7131
7132static const struct seq_operations tracing_err_log_seq_ops = {
7133	.start  = tracing_err_log_seq_start,
7134	.next   = tracing_err_log_seq_next,
7135	.stop   = tracing_err_log_seq_stop,
7136	.show   = tracing_err_log_seq_show
7137};
7138
7139static int tracing_err_log_open(struct inode *inode, struct file *file)
7140{
7141	struct trace_array *tr = inode->i_private;
7142	int ret = 0;
7143
7144	ret = tracing_check_open_get_tr(tr);
7145	if (ret)
7146		return ret;
7147
7148	/* If this file was opened for write, then erase contents */
7149	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7150		clear_tracing_err_log(tr);
7151
7152	if (file->f_mode & FMODE_READ) {
7153		ret = seq_open(file, &tracing_err_log_seq_ops);
7154		if (!ret) {
7155			struct seq_file *m = file->private_data;
7156			m->private = tr;
7157		} else {
7158			trace_array_put(tr);
7159		}
7160	}
7161	return ret;
7162}
7163
7164static ssize_t tracing_err_log_write(struct file *file,
7165				     const char __user *buffer,
7166				     size_t count, loff_t *ppos)
7167{
7168	return count;
7169}
7170
7171static int tracing_err_log_release(struct inode *inode, struct file *file)
7172{
7173	struct trace_array *tr = inode->i_private;
7174
7175	trace_array_put(tr);
7176
7177	if (file->f_mode & FMODE_READ)
7178		seq_release(inode, file);
7179
7180	return 0;
7181}
7182
7183static const struct file_operations tracing_err_log_fops = {
7184	.open           = tracing_err_log_open,
7185	.write		= tracing_err_log_write,
7186	.read           = seq_read,
7187	.llseek         = seq_lseek,
7188	.release        = tracing_err_log_release,
7189};
7190
7191static int tracing_buffers_open(struct inode *inode, struct file *filp)
7192{
7193	struct trace_array *tr = inode->i_private;
7194	struct ftrace_buffer_info *info;
7195	int ret;
7196
7197	ret = tracing_check_open_get_tr(tr);
7198	if (ret)
7199		return ret;
7200
7201	info = kzalloc(sizeof(*info), GFP_KERNEL);
7202	if (!info) {
7203		trace_array_put(tr);
7204		return -ENOMEM;
7205	}
7206
7207	mutex_lock(&trace_types_lock);
7208
7209	info->iter.tr		= tr;
7210	info->iter.cpu_file	= tracing_get_cpu(inode);
7211	info->iter.trace	= tr->current_trace;
7212	info->iter.trace_buffer = &tr->trace_buffer;
7213	info->spare		= NULL;
7214	/* Force reading ring buffer for first read */
7215	info->read		= (unsigned int)-1;
7216
7217	filp->private_data = info;
7218
7219	tr->current_trace->ref++;
7220
7221	mutex_unlock(&trace_types_lock);
7222
7223	ret = nonseekable_open(inode, filp);
7224	if (ret < 0)
7225		trace_array_put(tr);
7226
7227	return ret;
7228}
7229
7230static __poll_t
7231tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7232{
7233	struct ftrace_buffer_info *info = filp->private_data;
7234	struct trace_iterator *iter = &info->iter;
7235
7236	return trace_poll(iter, filp, poll_table);
7237}
7238
7239static ssize_t
7240tracing_buffers_read(struct file *filp, char __user *ubuf,
7241		     size_t count, loff_t *ppos)
7242{
7243	struct ftrace_buffer_info *info = filp->private_data;
7244	struct trace_iterator *iter = &info->iter;
7245	ssize_t ret = 0;
7246	ssize_t size;
7247
7248	if (!count)
7249		return 0;
7250
7251#ifdef CONFIG_TRACER_MAX_TRACE
7252	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7253		return -EBUSY;
7254#endif
7255
7256	if (!info->spare) {
7257		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
7258							  iter->cpu_file);
7259		if (IS_ERR(info->spare)) {
7260			ret = PTR_ERR(info->spare);
7261			info->spare = NULL;
7262		} else {
7263			info->spare_cpu = iter->cpu_file;
7264		}
7265	}
7266	if (!info->spare)
7267		return ret;
7268
7269	/* Do we have previous read data to read? */
7270	if (info->read < PAGE_SIZE)
7271		goto read;
7272
7273 again:
7274	trace_access_lock(iter->cpu_file);
7275	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
7276				    &info->spare,
7277				    count,
7278				    iter->cpu_file, 0);
7279	trace_access_unlock(iter->cpu_file);
7280
7281	if (ret < 0) {
7282		if (trace_empty(iter)) {
7283			if ((filp->f_flags & O_NONBLOCK))
7284				return -EAGAIN;
7285
7286			ret = wait_on_pipe(iter, 0);
7287			if (ret)
7288				return ret;
7289
7290			goto again;
7291		}
7292		return 0;
7293	}
7294
7295	info->read = 0;
7296 read:
7297	size = PAGE_SIZE - info->read;
7298	if (size > count)
7299		size = count;
7300
7301	ret = copy_to_user(ubuf, info->spare + info->read, size);
7302	if (ret == size)
7303		return -EFAULT;
7304
7305	size -= ret;
7306
7307	*ppos += size;
7308	info->read += size;
7309
7310	return size;
7311}
7312
7313static int tracing_buffers_release(struct inode *inode, struct file *file)
7314{
7315	struct ftrace_buffer_info *info = file->private_data;
7316	struct trace_iterator *iter = &info->iter;
7317
7318	mutex_lock(&trace_types_lock);
7319
7320	iter->tr->current_trace->ref--;
7321
7322	__trace_array_put(iter->tr);
7323
7324	if (info->spare)
7325		ring_buffer_free_read_page(iter->trace_buffer->buffer,
7326					   info->spare_cpu, info->spare);
7327	kfree(info);
7328
7329	mutex_unlock(&trace_types_lock);
7330
7331	return 0;
7332}
7333
7334struct buffer_ref {
7335	struct ring_buffer	*buffer;
7336	void			*page;
7337	int			cpu;
7338	refcount_t		refcount;
7339};
7340
7341static void buffer_ref_release(struct buffer_ref *ref)
7342{
7343	if (!refcount_dec_and_test(&ref->refcount))
7344		return;
7345	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7346	kfree(ref);
7347}
7348
7349static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7350				    struct pipe_buffer *buf)
7351{
7352	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7353
7354	buffer_ref_release(ref);
7355	buf->private = 0;
7356}
7357
7358static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7359				struct pipe_buffer *buf)
7360{
7361	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7362
7363	if (refcount_read(&ref->refcount) > INT_MAX/2)
7364		return false;
7365
7366	refcount_inc(&ref->refcount);
7367	return true;
7368}
7369
7370/* Pipe buffer operations for a buffer. */
7371static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7372	.confirm		= generic_pipe_buf_confirm,
7373	.release		= buffer_pipe_buf_release,
7374	.steal			= generic_pipe_buf_nosteal,
7375	.get			= buffer_pipe_buf_get,
7376};
7377
7378/*
7379 * Callback from splice_to_pipe(), if we need to release some pages
7380 * at the end of the spd in case we errored out while filling the pipe.
7381 */
7382static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7383{
7384	struct buffer_ref *ref =
7385		(struct buffer_ref *)spd->partial[i].private;
7386
7387	buffer_ref_release(ref);
7388	spd->partial[i].private = 0;
7389}
7390
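/*
 * Splice support for trace_pipe_raw: whole ring-buffer pages are
 * handed to the pipe by reference (no copy), with each page wrapped in
 * a buffer_ref so it is returned to the ring buffer only once the last
 * pipe reader drops it.
 */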
7391static ssize_t
7392tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7393			    struct pipe_inode_info *pipe, size_t len,
7394			    unsigned int flags)
7395{
7396	struct ftrace_buffer_info *info = file->private_data;
7397	struct trace_iterator *iter = &info->iter;
7398	struct partial_page partial_def[PIPE_DEF_BUFFERS];
7399	struct page *pages_def[PIPE_DEF_BUFFERS];
7400	struct splice_pipe_desc spd = {
7401		.pages		= pages_def,
7402		.partial	= partial_def,
7403		.nr_pages_max	= PIPE_DEF_BUFFERS,
7404		.ops		= &buffer_pipe_buf_ops,
7405		.spd_release	= buffer_spd_release,
7406	};
7407	struct buffer_ref *ref;
7408	int entries, i;
7409	ssize_t ret = 0;
7410
7411#ifdef CONFIG_TRACER_MAX_TRACE
7412	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7413		return -EBUSY;
7414#endif
7415
7416	if (*ppos & (PAGE_SIZE - 1))
7417		return -EINVAL;
7418
7419	if (len & (PAGE_SIZE - 1)) {
7420		if (len < PAGE_SIZE)
7421			return -EINVAL;
7422		len &= PAGE_MASK;
7423	}
7424
7425	if (splice_grow_spd(pipe, &spd))
7426		return -ENOMEM;
7427
7428 again:
7429	trace_access_lock(iter->cpu_file);
7430	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7431
7432	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7433		struct page *page;
7434		int r;
7435
7436		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7437		if (!ref) {
7438			ret = -ENOMEM;
7439			break;
7440		}
7441
7442		refcount_set(&ref->refcount, 1);
7443		ref->buffer = iter->trace_buffer->buffer;
7444		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7445		if (IS_ERR(ref->page)) {
7446			ret = PTR_ERR(ref->page);
7447			ref->page = NULL;
7448			kfree(ref);
7449			break;
7450		}
7451		ref->cpu = iter->cpu_file;
7452
7453		r = ring_buffer_read_page(ref->buffer, &ref->page,
7454					  len, iter->cpu_file, 1);
7455		if (r < 0) {
7456			ring_buffer_free_read_page(ref->buffer, ref->cpu,
7457						   ref->page);
7458			kfree(ref);
7459			break;
7460		}
7461
7462		page = virt_to_page(ref->page);
7463
7464		spd.pages[i] = page;
7465		spd.partial[i].len = PAGE_SIZE;
7466		spd.partial[i].offset = 0;
7467		spd.partial[i].private = (unsigned long)ref;
7468		spd.nr_pages++;
7469		*ppos += PAGE_SIZE;
7470
7471		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7472	}
7473
7474	trace_access_unlock(iter->cpu_file);
7475	spd.nr_pages = i;
7476
7477	/* did we read anything? */
7478	if (!spd.nr_pages) {
7479		if (ret)
7480			goto out;
7481
7482		ret = -EAGAIN;
7483		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7484			goto out;
7485
7486		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7487		if (ret)
7488			goto out;
7489
7490		goto again;
7491	}
7492
7493	ret = splice_to_pipe(pipe, &spd);
7494out:
7495	splice_shrink_spd(&spd);
7496
7497	return ret;
7498}
7499
7500static const struct file_operations tracing_buffers_fops = {
7501	.open		= tracing_buffers_open,
7502	.read		= tracing_buffers_read,
7503	.poll		= tracing_buffers_poll,
7504	.release	= tracing_buffers_release,
7505	.splice_read	= tracing_buffers_splice_read,
7506	.llseek		= no_llseek,
7507};
7508
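/*
 * Backs the per_cpu/cpuN/stats file: dumps the ring-buffer counters
 * for one CPU (entries, overruns, bytes, oldest/current timestamps,
 * dropped and read events) as human-readable text.
 */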
7509static ssize_t
7510tracing_stats_read(struct file *filp, char __user *ubuf,
7511		   size_t count, loff_t *ppos)
7512{
7513	struct inode *inode = file_inode(filp);
7514	struct trace_array *tr = inode->i_private;
7515	struct trace_buffer *trace_buf = &tr->trace_buffer;
7516	int cpu = tracing_get_cpu(inode);
7517	struct trace_seq *s;
7518	unsigned long cnt;
7519	unsigned long long t;
7520	unsigned long usec_rem;
7521
7522	s = kmalloc(sizeof(*s), GFP_KERNEL);
7523	if (!s)
7524		return -ENOMEM;
7525
7526	trace_seq_init(s);
7527
7528	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7529	trace_seq_printf(s, "entries: %ld\n", cnt);
7530
7531	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7532	trace_seq_printf(s, "overrun: %ld\n", cnt);
7533
7534	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7535	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7536
7537	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7538	trace_seq_printf(s, "bytes: %ld\n", cnt);
7539
7540	if (trace_clocks[tr->clock_id].in_ns) {
7541		/* local or global for trace_clock */
7542		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7543		usec_rem = do_div(t, USEC_PER_SEC);
7544		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7545								t, usec_rem);
7546
7547		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7548		usec_rem = do_div(t, USEC_PER_SEC);
7549		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7550	} else {
7551		/* counter or tsc mode for trace_clock */
7552		trace_seq_printf(s, "oldest event ts: %llu\n",
7553				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7554
7555		trace_seq_printf(s, "now ts: %llu\n",
7556				ring_buffer_time_stamp(trace_buf->buffer, cpu));
7557	}
7558
7559	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7560	trace_seq_printf(s, "dropped events: %ld\n", cnt);
7561
7562	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7563	trace_seq_printf(s, "read events: %ld\n", cnt);
7564
7565	count = simple_read_from_buffer(ubuf, count, ppos,
7566					s->buffer, trace_seq_used(s));
7567
7568	kfree(s);
7569
7570	return count;
7571}
7572
7573static const struct file_operations tracing_stats_fops = {
7574	.open		= tracing_open_generic_tr,
7575	.read		= tracing_stats_read,
7576	.llseek		= generic_file_llseek,
7577	.release	= tracing_release_generic_tr,
7578};
7579
7580#ifdef CONFIG_DYNAMIC_FTRACE
7581
7582static ssize_t
7583tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7584		  size_t cnt, loff_t *ppos)
7585{
7586	unsigned long *p = filp->private_data;
7587	char buf[64]; /* Not too big for a shallow stack */
7588	int r;
7589
7590	r = scnprintf(buf, 63, "%ld", *p);
7591	buf[r++] = '\n';
7592
7593	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7594}
7595
7596static const struct file_operations tracing_dyn_info_fops = {
7597	.open		= tracing_open_generic,
7598	.read		= tracing_read_dyn_info,
7599	.llseek		= generic_file_llseek,
7600};
7601#endif /* CONFIG_DYNAMIC_FTRACE */
7602
7603#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7604static void
7605ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7606		struct trace_array *tr, struct ftrace_probe_ops *ops,
7607		void *data)
7608{
7609	tracing_snapshot_instance(tr);
7610}
7611
7612static void
7613ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7614		      struct trace_array *tr, struct ftrace_probe_ops *ops,
7615		      void *data)
7616{
7617	struct ftrace_func_mapper *mapper = data;
7618	long *count = NULL;
7619
7620	if (mapper)
7621		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7622
7623	if (count) {
7624
7625		if (*count <= 0)
7626			return;
7627
7628		(*count)--;
7629	}
7630
7631	tracing_snapshot_instance(tr);
7632}
7633
7634static int
7635ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7636		      struct ftrace_probe_ops *ops, void *data)
7637{
7638	struct ftrace_func_mapper *mapper = data;
7639	long *count = NULL;
7640
7641	seq_printf(m, "%ps:", (void *)ip);
7642
7643	seq_puts(m, "snapshot");
7644
7645	if (mapper)
7646		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7647
7648	if (count)
7649		seq_printf(m, ":count=%ld\n", *count);
7650	else
7651		seq_puts(m, ":unlimited\n");
7652
7653	return 0;
7654}
7655
7656static int
7657ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7658		     unsigned long ip, void *init_data, void **data)
7659{
7660	struct ftrace_func_mapper *mapper = *data;
7661
7662	if (!mapper) {
7663		mapper = allocate_ftrace_func_mapper();
7664		if (!mapper)
7665			return -ENOMEM;
7666		*data = mapper;
7667	}
7668
7669	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7670}
7671
7672static void
7673ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7674		     unsigned long ip, void *data)
7675{
7676	struct ftrace_func_mapper *mapper = data;
7677
7678	if (!ip) {
7679		if (!mapper)
7680			return;
7681		free_ftrace_func_mapper(mapper, NULL);
7682		return;
7683	}
7684
7685	ftrace_func_mapper_remove_ip(mapper, ip);
7686}
7687
7688static struct ftrace_probe_ops snapshot_probe_ops = {
7689	.func			= ftrace_snapshot,
7690	.print			= ftrace_snapshot_print,
7691};
7692
7693static struct ftrace_probe_ops snapshot_count_probe_ops = {
7694	.func			= ftrace_count_snapshot,
7695	.print			= ftrace_snapshot_print,
7696	.init			= ftrace_snapshot_init,
7697	.free			= ftrace_snapshot_free,
7698};
7699
7700static int
7701ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7702			       char *glob, char *cmd, char *param, int enable)
7703{
7704	struct ftrace_probe_ops *ops;
7705	void *count = (void *)-1;
7706	char *number;
7707	int ret;
7708
7709	if (!tr)
7710		return -ENODEV;
7711
7712	/* hash funcs only work with set_ftrace_filter */
7713	if (!enable)
7714		return -EINVAL;
7715
7716	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7717
7718	if (glob[0] == '!')
7719		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7720
7721	if (!param)
7722		goto out_reg;
7723
7724	number = strsep(&param, ":");
7725
7726	if (!strlen(number))
7727		goto out_reg;
7728
7729	/*
7730	 * We use the callback data field (which is a pointer)
7731	 * as our counter.
7732	 */
7733	ret = kstrtoul(number, 0, (unsigned long *)&count);
7734	if (ret)
7735		return ret;
7736
7737 out_reg:
7738	ret = tracing_alloc_snapshot_instance(tr);
7739	if (ret < 0)
7740		goto out;
7741
7742	ret = register_ftrace_function_probe(glob, tr, ops, count);
7743
7744 out:
7745	return ret < 0 ? ret : 0;
7746}
7747
7748static struct ftrace_func_command ftrace_snapshot_cmd = {
7749	.name			= "snapshot",
7750	.func			= ftrace_trace_snapshot_callback,
7751};
7752
7753static __init int register_snapshot_cmd(void)
7754{
7755	return register_ftrace_command(&ftrace_snapshot_cmd);
7756}
7757#else
7758static inline __init int register_snapshot_cmd(void) { return 0; }
7759#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7760
7761static struct dentry *tracing_get_dentry(struct trace_array *tr)
7762{
7763	if (WARN_ON(!tr->dir))
7764		return ERR_PTR(-ENODEV);
7765
7766	/* Top directory uses NULL as the parent */
7767	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7768		return NULL;
7769
7770	/* All sub buffers have a descriptor */
7771	return tr->dir;
7772}
7773
7774static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7775{
7776	struct dentry *d_tracer;
7777
7778	if (tr->percpu_dir)
7779		return tr->percpu_dir;
7780
7781	d_tracer = tracing_get_dentry(tr);
7782	if (IS_ERR(d_tracer))
7783		return NULL;
7784
7785	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7786
7787	WARN_ONCE(!tr->percpu_dir,
7788		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7789
7790	return tr->percpu_dir;
7791}
7792
7793static struct dentry *
7794trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7795		      void *data, long cpu, const struct file_operations *fops)
7796{
7797	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7798
7799	if (ret) /* See tracing_get_cpu() */
7800		d_inode(ret)->i_cdev = (void *)(cpu + 1);
7801	return ret;
7802}
7803
7804static void
7805tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7806{
7807	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7808	struct dentry *d_cpu;
7809	char cpu_dir[30]; /* 30 characters should be more than enough */
7810
7811	if (!d_percpu)
7812		return;
7813
7814	snprintf(cpu_dir, 30, "cpu%ld", cpu);
7815	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7816	if (!d_cpu) {
7817		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7818		return;
7819	}
7820
7821	/* per cpu trace_pipe */
7822	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7823				tr, cpu, &tracing_pipe_fops);
7824
7825	/* per cpu trace */
7826	trace_create_cpu_file("trace", 0644, d_cpu,
7827				tr, cpu, &tracing_fops);
7828
7829	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7830				tr, cpu, &tracing_buffers_fops);
7831
7832	trace_create_cpu_file("stats", 0444, d_cpu,
7833				tr, cpu, &tracing_stats_fops);
7834
7835	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7836				tr, cpu, &tracing_entries_fops);
7837
7838#ifdef CONFIG_TRACER_SNAPSHOT
7839	trace_create_cpu_file("snapshot", 0644, d_cpu,
7840				tr, cpu, &snapshot_fops);
7841
7842	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7843				tr, cpu, &snapshot_raw_fops);
7844#endif
7845}
7846
7847#ifdef CONFIG_FTRACE_SELFTEST
7848/* Let selftest have access to static functions in this file */
7849#include "trace_selftest.c"
7850#endif
7851
7852static ssize_t
7853trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7854			loff_t *ppos)
7855{
7856	struct trace_option_dentry *topt = filp->private_data;
7857	char *buf;
7858
7859	if (topt->flags->val & topt->opt->bit)
7860		buf = "1\n";
7861	else
7862		buf = "0\n";
7863
7864	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7865}
7866
7867static ssize_t
7868trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7869			 loff_t *ppos)
7870{
7871	struct trace_option_dentry *topt = filp->private_data;
7872	unsigned long val;
7873	int ret;
7874
7875	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7876	if (ret)
7877		return ret;
7878
7879	if (val != 0 && val != 1)
7880		return -EINVAL;
7881
7882	if (!!(topt->flags->val & topt->opt->bit) != val) {
7883		mutex_lock(&trace_types_lock);
7884		ret = __set_tracer_option(topt->tr, topt->flags,
7885					  topt->opt, !val);
7886		mutex_unlock(&trace_types_lock);
7887		if (ret)
7888			return ret;
7889	}
7890
7891	*ppos += cnt;
7892
7893	return cnt;
7894}
7895
7896
7897static const struct file_operations trace_options_fops = {
7898	.open = tracing_open_generic,
7899	.read = trace_options_read,
7900	.write = trace_options_write,
7901	.llseek	= generic_file_llseek,
7902};
7903
7904/*
7905 * In order to pass in both the trace_array descriptor as well as the index
7906 * to the flag that the trace option file represents, the trace_array
7907 * has a character array of trace_flags_index[], which holds the index
7908 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7909 * The address of this character array is passed to the flag option file
7910 * read/write callbacks.
7911 *
7912 * In order to extract both the index and the trace_array descriptor,
7913 * get_tr_index() uses the following algorithm.
7914 *
7915 *   idx = *ptr;
7916 *
7917 * The pointer passed in is the address of one of those index bytes,
7918 * and the value stored there is the index itself (remember, index[1] == 1).
7919 *
7920 * To get the trace_array descriptor, subtract that index from the
7921 * pointer, which gives the start of the index array itself.
7922 *
7923 *   ptr - idx == &index[0]
7924 *
7925 * Then a simple container_of() from that pointer gets us to the
7926 * trace_array descriptor.
7927 */
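/*
 * Illustrative example: if @data points at tr->trace_flags_index[3],
 * then *pindex == 3 and (data - 3) is &tr->trace_flags_index[0], from
 * which container_of() recovers the enclosing trace_array.
 */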
7928static void get_tr_index(void *data, struct trace_array **ptr,
7929			 unsigned int *pindex)
7930{
7931	*pindex = *(unsigned char *)data;
7932
7933	*ptr = container_of(data - *pindex, struct trace_array,
7934			    trace_flags_index);
7935}
7936
7937static ssize_t
7938trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7939			loff_t *ppos)
7940{
7941	void *tr_index = filp->private_data;
7942	struct trace_array *tr;
7943	unsigned int index;
7944	char *buf;
7945
7946	get_tr_index(tr_index, &tr, &index);
7947
7948	if (tr->trace_flags & (1 << index))
7949		buf = "1\n";
7950	else
7951		buf = "0\n";
7952
7953	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7954}
7955
7956static ssize_t
7957trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7958			 loff_t *ppos)
7959{
7960	void *tr_index = filp->private_data;
7961	struct trace_array *tr;
7962	unsigned int index;
7963	unsigned long val;
7964	int ret;
7965
7966	get_tr_index(tr_index, &tr, &index);
7967
7968	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7969	if (ret)
7970		return ret;
7971
7972	if (val != 0 && val != 1)
7973		return -EINVAL;
7974
7975	mutex_lock(&trace_types_lock);
7976	ret = set_tracer_flag(tr, 1 << index, val);
7977	mutex_unlock(&trace_types_lock);
7978
7979	if (ret < 0)
7980		return ret;
7981
7982	*ppos += cnt;
7983
7984	return cnt;
7985}
7986
7987static const struct file_operations trace_options_core_fops = {
7988	.open = tracing_open_generic,
7989	.read = trace_options_core_read,
7990	.write = trace_options_core_write,
7991	.llseek = generic_file_llseek,
7992};
7993
7994struct dentry *trace_create_file(const char *name,
7995				 umode_t mode,
7996				 struct dentry *parent,
7997				 void *data,
7998				 const struct file_operations *fops)
7999{
8000	struct dentry *ret;
8001
8002	ret = tracefs_create_file(name, mode, parent, data, fops);
8003	if (!ret)
8004		pr_warn("Could not create tracefs '%s' entry\n", name);
8005
8006	return ret;
8007}
8008
8009
8010static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8011{
8012	struct dentry *d_tracer;
8013
8014	if (tr->options)
8015		return tr->options;
8016
8017	d_tracer = tracing_get_dentry(tr);
8018	if (IS_ERR(d_tracer))
8019		return NULL;
8020
8021	tr->options = tracefs_create_dir("options", d_tracer);
8022	if (!tr->options) {
8023		pr_warn("Could not create tracefs directory 'options'\n");
8024		return NULL;
8025	}
8026
8027	return tr->options;
8028}
8029
8030static void
8031create_trace_option_file(struct trace_array *tr,
8032			 struct trace_option_dentry *topt,
8033			 struct tracer_flags *flags,
8034			 struct tracer_opt *opt)
8035{
8036	struct dentry *t_options;
8037
8038	t_options = trace_options_init_dentry(tr);
8039	if (!t_options)
8040		return;
8041
8042	topt->flags = flags;
8043	topt->opt = opt;
8044	topt->tr = tr;
8045
8046	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8047				    &trace_options_fops);
8048
8049}
8050
8051static void
8052create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8053{
8054	struct trace_option_dentry *topts;
8055	struct trace_options *tr_topts;
8056	struct tracer_flags *flags;
8057	struct tracer_opt *opts;
8058	int cnt;
8059	int i;
8060
8061	if (!tracer)
8062		return;
8063
8064	flags = tracer->flags;
8065
8066	if (!flags || !flags->opts)
8067		return;
8068
8069	/*
8070	 * If this is an instance, only create flags for tracers
8071	 * the instance may have.
8072	 */
8073	if (!trace_ok_for_array(tracer, tr))
8074		return;
8075
8076	for (i = 0; i < tr->nr_topts; i++) {
8077		/* Make sure there are no duplicate flags. */
8078		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8079			return;
8080	}
8081
8082	opts = flags->opts;
8083
8084	for (cnt = 0; opts[cnt].name; cnt++)
8085		;
8086
8087	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8088	if (!topts)
8089		return;
8090
8091	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8092			    GFP_KERNEL);
8093	if (!tr_topts) {
8094		kfree(topts);
8095		return;
8096	}
8097
8098	tr->topts = tr_topts;
8099	tr->topts[tr->nr_topts].tracer = tracer;
8100	tr->topts[tr->nr_topts].topts = topts;
8101	tr->nr_topts++;
8102
8103	for (cnt = 0; opts[cnt].name; cnt++) {
8104		create_trace_option_file(tr, &topts[cnt], flags,
8105					 &opts[cnt]);
8106		WARN_ONCE(topts[cnt].entry == NULL,
8107			  "Failed to create trace option: %s",
8108			  opts[cnt].name);
8109	}
8110}
8111
8112static struct dentry *
8113create_trace_option_core_file(struct trace_array *tr,
8114			      const char *option, long index)
8115{
8116	struct dentry *t_options;
8117
8118	t_options = trace_options_init_dentry(tr);
8119	if (!t_options)
8120		return NULL;
8121
8122	return trace_create_file(option, 0644, t_options,
8123				 (void *)&tr->trace_flags_index[index],
8124				 &trace_options_core_fops);
8125}
8126
8127static void create_trace_options_dir(struct trace_array *tr)
8128{
8129	struct dentry *t_options;
8130	bool top_level = tr == &global_trace;
8131	int i;
8132
8133	t_options = trace_options_init_dentry(tr);
8134	if (!t_options)
8135		return;
8136
8137	for (i = 0; trace_options[i]; i++) {
8138		if (top_level ||
8139		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8140			create_trace_option_core_file(tr, trace_options[i], i);
8141	}
8142}
8143
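/*
 * Read/write handlers for the tracing_on file: writing 0 turns the
 * ring buffer off and calls the current tracer's ->stop() hook,
 * writing 1 turns it back on and calls ->start(), and reading reports
 * the current state.
 */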
8144static ssize_t
8145rb_simple_read(struct file *filp, char __user *ubuf,
8146	       size_t cnt, loff_t *ppos)
8147{
8148	struct trace_array *tr = filp->private_data;
8149	char buf[64];
8150	int r;
8151
8152	r = tracer_tracing_is_on(tr);
8153	r = sprintf(buf, "%d\n", r);
8154
8155	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8156}
8157
8158static ssize_t
8159rb_simple_write(struct file *filp, const char __user *ubuf,
8160		size_t cnt, loff_t *ppos)
8161{
8162	struct trace_array *tr = filp->private_data;
8163	struct ring_buffer *buffer = tr->trace_buffer.buffer;
8164	unsigned long val;
8165	int ret;
8166
8167	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8168	if (ret)
8169		return ret;
8170
8171	if (buffer) {
8172		mutex_lock(&trace_types_lock);
8173		if (!!val == tracer_tracing_is_on(tr)) {
8174			val = 0; /* do nothing */
8175		} else if (val) {
8176			tracer_tracing_on(tr);
8177			if (tr->current_trace->start)
8178				tr->current_trace->start(tr);
8179		} else {
8180			tracer_tracing_off(tr);
8181			if (tr->current_trace->stop)
8182				tr->current_trace->stop(tr);
8183		}
8184		mutex_unlock(&trace_types_lock);
8185	}
8186
8187	(*ppos)++;
8188
8189	return cnt;
8190}
8191
8192static const struct file_operations rb_simple_fops = {
8193	.open		= tracing_open_generic_tr,
8194	.read		= rb_simple_read,
8195	.write		= rb_simple_write,
8196	.release	= tracing_release_generic_tr,
8197	.llseek		= default_llseek,
8198};
8199
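/*
 * The buffer_percent file tunes how full the ring buffer must be
 * before waiters (poll/splice readers) are woken; the value written
 * here is later passed to wait_on_pipe() as a percentage.
 */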
8200static ssize_t
8201buffer_percent_read(struct file *filp, char __user *ubuf,
8202		    size_t cnt, loff_t *ppos)
8203{
8204	struct trace_array *tr = filp->private_data;
8205	char buf[64];
8206	int r;
8207
8208	r = tr->buffer_percent;
8209	r = sprintf(buf, "%d\n", r);
8210
8211	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8212}
8213
8214static ssize_t
8215buffer_percent_write(struct file *filp, const char __user *ubuf,
8216		     size_t cnt, loff_t *ppos)
8217{
8218	struct trace_array *tr = filp->private_data;
8219	unsigned long val;
8220	int ret;
8221
8222	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8223	if (ret)
8224		return ret;
8225
8226	if (val > 100)
8227		return -EINVAL;
8228
8229	if (!val)
8230		val = 1;
8231
8232	tr->buffer_percent = val;
8233
8234	(*ppos)++;
8235
8236	return cnt;
8237}
8238
8239static const struct file_operations buffer_percent_fops = {
8240	.open		= tracing_open_generic_tr,
8241	.read		= buffer_percent_read,
8242	.write		= buffer_percent_write,
8243	.release	= tracing_release_generic_tr,
8244	.llseek		= default_llseek,
8245};
8246
8247static struct dentry *trace_instance_dir;
8248
8249static void
8250init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8251
8252static int
8253allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
8254{
8255	enum ring_buffer_flags rb_flags;
8256
8257	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8258
8259	buf->tr = tr;
8260
8261	buf->buffer = ring_buffer_alloc(size, rb_flags);
8262	if (!buf->buffer)
8263		return -ENOMEM;
8264
8265	buf->data = alloc_percpu(struct trace_array_cpu);
8266	if (!buf->data) {
8267		ring_buffer_free(buf->buffer);
8268		buf->buffer = NULL;
8269		return -ENOMEM;
8270	}
8271
8272	/* Allocate the first page for all buffers */
8273	set_buffer_entries(&tr->trace_buffer,
8274			   ring_buffer_size(tr->trace_buffer.buffer, 0));
8275
8276	return 0;
8277}
8278
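/*
 * Allocate the main buffer and, when CONFIG_TRACER_MAX_TRACE is set,
 * the snapshot (max) buffer. Unless a snapshot was requested on the
 * kernel command line, the max buffer starts out at a single page and
 * is only grown once a snapshot is actually requested.
 */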
8279static int allocate_trace_buffers(struct trace_array *tr, int size)
8280{
8281	int ret;
8282
8283	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
8284	if (ret)
8285		return ret;
8286
8287#ifdef CONFIG_TRACER_MAX_TRACE
8288	ret = allocate_trace_buffer(tr, &tr->max_buffer,
8289				    allocate_snapshot ? size : 1);
8290	if (WARN_ON(ret)) {
8291		ring_buffer_free(tr->trace_buffer.buffer);
8292		tr->trace_buffer.buffer = NULL;
8293		free_percpu(tr->trace_buffer.data);
8294		tr->trace_buffer.data = NULL;
8295		return -ENOMEM;
8296	}
8297	tr->allocated_snapshot = allocate_snapshot;
8298
8299	/*
8300	 * Only the top level trace array gets its snapshot allocated
8301	 * from the kernel command line.
8302	 */
8303	allocate_snapshot = false;
8304#endif
8305	return 0;
8306}
8307
8308static void free_trace_buffer(struct trace_buffer *buf)
8309{
8310	if (buf->buffer) {
8311		ring_buffer_free(buf->buffer);
8312		buf->buffer = NULL;
8313		free_percpu(buf->data);
8314		buf->data = NULL;
8315	}
8316}
8317
8318static void free_trace_buffers(struct trace_array *tr)
8319{
8320	if (!tr)
8321		return;
8322
8323	free_trace_buffer(&tr->trace_buffer);
8324
8325#ifdef CONFIG_TRACER_MAX_TRACE
8326	free_trace_buffer(&tr->max_buffer);
8327#endif
8328}
8329
8330static void init_trace_flags_index(struct trace_array *tr)
8331{
8332	int i;
8333
8334	/* Used by the trace options files */
8335	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8336		tr->trace_flags_index[i] = i;
8337}
8338
8339static void __update_tracer_options(struct trace_array *tr)
8340{
8341	struct tracer *t;
8342
8343	for (t = trace_types; t; t = t->next)
8344		add_tracer_options(tr, t);
8345}
8346
8347static void update_tracer_options(struct trace_array *tr)
8348{
8349	mutex_lock(&trace_types_lock);
8350	__update_tracer_options(tr);
8351	mutex_unlock(&trace_types_lock);
8352}
8353
8354struct trace_array *trace_array_create(const char *name)
8355{
8356	struct trace_array *tr;
8357	int ret;
8358
8359	mutex_lock(&event_mutex);
8360	mutex_lock(&trace_types_lock);
8361
8362	ret = -EEXIST;
8363	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8364		if (tr->name && strcmp(tr->name, name) == 0)
8365			goto out_unlock;
8366	}
8367
8368	ret = -ENOMEM;
8369	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8370	if (!tr)
8371		goto out_unlock;
8372
8373	tr->name = kstrdup(name, GFP_KERNEL);
8374	if (!tr->name)
8375		goto out_free_tr;
8376
8377	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8378		goto out_free_tr;
8379
8380	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8381
8382	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8383
8384	raw_spin_lock_init(&tr->start_lock);
8385
8386	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8387
8388	tr->current_trace = &nop_trace;
8389
8390	INIT_LIST_HEAD(&tr->systems);
8391	INIT_LIST_HEAD(&tr->events);
8392	INIT_LIST_HEAD(&tr->hist_vars);
8393	INIT_LIST_HEAD(&tr->err_log);
8394
8395	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8396		goto out_free_tr;
8397
8398	tr->dir = tracefs_create_dir(name, trace_instance_dir);
8399	if (!tr->dir)
8400		goto out_free_tr;
8401
8402	ret = event_trace_add_tracer(tr->dir, tr);
8403	if (ret) {
8404		tracefs_remove_recursive(tr->dir);
8405		goto out_free_tr;
8406	}
8407
8408	ftrace_init_trace_array(tr);
8409
8410	init_tracer_tracefs(tr, tr->dir);
8411	init_trace_flags_index(tr);
8412	__update_tracer_options(tr);
8413
8414	list_add(&tr->list, &ftrace_trace_arrays);
8415
8416	mutex_unlock(&trace_types_lock);
8417	mutex_unlock(&event_mutex);
8418
8419	return tr;
8420
8421 out_free_tr:
8422	free_trace_buffers(tr);
8423	free_cpumask_var(tr->tracing_cpumask);
8424	kfree(tr->name);
8425	kfree(tr);
8426
8427 out_unlock:
8428	mutex_unlock(&trace_types_lock);
8429	mutex_unlock(&event_mutex);
8430
8431	return ERR_PTR(ret);
8432}
8433EXPORT_SYMBOL_GPL(trace_array_create);
8434
8435static int instance_mkdir(const char *name)
8436{
8437	return PTR_ERR_OR_ZERO(trace_array_create(name));
8438}
8439
8440static int __remove_instance(struct trace_array *tr)
8441{
8442	int i;
8443
8444	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
8445		return -EBUSY;
8446
8447	list_del(&tr->list);
8448
8449	/* Disable all the flags that were enabled coming in */
8450	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8451		if ((1 << i) & ZEROED_TRACE_FLAGS)
8452			set_tracer_flag(tr, 1 << i, 0);
8453	}
8454
8455	tracing_set_nop(tr);
8456	clear_ftrace_function_probes(tr);
8457	event_trace_del_tracer(tr);
8458	ftrace_clear_pids(tr);
8459	ftrace_destroy_function_files(tr);
8460	tracefs_remove_recursive(tr->dir);
8461	free_trace_buffers(tr);
8462
8463	for (i = 0; i < tr->nr_topts; i++) {
8464		kfree(tr->topts[i].topts);
8465	}
8466	kfree(tr->topts);
8467
8468	free_cpumask_var(tr->tracing_cpumask);
8469	kfree(tr->name);
8470	kfree(tr);
8471	tr = NULL;
8472
8473	return 0;
8474}
8475
8476int trace_array_destroy(struct trace_array *tr)
8477{
8478	int ret;
8479
8480	if (!tr)
8481		return -EINVAL;
8482
8483	mutex_lock(&event_mutex);
8484	mutex_lock(&trace_types_lock);
8485
8486	ret = __remove_instance(tr);
8487
8488	mutex_unlock(&trace_types_lock);
8489	mutex_unlock(&event_mutex);
8490
8491	return ret;
8492}
8493EXPORT_SYMBOL_GPL(trace_array_destroy);
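/*
 * A minimal, illustrative sketch (not taken from this file) of how an
 * external module might use the two exported helpers above to own a
 * private trace instance.  The module, the instance name and the header
 * providing the prototypes are assumptions here; error handling is kept
 * to the bare minimum.
 */
#include <linux/module.h>
#include <linux/trace.h>

static struct trace_array *sample_tr;

static int __init sample_instance_init(void)
{
	sample_tr = trace_array_create("sample_instance");
	return PTR_ERR_OR_ZERO(sample_tr);
}

static void __exit sample_instance_exit(void)
{
	/* Returns -EBUSY if the instance still has open references. */
	trace_array_destroy(sample_tr);
}

module_init(sample_instance_init);
module_exit(sample_instance_exit);
MODULE_LICENSE("GPL");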
8494
8495static int instance_rmdir(const char *name)
8496{
8497	struct trace_array *tr;
8498	int ret;
8499
8500	mutex_lock(&event_mutex);
8501	mutex_lock(&trace_types_lock);
8502
8503	ret = -ENODEV;
8504	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8505		if (tr->name && strcmp(tr->name, name) == 0) {
8506			ret = __remove_instance(tr);
8507			break;
8508		}
8509	}
8510
8511	mutex_unlock(&trace_types_lock);
8512	mutex_unlock(&event_mutex);
8513
8514	return ret;
8515}
8516
8517static __init void create_trace_instances(struct dentry *d_tracer)
8518{
8519	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8520							 instance_mkdir,
8521							 instance_rmdir);
8522	if (WARN_ON(!trace_instance_dir))
8523		return;
8524}
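/*
 * User-space view of the two callbacks registered above: with tracefs
 * mounted in the usual place, creating or removing a directory under
 * "instances" ends up in instance_mkdir()/instance_rmdir(), e.g.
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	rmdir /sys/kernel/tracing/instances/foo
 *
 * ("foo" is an arbitrary name; the rmdir fails with -EBUSY while the
 *  instance is still in use, matching __remove_instance() above.)
 */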
8525
8526static void
8527init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8528{
8529	struct trace_event_file *file;
8530	int cpu;
8531
8532	trace_create_file("available_tracers", 0444, d_tracer,
8533			tr, &show_traces_fops);
8534
8535	trace_create_file("current_tracer", 0644, d_tracer,
8536			tr, &set_tracer_fops);
8537
8538	trace_create_file("tracing_cpumask", 0644, d_tracer,
8539			  tr, &tracing_cpumask_fops);
8540
8541	trace_create_file("trace_options", 0644, d_tracer,
8542			  tr, &tracing_iter_fops);
8543
8544	trace_create_file("trace", 0644, d_tracer,
8545			  tr, &tracing_fops);
8546
8547	trace_create_file("trace_pipe", 0444, d_tracer,
8548			  tr, &tracing_pipe_fops);
8549
8550	trace_create_file("buffer_size_kb", 0644, d_tracer,
8551			  tr, &tracing_entries_fops);
8552
8553	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8554			  tr, &tracing_total_entries_fops);
8555
8556	trace_create_file("free_buffer", 0200, d_tracer,
8557			  tr, &tracing_free_buffer_fops);
8558
8559	trace_create_file("trace_marker", 0220, d_tracer,
8560			  tr, &tracing_mark_fops);
8561
8562	file = __find_event_file(tr, "ftrace", "print");
8563	if (file && file->dir)
8564		trace_create_file("trigger", 0644, file->dir, file,
8565				  &event_trigger_fops);
8566	tr->trace_marker_file = file;
8567
8568	trace_create_file("trace_marker_raw", 0220, d_tracer,
8569			  tr, &tracing_mark_raw_fops);
8570
8571	trace_create_file("trace_clock", 0644, d_tracer, tr,
8572			  &trace_clock_fops);
8573
8574	trace_create_file("tracing_on", 0644, d_tracer,
8575			  tr, &rb_simple_fops);
8576
8577	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8578			  &trace_time_stamp_mode_fops);
8579
8580	tr->buffer_percent = 50;
8581
8582	trace_create_file("buffer_percent", 0444, d_tracer,
8583			tr, &buffer_percent_fops);
8584
8585	create_trace_options_dir(tr);
8586
8587#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8588	trace_create_file("tracing_max_latency", 0644, d_tracer,
8589			&tr->max_latency, &tracing_max_lat_fops);
8590#endif
8591
8592	if (ftrace_create_function_files(tr, d_tracer))
8593		WARN(1, "Could not allocate function filter files");
8594
8595#ifdef CONFIG_TRACER_SNAPSHOT
8596	trace_create_file("snapshot", 0644, d_tracer,
8597			  tr, &snapshot_fops);
8598#endif
8599
8600	trace_create_file("error_log", 0644, d_tracer,
8601			  tr, &tracing_err_log_fops);
8602
8603	for_each_tracing_cpu(cpu)
8604		tracing_init_tracefs_percpu(tr, cpu);
8605
8606	ftrace_init_tracefs(tr, d_tracer);
8607}
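/*
 * The result of the calls above, seen from user space (the path assumes
 * the usual tracefs mount point and an instance named "foo"; listing
 * trimmed to a few of the files created by this function):
 *
 *	# ls /sys/kernel/tracing/instances/foo
 *	available_tracers  buffer_size_kb  current_tracer  trace
 *	trace_clock        trace_marker    trace_pipe      tracing_on  ...
 */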
8608
8609static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
8610{
8611	struct vfsmount *mnt;
8612	struct file_system_type *type;
8613
8614	/*
8615	 * To maintain backward compatibility for tools that mount
8616	 * debugfs to get to the tracing facility, tracefs is automatically
8617	 * mounted to the debugfs/tracing directory.
8618	 */
8619	type = get_fs_type("tracefs");
8620	if (!type)
8621		return NULL;
8622	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8623	put_filesystem(type);
8624	if (IS_ERR(mnt))
8625		return NULL;
8626	mntget(mnt);
8627
8628	return mnt;
8629}
8630
8631/**
8632 * tracing_init_dentry - initialize top level trace array
8633 *
8634 * This is called when creating files or directories in the tracing
8635 * directory. It is called via fs_initcall() by the boot-up code and is
8636 * expected to return the dentry of the top level tracing directory.
8637 */
8638struct dentry *tracing_init_dentry(void)
8639{
8640	struct trace_array *tr = &global_trace;
8641
8642	/* The top level trace array uses NULL as its parent */
8643	if (tr->dir)
8644		return NULL;
8645
8646	if (WARN_ON(!tracefs_initialized()) ||
8647		(IS_ENABLED(CONFIG_DEBUG_FS) &&
8648		 WARN_ON(!debugfs_initialized())))
8649		return ERR_PTR(-ENODEV);
8650
8651	/*
8652	 * As there may still be users that expect the tracing
8653	 * files to exist in debugfs/tracing, we must automount
8654	 * the tracefs file system there, so older tools still
8655	 * work with the newer kernel.
8656	 */
8657	tr->dir = debugfs_create_automount("tracing", NULL,
8658					   trace_automount, NULL);
8659
8660	return NULL;
8661}
8662
8663extern struct trace_eval_map *__start_ftrace_eval_maps[];
8664extern struct trace_eval_map *__stop_ftrace_eval_maps[];
8665
8666static void __init trace_eval_init(void)
8667{
8668	int len;
8669
8670	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8671	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8672}
8673
8674#ifdef CONFIG_MODULES
8675static void trace_module_add_evals(struct module *mod)
8676{
8677	if (!mod->num_trace_evals)
8678		return;
8679
8680	/*
8681	 * Modules with bad taint do not have events created, do
8682	 * not bother with enums either.
8683	 */
8684	if (trace_module_has_bad_taint(mod))
8685		return;
8686
8687	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8688}
8689
8690#ifdef CONFIG_TRACE_EVAL_MAP_FILE
8691static void trace_module_remove_evals(struct module *mod)
8692{
8693	union trace_eval_map_item *map;
8694	union trace_eval_map_item **last = &trace_eval_maps;
8695
8696	if (!mod->num_trace_evals)
8697		return;
8698
8699	mutex_lock(&trace_eval_mutex);
8700
8701	map = trace_eval_maps;
8702
8703	while (map) {
8704		if (map->head.mod == mod)
8705			break;
8706		map = trace_eval_jmp_to_tail(map);
8707		last = &map->tail.next;
8708		map = map->tail.next;
8709	}
8710	if (!map)
8711		goto out;
8712
8713	*last = trace_eval_jmp_to_tail(map)->tail.next;
8714	kfree(map);
8715 out:
8716	mutex_unlock(&trace_eval_mutex);
8717}
8718#else
8719static inline void trace_module_remove_evals(struct module *mod) { }
8720#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8721
8722static int trace_module_notify(struct notifier_block *self,
8723			       unsigned long val, void *data)
8724{
8725	struct module *mod = data;
8726
8727	switch (val) {
8728	case MODULE_STATE_COMING:
8729		trace_module_add_evals(mod);
8730		break;
8731	case MODULE_STATE_GOING:
8732		trace_module_remove_evals(mod);
8733		break;
8734	}
8735
8736	return 0;
8737}
8738
8739static struct notifier_block trace_module_nb = {
8740	.notifier_call = trace_module_notify,
8741	.priority = 0,
8742};
8743#endif /* CONFIG_MODULES */
8744
8745static __init int tracer_init_tracefs(void)
8746{
8747	struct dentry *d_tracer;
8748
8749	trace_access_lock_init();
8750
8751	d_tracer = tracing_init_dentry();
8752	if (IS_ERR(d_tracer))
8753		return 0;
8754
8755	event_trace_init();
8756
8757	init_tracer_tracefs(&global_trace, d_tracer);
8758	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8759
8760	trace_create_file("tracing_thresh", 0644, d_tracer,
8761			&global_trace, &tracing_thresh_fops);
8762
8763	trace_create_file("README", 0444, d_tracer,
8764			NULL, &tracing_readme_fops);
8765
8766	trace_create_file("saved_cmdlines", 0444, d_tracer,
8767			NULL, &tracing_saved_cmdlines_fops);
8768
8769	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8770			  NULL, &tracing_saved_cmdlines_size_fops);
8771
8772	trace_create_file("saved_tgids", 0444, d_tracer,
8773			NULL, &tracing_saved_tgids_fops);
8774
8775	trace_eval_init();
8776
8777	trace_create_eval_file(d_tracer);
8778
8779#ifdef CONFIG_MODULES
8780	register_module_notifier(&trace_module_nb);
8781#endif
8782
8783#ifdef CONFIG_DYNAMIC_FTRACE
8784	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8785			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8786#endif
8787
8788	create_trace_instances(d_tracer);
8789
8790	update_tracer_options(&global_trace);
8791
8792	return 0;
8793}
8794
8795static int trace_panic_handler(struct notifier_block *this,
8796			       unsigned long event, void *unused)
8797{
8798	if (ftrace_dump_on_oops)
8799		ftrace_dump(ftrace_dump_on_oops);
8800	return NOTIFY_OK;
8801}
8802
8803static struct notifier_block trace_panic_notifier = {
8804	.notifier_call  = trace_panic_handler,
8805	.next           = NULL,
8806	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
8807};
8808
8809static int trace_die_handler(struct notifier_block *self,
8810			     unsigned long val,
8811			     void *data)
8812{
8813	switch (val) {
8814	case DIE_OOPS:
8815		if (ftrace_dump_on_oops)
8816			ftrace_dump(ftrace_dump_on_oops);
8817		break;
8818	default:
8819		break;
8820	}
8821	return NOTIFY_OK;
8822}
8823
8824static struct notifier_block trace_die_notifier = {
8825	.notifier_call = trace_die_handler,
8826	.priority = 200
8827};
8828
8829/*
8830 * printk is limited to a max of 1024 characters; we really don't need it that big.
8831 * Nothing should be printing 1000 characters anyway.
8832 */
8833#define TRACE_MAX_PRINT		1000
8834
8835/*
8836 * Define here KERN_TRACE so that we have one place to modify
8837 * it if we decide to change what log level the ftrace dump
8838 * should be at.
8839 */
8840#define KERN_TRACE		KERN_EMERG
8841
8842void
8843trace_printk_seq(struct trace_seq *s)
8844{
8845	/* Probably should print a warning here. */
8846	if (s->seq.len >= TRACE_MAX_PRINT)
8847		s->seq.len = TRACE_MAX_PRINT;
8848
8849	/*
8850	 * More paranoid code. Although the buffer size is set to
8851	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8852	 * an extra layer of protection.
8853	 */
8854	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8855		s->seq.len = s->seq.size - 1;
8856
8857	/* Should already be zero terminated, but we are paranoid. */
8858	s->buffer[s->seq.len] = 0;
8859
8860	printk(KERN_TRACE "%s", s->buffer);
8861
8862	trace_seq_init(s);
8863}
8864
8865void trace_init_global_iter(struct trace_iterator *iter)
8866{
8867	iter->tr = &global_trace;
8868	iter->trace = iter->tr->current_trace;
8869	iter->cpu_file = RING_BUFFER_ALL_CPUS;
8870	iter->trace_buffer = &global_trace.trace_buffer;
8871
8872	if (iter->trace && iter->trace->open)
8873		iter->trace->open(iter);
8874
8875	/* Annotate start of buffers if we had overruns */
8876	if (ring_buffer_overruns(iter->trace_buffer->buffer))
8877		iter->iter_flags |= TRACE_FILE_ANNOTATE;
8878
8879	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
8880	if (trace_clocks[iter->tr->clock_id].in_ns)
8881		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
8882}
8883
8884void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8885{
8886	/* use static because iter can be a bit big for the stack */
8887	static struct trace_iterator iter;
8888	static atomic_t dump_running;
8889	struct trace_array *tr = &global_trace;
8890	unsigned int old_userobj;
8891	unsigned long flags;
8892	int cnt = 0, cpu;
8893
8894	/* Only allow one dump user at a time. */
8895	if (atomic_inc_return(&dump_running) != 1) {
8896		atomic_dec(&dump_running);
8897		return;
8898	}
8899
8900	/*
8901	 * Always turn off tracing when we dump.
8902	 * We don't need to show trace output of what happens
8903	 * between multiple crashes.
8904	 *
8905	 * If the user does a sysrq-z, then they can re-enable
8906	 * tracing with echo 1 > tracing_on.
8907	 */
8908	tracing_off();
8909
8910	local_irq_save(flags);
8911	printk_nmi_direct_enter();
8912
8913	/* Simulate the iterator */
8914	trace_init_global_iter(&iter);
8915
8916	for_each_tracing_cpu(cpu) {
8917		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8918	}
8919
8920	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8921
8922	/* don't look at user memory in panic mode */
8923	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8924
8925	switch (oops_dump_mode) {
8926	case DUMP_ALL:
8927		iter.cpu_file = RING_BUFFER_ALL_CPUS;
8928		break;
8929	case DUMP_ORIG:
8930		iter.cpu_file = raw_smp_processor_id();
8931		break;
8932	case DUMP_NONE:
8933		goto out_enable;
8934	default:
8935		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8936		iter.cpu_file = RING_BUFFER_ALL_CPUS;
8937	}
8938
8939	printk(KERN_TRACE "Dumping ftrace buffer:\n");
8940
8941	/* Did function tracer already get disabled? */
8942	if (ftrace_is_dead()) {
8943		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8944		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
8945	}
8946
8947	/*
8948	 * We need to stop all tracing on all CPUs to read
8949	 * the next buffer. This is a bit expensive, but is
8950	 * not done often. We read all that we can,
8951	 * and then release the locks again.
8952	 */
8953
8954	while (!trace_empty(&iter)) {
8955
8956		if (!cnt)
8957			printk(KERN_TRACE "---------------------------------\n");
8958
8959		cnt++;
8960
8961		trace_iterator_reset(&iter);
8962		iter.iter_flags |= TRACE_FILE_LAT_FMT;
8963
8964		if (trace_find_next_entry_inc(&iter) != NULL) {
8965			int ret;
8966
8967			ret = print_trace_line(&iter);
8968			if (ret != TRACE_TYPE_NO_CONSUME)
8969				trace_consume(&iter);
8970		}
8971		touch_nmi_watchdog();
8972
8973		trace_printk_seq(&iter.seq);
8974	}
8975
8976	if (!cnt)
8977		printk(KERN_TRACE "   (ftrace buffer empty)\n");
8978	else
8979		printk(KERN_TRACE "---------------------------------\n");
8980
8981 out_enable:
8982	tr->trace_flags |= old_userobj;
8983
8984	for_each_tracing_cpu(cpu) {
8985		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8986	}
8987	atomic_dec(&dump_running);
8988	printk_nmi_direct_exit();
8989	local_irq_restore(flags);
8990}
8991EXPORT_SYMBOL_GPL(ftrace_dump);
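/*
 * A minimal, illustrative sketch of invoking the dumper above from another
 * subsystem's last-resort error path; the function and the trigger
 * condition are hypothetical.  DUMP_ALL prints every CPU's buffer,
 * DUMP_ORIG only the CPU that called in (see the switch on oops_dump_mode
 * above).
 */
static void sample_fatal_path(void)
{
	/* Flush the ring buffers to the console before the state is lost. */
	ftrace_dump(DUMP_ALL);
}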
8992
8993int trace_run_command(const char *buf, int (*createfn)(int, char **))
8994{
8995	char **argv;
8996	int argc, ret;
8997
8998	argc = 0;
8999	ret = 0;
9000	argv = argv_split(GFP_KERNEL, buf, &argc);
9001	if (!argv)
9002		return -ENOMEM;
9003
9004	if (argc)
9005		ret = createfn(argc, argv);
9006
9007	argv_free(argv);
9008
9009	return ret;
9010}
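/*
 * A minimal, illustrative sketch of a createfn callback as consumed by
 * trace_run_command() above; the command syntax ("name value") and the
 * function name are hypothetical.  argv[] comes straight from
 * argv_split(), so argv[0] is the first word of the command line.
 */
static int sample_create_cmd(int argc, char **argv)
{
	if (argc < 2)
		return -EINVAL;

	pr_info("sample command: %s -> %s\n", argv[0], argv[1]);
	return 0;
}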
9011
9012#define WRITE_BUFSIZE  4096
9013
9014ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9015				size_t count, loff_t *ppos,
9016				int (*createfn)(int, char **))
9017{
9018	char *kbuf, *buf, *tmp;
9019	int ret = 0;
9020	size_t done = 0;
9021	size_t size;
9022
9023	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9024	if (!kbuf)
9025		return -ENOMEM;
9026
9027	while (done < count) {
9028		size = count - done;
9029
9030		if (size >= WRITE_BUFSIZE)
9031			size = WRITE_BUFSIZE - 1;
9032
9033		if (copy_from_user(kbuf, buffer + done, size)) {
9034			ret = -EFAULT;
9035			goto out;
9036		}
9037		kbuf[size] = '\0';
9038		buf = kbuf;
9039		do {
9040			tmp = strchr(buf, '\n');
9041			if (tmp) {
9042				*tmp = '\0';
9043				size = tmp - buf + 1;
9044			} else {
9045				size = strlen(buf);
9046				if (done + size < count) {
9047					if (buf != kbuf)
9048						break;
9049					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9050					pr_warn("Line length is too long: Should be less than %d\n",
9051						WRITE_BUFSIZE - 2);
9052					ret = -EINVAL;
9053					goto out;
9054				}
9055			}
9056			done += size;
9057
9058			/* Remove comments */
9059			tmp = strchr(buf, '#');
9060
9061			if (tmp)
9062				*tmp = '\0';
9063
9064			ret = trace_run_command(buf, createfn);
9065			if (ret)
9066				goto out;
9067			buf += size;
9068
9069		} while (done < count);
9070	}
9071	ret = done;
9072
9073out:
9074	kfree(kbuf);
9075
9076	return ret;
9077}
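/*
 * A minimal, illustrative sketch of feeding writes to a tracefs control
 * file through trace_parse_run_command(), reusing the hypothetical
 * sample_create_cmd() above.  Dynamic-event style control files are wired
 * up in roughly this shape; the fops and function names here are not from
 * this file.  Each '\n'-terminated line of the write is split and handed
 * to the createfn.
 */
static ssize_t sample_cmd_write(struct file *filp, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	return trace_parse_run_command(filp, buffer, count, ppos,
				       sample_create_cmd);
}

static const struct file_operations sample_cmd_fops = {
	.write		= sample_cmd_write,
	.llseek		= default_llseek,
};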
9078
9079__init static int tracer_alloc_buffers(void)
9080{
9081	int ring_buf_size;
9082	int ret = -ENOMEM;
9083
9084	/*
9085	 * Make sure we don't accidentally add more trace options
9086	 * than we have bits for.
9087	 */
9088	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9089
9090	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9091		goto out;
9092
9093	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9094		goto out_free_buffer_mask;
9095
9096	/* Only allocate trace_printk buffers if a trace_printk exists */
9097	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
9098		/* Must be called before global_trace.buffer is allocated */
9099		trace_printk_init_buffers();
9100
9101	/* To save memory, keep the ring buffer size to its minimum */
9102	if (ring_buffer_expanded)
9103		ring_buf_size = trace_buf_size;
9104	else
9105		ring_buf_size = 1;
9106
9107	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9108	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9109
9110	raw_spin_lock_init(&global_trace.start_lock);
9111
9112	/*
9113	 * The prepare callback allocates some memory for the ring buffer. We
9114	 * don't free the buffer if the CPU goes down. If we were to free
9115	 * the buffer, then the user would lose any trace that was in the
9116	 * buffer. The memory will be removed once the "instance" is removed.
9117	 */
9118	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9119				      "trace/RB:prepare", trace_rb_cpu_prepare,
9120				      NULL);
9121	if (ret < 0)
9122		goto out_free_cpumask;
9123	/* Used for event triggers */
9124	ret = -ENOMEM;
9125	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9126	if (!temp_buffer)
9127		goto out_rm_hp_state;
9128
9129	if (trace_create_savedcmd() < 0)
9130		goto out_free_temp_buffer;
9131
9132	/* TODO: make the number of buffers hot pluggable with CPUs */
9133	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9134		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
9135		WARN_ON(1);
9136		goto out_free_savedcmd;
9137	}
9138
9139	if (global_trace.buffer_disabled)
9140		tracing_off();
9141
9142	if (trace_boot_clock) {
9143		ret = tracing_set_clock(&global_trace, trace_boot_clock);
9144		if (ret < 0)
9145			pr_warn("Trace clock %s not defined, going back to default\n",
9146				trace_boot_clock);
9147	}
9148
9149	/*
9150	 * register_tracer() might reference current_trace, so it
9151	 * needs to be set before we register anything. This is
9152	 * just a bootstrap of current_trace anyway.
9153	 */
9154	global_trace.current_trace = &nop_trace;
9155
9156	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9157
9158	ftrace_init_global_array_ops(&global_trace);
9159
9160	init_trace_flags_index(&global_trace);
9161
9162	register_tracer(&nop_trace);
9163
9164	/* Function tracing may start here (via kernel command line) */
9165	init_function_trace();
9166
9167	/* All seems OK, enable tracing */
9168	tracing_disabled = 0;
9169
9170	atomic_notifier_chain_register(&panic_notifier_list,
9171				       &trace_panic_notifier);
9172
9173	register_die_notifier(&trace_die_notifier);
9174
9175	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9176
9177	INIT_LIST_HEAD(&global_trace.systems);
9178	INIT_LIST_HEAD(&global_trace.events);
9179	INIT_LIST_HEAD(&global_trace.hist_vars);
9180	INIT_LIST_HEAD(&global_trace.err_log);
9181	list_add(&global_trace.list, &ftrace_trace_arrays);
9182
9183	apply_trace_boot_options();
9184
9185	register_snapshot_cmd();
9186
9187	return 0;
9188
9189out_free_savedcmd:
9190	free_saved_cmdlines_buffer(savedcmd);
9191out_free_temp_buffer:
9192	ring_buffer_free(temp_buffer);
9193out_rm_hp_state:
9194	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9195out_free_cpumask:
9196	free_cpumask_var(global_trace.tracing_cpumask);
9197out_free_buffer_mask:
9198	free_cpumask_var(tracing_buffer_mask);
9199out:
9200	return ret;
9201}
9202
9203void __init early_trace_init(void)
9204{
9205	if (tracepoint_printk) {
9206		tracepoint_print_iter =
9207			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9208		if (WARN_ON(!tracepoint_print_iter))
9209			tracepoint_printk = 0;
9210		else
9211			static_key_enable(&tracepoint_printk_key.key);
9212	}
9213	tracer_alloc_buffers();
9214}
9215
9216void __init trace_init(void)
9217{
9218	trace_event_init();
9219}
9220
9221__init static int clear_boot_tracer(void)
9222{
9223	/*
9224	 * The default bootup tracer name is kept in an init section
9225	 * buffer. This function is called at late_initcall time. If we did
9226	 * not find the boot tracer by then, clear the pointer to prevent
9227	 * a later registration from accessing the buffer that is
9228	 * about to be freed.
9229	 */
9230	if (!default_bootup_tracer)
9231		return 0;
9232
9233	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9234	       default_bootup_tracer);
9235	default_bootup_tracer = NULL;
9236
9237	return 0;
9238}
9239
9240fs_initcall(tracer_init_tracefs);
9241late_initcall_sync(clear_boot_tracer);
9242
9243#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9244__init static int tracing_set_default_clock(void)
9245{
9246	/* sched_clock_stable() is determined in late_initcall */
9247	if (!trace_boot_clock && !sched_clock_stable()) {
9248		printk(KERN_WARNING
9249		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
9250		       "If you want to keep using the local clock, then add:\n"
9251		       "  \"trace_clock=local\"\n"
9252		       "on the kernel command line\n");
9253		tracing_set_clock(&global_trace, "global");
9254	}
9255
9256	return 0;
9257}
9258late_initcall_sync(tracing_set_default_clock);
9259#endif