   1/*
   2 * ring buffer based function tracer
   3 *
   4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
   5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
   6 *
   7 * Originally taken from the RT patch by:
   8 *    Arnaldo Carvalho de Melo <acme@redhat.com>
   9 *
  10 * Based on code from the latency_tracer, that is:
  11 *  Copyright (C) 2004-2006 Ingo Molnar
  12 *  Copyright (C) 2004 Nadia Yvette Chambers
  13 */
  14#include <linux/ring_buffer.h>
  15#include <generated/utsrelease.h>
  16#include <linux/stacktrace.h>
  17#include <linux/writeback.h>
  18#include <linux/kallsyms.h>
  19#include <linux/seq_file.h>
  20#include <linux/notifier.h>
  21#include <linux/irqflags.h>
  22#include <linux/debugfs.h>
  23#include <linux/pagemap.h>
  24#include <linux/hardirq.h>
  25#include <linux/linkage.h>
  26#include <linux/uaccess.h>
  27#include <linux/kprobes.h>
  28#include <linux/ftrace.h>
  29#include <linux/module.h>
  30#include <linux/percpu.h>
  31#include <linux/splice.h>
  32#include <linux/kdebug.h>
  33#include <linux/string.h>
  34#include <linux/rwsem.h>
  35#include <linux/slab.h>
  36#include <linux/ctype.h>
  37#include <linux/init.h>
  38#include <linux/poll.h>
  39#include <linux/nmi.h>
  40#include <linux/fs.h>
  41#include <linux/sched/rt.h>
  42
  43#include "trace.h"
  44#include "trace_output.h"
  45
  46/*
  47 * On boot up, the ring buffer is set to the minimum size, so that
  48 * we do not waste memory on systems that are not using tracing.
  49 */
  50bool ring_buffer_expanded;
  51
  52/*
  53 * We need to change this state when a selftest is running.
   54 * A selftest will look into the ring buffer to count the
   55 * entries inserted during the selftest, although some concurrent
   56 * insertions into the ring buffer, such as trace_printk(), could occur
   57 * at the same time, giving false positive or negative results.
  58 */
  59static bool __read_mostly tracing_selftest_running;
  60
  61/*
  62 * If a tracer is running, we do not want to run SELFTEST.
  63 */
  64bool __read_mostly tracing_selftest_disabled;
  65
  66/* For tracers that don't implement custom flags */
  67static struct tracer_opt dummy_tracer_opt[] = {
  68	{ }
  69};
  70
  71static struct tracer_flags dummy_tracer_flags = {
  72	.val = 0,
  73	.opts = dummy_tracer_opt
  74};
  75
  76static int
  77dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
  78{
  79	return 0;
  80}
  81
  82/*
  83 * To prevent the comm cache from being overwritten when no
  84 * tracing is active, only save the comm when a trace event
  85 * occurred.
  86 */
  87static DEFINE_PER_CPU(bool, trace_cmdline_save);
  88
  89/*
  90 * Kill all tracing for good (never come back).
  91 * It is initialized to 1 but will turn to zero if the initialization
  92 * of the tracer is successful. But that is the only place that sets
  93 * this back to zero.
  94 */
  95static int tracing_disabled = 1;
  96
  97DEFINE_PER_CPU(int, ftrace_cpu_disabled);
  98
  99cpumask_var_t __read_mostly	tracing_buffer_mask;
 100
 101/*
 102 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 103 *
 104 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 105 * is set, then ftrace_dump is called. This will output the contents
 106 * of the ftrace buffers to the console.  This is very useful for
  107 * capturing traces that lead to crashes and outputting them to a
  108 * serial console.
 109 *
  110 * It defaults to off, but you can enable it either by specifying
  111 * "ftrace_dump_on_oops" on the kernel command line, or by setting
  112 * /proc/sys/kernel/ftrace_dump_on_oops.
  113 * Set it to 1 if you want to dump the buffers of all CPUs.
  114 * Set it to 2 if you want to dump the buffer of the CPU that triggered the oops.
 115 */
 116
 117enum ftrace_dump_mode ftrace_dump_on_oops;
 118
 119/* When set, tracing will stop when a WARN*() is hit */
 120int __disable_trace_on_warning;
 121
 122static int tracing_set_tracer(struct trace_array *tr, const char *buf);
 123
 124#define MAX_TRACER_SIZE		100
 125static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 126static char *default_bootup_tracer;
 127
 128static bool allocate_snapshot;
 129
 130static int __init set_cmdline_ftrace(char *str)
 131{
 132	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 133	default_bootup_tracer = bootup_tracer_buf;
 134	/* We are using ftrace early, expand it */
 135	ring_buffer_expanded = true;
 136	return 1;
 137}
 138__setup("ftrace=", set_cmdline_ftrace);
 139
 140static int __init set_ftrace_dump_on_oops(char *str)
 141{
 142	if (*str++ != '=' || !*str) {
 143		ftrace_dump_on_oops = DUMP_ALL;
 144		return 1;
 145	}
 146
 147	if (!strcmp("orig_cpu", str)) {
 148		ftrace_dump_on_oops = DUMP_ORIG;
  149		return 1;
  150	}
  151
  152	return 0;
 153}
 154__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 155
 156static int __init stop_trace_on_warning(char *str)
 157{
 158	__disable_trace_on_warning = 1;
 159	return 1;
 160}
 161__setup("traceoff_on_warning=", stop_trace_on_warning);
 162
 163static int __init boot_alloc_snapshot(char *str)
 164{
 165	allocate_snapshot = true;
 166	/* We also need the main ring buffer expanded */
 167	ring_buffer_expanded = true;
 168	return 1;
 169}
 170__setup("alloc_snapshot", boot_alloc_snapshot);
 171
 172
 173static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
 174static char *trace_boot_options __initdata;
 175
 176static int __init set_trace_boot_options(char *str)
 177{
 178	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
 179	trace_boot_options = trace_boot_options_buf;
 180	return 0;
 181}
 182__setup("trace_options=", set_trace_boot_options);
 183
 184static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
 185static char *trace_boot_clock __initdata;
 186
 187static int __init set_trace_boot_clock(char *str)
 188{
 189	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
 190	trace_boot_clock = trace_boot_clock_buf;
 191	return 0;
 192}
 193__setup("trace_clock=", set_trace_boot_clock);
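/*
 * Editor's note (illustrative, not from the original file): the boot
 * parameters registered above can be combined on the kernel command line.
 * A hypothetical example, using the "function" tracer and the "global"
 * clock (any registered tracer/clock name works, and trace_options= is
 * expected to take a comma-separated list):
 *
 *	ftrace=function trace_options=sym-offset,stacktrace trace_clock=global
 *	ftrace_dump_on_oops=orig_cpu alloc_snapshot trace_buf_size=4096k
 *
 * trace_buf_size= is handled by set_buf_size() further down in this file.
 */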
 194
 195
 196unsigned long long ns2usecs(cycle_t nsec)
 197{
 198	nsec += 500;
 199	do_div(nsec, 1000);
 200	return nsec;
 201}
 202
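/*
 * Editor's note: ns2usecs() rounds to the nearest microsecond because of
 * the "+ 500" before the divide, e.g.:
 *
 *	ns2usecs(1499) == 1	(1499 + 500 = 1999; 1999 / 1000 = 1)
 *	ns2usecs(1500) == 2	(1500 + 500 = 2000; 2000 / 1000 = 2)
 *
 * do_div() is used rather than a plain '/' so that the 64-bit division
 * also works on 32-bit architectures.
 */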
 203/*
 204 * The global_trace is the descriptor that holds the tracing
 205 * buffers for the live tracing. For each CPU, it contains
  206 * a linked list of pages that will store trace entries. The
  207 * page descriptor of the pages in memory is used to hold
  208 * the linked list by linking the lru item in the page descriptor
 209 * to each of the pages in the buffer per CPU.
 210 *
 211 * For each active CPU there is a data field that holds the
 212 * pages for the buffer for that CPU. Each CPU has the same number
 213 * of pages allocated for its buffer.
 214 */
 215static struct trace_array	global_trace;
 216
 217LIST_HEAD(ftrace_trace_arrays);
 218
 219int trace_array_get(struct trace_array *this_tr)
 220{
 221	struct trace_array *tr;
 222	int ret = -ENODEV;
 223
 224	mutex_lock(&trace_types_lock);
 225	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 226		if (tr == this_tr) {
 227			tr->ref++;
 228			ret = 0;
 229			break;
 230		}
 231	}
 232	mutex_unlock(&trace_types_lock);
 233
 234	return ret;
 235}
 236
 237static void __trace_array_put(struct trace_array *this_tr)
 238{
 239	WARN_ON(!this_tr->ref);
 240	this_tr->ref--;
 241}
 242
 243void trace_array_put(struct trace_array *this_tr)
 244{
 245	mutex_lock(&trace_types_lock);
 246	__trace_array_put(this_tr);
 247	mutex_unlock(&trace_types_lock);
 248}
 249
 250int filter_check_discard(struct ftrace_event_file *file, void *rec,
 251			 struct ring_buffer *buffer,
 252			 struct ring_buffer_event *event)
 253{
 254	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
 255	    !filter_match_preds(file->filter, rec)) {
 256		ring_buffer_discard_commit(buffer, event);
 257		return 1;
 258	}
 259
 260	return 0;
 261}
 262EXPORT_SYMBOL_GPL(filter_check_discard);
 263
 264int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
 265			      struct ring_buffer *buffer,
 266			      struct ring_buffer_event *event)
 267{
 268	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
 269	    !filter_match_preds(call->filter, rec)) {
 270		ring_buffer_discard_commit(buffer, event);
 271		return 1;
 272	}
 273
 274	return 0;
 275}
 276EXPORT_SYMBOL_GPL(call_filter_check_discard);
 277
 278cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 279{
 280	u64 ts;
 281
 282	/* Early boot up does not have a buffer yet */
 283	if (!buf->buffer)
 284		return trace_clock_local();
 285
 286	ts = ring_buffer_time_stamp(buf->buffer, cpu);
 287	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
 288
 289	return ts;
 290}
 291
 292cycle_t ftrace_now(int cpu)
 293{
 294	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
 295}
 296
 297/**
 298 * tracing_is_enabled - Show if global_trace has been disabled
 299 *
  300 * Shows if the global trace has been enabled or not. It uses the
  301 * mirror flag "buffer_disabled", which can be used in fast paths such
  302 * as the irqsoff tracer. But it may be inaccurate due to races. If you
 303 * need to know the accurate state, use tracing_is_on() which is a little
 304 * slower, but accurate.
 305 */
 306int tracing_is_enabled(void)
 307{
 308	/*
 309	 * For quick access (irqsoff uses this in fast path), just
 310	 * return the mirror variable of the state of the ring buffer.
 311	 * It's a little racy, but we don't really care.
 312	 */
 313	smp_rmb();
 314	return !global_trace.buffer_disabled;
 315}
 316
 317/*
 318 * trace_buf_size is the size in bytes that is allocated
 319 * for a buffer. Note, the number of bytes is always rounded
 320 * to page size.
 321 *
 322 * This number is purposely set to a low number of 16384.
  323 * If a dump on oops happens, it is much appreciated not to have
  324 * to wait for all that output. Anyway, this is configurable at both
  325 * boot time and run time.
 326 */
 327#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
 328
 329static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 330
  331/* trace_types holds a linked list of available tracers. */
 332static struct tracer		*trace_types __read_mostly;
 333
 334/*
 335 * trace_types_lock is used to protect the trace_types list.
 336 */
 337DEFINE_MUTEX(trace_types_lock);
 338
 339/*
 340 * serialize the access of the ring buffer
 341 *
  342 * The ring buffer serializes readers, but that is only low level protection.
  343 * The validity of the events (returned by ring_buffer_peek() etc.)
  344 * is not protected by the ring buffer.
  345 *
  346 * The content of events may become garbage if we allow another process to
  347 * consume these events concurrently:
  348 *   A) the page of the consumed events may become a normal page
  349 *      (not a reader page) in the ring buffer, and this page will be
  350 *      rewritten by the event producer.
  351 *   B) the page of the consumed events may become a page for splice_read,
  352 *      and this page will be returned to the system.
  353 *
  354 * These primitives allow multiple processes to access different per-cpu
  355 * ring buffers concurrently.
  356 *
  357 * These primitives don't distinguish read-only and read-consume access.
  358 * Multiple read-only accesses are also serialized.
 359 */
 360
 361#ifdef CONFIG_SMP
 362static DECLARE_RWSEM(all_cpu_access_lock);
 363static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
 364
 365static inline void trace_access_lock(int cpu)
 366{
 367	if (cpu == RING_BUFFER_ALL_CPUS) {
 368		/* gain it for accessing the whole ring buffer. */
 369		down_write(&all_cpu_access_lock);
 370	} else {
 371		/* gain it for accessing a cpu ring buffer. */
 372
 373		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
 374		down_read(&all_cpu_access_lock);
 375
 376		/* Secondly block other access to this @cpu ring buffer. */
 377		mutex_lock(&per_cpu(cpu_access_lock, cpu));
 378	}
 379}
 380
 381static inline void trace_access_unlock(int cpu)
 382{
 383	if (cpu == RING_BUFFER_ALL_CPUS) {
 384		up_write(&all_cpu_access_lock);
 385	} else {
 386		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
 387		up_read(&all_cpu_access_lock);
 388	}
 389}
 390
 391static inline void trace_access_lock_init(void)
 392{
 393	int cpu;
 394
 395	for_each_possible_cpu(cpu)
 396		mutex_init(&per_cpu(cpu_access_lock, cpu));
 397}
 398
 399#else
 400
 401static DEFINE_MUTEX(access_lock);
 402
 403static inline void trace_access_lock(int cpu)
 404{
 405	(void)cpu;
 406	mutex_lock(&access_lock);
 407}
 408
 409static inline void trace_access_unlock(int cpu)
 410{
 411	(void)cpu;
 412	mutex_unlock(&access_lock);
 413}
 414
 415static inline void trace_access_lock_init(void)
 416{
 417}
 418
 419#endif
 420
 421/* trace_flags holds trace_options default values */
 422unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 423	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
 424	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
 425	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
 426
 427static void tracer_tracing_on(struct trace_array *tr)
 428{
 429	if (tr->trace_buffer.buffer)
 430		ring_buffer_record_on(tr->trace_buffer.buffer);
 431	/*
 432	 * This flag is looked at when buffers haven't been allocated
 433	 * yet, or by some tracers (like irqsoff), that just want to
 434	 * know if the ring buffer has been disabled, but it can handle
 435	 * races of where it gets disabled but we still do a record.
 436	 * As the check is in the fast path of the tracers, it is more
 437	 * important to be fast than accurate.
 438	 */
 439	tr->buffer_disabled = 0;
 440	/* Make the flag seen by readers */
 441	smp_wmb();
 442}
 443
 444/**
 445 * tracing_on - enable tracing buffers
 446 *
 447 * This function enables tracing buffers that may have been
 448 * disabled with tracing_off.
 449 */
 450void tracing_on(void)
 451{
 452	tracer_tracing_on(&global_trace);
 453}
 454EXPORT_SYMBOL_GPL(tracing_on);
 455
 456/**
 457 * __trace_puts - write a constant string into the trace buffer.
 458 * @ip:	   The address of the caller
 459 * @str:   The constant string to write
 460 * @size:  The size of the string.
 461 */
 462int __trace_puts(unsigned long ip, const char *str, int size)
 463{
 464	struct ring_buffer_event *event;
 465	struct ring_buffer *buffer;
 466	struct print_entry *entry;
 467	unsigned long irq_flags;
 468	int alloc;
 469
 470	if (unlikely(tracing_selftest_running || tracing_disabled))
 471		return 0;
 472
 473	alloc = sizeof(*entry) + size + 2; /* possible \n added */
 474
 475	local_save_flags(irq_flags);
 476	buffer = global_trace.trace_buffer.buffer;
 477	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
 478					  irq_flags, preempt_count());
 479	if (!event)
 480		return 0;
 481
 482	entry = ring_buffer_event_data(event);
 483	entry->ip = ip;
 484
 485	memcpy(&entry->buf, str, size);
 486
 487	/* Add a newline if necessary */
 488	if (entry->buf[size - 1] != '\n') {
 489		entry->buf[size] = '\n';
 490		entry->buf[size + 1] = '\0';
 491	} else
 492		entry->buf[size] = '\0';
 493
 494	__buffer_unlock_commit(buffer, event);
 495
 496	return size;
 497}
 498EXPORT_SYMBOL_GPL(__trace_puts);
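/*
 * Editor's sketch (not part of this file): callers normally reach
 * __trace_puts()/__trace_bputs() through the trace_puts() macro declared in
 * include/linux/kernel.h, which is expected to pick the cheaper bputs form
 * for true string constants.  A made-up debugging call site:
 */
#if 0	/* illustrative only, not compiled */
static void example_debug_path(void)
{
	/* One ring buffer event, no formatting cost at the call site. */
	trace_puts("example: reached the suspect path\n");
}
#endif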
 499
 500/**
 501 * __trace_bputs - write the pointer to a constant string into trace buffer
 502 * @ip:	   The address of the caller
 503 * @str:   The constant string to write to the buffer to
 504 */
 505int __trace_bputs(unsigned long ip, const char *str)
 506{
 507	struct ring_buffer_event *event;
 508	struct ring_buffer *buffer;
 509	struct bputs_entry *entry;
 510	unsigned long irq_flags;
 511	int size = sizeof(struct bputs_entry);
 512
 513	if (unlikely(tracing_selftest_running || tracing_disabled))
 514		return 0;
 515
 516	local_save_flags(irq_flags);
 517	buffer = global_trace.trace_buffer.buffer;
 518	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
 519					  irq_flags, preempt_count());
 520	if (!event)
 521		return 0;
 522
 523	entry = ring_buffer_event_data(event);
 524	entry->ip			= ip;
 525	entry->str			= str;
 526
 527	__buffer_unlock_commit(buffer, event);
 528
 529	return 1;
 530}
 531EXPORT_SYMBOL_GPL(__trace_bputs);
 532
 533#ifdef CONFIG_TRACER_SNAPSHOT
 534/**
 535 * trace_snapshot - take a snapshot of the current buffer.
 536 *
 537 * This causes a swap between the snapshot buffer and the current live
 538 * tracing buffer. You can use this to take snapshots of the live
 539 * trace when some condition is triggered, but continue to trace.
 540 *
  541 * Note, make sure to allocate the snapshot either with
  542 * tracing_snapshot_alloc(), or manually with:
  543 * echo 1 > /sys/kernel/debug/tracing/snapshot
  544 *
  545 * If the snapshot buffer is not allocated, this will stop tracing,
  546 * basically making a permanent snapshot.
 547 */
 548void tracing_snapshot(void)
 549{
 550	struct trace_array *tr = &global_trace;
 551	struct tracer *tracer = tr->current_trace;
 552	unsigned long flags;
 553
 554	if (in_nmi()) {
 555		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
 556		internal_trace_puts("*** snapshot is being ignored        ***\n");
 557		return;
 558	}
 559
 560	if (!tr->allocated_snapshot) {
 561		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
 562		internal_trace_puts("*** stopping trace here!   ***\n");
 563		tracing_off();
 564		return;
 565	}
 566
 567	/* Note, snapshot can not be used when the tracer uses it */
 568	if (tracer->use_max_tr) {
 569		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
 570		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
 571		return;
 572	}
 573
 574	local_irq_save(flags);
 575	update_max_tr(tr, current, smp_processor_id());
 576	local_irq_restore(flags);
 577}
 578EXPORT_SYMBOL_GPL(tracing_snapshot);
 579
 580static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
 581					struct trace_buffer *size_buf, int cpu_id);
 582static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
 583
 584static int alloc_snapshot(struct trace_array *tr)
 585{
 586	int ret;
 587
 588	if (!tr->allocated_snapshot) {
 589
 590		/* allocate spare buffer */
 591		ret = resize_buffer_duplicate_size(&tr->max_buffer,
 592				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
 593		if (ret < 0)
 594			return ret;
 595
 596		tr->allocated_snapshot = true;
 597	}
 598
 599	return 0;
 600}
 601
 602void free_snapshot(struct trace_array *tr)
 603{
 604	/*
605	 * We don't free the ring buffer; instead, we resize it, because
606	 * the max_tr ring buffer has some state (e.g. ring->clock) and
607	 * we want to preserve it.
 608	 */
 609	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
 610	set_buffer_entries(&tr->max_buffer, 1);
 611	tracing_reset_online_cpus(&tr->max_buffer);
 612	tr->allocated_snapshot = false;
 613}
 614
 615/**
 616 * tracing_alloc_snapshot - allocate snapshot buffer.
 617 *
 618 * This only allocates the snapshot buffer if it isn't already
 619 * allocated - it doesn't also take a snapshot.
 620 *
 621 * This is meant to be used in cases where the snapshot buffer needs
 622 * to be set up for events that can't sleep but need to be able to
 623 * trigger a snapshot.
 624 */
 625int tracing_alloc_snapshot(void)
 626{
 627	struct trace_array *tr = &global_trace;
 628	int ret;
 629
 630	ret = alloc_snapshot(tr);
 631	WARN_ON(ret < 0);
 632
 633	return ret;
 634}
 635EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 636
 637/**
 638 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 639 *
 640 * This is similar to trace_snapshot(), but it will allocate the
 641 * snapshot buffer if it isn't already allocated. Use this only
 642 * where it is safe to sleep, as the allocation may sleep.
 643 *
 644 * This causes a swap between the snapshot buffer and the current live
 645 * tracing buffer. You can use this to take snapshots of the live
 646 * trace when some condition is triggered, but continue to trace.
 647 */
 648void tracing_snapshot_alloc(void)
 649{
 650	int ret;
 651
 652	ret = tracing_alloc_snapshot();
 653	if (ret < 0)
 654		return;
 655
 656	tracing_snapshot();
 657}
 658EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
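/*
 * Editor's sketch (not part of this file): a typical in-kernel user of the
 * snapshot API allocates the spare buffer once from a context that may
 * sleep, then calls tracing_snapshot() at the (possibly atomic) point where
 * the interesting condition is detected.  example_detected_stall() is a
 * made-up predicate.
 */
#if 0	/* illustrative only, not compiled */
static int __init example_init(void)
{
	/* May sleep, so do it at init time. */
	return tracing_alloc_snapshot();
}

static void example_check(void)
{
	if (example_detected_stall())
		tracing_snapshot();	/* swaps in the spare buffer */
}
#endif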
 659#else
 660void tracing_snapshot(void)
 661{
 662	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
 663}
 664EXPORT_SYMBOL_GPL(tracing_snapshot);
 665int tracing_alloc_snapshot(void)
 666{
 667	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
 668	return -ENODEV;
 669}
 670EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 671void tracing_snapshot_alloc(void)
 672{
 673	/* Give warning */
 674	tracing_snapshot();
 675}
 676EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 677#endif /* CONFIG_TRACER_SNAPSHOT */
 678
 679static void tracer_tracing_off(struct trace_array *tr)
 680{
 681	if (tr->trace_buffer.buffer)
 682		ring_buffer_record_off(tr->trace_buffer.buffer);
 683	/*
 684	 * This flag is looked at when buffers haven't been allocated
 685	 * yet, or by some tracers (like irqsoff), that just want to
 686	 * know if the ring buffer has been disabled, but it can handle
 687	 * races of where it gets disabled but we still do a record.
 688	 * As the check is in the fast path of the tracers, it is more
 689	 * important to be fast than accurate.
 690	 */
 691	tr->buffer_disabled = 1;
 692	/* Make the flag seen by readers */
 693	smp_wmb();
 694}
 695
 696/**
 697 * tracing_off - turn off tracing buffers
 698 *
 699 * This function stops the tracing buffers from recording data.
 700 * It does not disable any overhead the tracers themselves may
 701 * be causing. This function simply causes all recording to
 702 * the ring buffers to fail.
 703 */
 704void tracing_off(void)
 705{
 706	tracer_tracing_off(&global_trace);
 707}
 708EXPORT_SYMBOL_GPL(tracing_off);
 709
 710void disable_trace_on_warning(void)
 711{
 712	if (__disable_trace_on_warning)
 713		tracing_off();
 714}
 715
 716/**
 717 * tracer_tracing_is_on - show real state of ring buffer enabled
 718 * @tr : the trace array to know if ring buffer is enabled
 719 *
 720 * Shows real state of the ring buffer if it is enabled or not.
 721 */
 722static int tracer_tracing_is_on(struct trace_array *tr)
 723{
 724	if (tr->trace_buffer.buffer)
 725		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
 726	return !tr->buffer_disabled;
 727}
 728
 729/**
 730 * tracing_is_on - show state of ring buffers enabled
 731 */
 732int tracing_is_on(void)
 733{
 734	return tracer_tracing_is_on(&global_trace);
 735}
 736EXPORT_SYMBOL_GPL(tracing_is_on);
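/*
 * Editor's sketch (not part of this file): tracing_on(), tracing_off() and
 * tracing_is_on() are exported above and declared in include/linux/kernel.h,
 * so kernel code can freeze the ring buffer the moment a problem is seen and
 * keep the events leading up to it.  example_state_corrupted() and struct
 * example_dev are made up.
 */
#if 0	/* illustrative only, not compiled */
static void example_validate(struct example_dev *dev)
{
	if (example_state_corrupted(dev) && tracing_is_on()) {
		trace_printk("example: corrupt state, stopping trace\n");
		tracing_off();	/* recording now fails; the buffer is preserved */
	}
}
#endif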
 737
 738static int __init set_buf_size(char *str)
 739{
 740	unsigned long buf_size;
 741
 742	if (!str)
 743		return 0;
 744	buf_size = memparse(str, &str);
 745	/* nr_entries can not be zero */
 746	if (buf_size == 0)
 747		return 0;
 748	trace_buf_size = buf_size;
 749	return 1;
 750}
 751__setup("trace_buf_size=", set_buf_size);
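/*
 * Editor's note: memparse() accepts the usual size suffixes, so the per-cpu
 * buffer size can be given at boot as, for example:
 *
 *	trace_buf_size=4096k
 *	trace_buf_size=16M
 *
 * The value is in bytes per CPU and, as noted above, is rounded to page
 * size by the ring buffer.
 */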
 752
 753static int __init set_tracing_thresh(char *str)
 754{
 755	unsigned long threshold;
 756	int ret;
 757
 758	if (!str)
 759		return 0;
 760	ret = kstrtoul(str, 0, &threshold);
 761	if (ret < 0)
 762		return 0;
 763	tracing_thresh = threshold * 1000;
 764	return 1;
 765}
 766__setup("tracing_thresh=", set_tracing_thresh);
 767
 768unsigned long nsecs_to_usecs(unsigned long nsecs)
 769{
 770	return nsecs / 1000;
 771}
 772
  773/* These must match the bit positions in trace_iterator_flags */
 774static const char *trace_options[] = {
 775	"print-parent",
 776	"sym-offset",
 777	"sym-addr",
 778	"verbose",
 779	"raw",
 780	"hex",
 781	"bin",
 782	"block",
 783	"stacktrace",
 784	"trace_printk",
 785	"ftrace_preempt",
 786	"branch",
 787	"annotate",
 788	"userstacktrace",
 789	"sym-userobj",
 790	"printk-msg-only",
 791	"context-info",
 792	"latency-format",
 793	"sleep-time",
 794	"graph-time",
 795	"record-cmd",
 796	"overwrite",
 797	"disable_on_free",
 798	"irq-info",
 799	"markers",
 800	"function-trace",
 801	NULL
 802};
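/*
 * Editor's note: besides the trace_options= boot parameter handled earlier,
 * each name in this table can be toggled at run time through tracefs
 * (paths assume the usual debugfs mount point), e.g.:
 *
 *	echo stacktrace   > /sys/kernel/debug/tracing/trace_options
 *	echo nostacktrace > /sys/kernel/debug/tracing/trace_options
 *
 * or through the individual files under /sys/kernel/debug/tracing/options/.
 */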
 803
 804static struct {
 805	u64 (*func)(void);
 806	const char *name;
 807	int in_ns;		/* is this clock in nanoseconds? */
 808} trace_clocks[] = {
 809	{ trace_clock_local,	"local",	1 },
 810	{ trace_clock_global,	"global",	1 },
 811	{ trace_clock_counter,	"counter",	0 },
 812	{ trace_clock_jiffies,	"uptime",	1 },
 813	{ trace_clock,		"perf",		1 },
 814	ARCH_TRACE_CLOCKS
 815};
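/*
 * Editor's note: the timestamp clock can be chosen from the names in
 * trace_clocks[] either with the trace_clock= boot parameter handled earlier
 * or at run time, e.g.:
 *
 *	cat /sys/kernel/debug/tracing/trace_clock
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */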
 816
 817/*
 818 * trace_parser_get_init - gets the buffer for trace parser
 819 */
 820int trace_parser_get_init(struct trace_parser *parser, int size)
 821{
 822	memset(parser, 0, sizeof(*parser));
 823
 824	parser->buffer = kmalloc(size, GFP_KERNEL);
 825	if (!parser->buffer)
 826		return 1;
 827
 828	parser->size = size;
 829	return 0;
 830}
 831
 832/*
 833 * trace_parser_put - frees the buffer for trace parser
 834 */
 835void trace_parser_put(struct trace_parser *parser)
 836{
 837	kfree(parser->buffer);
 838}
 839
 840/*
  841 * trace_get_user - reads the user input string separated by space
 842 * (matched by isspace(ch))
 843 *
 844 * For each string found the 'struct trace_parser' is updated,
 845 * and the function returns.
 846 *
 847 * Returns number of bytes read.
 848 *
 849 * See kernel/trace/trace.h for 'struct trace_parser' details.
 850 */
 851int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 852	size_t cnt, loff_t *ppos)
 853{
 854	char ch;
 855	size_t read = 0;
 856	ssize_t ret;
 857
 858	if (!*ppos)
 859		trace_parser_clear(parser);
 860
 861	ret = get_user(ch, ubuf++);
 862	if (ret)
 863		goto out;
 864
 865	read++;
 866	cnt--;
 867
 868	/*
 869	 * The parser is not finished with the last write,
 870	 * continue reading the user input without skipping spaces.
 871	 */
 872	if (!parser->cont) {
 873		/* skip white space */
 874		while (cnt && isspace(ch)) {
 875			ret = get_user(ch, ubuf++);
 876			if (ret)
 877				goto out;
 878			read++;
 879			cnt--;
 880		}
 881
 882		/* only spaces were written */
 883		if (isspace(ch)) {
 884			*ppos += read;
 885			ret = read;
 886			goto out;
 887		}
 888
 889		parser->idx = 0;
 890	}
 891
 892	/* read the non-space input */
 893	while (cnt && !isspace(ch)) {
 894		if (parser->idx < parser->size - 1)
 895			parser->buffer[parser->idx++] = ch;
 896		else {
 897			ret = -EINVAL;
 898			goto out;
 899		}
 900		ret = get_user(ch, ubuf++);
 901		if (ret)
 902			goto out;
 903		read++;
 904		cnt--;
 905	}
 906
 907	/* We either got finished input or we have to wait for another call. */
 908	if (isspace(ch)) {
 909		parser->buffer[parser->idx] = 0;
 910		parser->cont = false;
 911	} else if (parser->idx < parser->size - 1) {
 912		parser->cont = true;
 913		parser->buffer[parser->idx++] = ch;
 914	} else {
 915		ret = -EINVAL;
 916		goto out;
 917	}
 918
 919	*ppos += read;
 920	ret = read;
 921
 922out:
 923	return ret;
 924}
 925
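/*
 * Editor's sketch (not part of this file) of how a tracefs write handler is
 * expected to use the parser above: each call returns one whitespace
 * separated token, and trace_parser_cont() says whether the token is still
 * incomplete.  example_apply_token() is a made-up helper.
 */
#if 0	/* illustrative only, not compiled */
static ssize_t
example_write(struct file *filp, const char __user *ubuf,
	      size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser) &&
	    !trace_parser_cont(&parser)) {
		parser.buffer[parser.idx] = 0;
		example_apply_token(parser.buffer);
	}

	trace_parser_put(&parser);
	return read;
}
#endif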
 926ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 927{
 928	int len;
 929	int ret;
 930
 931	if (!cnt)
 932		return 0;
 933
 934	if (s->len <= s->readpos)
 935		return -EBUSY;
 936
 937	len = s->len - s->readpos;
 938	if (cnt > len)
 939		cnt = len;
 940	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
 941	if (ret == cnt)
 942		return -EFAULT;
 943
 944	cnt -= ret;
 945
 946	s->readpos += cnt;
 947	return cnt;
 948}
 949
 950static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 951{
 952	int len;
 953
 954	if (s->len <= s->readpos)
 955		return -EBUSY;
 956
 957	len = s->len - s->readpos;
 958	if (cnt > len)
 959		cnt = len;
 960	memcpy(buf, s->buffer + s->readpos, cnt);
 961
 962	s->readpos += cnt;
 963	return cnt;
 964}
 965
 966/*
 967 * ftrace_max_lock is used to protect the swapping of buffers
 968 * when taking a max snapshot. The buffers themselves are
 969 * protected by per_cpu spinlocks. But the action of the swap
 970 * needs its own lock.
 971 *
  972 * This is defined as an arch_spinlock_t in order to help
  973 * with performance when lockdep debugging is enabled.
  974 *
  975 * It is also used in places other than update_max_tr(), so it
  976 * needs to be defined outside of the CONFIG_TRACER_MAX_TRACE
  977 * section.
 978 */
 979static arch_spinlock_t ftrace_max_lock =
 980	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 981
 982unsigned long __read_mostly	tracing_thresh;
 983
 984#ifdef CONFIG_TRACER_MAX_TRACE
 985unsigned long __read_mostly	tracing_max_latency;
 986
 987/*
 988 * Copy the new maximum trace into the separate maximum-trace
 989 * structure. (this way the maximum trace is permanently saved,
 990 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 991 */
 992static void
 993__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 994{
 995	struct trace_buffer *trace_buf = &tr->trace_buffer;
 996	struct trace_buffer *max_buf = &tr->max_buffer;
 997	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
 998	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
 999
1000	max_buf->cpu = cpu;
1001	max_buf->time_start = data->preempt_timestamp;
1002
1003	max_data->saved_latency = tracing_max_latency;
1004	max_data->critical_start = data->critical_start;
1005	max_data->critical_end = data->critical_end;
1006
1007	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1008	max_data->pid = tsk->pid;
1009	/*
1010	 * If tsk == current, then use current_uid(), as that does not use
1011	 * RCU. The irq tracer can be called out of RCU scope.
1012	 */
1013	if (tsk == current)
1014		max_data->uid = current_uid();
1015	else
1016		max_data->uid = task_uid(tsk);
1017
1018	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1019	max_data->policy = tsk->policy;
1020	max_data->rt_priority = tsk->rt_priority;
1021
1022	/* record this tasks comm */
1023	tracing_record_cmdline(tsk);
1024}
1025
1026/**
1027 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1028 * @tr: tracer
1029 * @tsk: the task with the latency
1030 * @cpu: The cpu that initiated the trace.
1031 *
1032 * Flip the buffers between the @tr and the max_tr and record information
1033 * about which task was the cause of this latency.
1034 */
1035void
1036update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1037{
1038	struct ring_buffer *buf;
1039
1040	if (tr->stop_count)
1041		return;
1042
1043	WARN_ON_ONCE(!irqs_disabled());
1044
1045	if (!tr->allocated_snapshot) {
1046		/* Only the nop tracer should hit this when disabling */
1047		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1048		return;
1049	}
1050
1051	arch_spin_lock(&ftrace_max_lock);
1052
1053	buf = tr->trace_buffer.buffer;
1054	tr->trace_buffer.buffer = tr->max_buffer.buffer;
1055	tr->max_buffer.buffer = buf;
1056
1057	__update_max_tr(tr, tsk, cpu);
1058	arch_spin_unlock(&ftrace_max_lock);
1059}
1060
1061/**
1062 * update_max_tr_single - only copy one trace over, and reset the rest
1063 * @tr - tracer
1064 * @tsk - task with the latency
1065 * @cpu - the cpu of the buffer to copy.
1066 *
1067 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1068 */
1069void
1070update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1071{
1072	int ret;
1073
1074	if (tr->stop_count)
1075		return;
1076
1077	WARN_ON_ONCE(!irqs_disabled());
1078	if (!tr->allocated_snapshot) {
1079		/* Only the nop tracer should hit this when disabling */
1080		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1081		return;
1082	}
1083
1084	arch_spin_lock(&ftrace_max_lock);
1085
1086	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1087
1088	if (ret == -EBUSY) {
1089		/*
1090		 * We failed to swap the buffer due to a commit taking
1091		 * place on this CPU. We fail to record, but we reset
1092		 * the max trace buffer (no one writes directly to it)
1093		 * and flag that it failed.
1094		 */
1095		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1096			"Failed to swap buffers due to commit in progress\n");
1097	}
1098
1099	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1100
1101	__update_max_tr(tr, tsk, cpu);
1102	arch_spin_unlock(&ftrace_max_lock);
1103}
1104#endif /* CONFIG_TRACER_MAX_TRACE */
1105
1106static void default_wait_pipe(struct trace_iterator *iter)
1107{
1108	/* Iterators are static, they should be filled or empty */
1109	if (trace_buffer_iter(iter, iter->cpu_file))
1110		return;
1111
1112	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
1113}
1114
1115#ifdef CONFIG_FTRACE_STARTUP_TEST
1116static int run_tracer_selftest(struct tracer *type)
1117{
1118	struct trace_array *tr = &global_trace;
1119	struct tracer *saved_tracer = tr->current_trace;
1120	int ret;
1121
1122	if (!type->selftest || tracing_selftest_disabled)
1123		return 0;
1124
1125	/*
1126	 * Run a selftest on this tracer.
1127	 * Here we reset the trace buffer, and set the current
1128	 * tracer to be this tracer. The tracer can then run some
1129	 * internal tracing to verify that everything is in order.
1130	 * If we fail, we do not register this tracer.
1131	 */
1132	tracing_reset_online_cpus(&tr->trace_buffer);
1133
1134	tr->current_trace = type;
1135
1136#ifdef CONFIG_TRACER_MAX_TRACE
1137	if (type->use_max_tr) {
1138		/* If we expanded the buffers, make sure the max is expanded too */
1139		if (ring_buffer_expanded)
1140			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1141					   RING_BUFFER_ALL_CPUS);
1142		tr->allocated_snapshot = true;
1143	}
1144#endif
1145
1146	/* the test is responsible for initializing and enabling */
1147	pr_info("Testing tracer %s: ", type->name);
1148	ret = type->selftest(type, tr);
1149	/* the test is responsible for resetting too */
1150	tr->current_trace = saved_tracer;
1151	if (ret) {
1152		printk(KERN_CONT "FAILED!\n");
1153		/* Add the warning after printing 'FAILED' */
1154		WARN_ON(1);
1155		return -1;
1156	}
1157	/* Only reset on passing, to avoid touching corrupted buffers */
1158	tracing_reset_online_cpus(&tr->trace_buffer);
1159
1160#ifdef CONFIG_TRACER_MAX_TRACE
1161	if (type->use_max_tr) {
1162		tr->allocated_snapshot = false;
1163
1164		/* Shrink the max buffer again */
1165		if (ring_buffer_expanded)
1166			ring_buffer_resize(tr->max_buffer.buffer, 1,
1167					   RING_BUFFER_ALL_CPUS);
1168	}
1169#endif
1170
1171	printk(KERN_CONT "PASSED\n");
1172	return 0;
1173}
1174#else
1175static inline int run_tracer_selftest(struct tracer *type)
1176{
1177	return 0;
1178}
1179#endif /* CONFIG_FTRACE_STARTUP_TEST */
1180
1181/**
1182 * register_tracer - register a tracer with the ftrace system.
1183 * @type - the plugin for the tracer
1184 *
1185 * Register a new plugin tracer.
1186 */
1187int register_tracer(struct tracer *type)
1188{
1189	struct tracer *t;
1190	int ret = 0;
1191
1192	if (!type->name) {
1193		pr_info("Tracer must have a name\n");
1194		return -1;
1195	}
1196
1197	if (strlen(type->name) >= MAX_TRACER_SIZE) {
1198		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1199		return -1;
1200	}
1201
1202	mutex_lock(&trace_types_lock);
1203
1204	tracing_selftest_running = true;
1205
1206	for (t = trace_types; t; t = t->next) {
1207		if (strcmp(type->name, t->name) == 0) {
1208			/* already found */
1209			pr_info("Tracer %s already registered\n",
1210				type->name);
1211			ret = -1;
1212			goto out;
1213		}
1214	}
1215
1216	if (!type->set_flag)
1217		type->set_flag = &dummy_set_flag;
1218	if (!type->flags)
1219		type->flags = &dummy_tracer_flags;
1220	else
1221		if (!type->flags->opts)
1222			type->flags->opts = dummy_tracer_opt;
1223	if (!type->wait_pipe)
1224		type->wait_pipe = default_wait_pipe;
1225
1226	ret = run_tracer_selftest(type);
1227	if (ret < 0)
1228		goto out;
1229
1230	type->next = trace_types;
1231	trace_types = type;
1232
1233 out:
1234	tracing_selftest_running = false;
1235	mutex_unlock(&trace_types_lock);
1236
1237	if (ret || !default_bootup_tracer)
1238		goto out_unlock;
1239
1240	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1241		goto out_unlock;
1242
1243	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1244	/* Do we want this tracer to start on bootup? */
1245	tracing_set_tracer(&global_trace, type->name);
1246	default_bootup_tracer = NULL;
1247	/* disable other selftests, since this will break it. */
1248	tracing_selftest_disabled = true;
1249#ifdef CONFIG_FTRACE_STARTUP_TEST
1250	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1251	       type->name);
1252#endif
1253
1254 out_unlock:
1255	return ret;
1256}
1257
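/*
 * Editor's sketch (not part of this file): a minimal in-tree tracer as it
 * would be registered through the API above.  struct tracer lives in
 * kernel/trace/trace.h and register_tracer() is not exported to modules;
 * the "example" tracer and its callbacks are hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static int example_tracer_init(struct trace_array *tr)
{
	/* arm whatever hooks this tracer needs */
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
	/* undo example_tracer_init() */
}

static struct tracer example_tracer __read_mostly = {
	.name	= "example",
	.init	= example_tracer_init,
	.reset	= example_tracer_reset,
};

static __init int example_tracer_register(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(example_tracer_register);
#endif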
1258void tracing_reset(struct trace_buffer *buf, int cpu)
1259{
1260	struct ring_buffer *buffer = buf->buffer;
1261
1262	if (!buffer)
1263		return;
1264
1265	ring_buffer_record_disable(buffer);
1266
1267	/* Make sure all commits have finished */
1268	synchronize_sched();
1269	ring_buffer_reset_cpu(buffer, cpu);
1270
1271	ring_buffer_record_enable(buffer);
1272}
1273
1274void tracing_reset_online_cpus(struct trace_buffer *buf)
1275{
1276	struct ring_buffer *buffer = buf->buffer;
1277	int cpu;
1278
1279	if (!buffer)
1280		return;
1281
1282	ring_buffer_record_disable(buffer);
1283
1284	/* Make sure all commits have finished */
1285	synchronize_sched();
1286
1287	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1288
1289	for_each_online_cpu(cpu)
1290		ring_buffer_reset_cpu(buffer, cpu);
1291
1292	ring_buffer_record_enable(buffer);
1293}
1294
1295/* Must have trace_types_lock held */
1296void tracing_reset_all_online_cpus(void)
1297{
1298	struct trace_array *tr;
1299
1300	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1301		tracing_reset_online_cpus(&tr->trace_buffer);
1302#ifdef CONFIG_TRACER_MAX_TRACE
1303		tracing_reset_online_cpus(&tr->max_buffer);
1304#endif
1305	}
1306}
1307
1308#define SAVED_CMDLINES 128
1309#define NO_CMDLINE_MAP UINT_MAX
1310static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1311static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
1312static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
1313static int cmdline_idx;
1314static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1315
1316/* temporarily disable recording */
1317static atomic_t trace_record_cmdline_disabled __read_mostly;
1318
1319static void trace_init_cmdlines(void)
1320{
1321	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1322	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
1323	cmdline_idx = 0;
1324}
1325
1326int is_tracing_stopped(void)
1327{
1328	return global_trace.stop_count;
1329}
1330
1331/**
1332 * tracing_start - quick start of the tracer
1333 *
1334 * If tracing is enabled but was stopped by tracing_stop,
1335 * this will start the tracer back up.
1336 */
1337void tracing_start(void)
1338{
1339	struct ring_buffer *buffer;
1340	unsigned long flags;
1341
1342	if (tracing_disabled)
1343		return;
1344
1345	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1346	if (--global_trace.stop_count) {
1347		if (global_trace.stop_count < 0) {
1348			/* Someone screwed up their debugging */
1349			WARN_ON_ONCE(1);
1350			global_trace.stop_count = 0;
1351		}
1352		goto out;
1353	}
1354
1355	/* Prevent the buffers from switching */
1356	arch_spin_lock(&ftrace_max_lock);
1357
1358	buffer = global_trace.trace_buffer.buffer;
1359	if (buffer)
1360		ring_buffer_record_enable(buffer);
1361
1362#ifdef CONFIG_TRACER_MAX_TRACE
1363	buffer = global_trace.max_buffer.buffer;
1364	if (buffer)
1365		ring_buffer_record_enable(buffer);
1366#endif
1367
1368	arch_spin_unlock(&ftrace_max_lock);
1369
1370	ftrace_start();
1371 out:
1372	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1373}
1374
1375static void tracing_start_tr(struct trace_array *tr)
1376{
1377	struct ring_buffer *buffer;
1378	unsigned long flags;
1379
1380	if (tracing_disabled)
1381		return;
1382
1383	/* If global, we need to also start the max tracer */
1384	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1385		return tracing_start();
1386
1387	raw_spin_lock_irqsave(&tr->start_lock, flags);
1388
1389	if (--tr->stop_count) {
1390		if (tr->stop_count < 0) {
1391			/* Someone screwed up their debugging */
1392			WARN_ON_ONCE(1);
1393			tr->stop_count = 0;
1394		}
1395		goto out;
1396	}
1397
1398	buffer = tr->trace_buffer.buffer;
1399	if (buffer)
1400		ring_buffer_record_enable(buffer);
1401
1402 out:
1403	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1404}
1405
1406/**
1407 * tracing_stop - quick stop of the tracer
1408 *
1409 * Light weight way to stop tracing. Use in conjunction with
1410 * tracing_start.
1411 */
1412void tracing_stop(void)
1413{
1414	struct ring_buffer *buffer;
1415	unsigned long flags;
1416
1417	ftrace_stop();
1418	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1419	if (global_trace.stop_count++)
1420		goto out;
1421
1422	/* Prevent the buffers from switching */
1423	arch_spin_lock(&ftrace_max_lock);
1424
1425	buffer = global_trace.trace_buffer.buffer;
1426	if (buffer)
1427		ring_buffer_record_disable(buffer);
1428
1429#ifdef CONFIG_TRACER_MAX_TRACE
1430	buffer = global_trace.max_buffer.buffer;
1431	if (buffer)
1432		ring_buffer_record_disable(buffer);
1433#endif
1434
1435	arch_spin_unlock(&ftrace_max_lock);
1436
1437 out:
1438	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1439}
1440
1441static void tracing_stop_tr(struct trace_array *tr)
1442{
1443	struct ring_buffer *buffer;
1444	unsigned long flags;
1445
1446	/* If global, we need to also stop the max tracer */
1447	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1448		return tracing_stop();
1449
1450	raw_spin_lock_irqsave(&tr->start_lock, flags);
1451	if (tr->stop_count++)
1452		goto out;
1453
1454	buffer = tr->trace_buffer.buffer;
1455	if (buffer)
1456		ring_buffer_record_disable(buffer);
1457
1458 out:
1459	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1460}
1461
1462void trace_stop_cmdline_recording(void);
1463
1464static void trace_save_cmdline(struct task_struct *tsk)
1465{
1466	unsigned pid, idx;
1467
1468	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1469		return;
1470
1471	/*
1472	 * It's not the end of the world if we don't get
1473	 * the lock, but we also don't want to spin
1474	 * nor do we want to disable interrupts,
1475	 * so if we miss here, then better luck next time.
1476	 */
1477	if (!arch_spin_trylock(&trace_cmdline_lock))
1478		return;
1479
1480	idx = map_pid_to_cmdline[tsk->pid];
1481	if (idx == NO_CMDLINE_MAP) {
1482		idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1483
1484		/*
1485		 * Check whether the cmdline buffer at idx has a pid
1486		 * mapped. We are going to overwrite that entry so we
1487		 * need to clear the map_pid_to_cmdline. Otherwise we
1488		 * would read the new comm for the old pid.
1489		 */
1490		pid = map_cmdline_to_pid[idx];
1491		if (pid != NO_CMDLINE_MAP)
1492			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1493
1494		map_cmdline_to_pid[idx] = tsk->pid;
1495		map_pid_to_cmdline[tsk->pid] = idx;
1496
1497		cmdline_idx = idx;
1498	}
1499
1500	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1501
1502	arch_spin_unlock(&trace_cmdline_lock);
1503}
1504
1505void trace_find_cmdline(int pid, char comm[])
1506{
1507	unsigned map;
1508
1509	if (!pid) {
1510		strcpy(comm, "<idle>");
1511		return;
1512	}
1513
1514	if (WARN_ON_ONCE(pid < 0)) {
1515		strcpy(comm, "<XXX>");
1516		return;
1517	}
1518
1519	if (pid > PID_MAX_DEFAULT) {
1520		strcpy(comm, "<...>");
1521		return;
1522	}
1523
1524	preempt_disable();
1525	arch_spin_lock(&trace_cmdline_lock);
1526	map = map_pid_to_cmdline[pid];
1527	if (map != NO_CMDLINE_MAP)
1528		strcpy(comm, saved_cmdlines[map]);
1529	else
1530		strcpy(comm, "<...>");
1531
1532	arch_spin_unlock(&trace_cmdline_lock);
1533	preempt_enable();
1534}
1535
1536void tracing_record_cmdline(struct task_struct *tsk)
1537{
1538	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1539		return;
1540
1541	if (!__this_cpu_read(trace_cmdline_save))
1542		return;
1543
1544	__this_cpu_write(trace_cmdline_save, false);
1545
1546	trace_save_cmdline(tsk);
1547}
1548
1549void
1550tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1551			     int pc)
1552{
1553	struct task_struct *tsk = current;
1554
1555	entry->preempt_count		= pc & 0xff;
1556	entry->pid			= (tsk) ? tsk->pid : 0;
1557	entry->flags =
1558#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1559		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1560#else
1561		TRACE_FLAG_IRQS_NOSUPPORT |
1562#endif
1563		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1564		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1565		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1566		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1567}
1568EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1569
1570struct ring_buffer_event *
1571trace_buffer_lock_reserve(struct ring_buffer *buffer,
1572			  int type,
1573			  unsigned long len,
1574			  unsigned long flags, int pc)
1575{
1576	struct ring_buffer_event *event;
1577
1578	event = ring_buffer_lock_reserve(buffer, len);
1579	if (event != NULL) {
1580		struct trace_entry *ent = ring_buffer_event_data(event);
1581
1582		tracing_generic_entry_update(ent, flags, pc);
1583		ent->type = type;
1584	}
1585
1586	return event;
1587}
1588
1589void
1590__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1591{
1592	__this_cpu_write(trace_cmdline_save, true);
1593	ring_buffer_unlock_commit(buffer, event);
1594}
1595
1596static inline void
1597__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1598			     struct ring_buffer_event *event,
1599			     unsigned long flags, int pc)
1600{
1601	__buffer_unlock_commit(buffer, event);
1602
1603	ftrace_trace_stack(buffer, flags, 6, pc);
1604	ftrace_trace_userstack(buffer, flags, pc);
1605}
1606
1607void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1608				struct ring_buffer_event *event,
1609				unsigned long flags, int pc)
1610{
1611	__trace_buffer_unlock_commit(buffer, event, flags, pc);
1612}
1613EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1614
1615static struct ring_buffer *temp_buffer;
1616
1617struct ring_buffer_event *
1618trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1619			  struct ftrace_event_file *ftrace_file,
1620			  int type, unsigned long len,
1621			  unsigned long flags, int pc)
1622{
1623	struct ring_buffer_event *entry;
1624
1625	*current_rb = ftrace_file->tr->trace_buffer.buffer;
1626	entry = trace_buffer_lock_reserve(*current_rb,
1627					 type, len, flags, pc);
1628	/*
1629	 * If tracing is off, but we have triggers enabled,
1630	 * we still need to look at the event data. Use the temp_buffer
1631	 * to store the trace event for the trigger to use. It's recursion
1632	 * safe and will not be recorded anywhere.
1633	 */
1634	if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1635		*current_rb = temp_buffer;
1636		entry = trace_buffer_lock_reserve(*current_rb,
1637						  type, len, flags, pc);
1638	}
1639	return entry;
1640}
1641EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1642
1643struct ring_buffer_event *
1644trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1645				  int type, unsigned long len,
1646				  unsigned long flags, int pc)
1647{
1648	*current_rb = global_trace.trace_buffer.buffer;
1649	return trace_buffer_lock_reserve(*current_rb,
1650					 type, len, flags, pc);
1651}
1652EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1653
1654void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1655					struct ring_buffer_event *event,
1656					unsigned long flags, int pc)
1657{
1658	__trace_buffer_unlock_commit(buffer, event, flags, pc);
1659}
1660EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1661
1662void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1663				     struct ring_buffer_event *event,
1664				     unsigned long flags, int pc,
1665				     struct pt_regs *regs)
1666{
1667	__buffer_unlock_commit(buffer, event);
1668
1669	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1670	ftrace_trace_userstack(buffer, flags, pc);
1671}
1672EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1673
1674void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1675					 struct ring_buffer_event *event)
1676{
1677	ring_buffer_discard_commit(buffer, event);
1678}
1679EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1680
1681void
1682trace_function(struct trace_array *tr,
1683	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
1684	       int pc)
1685{
1686	struct ftrace_event_call *call = &event_function;
1687	struct ring_buffer *buffer = tr->trace_buffer.buffer;
1688	struct ring_buffer_event *event;
1689	struct ftrace_entry *entry;
1690
1691	/* If we are reading the ring buffer, don't trace */
1692	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1693		return;
1694
1695	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1696					  flags, pc);
1697	if (!event)
1698		return;
1699	entry	= ring_buffer_event_data(event);
1700	entry->ip			= ip;
1701	entry->parent_ip		= parent_ip;
1702
1703	if (!call_filter_check_discard(call, entry, buffer, event))
1704		__buffer_unlock_commit(buffer, event);
1705}
1706
1707#ifdef CONFIG_STACKTRACE
1708
1709#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1710struct ftrace_stack {
1711	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
1712};
1713
1714static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1715static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1716
1717static void __ftrace_trace_stack(struct ring_buffer *buffer,
1718				 unsigned long flags,
1719				 int skip, int pc, struct pt_regs *regs)
1720{
1721	struct ftrace_event_call *call = &event_kernel_stack;
1722	struct ring_buffer_event *event;
1723	struct stack_entry *entry;
1724	struct stack_trace trace;
1725	int use_stack;
1726	int size = FTRACE_STACK_ENTRIES;
1727
1728	trace.nr_entries	= 0;
1729	trace.skip		= skip;
1730
1731	/*
1732	 * Since events can happen in NMIs there's no safe way to
1733	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1734	 * or NMI comes in, it will just have to use the default
1735	 * FTRACE_STACK_SIZE.
1736	 */
1737	preempt_disable_notrace();
1738
1739	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1740	/*
1741	 * We don't need any atomic variables, just a barrier.
1742	 * If an interrupt comes in, we don't care, because it would
1743	 * have exited and put the counter back to what we want.
1744	 * We just need a barrier to keep gcc from moving things
1745	 * around.
1746	 */
1747	barrier();
1748	if (use_stack == 1) {
1749		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
1750		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;
1751
1752		if (regs)
1753			save_stack_trace_regs(regs, &trace);
1754		else
1755			save_stack_trace(&trace);
1756
1757		if (trace.nr_entries > size)
1758			size = trace.nr_entries;
1759	} else
1760		/* From now on, use_stack is a boolean */
1761		use_stack = 0;
1762
1763	size *= sizeof(unsigned long);
1764
1765	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1766					  sizeof(*entry) + size, flags, pc);
1767	if (!event)
1768		goto out;
1769	entry = ring_buffer_event_data(event);
1770
1771	memset(&entry->caller, 0, size);
1772
1773	if (use_stack)
1774		memcpy(&entry->caller, trace.entries,
1775		       trace.nr_entries * sizeof(unsigned long));
1776	else {
1777		trace.max_entries	= FTRACE_STACK_ENTRIES;
1778		trace.entries		= entry->caller;
1779		if (regs)
1780			save_stack_trace_regs(regs, &trace);
1781		else
1782			save_stack_trace(&trace);
1783	}
1784
1785	entry->size = trace.nr_entries;
1786
1787	if (!call_filter_check_discard(call, entry, buffer, event))
1788		__buffer_unlock_commit(buffer, event);
1789
1790 out:
1791	/* Again, don't let gcc optimize things here */
1792	barrier();
1793	__this_cpu_dec(ftrace_stack_reserve);
1794	preempt_enable_notrace();
1795
1796}
1797
1798void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1799			     int skip, int pc, struct pt_regs *regs)
1800{
1801	if (!(trace_flags & TRACE_ITER_STACKTRACE))
1802		return;
1803
1804	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
1805}
1806
1807void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1808			int skip, int pc)
1809{
1810	if (!(trace_flags & TRACE_ITER_STACKTRACE))
1811		return;
1812
1813	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1814}
1815
1816void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1817		   int pc)
1818{
1819	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1820}
1821
1822/**
1823 * trace_dump_stack - record a stack back trace in the trace buffer
1824 * @skip: Number of functions to skip (helper handlers)
1825 */
1826void trace_dump_stack(int skip)
1827{
1828	unsigned long flags;
1829
1830	if (tracing_disabled || tracing_selftest_running)
1831		return;
1832
1833	local_save_flags(flags);
1834
1835	/*
1836	 * Skip 3 more; that seems to get us to the caller of
1837	 * this function.
1838	 */
1839	skip += 3;
1840	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
1841			     flags, skip, preempt_count(), NULL);
1842}
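/*
 * Editor's sketch (not part of this file): trace_dump_stack() is declared in
 * include/linux/kernel.h, so any kernel code can drop a backtrace into the
 * trace buffer instead of the console, keeping it ordered with the other
 * trace events.  example_took_slow_path() is a made-up condition.
 */
#if 0	/* illustrative only, not compiled */
static void example_slow_path(void)
{
	if (unlikely(example_took_slow_path()))
		trace_dump_stack(0);	/* 0: do not skip any extra frames */
}
#endif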
1843
1844static DEFINE_PER_CPU(int, user_stack_count);
1845
1846void
1847ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1848{
1849	struct ftrace_event_call *call = &event_user_stack;
1850	struct ring_buffer_event *event;
1851	struct userstack_entry *entry;
1852	struct stack_trace trace;
1853
1854	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1855		return;
1856
1857	/*
1858	 * NMIs can not handle page faults, even with fixups.
1859	 * Saving the user stack can (and often does) fault.
1860	 */
1861	if (unlikely(in_nmi()))
1862		return;
1863
1864	/*
1865	 * prevent recursion, since the user stack tracing may
1866	 * trigger other kernel events.
1867	 */
1868	preempt_disable();
1869	if (__this_cpu_read(user_stack_count))
1870		goto out;
1871
1872	__this_cpu_inc(user_stack_count);
1873
1874	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1875					  sizeof(*entry), flags, pc);
1876	if (!event)
1877		goto out_drop_count;
1878	entry	= ring_buffer_event_data(event);
1879
1880	entry->tgid		= current->tgid;
1881	memset(&entry->caller, 0, sizeof(entry->caller));
1882
1883	trace.nr_entries	= 0;
1884	trace.max_entries	= FTRACE_STACK_ENTRIES;
1885	trace.skip		= 0;
1886	trace.entries		= entry->caller;
1887
1888	save_stack_trace_user(&trace);
1889	if (!call_filter_check_discard(call, entry, buffer, event))
1890		__buffer_unlock_commit(buffer, event);
1891
1892 out_drop_count:
1893	__this_cpu_dec(user_stack_count);
1894 out:
1895	preempt_enable();
1896}
1897
1898#ifdef UNUSED
1899static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1900{
1901	ftrace_trace_userstack(tr, flags, preempt_count());
1902}
1903#endif /* UNUSED */
1904
1905#endif /* CONFIG_STACKTRACE */
1906
1907/* created for use with alloc_percpu */
1908struct trace_buffer_struct {
1909	char buffer[TRACE_BUF_SIZE];
1910};
1911
1912static struct trace_buffer_struct *trace_percpu_buffer;
1913static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1914static struct trace_buffer_struct *trace_percpu_irq_buffer;
1915static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1916
1917/*
1918 * The buffer used is dependent on the context. There is a per cpu
1919 * buffer for normal context, softirq context, hard irq context and
1920 * for NMI context. This allows for lockless recording.
1921 *
1922 * Note, if the buffers failed to be allocated, then this returns NULL
1923 */
1924static char *get_trace_buf(void)
1925{
1926	struct trace_buffer_struct *percpu_buffer;
1927
1928	/*
1929	 * If we have allocated per cpu buffers, then we do not
1930	 * need to do any locking.
1931	 */
1932	if (in_nmi())
1933		percpu_buffer = trace_percpu_nmi_buffer;
1934	else if (in_irq())
1935		percpu_buffer = trace_percpu_irq_buffer;
1936	else if (in_softirq())
1937		percpu_buffer = trace_percpu_sirq_buffer;
1938	else
1939		percpu_buffer = trace_percpu_buffer;
1940
1941	if (!percpu_buffer)
1942		return NULL;
1943
1944	return this_cpu_ptr(&percpu_buffer->buffer[0]);
1945}
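
/*
 * Illustrative scenario for the per-context split above: if a
 * trace_printk() running in process context is interrupted and the
 * interrupt handler issues its own trace_printk(), the two callers
 * format into trace_percpu_buffer and trace_percpu_irq_buffer
 * respectively, so neither needs a lock and neither scribbles over the
 * other's scratch space.
 */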
1946
1947static int alloc_percpu_trace_buffer(void)
1948{
1949	struct trace_buffer_struct *buffers;
1950	struct trace_buffer_struct *sirq_buffers;
1951	struct trace_buffer_struct *irq_buffers;
1952	struct trace_buffer_struct *nmi_buffers;
1953
1954	buffers = alloc_percpu(struct trace_buffer_struct);
1955	if (!buffers)
1956		goto err_warn;
1957
1958	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1959	if (!sirq_buffers)
1960		goto err_sirq;
1961
1962	irq_buffers = alloc_percpu(struct trace_buffer_struct);
1963	if (!irq_buffers)
1964		goto err_irq;
1965
1966	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1967	if (!nmi_buffers)
1968		goto err_nmi;
1969
1970	trace_percpu_buffer = buffers;
1971	trace_percpu_sirq_buffer = sirq_buffers;
1972	trace_percpu_irq_buffer = irq_buffers;
1973	trace_percpu_nmi_buffer = nmi_buffers;
1974
1975	return 0;
1976
1977 err_nmi:
1978	free_percpu(irq_buffers);
1979 err_irq:
1980	free_percpu(sirq_buffers);
1981 err_sirq:
1982	free_percpu(buffers);
1983 err_warn:
1984	WARN(1, "Could not allocate percpu trace_printk buffer");
1985	return -ENOMEM;
1986}
1987
1988static int buffers_allocated;
1989
1990void trace_printk_init_buffers(void)
1991{
1992	if (buffers_allocated)
1993		return;
1994
1995	if (alloc_percpu_trace_buffer())
1996		return;
1997
1998	pr_info("ftrace: Allocated trace_printk buffers\n");
1999
2000	/* Expand the buffers to set size */
2001	tracing_update_buffers();
2002
2003	buffers_allocated = 1;
2004
2005	/*
2006	 * trace_printk_init_buffers() can be called by modules.
2007	 * If that happens, then we need to start cmdline recording
2008	 * directly here. If the global_trace.trace_buffer.buffer is already
2009	 * allocated here, then this was called by module code.
2010	 */
2011	if (global_trace.trace_buffer.buffer)
2012		tracing_start_cmdline_record();
2013}
2014
2015void trace_printk_start_comm(void)
2016{
2017	/* Start tracing comms if trace printk is set */
2018	if (!buffers_allocated)
2019		return;
2020	tracing_start_cmdline_record();
2021}
2022
2023static void trace_printk_start_stop_comm(int enabled)
2024{
2025	if (!buffers_allocated)
2026		return;
2027
2028	if (enabled)
2029		tracing_start_cmdline_record();
2030	else
2031		tracing_stop_cmdline_record();
2032}
2033
2034/**
2035 * trace_vbprintk - write binary msg to tracing buffer
2036 *
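 * @ip:   address of the call site, recorded in the event
 * @fmt:  printf-style format string; only its pointer is stored
 * @args: arguments for @fmt, encoded in binary form by vbin_printf()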
2037 */
2038int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2039{
2040	struct ftrace_event_call *call = &event_bprint;
2041	struct ring_buffer_event *event;
2042	struct ring_buffer *buffer;
2043	struct trace_array *tr = &global_trace;
2044	struct bprint_entry *entry;
2045	unsigned long flags;
2046	char *tbuffer;
2047	int len = 0, size, pc;
2048
2049	if (unlikely(tracing_selftest_running || tracing_disabled))
2050		return 0;
2051
2052	/* Don't pollute graph traces with trace_vprintk internals */
2053	pause_graph_tracing();
2054
2055	pc = preempt_count();
2056	preempt_disable_notrace();
2057
2058	tbuffer = get_trace_buf();
2059	if (!tbuffer) {
2060		len = 0;
2061		goto out;
2062	}
2063
2064	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2065
2066	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2067		goto out;
2068
2069	local_save_flags(flags);
2070	size = sizeof(*entry) + sizeof(u32) * len;
2071	buffer = tr->trace_buffer.buffer;
2072	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2073					  flags, pc);
2074	if (!event)
2075		goto out;
2076	entry = ring_buffer_event_data(event);
2077	entry->ip			= ip;
2078	entry->fmt			= fmt;
2079
2080	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2081	if (!call_filter_check_discard(call, entry, buffer, event)) {
2082		__buffer_unlock_commit(buffer, event);
2083		ftrace_trace_stack(buffer, flags, 6, pc);
2084	}
2085
2086out:
2087	preempt_enable_notrace();
2088	unpause_graph_tracing();
2089
2090	return len;
2091}
2092EXPORT_SYMBOL_GPL(trace_vbprintk);
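
/*
 * Note on the binary path above (descriptive, added for clarity):
 * vbin_printf() copies only the raw argument words into the per-cpu
 * scratch buffer and the event stores a pointer to the format string;
 * the text is rendered only when the trace is read (see the bprint
 * handling in print_trace_line()), which keeps the write-side cost low.
 */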
2093
2094static int
2095__trace_array_vprintk(struct ring_buffer *buffer,
2096		      unsigned long ip, const char *fmt, va_list args)
2097{
2098	struct ftrace_event_call *call = &event_print;
2099	struct ring_buffer_event *event;
2100	int len = 0, size, pc;
2101	struct print_entry *entry;
2102	unsigned long flags;
2103	char *tbuffer;
2104
2105	if (tracing_disabled || tracing_selftest_running)
2106		return 0;
2107
2108	/* Don't pollute graph traces with trace_vprintk internals */
2109	pause_graph_tracing();
2110
2111	pc = preempt_count();
2112	preempt_disable_notrace();
2113
2114
2115	tbuffer = get_trace_buf();
2116	if (!tbuffer) {
2117		len = 0;
2118		goto out;
2119	}
2120
2121	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2122	if (len > TRACE_BUF_SIZE)
2123		goto out;
2124
2125	local_save_flags(flags);
2126	size = sizeof(*entry) + len + 1;
2127	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2128					  flags, pc);
2129	if (!event)
2130		goto out;
2131	entry = ring_buffer_event_data(event);
2132	entry->ip = ip;
2133
2134	memcpy(&entry->buf, tbuffer, len);
2135	entry->buf[len] = '\0';
2136	if (!call_filter_check_discard(call, entry, buffer, event)) {
2137		__buffer_unlock_commit(buffer, event);
2138		ftrace_trace_stack(buffer, flags, 6, pc);
2139	}
2140 out:
2141	preempt_enable_notrace();
2142	unpause_graph_tracing();
2143
2144	return len;
2145}
2146
2147int trace_array_vprintk(struct trace_array *tr,
2148			unsigned long ip, const char *fmt, va_list args)
2149{
2150	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2151}
2152
2153int trace_array_printk(struct trace_array *tr,
2154		       unsigned long ip, const char *fmt, ...)
2155{
2156	int ret;
2157	va_list ap;
2158
2159	if (!(trace_flags & TRACE_ITER_PRINTK))
2160		return 0;
2161
2162	va_start(ap, fmt);
2163	ret = trace_array_vprintk(tr, ip, fmt, ap);
2164	va_end(ap);
2165	return ret;
2166}
2167
2168int trace_array_printk_buf(struct ring_buffer *buffer,
2169			   unsigned long ip, const char *fmt, ...)
2170{
2171	int ret;
2172	va_list ap;
2173
2174	if (!(trace_flags & TRACE_ITER_PRINTK))
2175		return 0;
2176
2177	va_start(ap, fmt);
2178	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2179	va_end(ap);
2180	return ret;
2181}
2182
2183int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2184{
2185	return trace_array_vprintk(&global_trace, ip, fmt, args);
2186}
2187EXPORT_SYMBOL_GPL(trace_vprintk);
2188
2189static void trace_iterator_increment(struct trace_iterator *iter)
2190{
2191	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2192
2193	iter->idx++;
2194	if (buf_iter)
2195		ring_buffer_read(buf_iter, NULL);
2196}
2197
2198static struct trace_entry *
2199peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2200		unsigned long *lost_events)
2201{
2202	struct ring_buffer_event *event;
2203	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2204
2205	if (buf_iter)
2206		event = ring_buffer_iter_peek(buf_iter, ts);
2207	else
2208		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2209					 lost_events);
2210
2211	if (event) {
2212		iter->ent_size = ring_buffer_event_length(event);
2213		return ring_buffer_event_data(event);
2214	}
2215	iter->ent_size = 0;
2216	return NULL;
2217}
2218
2219static struct trace_entry *
2220__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2221		  unsigned long *missing_events, u64 *ent_ts)
2222{
2223	struct ring_buffer *buffer = iter->trace_buffer->buffer;
2224	struct trace_entry *ent, *next = NULL;
2225	unsigned long lost_events = 0, next_lost = 0;
2226	int cpu_file = iter->cpu_file;
2227	u64 next_ts = 0, ts;
2228	int next_cpu = -1;
2229	int next_size = 0;
2230	int cpu;
2231
2232	/*
2233	 * If we are in a per_cpu trace file, don't bother iterating over
2234	 * all CPUs; peek at that one CPU's buffer directly.
2235	 */
2236	if (cpu_file > RING_BUFFER_ALL_CPUS) {
2237		if (ring_buffer_empty_cpu(buffer, cpu_file))
2238			return NULL;
2239		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2240		if (ent_cpu)
2241			*ent_cpu = cpu_file;
2242
2243		return ent;
2244	}
2245
2246	for_each_tracing_cpu(cpu) {
2247
2248		if (ring_buffer_empty_cpu(buffer, cpu))
2249			continue;
2250
2251		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2252
2253		/*
2254		 * Pick the entry with the smallest timestamp:
2255		 */
2256		if (ent && (!next || ts < next_ts)) {
2257			next = ent;
2258			next_cpu = cpu;
2259			next_ts = ts;
2260			next_lost = lost_events;
2261			next_size = iter->ent_size;
2262		}
2263	}
2264
2265	iter->ent_size = next_size;
2266
2267	if (ent_cpu)
2268		*ent_cpu = next_cpu;
2269
2270	if (ent_ts)
2271		*ent_ts = next_ts;
2272
2273	if (missing_events)
2274		*missing_events = next_lost;
2275
2276	return next;
2277}
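
/*
 * Illustrative example of the selection above: with CPU0 holding events
 * at timestamps {100, 300} and CPU1 holding {200}, successive calls
 * return them in the order 100 (cpu 0), 200 (cpu 1), 300 (cpu 0) --
 * effectively a k-way merge by timestamp across the per-cpu buffers.
 */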
2278
2279/* Find the next real entry, without updating the iterator itself */
2280struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2281					  int *ent_cpu, u64 *ent_ts)
2282{
2283	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2284}
2285
2286/* Find the next real entry, and increment the iterator to the next entry */
2287void *trace_find_next_entry_inc(struct trace_iterator *iter)
2288{
2289	iter->ent = __find_next_entry(iter, &iter->cpu,
2290				      &iter->lost_events, &iter->ts);
2291
2292	if (iter->ent)
2293		trace_iterator_increment(iter);
2294
2295	return iter->ent ? iter : NULL;
2296}
2297
2298static void trace_consume(struct trace_iterator *iter)
2299{
2300	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2301			    &iter->lost_events);
2302}
2303
2304static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2305{
2306	struct trace_iterator *iter = m->private;
2307	int i = (int)*pos;
2308	void *ent;
2309
2310	WARN_ON_ONCE(iter->leftover);
2311
2312	(*pos)++;
2313
2314	/* can't go backwards */
2315	if (iter->idx > i)
2316		return NULL;
2317
2318	if (iter->idx < 0)
2319		ent = trace_find_next_entry_inc(iter);
2320	else
2321		ent = iter;
2322
2323	while (ent && iter->idx < i)
2324		ent = trace_find_next_entry_inc(iter);
2325
2326	iter->pos = *pos;
2327
2328	return ent;
2329}
2330
2331void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2332{
2333	struct ring_buffer_event *event;
2334	struct ring_buffer_iter *buf_iter;
2335	unsigned long entries = 0;
2336	u64 ts;
2337
2338	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2339
2340	buf_iter = trace_buffer_iter(iter, cpu);
2341	if (!buf_iter)
2342		return;
2343
2344	ring_buffer_iter_reset(buf_iter);
2345
2346	/*
2347	 * We could have the case with the max latency tracers
2348	 * that a reset never took place on a cpu. This is evident
2349	 * by the timestamp being before the start of the buffer.
2350	 */
2351	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2352		if (ts >= iter->trace_buffer->time_start)
2353			break;
2354		entries++;
2355		ring_buffer_read(buf_iter, NULL);
2356	}
2357
2358	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2359}
2360
2361/*
2362 * The current tracer is copied to avoid taking a global lock
2363 * all around.
2364 */
2365static void *s_start(struct seq_file *m, loff_t *pos)
2366{
2367	struct trace_iterator *iter = m->private;
2368	struct trace_array *tr = iter->tr;
2369	int cpu_file = iter->cpu_file;
2370	void *p = NULL;
2371	loff_t l = 0;
2372	int cpu;
2373
2374	/*
2375	 * copy the tracer to avoid using a global lock all around.
2376	 * iter->trace is a copy of current_trace, the pointer to the
2377	 * name may be used instead of a strcmp(), as iter->trace->name
2378	 * will point to the same string as current_trace->name.
2379	 */
2380	mutex_lock(&trace_types_lock);
2381	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2382		*iter->trace = *tr->current_trace;
2383	mutex_unlock(&trace_types_lock);
2384
2385#ifdef CONFIG_TRACER_MAX_TRACE
2386	if (iter->snapshot && iter->trace->use_max_tr)
2387		return ERR_PTR(-EBUSY);
2388#endif
2389
2390	if (!iter->snapshot)
2391		atomic_inc(&trace_record_cmdline_disabled);
2392
2393	if (*pos != iter->pos) {
2394		iter->ent = NULL;
2395		iter->cpu = 0;
2396		iter->idx = -1;
2397
2398		if (cpu_file == RING_BUFFER_ALL_CPUS) {
2399			for_each_tracing_cpu(cpu)
2400				tracing_iter_reset(iter, cpu);
2401		} else
2402			tracing_iter_reset(iter, cpu_file);
2403
2404		iter->leftover = 0;
2405		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2406			;
2407
2408	} else {
2409		/*
2410		 * If we overflowed the seq_file before, then we want
2411		 * to just reuse the trace_seq buffer again.
2412		 */
2413		if (iter->leftover)
2414			p = iter;
2415		else {
2416			l = *pos - 1;
2417			p = s_next(m, p, &l);
2418		}
2419	}
2420
2421	trace_event_read_lock();
2422	trace_access_lock(cpu_file);
2423	return p;
2424}
2425
2426static void s_stop(struct seq_file *m, void *p)
2427{
2428	struct trace_iterator *iter = m->private;
2429
2430#ifdef CONFIG_TRACER_MAX_TRACE
2431	if (iter->snapshot && iter->trace->use_max_tr)
2432		return;
2433#endif
2434
2435	if (!iter->snapshot)
2436		atomic_dec(&trace_record_cmdline_disabled);
2437
2438	trace_access_unlock(iter->cpu_file);
2439	trace_event_read_unlock();
2440}
2441
2442static void
2443get_total_entries(struct trace_buffer *buf,
2444		  unsigned long *total, unsigned long *entries)
2445{
2446	unsigned long count;
2447	int cpu;
2448
2449	*total = 0;
2450	*entries = 0;
2451
2452	for_each_tracing_cpu(cpu) {
2453		count = ring_buffer_entries_cpu(buf->buffer, cpu);
2454		/*
2455		 * If this buffer has skipped entries, then we hold all
2456		 * entries for the trace and we need to ignore the
2457		 * ones before the time stamp.
2458		 */
2459		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2460			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2461			/* total is the same as the entries */
2462			*total += count;
2463		} else
2464			*total += count +
2465				ring_buffer_overrun_cpu(buf->buffer, cpu);
2466		*entries += count;
2467	}
2468}
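
/*
 * Worked example (illustrative): if a CPU's buffer currently holds 800
 * events and 200 older events were overwritten, that CPU contributes
 * 800 to *entries and 800 + 200 = 1000 to *total, which is what the
 * "entries-in-buffer/entries-written" header line reports.
 */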
2469
2470static void print_lat_help_header(struct seq_file *m)
2471{
2472	seq_puts(m, "#                  _------=> CPU#            \n");
2473	seq_puts(m, "#                 / _-----=> irqs-off        \n");
2474	seq_puts(m, "#                | / _----=> need-resched    \n");
2475	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2476	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
2477	seq_puts(m, "#                |||| /     delay             \n");
2478	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2479	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
2480}
2481
2482static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2483{
2484	unsigned long total;
2485	unsigned long entries;
2486
2487	get_total_entries(buf, &total, &entries);
2488	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2489		   entries, total, num_online_cpus());
2490	seq_puts(m, "#\n");
2491}
2492
2493static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2494{
2495	print_event_info(buf, m);
2496	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
2497	seq_puts(m, "#              | |       |          |         |\n");
2498}
2499
2500static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2501{
2502	print_event_info(buf, m);
2503	seq_puts(m, "#                              _-----=> irqs-off\n");
2504	seq_puts(m, "#                             / _----=> need-resched\n");
2505	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2506	seq_puts(m, "#                            || / _--=> preempt-depth\n");
2507	seq_puts(m, "#                            ||| /     delay\n");
2508	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2509	seq_puts(m, "#              | |       |   ||||       |         |\n");
2510}
2511
2512void
2513print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2514{
2515	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2516	struct trace_buffer *buf = iter->trace_buffer;
2517	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2518	struct tracer *type = iter->trace;
2519	unsigned long entries;
2520	unsigned long total;
2521	const char *name = "preemption";
2522
2523	name = type->name;
2524
2525	get_total_entries(buf, &total, &entries);
2526
2527	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2528		   name, UTS_RELEASE);
2529	seq_puts(m, "# -----------------------------------"
2530		 "---------------------------------\n");
2531	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2532		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2533		   nsecs_to_usecs(data->saved_latency),
2534		   entries,
2535		   total,
2536		   buf->cpu,
2537#if defined(CONFIG_PREEMPT_NONE)
2538		   "server",
2539#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2540		   "desktop",
2541#elif defined(CONFIG_PREEMPT)
2542		   "preempt",
2543#else
2544		   "unknown",
2545#endif
2546		   /* These are reserved for later use */
2547		   0, 0, 0, 0);
2548#ifdef CONFIG_SMP
2549	seq_printf(m, " #P:%d)\n", num_online_cpus());
2550#else
2551	seq_puts(m, ")\n");
2552#endif
2553	seq_puts(m, "#    -----------------\n");
2554	seq_printf(m, "#    | task: %.16s-%d "
2555		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2556		   data->comm, data->pid,
2557		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2558		   data->policy, data->rt_priority);
2559	seq_puts(m, "#    -----------------\n");
2560
2561	if (data->critical_start) {
2562		seq_puts(m, "#  => started at: ");
2563		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2564		trace_print_seq(m, &iter->seq);
2565		seq_puts(m, "\n#  => ended at:   ");
2566		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2567		trace_print_seq(m, &iter->seq);
2568		seq_puts(m, "\n#\n");
2569	}
2570
2571	seq_puts(m, "#\n");
2572}
2573
2574static void test_cpu_buff_start(struct trace_iterator *iter)
2575{
2576	struct trace_seq *s = &iter->seq;
2577
2578	if (!(trace_flags & TRACE_ITER_ANNOTATE))
2579		return;
2580
2581	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2582		return;
2583
2584	if (cpumask_test_cpu(iter->cpu, iter->started))
2585		return;
2586
2587	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2588		return;
2589
2590	cpumask_set_cpu(iter->cpu, iter->started);
2591
2592	/* Don't print started cpu buffer for the first entry of the trace */
2593	if (iter->idx > 1)
2594		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2595				iter->cpu);
2596}
2597
2598static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2599{
2600	struct trace_seq *s = &iter->seq;
2601	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2602	struct trace_entry *entry;
2603	struct trace_event *event;
2604
2605	entry = iter->ent;
2606
2607	test_cpu_buff_start(iter);
2608
2609	event = ftrace_find_event(entry->type);
2610
2611	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2612		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2613			if (!trace_print_lat_context(iter))
2614				goto partial;
2615		} else {
2616			if (!trace_print_context(iter))
2617				goto partial;
2618		}
2619	}
2620
2621	if (event)
2622		return event->funcs->trace(iter, sym_flags, event);
2623
2624	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2625		goto partial;
2626
2627	return TRACE_TYPE_HANDLED;
2628partial:
2629	return TRACE_TYPE_PARTIAL_LINE;
2630}
2631
2632static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2633{
2634	struct trace_seq *s = &iter->seq;
2635	struct trace_entry *entry;
2636	struct trace_event *event;
2637
2638	entry = iter->ent;
2639
2640	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2641		if (!trace_seq_printf(s, "%d %d %llu ",
2642				      entry->pid, iter->cpu, iter->ts))
2643			goto partial;
2644	}
2645
2646	event = ftrace_find_event(entry->type);
2647	if (event)
2648		return event->funcs->raw(iter, 0, event);
2649
2650	if (!trace_seq_printf(s, "%d ?\n", entry->type))
2651		goto partial;
2652
2653	return TRACE_TYPE_HANDLED;
2654partial:
2655	return TRACE_TYPE_PARTIAL_LINE;
2656}
2657
2658static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2659{
2660	struct trace_seq *s = &iter->seq;
2661	unsigned char newline = '\n';
2662	struct trace_entry *entry;
2663	struct trace_event *event;
2664
2665	entry = iter->ent;
2666
2667	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2668		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2669		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2670		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2671	}
2672
2673	event = ftrace_find_event(entry->type);
2674	if (event) {
2675		enum print_line_t ret = event->funcs->hex(iter, 0, event);
2676		if (ret != TRACE_TYPE_HANDLED)
2677			return ret;
2678	}
2679
2680	SEQ_PUT_FIELD_RET(s, newline);
2681
2682	return TRACE_TYPE_HANDLED;
2683}
2684
2685static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2686{
2687	struct trace_seq *s = &iter->seq;
2688	struct trace_entry *entry;
2689	struct trace_event *event;
2690
2691	entry = iter->ent;
2692
2693	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2694		SEQ_PUT_FIELD_RET(s, entry->pid);
2695		SEQ_PUT_FIELD_RET(s, iter->cpu);
2696		SEQ_PUT_FIELD_RET(s, iter->ts);
2697	}
2698
2699	event = ftrace_find_event(entry->type);
2700	return event ? event->funcs->binary(iter, 0, event) :
2701		TRACE_TYPE_HANDLED;
2702}
2703
2704int trace_empty(struct trace_iterator *iter)
2705{
2706	struct ring_buffer_iter *buf_iter;
2707	int cpu;
2708
2709	/* If we are looking at one CPU buffer, only check that one */
2710	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2711		cpu = iter->cpu_file;
2712		buf_iter = trace_buffer_iter(iter, cpu);
2713		if (buf_iter) {
2714			if (!ring_buffer_iter_empty(buf_iter))
2715				return 0;
2716		} else {
2717			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2718				return 0;
2719		}
2720		return 1;
2721	}
2722
2723	for_each_tracing_cpu(cpu) {
2724		buf_iter = trace_buffer_iter(iter, cpu);
2725		if (buf_iter) {
2726			if (!ring_buffer_iter_empty(buf_iter))
2727				return 0;
2728		} else {
2729			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2730				return 0;
2731		}
2732	}
2733
2734	return 1;
2735}
2736
2737/*  Called with trace_event_read_lock() held. */
2738enum print_line_t print_trace_line(struct trace_iterator *iter)
2739{
2740	enum print_line_t ret;
2741
2742	if (iter->lost_events &&
2743	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2744				 iter->cpu, iter->lost_events))
2745		return TRACE_TYPE_PARTIAL_LINE;
2746
2747	if (iter->trace && iter->trace->print_line) {
2748		ret = iter->trace->print_line(iter);
2749		if (ret != TRACE_TYPE_UNHANDLED)
2750			return ret;
2751	}
2752
2753	if (iter->ent->type == TRACE_BPUTS &&
2754			trace_flags & TRACE_ITER_PRINTK &&
2755			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2756		return trace_print_bputs_msg_only(iter);
2757
2758	if (iter->ent->type == TRACE_BPRINT &&
2759			trace_flags & TRACE_ITER_PRINTK &&
2760			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2761		return trace_print_bprintk_msg_only(iter);
2762
2763	if (iter->ent->type == TRACE_PRINT &&
2764			trace_flags & TRACE_ITER_PRINTK &&
2765			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2766		return trace_print_printk_msg_only(iter);
2767
2768	if (trace_flags & TRACE_ITER_BIN)
2769		return print_bin_fmt(iter);
2770
2771	if (trace_flags & TRACE_ITER_HEX)
2772		return print_hex_fmt(iter);
2773
2774	if (trace_flags & TRACE_ITER_RAW)
2775		return print_raw_fmt(iter);
2776
2777	return print_trace_fmt(iter);
2778}
2779
2780void trace_latency_header(struct seq_file *m)
2781{
2782	struct trace_iterator *iter = m->private;
2783
2784	/* print nothing if the buffers are empty */
2785	if (trace_empty(iter))
2786		return;
2787
2788	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2789		print_trace_header(m, iter);
2790
2791	if (!(trace_flags & TRACE_ITER_VERBOSE))
2792		print_lat_help_header(m);
2793}
2794
2795void trace_default_header(struct seq_file *m)
2796{
2797	struct trace_iterator *iter = m->private;
2798
2799	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2800		return;
2801
2802	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2803		/* print nothing if the buffers are empty */
2804		if (trace_empty(iter))
2805			return;
2806		print_trace_header(m, iter);
2807		if (!(trace_flags & TRACE_ITER_VERBOSE))
2808			print_lat_help_header(m);
2809	} else {
2810		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2811			if (trace_flags & TRACE_ITER_IRQ_INFO)
2812				print_func_help_header_irq(iter->trace_buffer, m);
2813			else
2814				print_func_help_header(iter->trace_buffer, m);
2815		}
2816	}
2817}
2818
2819static void test_ftrace_alive(struct seq_file *m)
2820{
2821	if (!ftrace_is_dead())
2822		return;
2823	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2824	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2825}
2826
2827#ifdef CONFIG_TRACER_MAX_TRACE
2828static void show_snapshot_main_help(struct seq_file *m)
2829{
2830	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2831	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2832	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
2833	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
2834	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2835	seq_printf(m, "#                       is not a '0' or '1')\n");
2836}
2837
2838static void show_snapshot_percpu_help(struct seq_file *m)
2839{
2840	seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2841#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2842	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2843	seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
2844#else
2845	seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2846	seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
2847#endif
2848	seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2849	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2850	seq_printf(m, "#                       is not a '0' or '1')\n");
2851}
2852
2853static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2854{
2855	if (iter->tr->allocated_snapshot)
2856		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2857	else
2858		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2859
2860	seq_printf(m, "# Snapshot commands:\n");
2861	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2862		show_snapshot_main_help(m);
2863	else
2864		show_snapshot_percpu_help(m);
2865}
2866#else
2867/* Should never be called */
2868static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2869#endif
2870
2871static int s_show(struct seq_file *m, void *v)
2872{
2873	struct trace_iterator *iter = v;
2874	int ret;
2875
2876	if (iter->ent == NULL) {
2877		if (iter->tr) {
2878			seq_printf(m, "# tracer: %s\n", iter->trace->name);
2879			seq_puts(m, "#\n");
2880			test_ftrace_alive(m);
2881		}
2882		if (iter->snapshot && trace_empty(iter))
2883			print_snapshot_help(m, iter);
2884		else if (iter->trace && iter->trace->print_header)
2885			iter->trace->print_header(m);
2886		else
2887			trace_default_header(m);
2888
2889	} else if (iter->leftover) {
2890		/*
2891		 * If we filled the seq_file buffer earlier, we
2892		 * want to just show it now.
2893		 */
2894		ret = trace_print_seq(m, &iter->seq);
2895
2896		/* ret should this time be zero, but you never know */
2897		iter->leftover = ret;
2898
2899	} else {
2900		print_trace_line(iter);
2901		ret = trace_print_seq(m, &iter->seq);
2902		/*
2903		 * If we overflow the seq_file buffer, then it will
2904		 * ask us for this data again at start up.
2905		 * Use that instead.
2906		 *  ret is 0 if seq_file write succeeded.
2907		 *        -1 otherwise.
2908		 */
2909		iter->leftover = ret;
2910	}
2911
2912	return 0;
2913}
2914
2915/*
2916 * Should be used after trace_array_get(), trace_types_lock
2917 * ensures that i_cdev was already initialized.
2918 */
2919static inline int tracing_get_cpu(struct inode *inode)
2920{
2921	if (inode->i_cdev) /* See trace_create_cpu_file() */
2922		return (long)inode->i_cdev - 1;
2923	return RING_BUFFER_ALL_CPUS;
2924}
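
/*
 * Illustrative: trace_create_cpu_file() stores "cpu + 1" in i_cdev, so
 * the per_cpu/cpu3/ files carry (void *)4 and this returns 3, while the
 * top-level files leave i_cdev NULL and map to RING_BUFFER_ALL_CPUS.
 */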
2925
2926static const struct seq_operations tracer_seq_ops = {
2927	.start		= s_start,
2928	.next		= s_next,
2929	.stop		= s_stop,
2930	.show		= s_show,
2931};
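
/*
 * The callbacks above implement the usual seq_file protocol for the
 * "trace" file.  Roughly (simplified; the real loop lives in seq_read()):
 *
 *	p = s_start(m, &pos);
 *	while (p && !IS_ERR(p)) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 *
 * s_start() takes the locks and positions the iterator, s_show() prints
 * one entry, s_next() advances, and s_stop() releases the locks.
 */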
2932
2933static struct trace_iterator *
2934__tracing_open(struct inode *inode, struct file *file, bool snapshot)
2935{
2936	struct trace_array *tr = inode->i_private;
2937	struct trace_iterator *iter;
2938	int cpu;
2939
2940	if (tracing_disabled)
2941		return ERR_PTR(-ENODEV);
2942
2943	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2944	if (!iter)
2945		return ERR_PTR(-ENOMEM);
2946
2947	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2948				    GFP_KERNEL);
2949	if (!iter->buffer_iter)
2950		goto release;
2951
2952	/*
2953	 * We make a copy of the current tracer to avoid concurrent
2954	 * changes on it while we are reading.
2955	 */
2956	mutex_lock(&trace_types_lock);
2957	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2958	if (!iter->trace)
2959		goto fail;
2960
2961	*iter->trace = *tr->current_trace;
2962
2963	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2964		goto fail;
2965
2966	iter->tr = tr;
2967
2968#ifdef CONFIG_TRACER_MAX_TRACE
2969	/* Currently only the top directory has a snapshot */
2970	if (tr->current_trace->print_max || snapshot)
2971		iter->trace_buffer = &tr->max_buffer;
2972	else
2973#endif
2974		iter->trace_buffer = &tr->trace_buffer;
2975	iter->snapshot = snapshot;
2976	iter->pos = -1;
2977	iter->cpu_file = tracing_get_cpu(inode);
2978	mutex_init(&iter->mutex);
2979
2980	/* Notify the tracer early; before we stop tracing. */
2981	if (iter->trace && iter->trace->open)
2982		iter->trace->open(iter);
2983
2984	/* Annotate start of buffers if we had overruns */
2985	if (ring_buffer_overruns(iter->trace_buffer->buffer))
2986		iter->iter_flags |= TRACE_FILE_ANNOTATE;
2987
2988	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
2989	if (trace_clocks[tr->clock_id].in_ns)
2990		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2991
2992	/* stop the trace while dumping if we are not opening "snapshot" */
2993	if (!iter->snapshot)
2994		tracing_stop_tr(tr);
2995
2996	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
2997		for_each_tracing_cpu(cpu) {
2998			iter->buffer_iter[cpu] =
2999				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3000		}
3001		ring_buffer_read_prepare_sync();
3002		for_each_tracing_cpu(cpu) {
3003			ring_buffer_read_start(iter->buffer_iter[cpu]);
3004			tracing_iter_reset(iter, cpu);
3005		}
3006	} else {
3007		cpu = iter->cpu_file;
3008		iter->buffer_iter[cpu] =
3009			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3010		ring_buffer_read_prepare_sync();
3011		ring_buffer_read_start(iter->buffer_iter[cpu]);
3012		tracing_iter_reset(iter, cpu);
3013	}
3014
3015	mutex_unlock(&trace_types_lock);
3016
3017	return iter;
3018
3019 fail:
3020	mutex_unlock(&trace_types_lock);
3021	kfree(iter->trace);
3022	kfree(iter->buffer_iter);
3023release:
3024	seq_release_private(inode, file);
3025	return ERR_PTR(-ENOMEM);
3026}
3027
3028int tracing_open_generic(struct inode *inode, struct file *filp)
3029{
3030	if (tracing_disabled)
3031		return -ENODEV;
3032
3033	filp->private_data = inode->i_private;
3034	return 0;
3035}
3036
3037bool tracing_is_disabled(void)
3038{
3039	return tracing_disabled ? true : false;
3040}
3041
3042/*
3043 * Open and update trace_array ref count.
3044 * Must have the current trace_array passed to it.
3045 */
3046static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3047{
3048	struct trace_array *tr = inode->i_private;
3049
3050	if (tracing_disabled)
3051		return -ENODEV;
3052
3053	if (trace_array_get(tr) < 0)
3054		return -ENODEV;
3055
3056	filp->private_data = inode->i_private;
3057
3058	return 0;
3059}
3060
3061static int tracing_release(struct inode *inode, struct file *file)
3062{
3063	struct trace_array *tr = inode->i_private;
3064	struct seq_file *m = file->private_data;
3065	struct trace_iterator *iter;
3066	int cpu;
3067
3068	if (!(file->f_mode & FMODE_READ)) {
3069		trace_array_put(tr);
3070		return 0;
3071	}
3072
3073	/* Writes do not use seq_file */
3074	iter = m->private;
3075	mutex_lock(&trace_types_lock);
3076
3077	for_each_tracing_cpu(cpu) {
3078		if (iter->buffer_iter[cpu])
3079			ring_buffer_read_finish(iter->buffer_iter[cpu]);
3080	}
3081
3082	if (iter->trace && iter->trace->close)
3083		iter->trace->close(iter);
3084
3085	if (!iter->snapshot)
3086		/* reenable tracing if it was previously enabled */
3087		tracing_start_tr(tr);
3088
3089	__trace_array_put(tr);
3090
3091	mutex_unlock(&trace_types_lock);
3092
3093	mutex_destroy(&iter->mutex);
3094	free_cpumask_var(iter->started);
3095	kfree(iter->trace);
3096	kfree(iter->buffer_iter);
3097	seq_release_private(inode, file);
3098
3099	return 0;
3100}
3101
3102static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3103{
3104	struct trace_array *tr = inode->i_private;
3105
3106	trace_array_put(tr);
3107	return 0;
3108}
3109
3110static int tracing_single_release_tr(struct inode *inode, struct file *file)
3111{
3112	struct trace_array *tr = inode->i_private;
3113
3114	trace_array_put(tr);
3115
3116	return single_release(inode, file);
3117}
3118
3119static int tracing_open(struct inode *inode, struct file *file)
3120{
3121	struct trace_array *tr = inode->i_private;
3122	struct trace_iterator *iter;
3123	int ret = 0;
3124
3125	if (trace_array_get(tr) < 0)
3126		return -ENODEV;
3127
3128	/* If this file was open for write, then erase contents */
3129	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3130		int cpu = tracing_get_cpu(inode);
3131
3132		if (cpu == RING_BUFFER_ALL_CPUS)
3133			tracing_reset_online_cpus(&tr->trace_buffer);
3134		else
3135			tracing_reset(&tr->trace_buffer, cpu);
3136	}
3137
3138	if (file->f_mode & FMODE_READ) {
3139		iter = __tracing_open(inode, file, false);
3140		if (IS_ERR(iter))
3141			ret = PTR_ERR(iter);
3142		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3143			iter->iter_flags |= TRACE_FILE_LAT_FMT;
3144	}
3145
3146	if (ret < 0)
3147		trace_array_put(tr);
3148
3149	return ret;
3150}
3151
3152/*
3153 * Some tracers are not suitable for instance buffers.
3154 * A tracer is always available for the global array (toplevel)
3155 * or if it explicitly states that it is.
3156 */
3157static bool
3158trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3159{
3160	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3161}
3162
3163/* Find the next tracer that this trace array may use */
3164static struct tracer *
3165get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3166{
3167	while (t && !trace_ok_for_array(t, tr))
3168		t = t->next;
3169
3170	return t;
3171}
3172
3173static void *
3174t_next(struct seq_file *m, void *v, loff_t *pos)
3175{
3176	struct trace_array *tr = m->private;
3177	struct tracer *t = v;
3178
3179	(*pos)++;
3180
3181	if (t)
3182		t = get_tracer_for_array(tr, t->next);
3183
3184	return t;
3185}
3186
3187static void *t_start(struct seq_file *m, loff_t *pos)
3188{
3189	struct trace_array *tr = m->private;
3190	struct tracer *t;
3191	loff_t l = 0;
3192
3193	mutex_lock(&trace_types_lock);
3194
3195	t = get_tracer_for_array(tr, trace_types);
3196	for (; t && l < *pos; t = t_next(m, t, &l))
3197			;
3198
3199	return t;
3200}
3201
3202static void t_stop(struct seq_file *m, void *p)
3203{
3204	mutex_unlock(&trace_types_lock);
3205}
3206
3207static int t_show(struct seq_file *m, void *v)
3208{
3209	struct tracer *t = v;
3210
3211	if (!t)
3212		return 0;
3213
3214	seq_printf(m, "%s", t->name);
3215	if (t->next)
3216		seq_putc(m, ' ');
3217	else
3218		seq_putc(m, '\n');
3219
3220	return 0;
3221}
3222
3223static const struct seq_operations show_traces_seq_ops = {
3224	.start		= t_start,
3225	.next		= t_next,
3226	.stop		= t_stop,
3227	.show		= t_show,
3228};
3229
3230static int show_traces_open(struct inode *inode, struct file *file)
3231{
3232	struct trace_array *tr = inode->i_private;
3233	struct seq_file *m;
3234	int ret;
3235
3236	if (tracing_disabled)
3237		return -ENODEV;
3238
3239	ret = seq_open(file, &show_traces_seq_ops);
3240	if (ret)
3241		return ret;
3242
3243	m = file->private_data;
3244	m->private = tr;
3245
3246	return 0;
3247}
3248
3249static ssize_t
3250tracing_write_stub(struct file *filp, const char __user *ubuf,
3251		   size_t count, loff_t *ppos)
3252{
3253	return count;
3254}
3255
3256loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3257{
3258	int ret;
3259
3260	if (file->f_mode & FMODE_READ)
3261		ret = seq_lseek(file, offset, whence);
3262	else
3263		file->f_pos = ret = 0;
3264
3265	return ret;
3266}
3267
3268static const struct file_operations tracing_fops = {
3269	.open		= tracing_open,
3270	.read		= seq_read,
3271	.write		= tracing_write_stub,
3272	.llseek		= tracing_lseek,
3273	.release	= tracing_release,
3274};
3275
3276static const struct file_operations show_traces_fops = {
3277	.open		= show_traces_open,
3278	.read		= seq_read,
3279	.release	= seq_release,
3280	.llseek		= seq_lseek,
3281};
3282
3283/*
3284 * The tracer itself will not take this lock, but still we want
3285 * to provide a consistent cpumask to user-space:
3286 */
3287static DEFINE_MUTEX(tracing_cpumask_update_lock);
3288
3289/*
3290 * Temporary storage for the character representation of the
3291 * CPU bitmask (and one more byte for the newline):
3292 */
3293static char mask_str[NR_CPUS + 1];
3294
3295static ssize_t
3296tracing_cpumask_read(struct file *filp, char __user *ubuf,
3297		     size_t count, loff_t *ppos)
3298{
3299	struct trace_array *tr = file_inode(filp)->i_private;
3300	int len;
3301
3302	mutex_lock(&tracing_cpumask_update_lock);
3303
3304	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
3305	if (count - len < 2) {
3306		count = -EINVAL;
3307		goto out_err;
3308	}
3309	len += sprintf(mask_str + len, "\n");
3310	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3311
3312out_err:
3313	mutex_unlock(&tracing_cpumask_update_lock);
3314
3315	return count;
3316}
3317
3318static ssize_t
3319tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3320		      size_t count, loff_t *ppos)
3321{
3322	struct trace_array *tr = file_inode(filp)->i_private;
3323	cpumask_var_t tracing_cpumask_new;
3324	int err, cpu;
3325
3326	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3327		return -ENOMEM;
3328
3329	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3330	if (err)
3331		goto err_unlock;
3332
3333	mutex_lock(&tracing_cpumask_update_lock);
3334
3335	local_irq_disable();
3336	arch_spin_lock(&ftrace_max_lock);
3337	for_each_tracing_cpu(cpu) {
3338		/*
3339		 * Increase/decrease the disabled counter if we are
3340		 * about to flip a bit in the cpumask:
3341		 */
3342		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3343				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3344			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3345			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3346		}
3347		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3348				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3349			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3350			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3351		}
3352	}
3353	arch_spin_unlock(&ftrace_max_lock);
3354	local_irq_enable();
3355
3356	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3357
3358	mutex_unlock(&tracing_cpumask_update_lock);
3359	free_cpumask_var(tracing_cpumask_new);
3360
3361	return count;
3362
3363err_unlock:
3364	free_cpumask_var(tracing_cpumask_new);
3365
3366	return err;
3367}
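
/*
 * Example from user space (illustrative):
 *
 *	# echo 3 > tracing_cpumask
 *
 * limits tracing to CPUs 0 and 1; the loop above bumps the "disabled"
 * counter and disables the ring buffer on every CPU that was cleared
 * from the mask, and re-enables any CPU that was added back.
 */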
3368
3369static const struct file_operations tracing_cpumask_fops = {
3370	.open		= tracing_open_generic_tr,
3371	.read		= tracing_cpumask_read,
3372	.write		= tracing_cpumask_write,
3373	.release	= tracing_release_generic_tr,
3374	.llseek		= generic_file_llseek,
3375};
3376
3377static int tracing_trace_options_show(struct seq_file *m, void *v)
3378{
3379	struct tracer_opt *trace_opts;
3380	struct trace_array *tr = m->private;
3381	u32 tracer_flags;
3382	int i;
3383
3384	mutex_lock(&trace_types_lock);
3385	tracer_flags = tr->current_trace->flags->val;
3386	trace_opts = tr->current_trace->flags->opts;
3387
3388	for (i = 0; trace_options[i]; i++) {
3389		if (trace_flags & (1 << i))
3390			seq_printf(m, "%s\n", trace_options[i]);
3391		else
3392			seq_printf(m, "no%s\n", trace_options[i]);
3393	}
3394
3395	for (i = 0; trace_opts[i].name; i++) {
3396		if (tracer_flags & trace_opts[i].bit)
3397			seq_printf(m, "%s\n", trace_opts[i].name);
3398		else
3399			seq_printf(m, "no%s\n", trace_opts[i].name);
3400	}
3401	mutex_unlock(&trace_types_lock);
3402
3403	return 0;
3404}
3405
3406static int __set_tracer_option(struct trace_array *tr,
3407			       struct tracer_flags *tracer_flags,
3408			       struct tracer_opt *opts, int neg)
3409{
3410	struct tracer *trace = tr->current_trace;
3411	int ret;
3412
3413	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3414	if (ret)
3415		return ret;
3416
3417	if (neg)
3418		tracer_flags->val &= ~opts->bit;
3419	else
3420		tracer_flags->val |= opts->bit;
3421	return 0;
3422}
3423
3424/* Try to assign a tracer specific option */
3425static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3426{
3427	struct tracer *trace = tr->current_trace;
3428	struct tracer_flags *tracer_flags = trace->flags;
3429	struct tracer_opt *opts = NULL;
3430	int i;
3431
3432	for (i = 0; tracer_flags->opts[i].name; i++) {
3433		opts = &tracer_flags->opts[i];
3434
3435		if (strcmp(cmp, opts->name) == 0)
3436			return __set_tracer_option(tr, trace->flags, opts, neg);
3437	}
3438
3439	return -EINVAL;
3440}
3441
3442/* Some tracers require overwrite to stay enabled */
3443int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3444{
3445	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3446		return -1;
3447
3448	return 0;
3449}
3450
3451int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3452{
3453	/* do nothing if flag is already set */
3454	if (!!(trace_flags & mask) == !!enabled)
3455		return 0;
3456
3457	/* Give the tracer a chance to approve the change */
3458	if (tr->current_trace->flag_changed)
3459		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3460			return -EINVAL;
3461
3462	if (enabled)
3463		trace_flags |= mask;
3464	else
3465		trace_flags &= ~mask;
3466
3467	if (mask == TRACE_ITER_RECORD_CMD)
3468		trace_event_enable_cmd_record(enabled);
3469
3470	if (mask == TRACE_ITER_OVERWRITE) {
3471		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3472#ifdef CONFIG_TRACER_MAX_TRACE
3473		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3474#endif
3475	}
3476
3477	if (mask == TRACE_ITER_PRINTK)
3478		trace_printk_start_stop_comm(enabled);
3479
3480	return 0;
3481}
3482
3483static int trace_set_options(struct trace_array *tr, char *option)
3484{
3485	char *cmp;
3486	int neg = 0;
3487	int ret = -ENODEV;
3488	int i;
3489
3490	cmp = strstrip(option);
3491
3492	if (strncmp(cmp, "no", 2) == 0) {
3493		neg = 1;
3494		cmp += 2;
3495	}
3496
3497	mutex_lock(&trace_types_lock);
3498
3499	for (i = 0; trace_options[i]; i++) {
3500		if (strcmp(cmp, trace_options[i]) == 0) {
3501			ret = set_tracer_flag(tr, 1 << i, !neg);
3502			break;
3503		}
3504	}
3505
3506	/* If no option could be set, test the specific tracer options */
3507	if (!trace_options[i])
3508		ret = set_tracer_option(tr, cmp, neg);
3509
3510	mutex_unlock(&trace_types_lock);
3511
3512	return ret;
3513}
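
/*
 * Example (illustrative): writing "nooverwrite" to the trace_options
 * file strips the "no" prefix here, matches the "overwrite" entry in
 * trace_options[], and reaches set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 0),
 * which also flips the ring buffer's overwrite mode.
 */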
3514
3515static ssize_t
3516tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3517			size_t cnt, loff_t *ppos)
3518{
3519	struct seq_file *m = filp->private_data;
3520	struct trace_array *tr = m->private;
3521	char buf[64];
3522	int ret;
3523
3524	if (cnt >= sizeof(buf))
3525		return -EINVAL;
3526
3527	if (copy_from_user(&buf, ubuf, cnt))
3528		return -EFAULT;
3529
3530	buf[cnt] = 0;
3531
3532	ret = trace_set_options(tr, buf);
3533	if (ret < 0)
3534		return ret;
3535
3536	*ppos += cnt;
3537
3538	return cnt;
3539}
3540
3541static int tracing_trace_options_open(struct inode *inode, struct file *file)
3542{
3543	struct trace_array *tr = inode->i_private;
3544	int ret;
3545
3546	if (tracing_disabled)
3547		return -ENODEV;
3548
3549	if (trace_array_get(tr) < 0)
3550		return -ENODEV;
3551
3552	ret = single_open(file, tracing_trace_options_show, inode->i_private);
3553	if (ret < 0)
3554		trace_array_put(tr);
3555
3556	return ret;
3557}
3558
3559static const struct file_operations tracing_iter_fops = {
3560	.open		= tracing_trace_options_open,
3561	.read		= seq_read,
3562	.llseek		= seq_lseek,
3563	.release	= tracing_single_release_tr,
3564	.write		= tracing_trace_options_write,
3565};
3566
3567static const char readme_msg[] =
3568	"tracing mini-HOWTO:\n\n"
3569	"# echo 0 > tracing_on : quick way to disable tracing\n"
3570	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3571	" Important files:\n"
3572	"  trace\t\t\t- The static contents of the buffer\n"
3573	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
3574	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3575	"  current_tracer\t- function and latency tracers\n"
3576	"  available_tracers\t- list of configured tracers for current_tracer\n"
3577	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
3578	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
3579	"  trace_clock\t\t- change the clock used to order events\n"
3580	"       local:   Per cpu clock but may not be synced across CPUs\n"
3581	"      global:   Synced across CPUs but slows tracing down.\n"
3582	"     counter:   Not a clock, but just an increment\n"
3583	"      uptime:   Jiffy counter from time of boot\n"
3584	"        perf:   Same clock that perf events use\n"
3585#ifdef CONFIG_X86_64
3586	"     x86-tsc:   TSC cycle counter\n"
3587#endif
3588	"\n  trace_marker\t\t- Writing into this file writes into the kernel buffer\n"
3589	"  tracing_cpumask\t- Limit which CPUs to trace\n"
3590	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3591	"\t\t\t  Remove sub-buffer with rmdir\n"
3592	"  trace_options\t\t- Set format or modify how tracing happens\n"
3593	"\t\t\t  Disable an option by prefixing 'no' to the\n"
3594	"\t\t\t  option name\n"
3595#ifdef CONFIG_DYNAMIC_FTRACE
3596	"\n  available_filter_functions - list of functions that can be filtered on\n"
3597	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
3598	"\t\t\t  functions\n"
3599	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3600	"\t     modules: Can select a group via module\n"
3601	"\t      Format: :mod:<module-name>\n"
3602	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
3603	"\t    triggers: a command to perform when function is hit\n"
3604	"\t      Format: <function>:<trigger>[:count]\n"
3605	"\t     trigger: traceon, traceoff\n"
3606	"\t\t      enable_event:<system>:<event>\n"
3607	"\t\t      disable_event:<system>:<event>\n"
3608#ifdef CONFIG_STACKTRACE
3609	"\t\t      stacktrace\n"
3610#endif
3611#ifdef CONFIG_TRACER_SNAPSHOT
3612	"\t\t      snapshot\n"
3613#endif
3614	"\t\t      dump\n"
3615	"\t\t      cpudump\n"
3616	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
3617	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
3618	"\t     The first one will disable tracing every time do_fault is hit\n"
3619	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
3620	"\t       The first time do_trap is hit and it disables tracing, the\n"
3621	"\t       counter will decrement to 2. If tracing is already disabled,\n"
3622	"\t       the counter will not decrement. It only decrements when the\n"
3623	"\t       trigger did work\n"
3624	"\t     To remove a trigger without a count:\n"
3625	"\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
3626	"\t     To remove a trigger with a count:\n"
3627	"\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3628	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
3629	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3630	"\t    modules: Can select a group via module command :mod:\n"
3631	"\t    Does not accept triggers\n"
3632#endif /* CONFIG_DYNAMIC_FTRACE */
3633#ifdef CONFIG_FUNCTION_TRACER
3634	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3635	"\t\t    (function)\n"
3636#endif
3637#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3638	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3639	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3640#endif
3641#ifdef CONFIG_TRACER_SNAPSHOT
3642	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
3643	"\t\t\t  snapshot buffer. Read the contents for more\n"
3644	"\t\t\t  information\n"
3645#endif
3646#ifdef CONFIG_STACK_TRACER
3647	"  stack_trace\t\t- Shows the max stack trace when active\n"
3648	"  stack_max_size\t- Shows current max stack size that was traced\n"
3649	"\t\t\t  Write into this file to reset the max size (trigger a\n"
3650	"\t\t\t  new trace)\n"
3651#ifdef CONFIG_DYNAMIC_FTRACE
3652	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3653	"\t\t\t  traces\n"
3654#endif
3655#endif /* CONFIG_STACK_TRACER */
3656	"  events/\t\t- Directory containing all trace event subsystems:\n"
3657	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3658	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
3659	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3660	"\t\t\t  events\n"
3661	"      filter\t\t- If set, only events passing filter are traced\n"
3662	"  events/<system>/<event>/\t- Directory containing control files for\n"
3663	"\t\t\t  <event>:\n"
3664	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3665	"      filter\t\t- If set, only events passing filter are traced\n"
3666	"      trigger\t\t- If set, a command to perform when event is hit\n"
3667	"\t    Format: <trigger>[:count][if <filter>]\n"
3668	"\t   trigger: traceon, traceoff\n"
3669	"\t            enable_event:<system>:<event>\n"
3670	"\t            disable_event:<system>:<event>\n"
3671#ifdef CONFIG_STACKTRACE
3672	"\t\t    stacktrace\n"
3673#endif
3674#ifdef CONFIG_TRACER_SNAPSHOT
3675	"\t\t    snapshot\n"
3676#endif
3677	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
3678	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
3679	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3680	"\t                  events/block/block_unplug/trigger\n"
3681	"\t   The first disables tracing every time block_unplug is hit.\n"
3682	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
3683	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
3684	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3685	"\t   Like function triggers, the counter is only decremented if it\n"
3686	"\t    enabled or disabled tracing.\n"
3687	"\t   To remove a trigger without a count:\n"
3688	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
3689	"\t   To remove a trigger with a count:\n"
3690	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
3691	"\t   Filters can be ignored when removing a trigger.\n"
3692;
3693
3694static ssize_t
3695tracing_readme_read(struct file *filp, char __user *ubuf,
3696		       size_t cnt, loff_t *ppos)
3697{
3698	return simple_read_from_buffer(ubuf, cnt, ppos,
3699					readme_msg, strlen(readme_msg));
3700}
3701
3702static const struct file_operations tracing_readme_fops = {
3703	.open		= tracing_open_generic,
3704	.read		= tracing_readme_read,
3705	.llseek		= generic_file_llseek,
3706};
3707
3708static ssize_t
3709tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3710				size_t cnt, loff_t *ppos)
3711{
3712	char *buf_comm;
3713	char *file_buf;
3714	char *buf;
3715	int len = 0;
3716	int pid;
3717	int i;
3718
3719	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3720	if (!file_buf)
3721		return -ENOMEM;
3722
3723	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3724	if (!buf_comm) {
3725		kfree(file_buf);
3726		return -ENOMEM;
3727	}
3728
3729	buf = file_buf;
3730
3731	for (i = 0; i < SAVED_CMDLINES; i++) {
3732		int r;
3733
3734		pid = map_cmdline_to_pid[i];
3735		if (pid == -1 || pid == NO_CMDLINE_MAP)
3736			continue;
3737
3738		trace_find_cmdline(pid, buf_comm);
3739		r = sprintf(buf, "%d %s\n", pid, buf_comm);
3740		buf += r;
3741		len += r;
3742	}
3743
3744	len = simple_read_from_buffer(ubuf, cnt, ppos,
3745				      file_buf, len);
 
3746
3747	kfree(file_buf);
3748	kfree(buf_comm);
3749
3750	return len;
3751}
3752
3753static const struct file_operations tracing_saved_cmdlines_fops = {
3754	.open		= tracing_open_generic,
3755	.read		= tracing_saved_cmdlines_read,
3756	.llseek		= generic_file_llseek,
3757};
3758
3759static ssize_t
3760tracing_set_trace_read(struct file *filp, char __user *ubuf,
3761		       size_t cnt, loff_t *ppos)
3762{
3763	struct trace_array *tr = filp->private_data;
3764	char buf[MAX_TRACER_SIZE+2];
3765	int r;
3766
3767	mutex_lock(&trace_types_lock);
3768	r = sprintf(buf, "%s\n", tr->current_trace->name);
3769	mutex_unlock(&trace_types_lock);
3770
3771	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3772}
3773
3774int tracer_init(struct tracer *t, struct trace_array *tr)
3775{
3776	tracing_reset_online_cpus(&tr->trace_buffer);
3777	return t->init(tr);
3778}
3779
3780static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3781{
3782	int cpu;
3783
3784	for_each_tracing_cpu(cpu)
3785		per_cpu_ptr(buf->data, cpu)->entries = val;
3786}
3787
3788#ifdef CONFIG_TRACER_MAX_TRACE
3789/* resize @trace_buf's per-cpu entries to match those of @size_buf */
3790static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3791					struct trace_buffer *size_buf, int cpu_id)
3792{
3793	int cpu, ret = 0;
3794
3795	if (cpu_id == RING_BUFFER_ALL_CPUS) {
3796		for_each_tracing_cpu(cpu) {
3797			ret = ring_buffer_resize(trace_buf->buffer,
3798				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3799			if (ret < 0)
3800				break;
3801			per_cpu_ptr(trace_buf->data, cpu)->entries =
3802				per_cpu_ptr(size_buf->data, cpu)->entries;
3803		}
3804	} else {
3805		ret = ring_buffer_resize(trace_buf->buffer,
3806				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3807		if (ret == 0)
3808			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3809				per_cpu_ptr(size_buf->data, cpu_id)->entries;
3810	}
3811
3812	return ret;
3813}
3814#endif /* CONFIG_TRACER_MAX_TRACE */
3815
3816static int __tracing_resize_ring_buffer(struct trace_array *tr,
3817					unsigned long size, int cpu)
3818{
3819	int ret;
3820
3821	/*
3822	 * If kernel or user changes the size of the ring buffer
3823	 * we use the size that was given, and we can forget about
3824	 * expanding it later.
3825	 */
3826	ring_buffer_expanded = true;
3827
3828	/* May be called before buffers are initialized */
3829	if (!tr->trace_buffer.buffer)
3830		return 0;
3831
3832	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3833	if (ret < 0)
3834		return ret;
3835
3836#ifdef CONFIG_TRACER_MAX_TRACE
3837	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3838	    !tr->current_trace->use_max_tr)
3839		goto out;
3840
3841	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3842	if (ret < 0) {
3843		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3844						     &tr->trace_buffer, cpu);
3845		if (r < 0) {
3846			/*
3847			 * AARGH! We are left with a different
3848			 * sized max buffer!!!!
3849			 * The max buffer is our "snapshot" buffer.
3850			 * When a tracer needs a snapshot (one of the
3851			 * latency tracers), it swaps the max buffer
3852			 * with the saved snapshot. We succeeded in
3853			 * updating the size of the main buffer, but failed to
3854			 * update the size of the max buffer. And when we tried
3855			 * to reset the main buffer to the original size, we
3856			 * failed there too. This is very unlikely to
3857			 * happen, but if it does, warn and kill all
3858			 * tracing.
3859			 */
3860			WARN_ON(1);
3861			tracing_disabled = 1;
3862		}
3863		return ret;
3864	}
3865
3866	if (cpu == RING_BUFFER_ALL_CPUS)
3867		set_buffer_entries(&tr->max_buffer, size);
3868	else
3869		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
3870
3871 out:
3872#endif /* CONFIG_TRACER_MAX_TRACE */
3873
3874	if (cpu == RING_BUFFER_ALL_CPUS)
3875		set_buffer_entries(&tr->trace_buffer, size);
3876	else
3877		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
3878
3879	return ret;
3880}
3881
3882static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3883					  unsigned long size, int cpu_id)
3884{
3885	int ret = size;
3886
3887	mutex_lock(&trace_types_lock);
3888
3889	if (cpu_id != RING_BUFFER_ALL_CPUS) {
3890		/* make sure, this cpu is enabled in the mask */
3891		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3892			ret = -EINVAL;
3893			goto out;
3894		}
3895	}
3896
3897	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
3898	if (ret < 0)
3899		ret = -ENOMEM;
3900
3901out:
3902	mutex_unlock(&trace_types_lock);
3903
3904	return ret;
3905}
3906
3907
3908/**
3909 * tracing_update_buffers - used by tracing facility to expand ring buffers
3910 *
3911 * To save memory when tracing is never used on a system that has it
3912 * configured in, the ring buffers are set to a minimum size. But once
3913 * a user starts to use the tracing facility, they need to grow
3914 * to their default size.
3915 *
3916 * This function is to be called when a tracer is about to be used.
3917 */
3918int tracing_update_buffers(void)
3919{
3920	int ret = 0;
3921
3922	mutex_lock(&trace_types_lock);
3923	if (!ring_buffer_expanded)
3924		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
3925						RING_BUFFER_ALL_CPUS);
3926	mutex_unlock(&trace_types_lock);
3927
3928	return ret;
3929}
3930
3931struct trace_option_dentry;
3932
3933static struct trace_option_dentry *
3934create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
3935
3936static void
3937destroy_trace_option_files(struct trace_option_dentry *topts);
3938
3939/*
3940 * Used to clear out the tracer before deletion of an instance.
3941 * Must have trace_types_lock held.
3942 */
3943static void tracing_set_nop(struct trace_array *tr)
3944{
3945	if (tr->current_trace == &nop_trace)
3946		return;
3947
3948	tr->current_trace->enabled--;
3949
3950	if (tr->current_trace->reset)
3951		tr->current_trace->reset(tr);
3952
3953	tr->current_trace = &nop_trace;
3954}
3955
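/*
 * Switch the active tracer of @tr to the one named @buf (the write path of
 * the "current_tracer" file).  In short: expand the ring buffer if it is
 * still at its boot-time minimum, tear down the old tracer (branch tracing
 * off, reset, drop to nop_trace), free or allocate the max/snapshot buffer
 * as the new tracer requires, then init and enable the new tracer.
 */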
3956static int tracing_set_tracer(struct trace_array *tr, const char *buf)
3957{
3958	static struct trace_option_dentry *topts;
3959	struct tracer *t;
3960#ifdef CONFIG_TRACER_MAX_TRACE
3961	bool had_max_tr;
3962#endif
3963	int ret = 0;
3964
3965	mutex_lock(&trace_types_lock);
3966
3967	if (!ring_buffer_expanded) {
3968		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
3969						RING_BUFFER_ALL_CPUS);
3970		if (ret < 0)
3971			goto out;
3972		ret = 0;
3973	}
3974
3975	for (t = trace_types; t; t = t->next) {
3976		if (strcmp(t->name, buf) == 0)
3977			break;
3978	}
3979	if (!t) {
3980		ret = -EINVAL;
3981		goto out;
3982	}
3983	if (t == tr->current_trace)
3984		goto out;
3985
3986	/* Some tracers are only allowed for the top level buffer */
3987	if (!trace_ok_for_array(t, tr)) {
3988		ret = -EINVAL;
3989		goto out;
3990	}
3991
3992	trace_branch_disable();
3993
3994	tr->current_trace->enabled--;
3995
3996	if (tr->current_trace->reset)
3997		tr->current_trace->reset(tr);
3998
3999	/* Current trace needs to be nop_trace before synchronize_sched */
4000	tr->current_trace = &nop_trace;
4001
4002#ifdef CONFIG_TRACER_MAX_TRACE
4003	had_max_tr = tr->allocated_snapshot;
4004
4005	if (had_max_tr && !t->use_max_tr) {
4006		/*
4007		 * We need to make sure that update_max_tr() sees that
4008		 * current_trace changed to nop_trace to keep it from
4009		 * swapping the buffers after we resize it.
4010		 * update_max_tr() is called with interrupts disabled,
4011		 * so a synchronize_sched() is sufficient.
4012		 */
4013		synchronize_sched();
4014		free_snapshot(tr);
4015	}
4016#endif
4017	/* Currently, only the top instance has options */
4018	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4019		destroy_trace_option_files(topts);
4020		topts = create_trace_option_files(tr, t);
4021	}
4022
4023#ifdef CONFIG_TRACER_MAX_TRACE
4024	if (t->use_max_tr && !had_max_tr) {
4025		ret = alloc_snapshot(tr);
4026		if (ret < 0)
4027			goto out;
4028	}
4029#endif
4030
4031	if (t->init) {
4032		ret = tracer_init(t, tr);
4033		if (ret)
4034			goto out;
4035	}
4036
4037	tr->current_trace = t;
4038	tr->current_trace->enabled++;
4039	trace_branch_enable(tr);
4040 out:
4041	mutex_unlock(&trace_types_lock);
4042
4043	return ret;
4044}
4045
4046static ssize_t
4047tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4048			size_t cnt, loff_t *ppos)
4049{
4050	struct trace_array *tr = filp->private_data;
4051	char buf[MAX_TRACER_SIZE+1];
4052	int i;
4053	size_t ret;
4054	int err;
4055
4056	ret = cnt;
4057
4058	if (cnt > MAX_TRACER_SIZE)
4059		cnt = MAX_TRACER_SIZE;
4060
4061	if (copy_from_user(&buf, ubuf, cnt))
4062		return -EFAULT;
4063
4064	buf[cnt] = 0;
4065
4066	/* strip ending whitespace. */
4067	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4068		buf[i] = 0;
4069
4070	err = tracing_set_tracer(tr, buf);
4071	if (err)
4072		return err;
4073
4074	*ppos += ret;
4075
4076	return ret;
4077}
4078
4079static ssize_t
4080tracing_max_lat_read(struct file *filp, char __user *ubuf,
4081		     size_t cnt, loff_t *ppos)
4082{
4083	unsigned long *ptr = filp->private_data;
4084	char buf[64];
4085	int r;
4086
4087	r = snprintf(buf, sizeof(buf), "%ld\n",
4088		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4089	if (r > sizeof(buf))
4090		r = sizeof(buf);
4091	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4092}
4093
4094static ssize_t
4095tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4096		      size_t cnt, loff_t *ppos)
4097{
4098	unsigned long *ptr = filp->private_data;
4099	unsigned long val;
4100	int ret;
4101
4102	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4103	if (ret)
4104		return ret;
4105
4106	*ptr = val * 1000;
4107
4108	return cnt;
4109}
4110
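/*
 * "trace_pipe" is a consuming reader: events are removed from the ring
 * buffer as they are read, and each open file carries its own private copy
 * of the current tracer so a concurrent tracer switch cannot pull state out
 * from under it.
 */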
4111static int tracing_open_pipe(struct inode *inode, struct file *filp)
4112{
4113	struct trace_array *tr = inode->i_private;
4114	struct trace_iterator *iter;
4115	int ret = 0;
4116
4117	if (tracing_disabled)
4118		return -ENODEV;
4119
4120	if (trace_array_get(tr) < 0)
4121		return -ENODEV;
 
4122
4123	mutex_lock(&trace_types_lock);
4124
4125	/* create a buffer to store the information to pass to userspace */
4126	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4127	if (!iter) {
4128		ret = -ENOMEM;
4129		__trace_array_put(tr);
4130		goto out;
4131	}
4132
4133	/*
4134	 * We make a copy of the current tracer to avoid concurrent
4135	 * changes on it while we are reading.
4136	 */
4137	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4138	if (!iter->trace) {
4139		ret = -ENOMEM;
4140		goto fail;
4141	}
4142	*iter->trace = *tr->current_trace;
4143
4144	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4145		ret = -ENOMEM;
4146		goto fail;
4147	}
4148
4149	/* trace pipe does not show start of buffer */
4150	cpumask_setall(iter->started);
4151
4152	if (trace_flags & TRACE_ITER_LATENCY_FMT)
4153		iter->iter_flags |= TRACE_FILE_LAT_FMT;
4154
4155	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4156	if (trace_clocks[tr->clock_id].in_ns)
4157		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4158
4159	iter->tr = tr;
4160	iter->trace_buffer = &tr->trace_buffer;
4161	iter->cpu_file = tracing_get_cpu(inode);
4162	mutex_init(&iter->mutex);
4163	filp->private_data = iter;
4164
4165	if (iter->trace->pipe_open)
4166		iter->trace->pipe_open(iter);
4167
4168	nonseekable_open(inode, filp);
4169out:
4170	mutex_unlock(&trace_types_lock);
4171	return ret;
4172
4173fail:
4174	kfree(iter->trace);
4175	kfree(iter);
4176	__trace_array_put(tr);
4177	mutex_unlock(&trace_types_lock);
4178	return ret;
4179}
4180
4181static int tracing_release_pipe(struct inode *inode, struct file *file)
4182{
4183	struct trace_iterator *iter = file->private_data;
4184	struct trace_array *tr = inode->i_private;
4185
4186	mutex_lock(&trace_types_lock);
4187
4188	if (iter->trace->pipe_close)
4189		iter->trace->pipe_close(iter);
4190
4191	mutex_unlock(&trace_types_lock);
4192
4193	free_cpumask_var(iter->started);
4194	mutex_destroy(&iter->mutex);
4195	kfree(iter->trace);
4196	kfree(iter);
4197
4198	trace_array_put(tr);
4199
4200	return 0;
4201}
4202
4203static unsigned int
4204trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4205{
4206	/* Iterators are static, they should be filled or empty */
4207	if (trace_buffer_iter(iter, iter->cpu_file))
4208		return POLLIN | POLLRDNORM;
4209
4210	if (trace_flags & TRACE_ITER_BLOCK)
4211		/*
4212		 * Always select as readable when in blocking mode
4213		 */
4214		return POLLIN | POLLRDNORM;
4215	else
4216		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4217					     filp, poll_table);
4218}
4219
4220static unsigned int
4221tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4222{
4223	struct trace_iterator *iter = filp->private_data;
4224
4225	return trace_poll(iter, filp, poll_table);
4226}
4227
4228/*
4229 * This is a make-shift waitqueue.
4230 * A tracer might use this callback on some rare cases:
4231 *
4232 *  1) the current tracer might hold the runqueue lock when it wakes up
4233 *     a reader, hence a deadlock (sched, function, and function graph tracers)
4234 *  2) the function tracers, trace all functions, we don't want
4235 *     the overhead of calling wake_up and friends
4236 *     (and tracing them too)
4237 *
4238 *     Anyway, this is really very primitive wakeup.
4239 */
4240void poll_wait_pipe(struct trace_iterator *iter)
4241{
4242	set_current_state(TASK_INTERRUPTIBLE);
4243	/* sleep for 100 msecs, and try again. */
4244	schedule_timeout(HZ / 10);
4245}
4246
4247/* Must be called with trace_types_lock mutex held. */
4248static int tracing_wait_pipe(struct file *filp)
4249{
4250	struct trace_iterator *iter = filp->private_data;
 
4251
4252	while (trace_empty(iter)) {
4253
4254		if ((filp->f_flags & O_NONBLOCK)) {
4255			return -EAGAIN;
4256		}
4257
4258		mutex_unlock(&iter->mutex);
4259
4260		iter->trace->wait_pipe(iter);
4261
4262		mutex_lock(&iter->mutex);
4263
4264		if (signal_pending(current))
4265			return -EINTR;
4266
4267		/*
4268		 * We block until we read something and tracing is disabled.
4269		 * We still block if tracing is disabled, but we have never
4270		 * read anything. This allows a user to cat this file, and
4271		 * then enable tracing. But after we have read something,
4272		 * we give an EOF when tracing is again disabled.
4273		 *
4274		 * iter->pos will be 0 if we haven't read anything.
4275		 */
4276		if (!tracing_is_on() && iter->pos)
4277			break;
4278	}
4279
4280	return 1;
4281}
4282
4283/*
4284 * Consumer reader.
4285 */
4286static ssize_t
4287tracing_read_pipe(struct file *filp, char __user *ubuf,
4288		  size_t cnt, loff_t *ppos)
4289{
4290	struct trace_iterator *iter = filp->private_data;
4291	struct trace_array *tr = iter->tr;
4292	ssize_t sret;
4293
4294	/* return any leftover data */
4295	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4296	if (sret != -EBUSY)
4297		return sret;
4298
4299	trace_seq_init(&iter->seq);
4300
4301	/* copy the tracer to avoid using a global lock all around */
4302	mutex_lock(&trace_types_lock);
4303	if (unlikely(iter->trace->name != tr->current_trace->name))
4304		*iter->trace = *tr->current_trace;
4305	mutex_unlock(&trace_types_lock);
4306
4307	/*
4308	 * Avoid more than one consumer on a single file descriptor
4309	 * This is just a matter of trace coherency; the ring buffer itself
4310	 * is protected.
4311	 */
4312	mutex_lock(&iter->mutex);
4313	if (iter->trace->read) {
4314		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4315		if (sret)
4316			goto out;
4317	}
4318
4319waitagain:
4320	sret = tracing_wait_pipe(filp);
4321	if (sret <= 0)
4322		goto out;
4323
4324	/* stop when tracing is finished */
4325	if (trace_empty(iter)) {
4326		sret = 0;
4327		goto out;
4328	}
4329
4330	if (cnt >= PAGE_SIZE)
4331		cnt = PAGE_SIZE - 1;
4332
4333	/* reset all but tr, trace, and overruns */
4334	memset(&iter->seq, 0,
4335	       sizeof(struct trace_iterator) -
4336	       offsetof(struct trace_iterator, seq));
4337	cpumask_clear(iter->started);
 
4338	iter->pos = -1;
4339
4340	trace_event_read_lock();
4341	trace_access_lock(iter->cpu_file);
4342	while (trace_find_next_entry_inc(iter) != NULL) {
4343		enum print_line_t ret;
4344		int len = iter->seq.len;
4345
4346		ret = print_trace_line(iter);
4347		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4348			/* don't print partial lines */
4349			iter->seq.len = len;
4350			break;
4351		}
4352		if (ret != TRACE_TYPE_NO_CONSUME)
4353			trace_consume(iter);
4354
4355		if (iter->seq.len >= cnt)
4356			break;
4357
4358		/*
4359		 * Setting the full flag means we reached the trace_seq buffer
4360		 * size and we should leave by partial output condition above.
4361		 * One of the trace_seq_* functions is not used properly.
4362		 */
4363		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4364			  iter->ent->type);
4365	}
4366	trace_access_unlock(iter->cpu_file);
4367	trace_event_read_unlock();
4368
4369	/* Now copy what we have to the user */
4370	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4371	if (iter->seq.readpos >= iter->seq.len)
4372		trace_seq_init(&iter->seq);
4373
4374	/*
4375	 * If there was nothing to send to user, in spite of consuming trace
4376	 * entries, go back to wait for more entries.
4377	 */
4378	if (sret == -EBUSY)
4379		goto waitagain;
4380
4381out:
4382	mutex_unlock(&iter->mutex);
4383
4384	return sret;
4385}
4386
4387static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4388				     unsigned int idx)
4389{
4390	__free_page(spd->pages[idx]);
4391}
4392
4393static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4394	.can_merge		= 0,
4395	.confirm		= generic_pipe_buf_confirm,
4396	.release		= generic_pipe_buf_release,
4397	.steal			= generic_pipe_buf_steal,
4398	.get			= generic_pipe_buf_get,
4399};
4400
4401static size_t
4402tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4403{
4404	size_t count;
 
4405	int ret;
4406
4407	/* Seq buffer is page-sized, exactly what we need. */
4408	for (;;) {
4409		count = iter->seq.len;
4410		ret = print_trace_line(iter);
4411		count = iter->seq.len - count;
4412		if (rem < count) {
4413			rem = 0;
4414			iter->seq.len -= count;
4415			break;
4416		}
4417		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4418			iter->seq.len -= count;
4419			break;
4420		}
4421
4422		if (ret != TRACE_TYPE_NO_CONSUME)
4423			trace_consume(iter);
4424		rem -= count;
4425		if (!trace_find_next_entry_inc(iter))	{
4426			rem = 0;
4427			iter->ent = NULL;
4428			break;
4429		}
4430	}
4431
4432	return rem;
4433}
4434
4435static ssize_t tracing_splice_read_pipe(struct file *filp,
4436					loff_t *ppos,
4437					struct pipe_inode_info *pipe,
4438					size_t len,
4439					unsigned int flags)
4440{
4441	struct page *pages_def[PIPE_DEF_BUFFERS];
4442	struct partial_page partial_def[PIPE_DEF_BUFFERS];
4443	struct trace_iterator *iter = filp->private_data;
4444	struct splice_pipe_desc spd = {
4445		.pages		= pages_def,
4446		.partial	= partial_def,
4447		.nr_pages	= 0, /* This gets updated below. */
4448		.nr_pages_max	= PIPE_DEF_BUFFERS,
4449		.flags		= flags,
4450		.ops		= &tracing_pipe_buf_ops,
4451		.spd_release	= tracing_spd_release_pipe,
4452	};
4453	struct trace_array *tr = iter->tr;
4454	ssize_t ret;
4455	size_t rem;
4456	unsigned int i;
4457
4458	if (splice_grow_spd(pipe, &spd))
4459		return -ENOMEM;
4460
4461	/* copy the tracer to avoid using a global lock all around */
4462	mutex_lock(&trace_types_lock);
4463	if (unlikely(iter->trace->name != tr->current_trace->name))
4464		*iter->trace = *tr->current_trace;
4465	mutex_unlock(&trace_types_lock);
4466
4467	mutex_lock(&iter->mutex);
4468
4469	if (iter->trace->splice_read) {
4470		ret = iter->trace->splice_read(iter, filp,
4471					       ppos, pipe, len, flags);
4472		if (ret)
4473			goto out_err;
4474	}
4475
4476	ret = tracing_wait_pipe(filp);
4477	if (ret <= 0)
4478		goto out_err;
4479
4480	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4481		ret = -EFAULT;
4482		goto out_err;
4483	}
4484
4485	trace_event_read_lock();
4486	trace_access_lock(iter->cpu_file);
4487
4488	/* Fill as many pages as possible. */
4489	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4490		spd.pages[i] = alloc_page(GFP_KERNEL);
4491		if (!spd.pages[i])
4492			break;
4493
4494		rem = tracing_fill_pipe_page(rem, iter);
4495
4496		/* Copy the data into the page, so we can start over. */
4497		ret = trace_seq_to_buffer(&iter->seq,
4498					  page_address(spd.pages[i]),
4499					  iter->seq.len);
4500		if (ret < 0) {
4501			__free_page(spd.pages[i]);
4502			break;
4503		}
4504		spd.partial[i].offset = 0;
4505		spd.partial[i].len = iter->seq.len;
4506
4507		trace_seq_init(&iter->seq);
4508	}
4509
4510	trace_access_unlock(iter->cpu_file);
4511	trace_event_read_unlock();
4512	mutex_unlock(&iter->mutex);
4513
4514	spd.nr_pages = i;
4515
4516	ret = splice_to_pipe(pipe, &spd);
4517out:
4518	splice_shrink_spd(&spd);
4519	return ret;
4520
4521out_err:
4522	mutex_unlock(&iter->mutex);
4523	goto out;
4524}
4525
4526static ssize_t
4527tracing_entries_read(struct file *filp, char __user *ubuf,
4528		     size_t cnt, loff_t *ppos)
4529{
4530	struct inode *inode = file_inode(filp);
4531	struct trace_array *tr = inode->i_private;
4532	int cpu = tracing_get_cpu(inode);
4533	char buf[64];
4534	int r = 0;
4535	ssize_t ret;
4536
4537	mutex_lock(&trace_types_lock);
4538
4539	if (cpu == RING_BUFFER_ALL_CPUS) {
4540		int cpu, buf_size_same;
4541		unsigned long size;
4542
4543		size = 0;
4544		buf_size_same = 1;
4545		/* check if all cpu sizes are same */
4546		for_each_tracing_cpu(cpu) {
4547			/* fill in the size from first enabled cpu */
4548			if (size == 0)
4549				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4550			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4551				buf_size_same = 0;
4552				break;
4553			}
4554		}
4555
4556		if (buf_size_same) {
4557			if (!ring_buffer_expanded)
4558				r = sprintf(buf, "%lu (expanded: %lu)\n",
4559					    size >> 10,
4560					    trace_buf_size >> 10);
4561			else
4562				r = sprintf(buf, "%lu\n", size >> 10);
4563		} else
4564			r = sprintf(buf, "X\n");
4565	} else
4566		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4567
4568	mutex_unlock(&trace_types_lock);
4569
4570	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4571	return ret;
4572}
4573
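/*
 * Write handler for "buffer_size_kb": the value written is taken as
 * kilobytes per cpu (or for a single cpu via the per_cpu files), e.g.:
 *
 *	echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 */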
4574static ssize_t
4575tracing_entries_write(struct file *filp, const char __user *ubuf,
4576		      size_t cnt, loff_t *ppos)
4577{
4578	struct inode *inode = file_inode(filp);
4579	struct trace_array *tr = inode->i_private;
4580	unsigned long val;
4581	int ret;
4582
4583	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4584	if (ret)
4585		return ret;
4586
4587	/* must have at least 1 entry */
4588	if (!val)
4589		return -EINVAL;
4590
4591	/* value is in KB */
4592	val <<= 10;
4593	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4594	if (ret < 0)
4595		return ret;
4596
4597	*ppos += cnt;
4598
4599	return cnt;
4600}
4601
4602static ssize_t
4603tracing_total_entries_read(struct file *filp, char __user *ubuf,
4604				size_t cnt, loff_t *ppos)
4605{
4606	struct trace_array *tr = filp->private_data;
4607	char buf[64];
4608	int r, cpu;
4609	unsigned long size = 0, expanded_size = 0;
4610
4611	mutex_lock(&trace_types_lock);
4612	for_each_tracing_cpu(cpu) {
4613		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4614		if (!ring_buffer_expanded)
4615			expanded_size += trace_buf_size >> 10;
4616	}
4617	if (ring_buffer_expanded)
4618		r = sprintf(buf, "%lu\n", size);
4619	else
4620		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4621	mutex_unlock(&trace_types_lock);
4622
4623	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4624}
4625
4626static ssize_t
4627tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4628			  size_t cnt, loff_t *ppos)
4629{
4630	/*
4631	 * There is no need to read what the user has written, this function
4632	 * is just to make sure that there is no error when "echo" is used
4633	 */
4634
4635	*ppos += cnt;
4636
4637	return cnt;
4638}
4639
4640static int
4641tracing_free_buffer_release(struct inode *inode, struct file *filp)
4642{
4643	struct trace_array *tr = inode->i_private;
4644
4645	/* disable tracing ? */
4646	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4647		tracer_tracing_off(tr);
4648	/* resize the ring buffer to 0 */
4649	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4650
4651	trace_array_put(tr);
4652
4653	return 0;
4654}
4655
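/*
 * Write handler for the "trace_marker" file: text written here by userspace
 * is injected into the ring buffer as a TRACE_PRINT event, e.g.:
 *
 *	echo "hello world" > /sys/kernel/debug/tracing/trace_marker
 */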
4656static ssize_t
4657tracing_mark_write(struct file *filp, const char __user *ubuf,
4658					size_t cnt, loff_t *fpos)
4659{
4660	unsigned long addr = (unsigned long)ubuf;
4661	struct trace_array *tr = filp->private_data;
4662	struct ring_buffer_event *event;
4663	struct ring_buffer *buffer;
 
4664	struct print_entry *entry;
4665	unsigned long irq_flags;
4666	struct page *pages[2];
4667	void *map_page[2];
4668	int nr_pages = 1;
4669	ssize_t written;
4670	int offset;
4671	int size;
4672	int len;
4673	int ret;
4674	int i;
4675
4676	if (tracing_disabled)
4677		return -EINVAL;
4678
4679	if (!(trace_flags & TRACE_ITER_MARKERS))
4680		return -EINVAL;
4681
4682	if (cnt > TRACE_BUF_SIZE)
4683		cnt = TRACE_BUF_SIZE;
4684
4685	/*
4686	 * Userspace is injecting traces into the kernel trace buffer.
4687	 * We want to be as non-intrusive as possible.
4688	 * To do so, we do not want to allocate any special buffers
4689	 * or take any locks, but instead write the userspace data
4690	 * straight into the ring buffer.
4691	 *
4692	 * First we need to pin the userspace buffer into memory,
4693	 * which it most likely already is, because the caller just referenced it.
4694	 * But there's no guarantee that it is. By using get_user_pages_fast()
4695	 * and kmap_atomic/kunmap_atomic() we can get access to the
4696	 * pages directly. We then write the data directly into the
4697	 * ring buffer.
4698	 */
4699	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4700
4701	/* check if we cross pages */
4702	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4703		nr_pages = 2;
4704
4705	offset = addr & (PAGE_SIZE - 1);
4706	addr &= PAGE_MASK;
4707
4708	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4709	if (ret < nr_pages) {
4710		while (--ret >= 0)
4711			put_page(pages[ret]);
4712		written = -EFAULT;
4713		goto out;
4714	}
4715
4716	for (i = 0; i < nr_pages; i++)
4717		map_page[i] = kmap_atomic(pages[i]);
4718
4719	local_save_flags(irq_flags);
4720	size = sizeof(*entry) + cnt + 2; /* possible \n added */
4721	buffer = tr->trace_buffer.buffer;
4722	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4723					  irq_flags, preempt_count());
4724	if (!event) {
4725		/* Ring buffer disabled, return as if not open for write */
4726		written = -EBADF;
4727		goto out_unlock;
4728	}
4729
4730	entry = ring_buffer_event_data(event);
4731	entry->ip = _THIS_IP_;
4732
4733	if (nr_pages == 2) {
4734		len = PAGE_SIZE - offset;
4735		memcpy(&entry->buf, map_page[0] + offset, len);
4736		memcpy(&entry->buf[len], map_page[1], cnt - len);
 
4737	} else
4738		memcpy(&entry->buf, map_page[0] + offset, cnt);
4739
4740	if (entry->buf[cnt - 1] != '\n') {
4741		entry->buf[cnt] = '\n';
4742		entry->buf[cnt + 1] = '\0';
4743	} else
4744		entry->buf[cnt] = '\0';
4745
4746	__buffer_unlock_commit(buffer, event);
4747
4748	written = cnt;
 
4749
4750	*fpos += written;
4751
4752 out_unlock:
4753	for (i = 0; i < nr_pages; i++) {
4754		kunmap_atomic(map_page[i]);
4755		put_page(pages[i]);
4756	}
4757 out:
4758	return written;
4759}
4760
4761static int tracing_clock_show(struct seq_file *m, void *v)
4762{
4763	struct trace_array *tr = m->private;
4764	int i;
4765
4766	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4767		seq_printf(m,
4768			"%s%s%s%s", i ? " " : "",
4769			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4770			i == tr->clock_id ? "]" : "");
4771	seq_putc(m, '\n');
4772
4773	return 0;
4774}
4775
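/*
 * Select the trace clock by name (the write path of the "trace_clock" file);
 * valid names are the ones listed by tracing_clock_show() above, e.g.:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */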
4776static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
4777{
4778	int i;
4779
4780	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4781		if (strcmp(trace_clocks[i].name, clockstr) == 0)
4782			break;
4783	}
4784	if (i == ARRAY_SIZE(trace_clocks))
4785		return -EINVAL;
4786
4787	mutex_lock(&trace_types_lock);
4788
4789	tr->clock_id = i;
4790
4791	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4792
4793	/*
4794	 * New clock may not be consistent with the previous clock.
4795	 * Reset the buffer so that it doesn't have incomparable timestamps.
4796	 */
4797	tracing_reset_online_cpus(&tr->trace_buffer);
4798
4799#ifdef CONFIG_TRACER_MAX_TRACE
4800	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4801		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4802	tracing_reset_online_cpus(&tr->max_buffer);
4803#endif
4804
4805	mutex_unlock(&trace_types_lock);
4806
4807	return 0;
4808}
4809
4810static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4811				   size_t cnt, loff_t *fpos)
4812{
4813	struct seq_file *m = filp->private_data;
4814	struct trace_array *tr = m->private;
4815	char buf[64];
4816	const char *clockstr;
4817	int ret;
4818
4819	if (cnt >= sizeof(buf))
4820		return -EINVAL;
4821
4822	if (copy_from_user(&buf, ubuf, cnt))
4823		return -EFAULT;
4824
4825	buf[cnt] = 0;
4826
4827	clockstr = strstrip(buf);
4828
4829	ret = tracing_set_clock(tr, clockstr);
4830	if (ret)
4831		return ret;
4832
4833	*fpos += cnt;
4834
4835	return cnt;
4836}
4837
4838static int tracing_clock_open(struct inode *inode, struct file *file)
4839{
4840	struct trace_array *tr = inode->i_private;
4841	int ret;
4842
4843	if (tracing_disabled)
4844		return -ENODEV;
4845
4846	if (trace_array_get(tr))
4847		return -ENODEV;
4848
4849	ret = single_open(file, tracing_clock_show, inode->i_private);
4850	if (ret < 0)
4851		trace_array_put(tr);
4852
4853	return ret;
4854}
4855
4856struct ftrace_buffer_info {
4857	struct trace_iterator	iter;
4858	void			*spare;
 
4859	unsigned int		read;
4860};
4861
4862#ifdef CONFIG_TRACER_SNAPSHOT
4863static int tracing_snapshot_open(struct inode *inode, struct file *file)
4864{
4865	struct trace_array *tr = inode->i_private;
4866	struct trace_iterator *iter;
4867	struct seq_file *m;
4868	int ret = 0;
4869
4870	if (trace_array_get(tr) < 0)
4871		return -ENODEV;
 
4872
4873	if (file->f_mode & FMODE_READ) {
4874		iter = __tracing_open(inode, file, true);
4875		if (IS_ERR(iter))
4876			ret = PTR_ERR(iter);
4877	} else {
4878		/* Writes still need the seq_file to hold the private data */
4879		ret = -ENOMEM;
4880		m = kzalloc(sizeof(*m), GFP_KERNEL);
4881		if (!m)
4882			goto out;
4883		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4884		if (!iter) {
4885			kfree(m);
4886			goto out;
4887		}
4888		ret = 0;
4889
4890		iter->tr = tr;
4891		iter->trace_buffer = &tr->max_buffer;
4892		iter->cpu_file = tracing_get_cpu(inode);
4893		m->private = iter;
4894		file->private_data = m;
4895	}
4896out:
4897	if (ret < 0)
4898		trace_array_put(tr);
4899
4900	return ret;
4901}
4902
4903static ssize_t
4904tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4905		       loff_t *ppos)
4906{
4907	struct seq_file *m = filp->private_data;
4908	struct trace_iterator *iter = m->private;
4909	struct trace_array *tr = iter->tr;
4910	unsigned long val;
4911	int ret;
4912
4913	ret = tracing_update_buffers();
4914	if (ret < 0)
4915		return ret;
4916
4917	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4918	if (ret)
4919		return ret;
4920
4921	mutex_lock(&trace_types_lock);
4922
4923	if (tr->current_trace->use_max_tr) {
4924		ret = -EBUSY;
4925		goto out;
4926	}
4927
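	/*
	 * Writing 0 frees the snapshot buffer, writing 1 allocates it (if
	 * needed) and swaps it with the live buffer, and any other value
	 * simply clears the snapshot buffer's contents.
	 */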
4928	switch (val) {
4929	case 0:
4930		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4931			ret = -EINVAL;
4932			break;
4933		}
4934		if (tr->allocated_snapshot)
4935			free_snapshot(tr);
4936		break;
4937	case 1:
4938/* Only allow per-cpu swap if the ring buffer supports it */
4939#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4940		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4941			ret = -EINVAL;
4942			break;
4943		}
4944#endif
4945		if (!tr->allocated_snapshot) {
4946			ret = alloc_snapshot(tr);
4947			if (ret < 0)
4948				break;
4949		}
4950		local_irq_disable();
4951		/* Now, we're going to swap */
4952		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4953			update_max_tr(tr, current, smp_processor_id());
4954		else
4955			update_max_tr_single(tr, current, iter->cpu_file);
4956		local_irq_enable();
4957		break;
4958	default:
4959		if (tr->allocated_snapshot) {
4960			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4961				tracing_reset_online_cpus(&tr->max_buffer);
4962			else
4963				tracing_reset(&tr->max_buffer, iter->cpu_file);
4964		}
4965		break;
4966	}
4967
4968	if (ret >= 0) {
4969		*ppos += cnt;
4970		ret = cnt;
4971	}
4972out:
4973	mutex_unlock(&trace_types_lock);
4974	return ret;
4975}
4976
4977static int tracing_snapshot_release(struct inode *inode, struct file *file)
4978{
4979	struct seq_file *m = file->private_data;
4980	int ret;
4981
4982	ret = tracing_release(inode, file);
4983
4984	if (file->f_mode & FMODE_READ)
4985		return ret;
4986
4987	/* If write only, the seq_file is just a stub */
4988	if (m)
4989		kfree(m->private);
4990	kfree(m);
4991
4992	return 0;
4993}
4994
4995static int tracing_buffers_open(struct inode *inode, struct file *filp);
4996static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4997				    size_t count, loff_t *ppos);
4998static int tracing_buffers_release(struct inode *inode, struct file *file);
4999static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5000		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5001
5002static int snapshot_raw_open(struct inode *inode, struct file *filp)
5003{
5004	struct ftrace_buffer_info *info;
5005	int ret;
5006
 
5007	ret = tracing_buffers_open(inode, filp);
5008	if (ret < 0)
5009		return ret;
5010
5011	info = filp->private_data;
5012
5013	if (info->iter.trace->use_max_tr) {
5014		tracing_buffers_release(inode, filp);
5015		return -EBUSY;
5016	}
5017
5018	info->iter.snapshot = true;
5019	info->iter.trace_buffer = &info->iter.tr->max_buffer;
5020
5021	return ret;
5022}
5023
5024#endif /* CONFIG_TRACER_SNAPSHOT */
5025
5026
5027static const struct file_operations tracing_max_lat_fops = {
5028	.open		= tracing_open_generic,
5029	.read		= tracing_max_lat_read,
5030	.write		= tracing_max_lat_write,
5031	.llseek		= generic_file_llseek,
5032};
 
5033
5034static const struct file_operations set_tracer_fops = {
5035	.open		= tracing_open_generic,
5036	.read		= tracing_set_trace_read,
5037	.write		= tracing_set_trace_write,
5038	.llseek		= generic_file_llseek,
5039};
5040
5041static const struct file_operations tracing_pipe_fops = {
5042	.open		= tracing_open_pipe,
5043	.poll		= tracing_poll_pipe,
5044	.read		= tracing_read_pipe,
5045	.splice_read	= tracing_splice_read_pipe,
5046	.release	= tracing_release_pipe,
5047	.llseek		= no_llseek,
5048};
5049
5050static const struct file_operations tracing_entries_fops = {
5051	.open		= tracing_open_generic_tr,
5052	.read		= tracing_entries_read,
5053	.write		= tracing_entries_write,
5054	.llseek		= generic_file_llseek,
5055	.release	= tracing_release_generic_tr,
5056};
5057
5058static const struct file_operations tracing_total_entries_fops = {
5059	.open		= tracing_open_generic_tr,
5060	.read		= tracing_total_entries_read,
5061	.llseek		= generic_file_llseek,
5062	.release	= tracing_release_generic_tr,
5063};
5064
5065static const struct file_operations tracing_free_buffer_fops = {
5066	.open		= tracing_open_generic_tr,
5067	.write		= tracing_free_buffer_write,
5068	.release	= tracing_free_buffer_release,
5069};
5070
5071static const struct file_operations tracing_mark_fops = {
5072	.open		= tracing_open_generic_tr,
5073	.write		= tracing_mark_write,
5074	.llseek		= generic_file_llseek,
5075	.release	= tracing_release_generic_tr,
5076};
5077
5078static const struct file_operations trace_clock_fops = {
5079	.open		= tracing_clock_open,
5080	.read		= seq_read,
5081	.llseek		= seq_lseek,
5082	.release	= tracing_single_release_tr,
5083	.write		= tracing_clock_write,
5084};
5085
5086#ifdef CONFIG_TRACER_SNAPSHOT
5087static const struct file_operations snapshot_fops = {
5088	.open		= tracing_snapshot_open,
5089	.read		= seq_read,
5090	.write		= tracing_snapshot_write,
5091	.llseek		= tracing_lseek,
5092	.release	= tracing_snapshot_release,
5093};
5094
5095static const struct file_operations snapshot_raw_fops = {
5096	.open		= snapshot_raw_open,
5097	.read		= tracing_buffers_read,
5098	.release	= tracing_buffers_release,
5099	.splice_read	= tracing_buffers_splice_read,
5100	.llseek		= no_llseek,
5101};
5102
5103#endif /* CONFIG_TRACER_SNAPSHOT */
5104
5105static int tracing_buffers_open(struct inode *inode, struct file *filp)
5106{
5107	struct trace_array *tr = inode->i_private;
5108	struct ftrace_buffer_info *info;
5109	int ret;
5110
5111	if (tracing_disabled)
5112		return -ENODEV;
5113
5114	if (trace_array_get(tr) < 0)
5115		return -ENODEV;
5116
5117	info = kzalloc(sizeof(*info), GFP_KERNEL);
5118	if (!info) {
5119		trace_array_put(tr);
5120		return -ENOMEM;
5121	}
5122
5123	mutex_lock(&trace_types_lock);
5124
5125	info->iter.tr		= tr;
5126	info->iter.cpu_file	= tracing_get_cpu(inode);
5127	info->iter.trace	= tr->current_trace;
5128	info->iter.trace_buffer = &tr->trace_buffer;
5129	info->spare		= NULL;
5130	/* Force reading ring buffer for first read */
5131	info->read		= (unsigned int)-1;
5132
5133	filp->private_data = info;
5134
5135	mutex_unlock(&trace_types_lock);
5136
5137	ret = nonseekable_open(inode, filp);
5138	if (ret < 0)
5139		trace_array_put(tr);
5140
5141	return ret;
5142}
5143
5144static unsigned int
5145tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5146{
5147	struct ftrace_buffer_info *info = filp->private_data;
5148	struct trace_iterator *iter = &info->iter;
5149
5150	return trace_poll(iter, filp, poll_table);
5151}
5152
5153static ssize_t
5154tracing_buffers_read(struct file *filp, char __user *ubuf,
5155		     size_t count, loff_t *ppos)
5156{
5157	struct ftrace_buffer_info *info = filp->private_data;
5158	struct trace_iterator *iter = &info->iter;
5159	ssize_t ret;
5160	ssize_t size;
5161
5162	if (!count)
5163		return 0;
5164
5165	mutex_lock(&trace_types_lock);
5166
5167#ifdef CONFIG_TRACER_MAX_TRACE
5168	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5169		size = -EBUSY;
5170		goto out_unlock;
5171	}
5172#endif
5173
5174	if (!info->spare)
5175		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5176							  iter->cpu_file);
5177	size = -ENOMEM;
5178	if (!info->spare)
5179		goto out_unlock;
5180
5181	/* Do we have previous read data to read? */
5182	if (info->read < PAGE_SIZE)
5183		goto read;
5184
5185 again:
5186	trace_access_lock(iter->cpu_file);
5187	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5188				    &info->spare,
5189				    count,
5190				    iter->cpu_file, 0);
5191	trace_access_unlock(iter->cpu_file);
5192
5193	if (ret < 0) {
5194		if (trace_empty(iter)) {
5195			if ((filp->f_flags & O_NONBLOCK)) {
5196				size = -EAGAIN;
5197				goto out_unlock;
5198			}
5199			mutex_unlock(&trace_types_lock);
5200			iter->trace->wait_pipe(iter);
5201			mutex_lock(&trace_types_lock);
5202			if (signal_pending(current)) {
5203				size = -EINTR;
5204				goto out_unlock;
5205			}
5206			goto again;
5207		}
5208		size = 0;
5209		goto out_unlock;
5210	}
5211
5212	info->read = 0;
5213 read:
5214	size = PAGE_SIZE - info->read;
5215	if (size > count)
5216		size = count;
5217
5218	ret = copy_to_user(ubuf, info->spare + info->read, size);
5219	if (ret == size) {
5220		size = -EFAULT;
5221		goto out_unlock;
5222	}
5223	size -= ret;
5224
5225	*ppos += size;
5226	info->read += size;
5227
5228 out_unlock:
5229	mutex_unlock(&trace_types_lock);
5230
5231	return size;
5232}
5233
5234static int tracing_buffers_release(struct inode *inode, struct file *file)
5235{
5236	struct ftrace_buffer_info *info = file->private_data;
5237	struct trace_iterator *iter = &info->iter;
5238
5239	mutex_lock(&trace_types_lock);
5240
5241	__trace_array_put(iter->tr);
5242
5243	if (info->spare)
5244		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5245	kfree(info);
 
5246
5247	mutex_unlock(&trace_types_lock);
5248
5249	return 0;
5250}
5251
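/*
 * A buffer_ref ties a page taken from the ring buffer to the pipe buffers
 * built by tracing_buffers_splice_read(); the page is handed back to the
 * ring buffer only when the last reference held by the pipe is dropped.
 */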
5252struct buffer_ref {
5253	struct ring_buffer	*buffer;
5254	void			*page;
5255	int			ref;
 
5256};
5257
5258static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5259				    struct pipe_buffer *buf)
5260{
5261	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5262
5263	if (--ref->ref)
5264		return;
5265
5266	ring_buffer_free_read_page(ref->buffer, ref->page);
5267	kfree(ref);
5268	buf->private = 0;
5269}
5270
5271static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5272				struct pipe_buffer *buf)
5273{
5274	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5275
5276	ref->ref++;
5277}
5278
5279/* Pipe buffer operations for a buffer. */
5280static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5281	.can_merge		= 0,
5282	.confirm		= generic_pipe_buf_confirm,
5283	.release		= buffer_pipe_buf_release,
5284	.steal			= generic_pipe_buf_steal,
5285	.get			= buffer_pipe_buf_get,
5286};
5287
5288/*
5289 * Callback from splice_to_pipe(), if we need to release some pages
5290 * at the end of the spd in case we error'ed out in filling the pipe.
5291 */
5292static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5293{
5294	struct buffer_ref *ref =
5295		(struct buffer_ref *)spd->partial[i].private;
5296
5297	if (--ref->ref)
5298		return;
5299
5300	ring_buffer_free_read_page(ref->buffer, ref->page);
5301	kfree(ref);
5302	spd->partial[i].private = 0;
5303}
5304
5305static ssize_t
5306tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5307			    struct pipe_inode_info *pipe, size_t len,
5308			    unsigned int flags)
5309{
5310	struct ftrace_buffer_info *info = file->private_data;
5311	struct trace_iterator *iter = &info->iter;
5312	struct partial_page partial_def[PIPE_DEF_BUFFERS];
5313	struct page *pages_def[PIPE_DEF_BUFFERS];
5314	struct splice_pipe_desc spd = {
5315		.pages		= pages_def,
5316		.partial	= partial_def,
5317		.nr_pages_max	= PIPE_DEF_BUFFERS,
5318		.flags		= flags,
5319		.ops		= &buffer_pipe_buf_ops,
5320		.spd_release	= buffer_spd_release,
5321	};
5322	struct buffer_ref *ref;
5323	int entries, size, i;
5324	ssize_t ret;
5325
5326	mutex_lock(&trace_types_lock);
5327
5328#ifdef CONFIG_TRACER_MAX_TRACE
5329	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5330		ret = -EBUSY;
5331		goto out;
5332	}
5333#endif
5334
5335	if (splice_grow_spd(pipe, &spd)) {
5336		ret = -ENOMEM;
5337		goto out;
5338	}
5339
5340	if (*ppos & (PAGE_SIZE - 1)) {
5341		ret = -EINVAL;
5342		goto out;
5343	}
5344
5345	if (len & (PAGE_SIZE - 1)) {
5346		if (len < PAGE_SIZE) {
5347			ret = -EINVAL;
5348			goto out;
5349		}
5350		len &= PAGE_MASK;
5351	}
5352
5353 again:
5354	trace_access_lock(iter->cpu_file);
5355	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5356
5357	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5358		struct page *page;
5359		int r;
5360
5361		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5362		if (!ref)
 
5363			break;
 
5364
5365		ref->ref = 1;
5366		ref->buffer = iter->trace_buffer->buffer;
5367		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5368		if (!ref->page) {
5369			kfree(ref);
5370			break;
5371		}
 
5372
5373		r = ring_buffer_read_page(ref->buffer, &ref->page,
5374					  len, iter->cpu_file, 1);
5375		if (r < 0) {
5376			ring_buffer_free_read_page(ref->buffer, ref->page);
 
5377			kfree(ref);
5378			break;
5379		}
5380
5381		/*
5382		 * zero out any leftover data, as this is going to
5383		 * user land.
5384		 */
5385		size = ring_buffer_page_len(ref->page);
5386		if (size < PAGE_SIZE)
5387			memset(ref->page + size, 0, PAGE_SIZE - size);
5388
5389		page = virt_to_page(ref->page);
5390
5391		spd.pages[i] = page;
5392		spd.partial[i].len = PAGE_SIZE;
5393		spd.partial[i].offset = 0;
5394		spd.partial[i].private = (unsigned long)ref;
5395		spd.nr_pages++;
5396		*ppos += PAGE_SIZE;
5397
5398		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5399	}
5400
5401	trace_access_unlock(iter->cpu_file);
5402	spd.nr_pages = i;
5403
5404	/* did we read anything? */
5405	if (!spd.nr_pages) {
5406		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
5407			ret = -EAGAIN;
5408			goto out;
5409		}
5410		mutex_unlock(&trace_types_lock);
5411		iter->trace->wait_pipe(iter);
5412		mutex_lock(&trace_types_lock);
5413		if (signal_pending(current)) {
5414			ret = -EINTR;
5415			goto out;
5416		}
5417		goto again;
5418	}
5419
5420	ret = splice_to_pipe(pipe, &spd);
5421	splice_shrink_spd(&spd);
5422out:
5423	mutex_unlock(&trace_types_lock);
5424
5425	return ret;
5426}
5427
5428static const struct file_operations tracing_buffers_fops = {
5429	.open		= tracing_buffers_open,
5430	.read		= tracing_buffers_read,
5431	.poll		= tracing_buffers_poll,
5432	.release	= tracing_buffers_release,
5433	.splice_read	= tracing_buffers_splice_read,
5434	.llseek		= no_llseek,
5435};
5436
5437static ssize_t
5438tracing_stats_read(struct file *filp, char __user *ubuf,
5439		   size_t count, loff_t *ppos)
5440{
5441	struct inode *inode = file_inode(filp);
5442	struct trace_array *tr = inode->i_private;
5443	struct trace_buffer *trace_buf = &tr->trace_buffer;
5444	int cpu = tracing_get_cpu(inode);
5445	struct trace_seq *s;
5446	unsigned long cnt;
5447	unsigned long long t;
5448	unsigned long usec_rem;
5449
5450	s = kmalloc(sizeof(*s), GFP_KERNEL);
5451	if (!s)
5452		return -ENOMEM;
5453
5454	trace_seq_init(s);
5455
5456	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5457	trace_seq_printf(s, "entries: %ld\n", cnt);
5458
5459	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5460	trace_seq_printf(s, "overrun: %ld\n", cnt);
5461
5462	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5463	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5464
5465	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5466	trace_seq_printf(s, "bytes: %ld\n", cnt);
5467
5468	if (trace_clocks[tr->clock_id].in_ns) {
5469		/* local or global for trace_clock */
5470		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5471		usec_rem = do_div(t, USEC_PER_SEC);
5472		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5473								t, usec_rem);
5474
5475		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5476		usec_rem = do_div(t, USEC_PER_SEC);
5477		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5478	} else {
5479		/* counter or tsc mode for trace_clock */
5480		trace_seq_printf(s, "oldest event ts: %llu\n",
5481				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5482
5483		trace_seq_printf(s, "now ts: %llu\n",
5484				ring_buffer_time_stamp(trace_buf->buffer, cpu));
5485	}
5486
5487	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5488	trace_seq_printf(s, "dropped events: %ld\n", cnt);
5489
5490	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5491	trace_seq_printf(s, "read events: %ld\n", cnt);
5492
5493	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
5494
5495	kfree(s);
5496
5497	return count;
5498}
5499
5500static const struct file_operations tracing_stats_fops = {
5501	.open		= tracing_open_generic_tr,
5502	.read		= tracing_stats_read,
5503	.llseek		= generic_file_llseek,
5504	.release	= tracing_release_generic_tr,
5505};
5506
5507#ifdef CONFIG_DYNAMIC_FTRACE
5508
5509int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5510{
5511	return 0;
5512}
5513
5514static ssize_t
5515tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5516		  size_t cnt, loff_t *ppos)
5517{
5518	static char ftrace_dyn_info_buffer[1024];
5519	static DEFINE_MUTEX(dyn_info_mutex);
5520	unsigned long *p = filp->private_data;
5521	char *buf = ftrace_dyn_info_buffer;
5522	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5523	int r;
5524
5525	mutex_lock(&dyn_info_mutex);
5526	r = sprintf(buf, "%ld ", *p);
5527
5528	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5529	buf[r++] = '\n';
5530
5531	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5532
5533	mutex_unlock(&dyn_info_mutex);
5534
5535	return r;
5536}
5537
5538static const struct file_operations tracing_dyn_info_fops = {
5539	.open		= tracing_open_generic,
5540	.read		= tracing_read_dyn_info,
5541	.llseek		= generic_file_llseek,
5542};
5543#endif /* CONFIG_DYNAMIC_FTRACE */
5544
5545#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5546static void
5547ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5548{
5549	tracing_snapshot();
5550}
5551
5552static void
5553ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5554{
5555	unsigned long *count = (long *)data;
 
5556
5557	if (!*count)
5558		return;
5559
5560	if (*count != -1)
5561		(*count)--;
 
5562
5563	tracing_snapshot();
5564}
5565
5566static int
5567ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5568		      struct ftrace_probe_ops *ops, void *data)
5569{
5570	long count = (long)data;
 
5571
5572	seq_printf(m, "%ps:", (void *)ip);
5573
5574	seq_printf(m, "snapshot");
5575
5576	if (count == -1)
5577		seq_printf(m, ":unlimited\n");
5578	else
5579		seq_printf(m, ":count=%ld\n", count);
5580
5581	return 0;
5582}
5583
5584static struct ftrace_probe_ops snapshot_probe_ops = {
5585	.func			= ftrace_snapshot,
5586	.print			= ftrace_snapshot_print,
5587};
5588
5589static struct ftrace_probe_ops snapshot_count_probe_ops = {
5590	.func			= ftrace_count_snapshot,
5591	.print			= ftrace_snapshot_print,
5592};
5593
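/*
 * Handler for the "snapshot" function command of set_ftrace_filter, e.g.:
 *
 *	echo 'do_fault:snapshot' > set_ftrace_filter
 *	echo 'do_fault:snapshot:5' > set_ftrace_filter
 *	echo '!do_fault:snapshot' > set_ftrace_filter
 *
 * The optional :count limits how many snapshots are taken (do_fault here is
 * just an example function); '!' removes the probe again.
 */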
5594static int
5595ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5596			       char *glob, char *cmd, char *param, int enable)
5597{
5598	struct ftrace_probe_ops *ops;
5599	void *count = (void *)-1;
5600	char *number;
5601	int ret;
5602
5603	/* hash funcs only work with set_ftrace_filter */
5604	if (!enable)
5605		return -EINVAL;
5606
5607	ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
5608
5609	if (glob[0] == '!') {
5610		unregister_ftrace_function_probe_func(glob+1, ops);
5611		return 0;
5612	}
5613
5614	if (!param)
5615		goto out_reg;
5616
5617	number = strsep(&param, ":");
5618
5619	if (!strlen(number))
5620		goto out_reg;
5621
5622	/*
5623	 * We use the callback data field (which is a pointer)
5624	 * as our counter.
5625	 */
5626	ret = kstrtoul(number, 0, (unsigned long *)&count);
5627	if (ret)
5628		return ret;
5629
5630 out_reg:
5631	ret = register_ftrace_function_probe(glob, ops, count);
5632
5633	if (ret >= 0)
5634		alloc_snapshot(&global_trace);
5635
 
5636	return ret < 0 ? ret : 0;
5637}
5638
5639static struct ftrace_func_command ftrace_snapshot_cmd = {
5640	.name			= "snapshot",
5641	.func			= ftrace_trace_snapshot_callback,
5642};
5643
5644static __init int register_snapshot_cmd(void)
5645{
5646	return register_ftrace_command(&ftrace_snapshot_cmd);
5647}
5648#else
5649static inline __init int register_snapshot_cmd(void) { return 0; }
5650#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5651
5652struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
5653{
5654	if (tr->dir)
5655		return tr->dir;
5656
5657	if (!debugfs_initialized())
5658		return NULL;
5659
 
5660	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5661		tr->dir = debugfs_create_dir("tracing", NULL);
5662
5663	if (!tr->dir)
5664		pr_warn_once("Could not create debugfs directory 'tracing'\n");
5665
 
5666	return tr->dir;
5667}
5668
5669struct dentry *tracing_init_dentry(void)
5670{
5671	return tracing_init_dentry_tr(&global_trace);
5672}
5673
5674static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5675{
5676	struct dentry *d_tracer;
5677
5678	if (tr->percpu_dir)
5679		return tr->percpu_dir;
5680
5681	d_tracer = tracing_init_dentry_tr(tr);
5682	if (!d_tracer)
5683		return NULL;
5684
5685	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5686
5687	WARN_ONCE(!tr->percpu_dir,
5688		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
5689
5690	return tr->percpu_dir;
5691}
5692
5693static struct dentry *
5694trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5695		      void *data, long cpu, const struct file_operations *fops)
5696{
5697	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5698
5699	if (ret) /* See tracing_get_cpu() */
5700		ret->d_inode->i_cdev = (void *)(cpu + 1);
5701	return ret;
5702}
5703
5704static void
5705tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5706{
5707	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5708	struct dentry *d_cpu;
5709	char cpu_dir[30]; /* 30 characters should be more than enough */
5710
5711	if (!d_percpu)
5712		return;
5713
5714	snprintf(cpu_dir, 30, "cpu%ld", cpu);
5715	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5716	if (!d_cpu) {
5717		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5718		return;
5719	}
5720
5721	/* per cpu trace_pipe */
5722	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
5723				tr, cpu, &tracing_pipe_fops);
5724
5725	/* per cpu trace */
5726	trace_create_cpu_file("trace", 0644, d_cpu,
5727				tr, cpu, &tracing_fops);
5728
5729	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
5730				tr, cpu, &tracing_buffers_fops);
5731
5732	trace_create_cpu_file("stats", 0444, d_cpu,
5733				tr, cpu, &tracing_stats_fops);
5734
5735	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
5736				tr, cpu, &tracing_entries_fops);
5737
5738#ifdef CONFIG_TRACER_SNAPSHOT
5739	trace_create_cpu_file("snapshot", 0644, d_cpu,
5740				tr, cpu, &snapshot_fops);
5741
5742	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
5743				tr, cpu, &snapshot_raw_fops);
5744#endif
5745}
5746
5747#ifdef CONFIG_FTRACE_SELFTEST
5748/* Let selftest have access to static functions in this file */
5749#include "trace_selftest.c"
5750#endif
5751
5752struct trace_option_dentry {
5753	struct tracer_opt		*opt;
5754	struct tracer_flags		*flags;
5755	struct trace_array		*tr;
5756	struct dentry			*entry;
5757};
5758
5759static ssize_t
5760trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5761			loff_t *ppos)
5762{
5763	struct trace_option_dentry *topt = filp->private_data;
5764	char *buf;
5765
5766	if (topt->flags->val & topt->opt->bit)
5767		buf = "1\n";
5768	else
5769		buf = "0\n";
5770
5771	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5772}
5773
5774static ssize_t
5775trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5776			 loff_t *ppos)
5777{
5778	struct trace_option_dentry *topt = filp->private_data;
5779	unsigned long val;
5780	int ret;
5781
5782	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5783	if (ret)
5784		return ret;
5785
5786	if (val != 0 && val != 1)
5787		return -EINVAL;
5788
5789	if (!!(topt->flags->val & topt->opt->bit) != val) {
5790		mutex_lock(&trace_types_lock);
5791		ret = __set_tracer_option(topt->tr, topt->flags,
5792					  topt->opt, !val);
5793		mutex_unlock(&trace_types_lock);
5794		if (ret)
5795			return ret;
5796	}
5797
5798	*ppos += cnt;
5799
5800	return cnt;
5801}
5802
5803
5804static const struct file_operations trace_options_fops = {
5805	.open = tracing_open_generic,
5806	.read = trace_options_read,
5807	.write = trace_options_write,
5808	.llseek	= generic_file_llseek,
5809};
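/*
 * Editor's sketch (user space, hypothetical paths): trace_options_read() and
 * trace_options_write() above implement a plain "0\n"/"1\n" protocol for each
 * file under tracing/options/.  Toggling an option therefore looks like this;
 * "func_stack_trace" is only an example option name.
 */
#include <fcntl.h>
#include <unistd.h>

static int set_trace_option(const char *path, int on)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, on ? "1" : "0", 1);
	close(fd);
	return n == 1 ? 0 : -1;
}

/* e.g. set_trace_option("/sys/kernel/debug/tracing/options/func_stack_trace", 1); */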
5810
5811static ssize_t
5812trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5813			loff_t *ppos)
5814{
5815	long index = (long)filp->private_data;
 
 
5816	char *buf;
5817
5818	if (trace_flags & (1 << index))
 
 
5819		buf = "1\n";
5820	else
5821		buf = "0\n";
5822
5823	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5824}
5825
5826static ssize_t
5827trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5828			 loff_t *ppos)
5829{
5830	struct trace_array *tr = &global_trace;
5831	long index = (long)filp->private_data;
 
5832	unsigned long val;
5833	int ret;
5834
 
 
5835	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5836	if (ret)
5837		return ret;
5838
5839	if (val != 0 && val != 1)
5840		return -EINVAL;
5841
 
5842	mutex_lock(&trace_types_lock);
5843	ret = set_tracer_flag(tr, 1 << index, val);
5844	mutex_unlock(&trace_types_lock);
 
5845
5846	if (ret < 0)
5847		return ret;
5848
5849	*ppos += cnt;
5850
5851	return cnt;
5852}
5853
5854static const struct file_operations trace_options_core_fops = {
5855	.open = tracing_open_generic,
5856	.read = trace_options_core_read,
5857	.write = trace_options_core_write,
5858	.llseek = generic_file_llseek,
5859};
5860
5861struct dentry *trace_create_file(const char *name,
5862				 umode_t mode,
5863				 struct dentry *parent,
5864				 void *data,
5865				 const struct file_operations *fops)
5866{
5867	struct dentry *ret;
5868
5869	ret = debugfs_create_file(name, mode, parent, data, fops);
5870	if (!ret)
5871		pr_warning("Could not create debugfs '%s' entry\n", name);
5872
5873	return ret;
5874}
5875
5876
5877static struct dentry *trace_options_init_dentry(struct trace_array *tr)
5878{
5879	struct dentry *d_tracer;
5880
5881	if (tr->options)
5882		return tr->options;
5883
5884	d_tracer = tracing_init_dentry_tr(tr);
5885	if (!d_tracer)
5886		return NULL;
5887
5888	tr->options = debugfs_create_dir("options", d_tracer);
5889	if (!tr->options) {
5890		pr_warning("Could not create debugfs directory 'options'\n");
5891		return NULL;
5892	}
5893
5894	return tr->options;
5895}
5896
5897static void
5898create_trace_option_file(struct trace_array *tr,
5899			 struct trace_option_dentry *topt,
5900			 struct tracer_flags *flags,
5901			 struct tracer_opt *opt)
5902{
5903	struct dentry *t_options;
5904
5905	t_options = trace_options_init_dentry(tr);
5906	if (!t_options)
5907		return;
5908
5909	topt->flags = flags;
5910	topt->opt = opt;
5911	topt->tr = tr;
5912
5913	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
5914				    &trace_options_fops);
5915
5916}
5917
5918static struct trace_option_dentry *
5919create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
5920{
5921	struct trace_option_dentry *topts;
 
5922	struct tracer_flags *flags;
5923	struct tracer_opt *opts;
5924	int cnt;
 
5925
5926	if (!tracer)
5927		return NULL;
5928
5929	flags = tracer->flags;
5930
5931	if (!flags || !flags->opts)
5932		return NULL;
5933
5934	opts = flags->opts;
5935
5936	for (cnt = 0; opts[cnt].name; cnt++)
5937		;
5938
5939	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
5940	if (!topts)
5941		return NULL;
5942
5943	for (cnt = 0; opts[cnt].name; cnt++)
5944		create_trace_option_file(tr, &topts[cnt], flags,
5945					 &opts[cnt]);
5946
5947	return topts;
5948}
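/*
 * Editor's sketch: the shape of the flags/opts a tracer would hand to
 * create_trace_option_files() above.  The empty terminator entry is what the
 * counting loop relies on.  "mytracer"/MYTRACER_OPT_VERBOSE are hypothetical
 * names; TRACER_OPT() is assumed to be the usual helper from trace.h.
 */
#define MYTRACER_OPT_VERBOSE	0x1

static struct tracer_opt mytracer_opts[] = {
	{ TRACER_OPT(verbose, MYTRACER_OPT_VERBOSE) },
	{ } /* terminator */
};

static struct tracer_flags mytracer_flags = {
	.val	= 0,
	.opts	= mytracer_opts,
};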
5949
5950static void
5951destroy_trace_option_files(struct trace_option_dentry *topts)
5952{
5953	int cnt;
5954
5955	if (!topts)
5956		return;
5957
5958	for (cnt = 0; topts[cnt].opt; cnt++) {
5959		if (topts[cnt].entry)
5960			debugfs_remove(topts[cnt].entry);
 
 
5961	}
5962
5963	kfree(topts);
5964}
5965
5966static struct dentry *
5967create_trace_option_core_file(struct trace_array *tr,
5968			      const char *option, long index)
5969{
5970	struct dentry *t_options;
5971
5972	t_options = trace_options_init_dentry(tr);
5973	if (!t_options)
5974		return NULL;
5975
5976	return trace_create_file(option, 0644, t_options, (void *)index,
5977				    &trace_options_core_fops);
 
5978}
5979
5980static __init void create_trace_options_dir(struct trace_array *tr)
5981{
5982	struct dentry *t_options;
 
5983	int i;
5984
5985	t_options = trace_options_init_dentry(tr);
5986	if (!t_options)
5987		return;
5988
5989	for (i = 0; trace_options[i]; i++)
5990		create_trace_option_core_file(tr, trace_options[i], i);
 
 
 
5991}
5992
5993static ssize_t
5994rb_simple_read(struct file *filp, char __user *ubuf,
5995	       size_t cnt, loff_t *ppos)
5996{
5997	struct trace_array *tr = filp->private_data;
5998	char buf[64];
5999	int r;
6000
6001	r = tracer_tracing_is_on(tr);
6002	r = sprintf(buf, "%d\n", r);
6003
6004	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6005}
6006
6007static ssize_t
6008rb_simple_write(struct file *filp, const char __user *ubuf,
6009		size_t cnt, loff_t *ppos)
6010{
6011	struct trace_array *tr = filp->private_data;
6012	struct ring_buffer *buffer = tr->trace_buffer.buffer;
6013	unsigned long val;
6014	int ret;
6015
6016	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6017	if (ret)
6018		return ret;
6019
6020	if (buffer) {
6021		mutex_lock(&trace_types_lock);
6022		if (val) {
 
 
6023			tracer_tracing_on(tr);
6024			if (tr->current_trace->start)
6025				tr->current_trace->start(tr);
6026		} else {
6027			tracer_tracing_off(tr);
6028			if (tr->current_trace->stop)
6029				tr->current_trace->stop(tr);
6030		}
6031		mutex_unlock(&trace_types_lock);
6032	}
6033
6034	(*ppos)++;
6035
6036	return cnt;
6037}
6038
6039static const struct file_operations rb_simple_fops = {
6040	.open		= tracing_open_generic_tr,
6041	.read		= rb_simple_read,
6042	.write		= rb_simple_write,
6043	.release	= tracing_release_generic_tr,
6044	.llseek		= default_llseek,
6045};
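/*
 * Editor's sketch: rb_simple_write() above forwards the tracing_on toggle to
 * the current tracer's ->start()/->stop() hooks.  A tracer that needs to
 * react would provide them roughly like this ("mytracer" is hypothetical and
 * only the relevant struct tracer members are filled in).
 */
static void mytracer_start(struct trace_array *tr)
{
	/* re-arm per-tracer state when tracing_on flips to 1 */
}

static void mytracer_stop(struct trace_array *tr)
{
	/* quiesce when tracing_on flips to 0 */
}

static struct tracer mytracer_sketch __maybe_unused = {
	.name	= "mytracer",
	.start	= mytracer_start,
	.stop	= mytracer_stop,
};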
6046
6047struct dentry *trace_instance_dir;
6048
6049static void
6050init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6051
6052static int
6053allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6054{
6055	enum ring_buffer_flags rb_flags;
6056
6057	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6058
6059	buf->tr = tr;
6060
6061	buf->buffer = ring_buffer_alloc(size, rb_flags);
6062	if (!buf->buffer)
6063		return -ENOMEM;
6064
6065	buf->data = alloc_percpu(struct trace_array_cpu);
6066	if (!buf->data) {
6067		ring_buffer_free(buf->buffer);
 
6068		return -ENOMEM;
6069	}
6070
6071	/* Allocate the first page for all buffers */
6072	set_buffer_entries(&tr->trace_buffer,
6073			   ring_buffer_size(tr->trace_buffer.buffer, 0));
6074
6075	return 0;
6076}
6077
6078static int allocate_trace_buffers(struct trace_array *tr, int size)
6079{
6080	int ret;
6081
6082	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6083	if (ret)
6084		return ret;
6085
6086#ifdef CONFIG_TRACER_MAX_TRACE
6087	ret = allocate_trace_buffer(tr, &tr->max_buffer,
6088				    allocate_snapshot ? size : 1);
6089	if (WARN_ON(ret)) {
6090		ring_buffer_free(tr->trace_buffer.buffer);
6091		free_percpu(tr->trace_buffer.data);
 
 
6092		return -ENOMEM;
6093	}
6094	tr->allocated_snapshot = allocate_snapshot;
6095
6096	/*
6097	 * Only the top level trace array gets its snapshot allocated
6098	 * from the kernel command line.
6099	 */
6100	allocate_snapshot = false;
6101#endif
 
6102	return 0;
6103}
6104
6105static int new_instance_create(const char *name)
6106{
6107	struct trace_array *tr;
6108	int ret;
6109
6110	mutex_lock(&trace_types_lock);
6111
6112	ret = -EEXIST;
6113	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6114		if (tr->name && strcmp(tr->name, name) == 0)
6115			goto out_unlock;
 
 
6116	}
6117
6118	ret = -ENOMEM;
6119	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6120	if (!tr)
6121		goto out_unlock;
6122
6123	tr->name = kstrdup(name, GFP_KERNEL);
6124	if (!tr->name)
6125		goto out_free_tr;
6126
6127	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6128		goto out_free_tr;
6129
 
 
6130	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6131
6132	raw_spin_lock_init(&tr->start_lock);
6133
 
 
6134	tr->current_trace = &nop_trace;
6135
6136	INIT_LIST_HEAD(&tr->systems);
6137	INIT_LIST_HEAD(&tr->events);
 
 
6138
6139	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6140		goto out_free_tr;
6141
6142	tr->dir = debugfs_create_dir(name, trace_instance_dir);
6143	if (!tr->dir)
6144		goto out_free_tr;
6145
6146	ret = event_trace_add_tracer(tr->dir, tr);
6147	if (ret) {
6148		debugfs_remove_recursive(tr->dir);
6149		goto out_free_tr;
6150	}
6151
6152	init_tracer_debugfs(tr, tr->dir);
6153
6154	list_add(&tr->list, &ftrace_trace_arrays);
6155
6156	mutex_unlock(&trace_types_lock);
6157
6158	return 0;
6159
6160 out_free_tr:
6161	if (tr->trace_buffer.buffer)
6162		ring_buffer_free(tr->trace_buffer.buffer);
6163	free_cpumask_var(tr->tracing_cpumask);
6164	kfree(tr->name);
6165	kfree(tr);
6166
6167 out_unlock:
6168	mutex_unlock(&trace_types_lock);
6169
6170	return ret;
 
 
 
6171
6172}
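/*
 * Editor's sketch (user space, hypothetical instance name): the mkdir/rmdir
 * inode hooks further down route an ordinary mkdir(2)/rmdir(2) inside the
 * "instances" directory to new_instance_create()/instance_delete(), so an
 * instance is created and destroyed like this.
 */
#include <sys/stat.h>
#include <unistd.h>

static void instance_demo(void)
{
	/* creates a new trace_array with its own buffers and event files */
	mkdir("/sys/kernel/debug/tracing/instances/foo", 0755);

	/* tears it down again (fails with EBUSY while the instance is in use) */
	rmdir("/sys/kernel/debug/tracing/instances/foo");
}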
6173
6174static int instance_delete(const char *name)
6175{
6176	struct trace_array *tr;
6177	int found = 0;
6178	int ret;
6179
 
6180	mutex_lock(&trace_types_lock);
6181
6182	ret = -ENODEV;
6183	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6184		if (tr->name && strcmp(tr->name, name) == 0) {
6185			found = 1;
6186			break;
6187		}
6188	}
6189	if (!found)
6190		goto out_unlock;
6191
6192	ret = -EBUSY;
6193	if (tr->ref)
6194		goto out_unlock;
6195
6196	list_del(&tr->list);
6197
6198	tracing_set_nop(tr);
 
6199	event_trace_del_tracer(tr);
 
6200	ftrace_destroy_function_files(tr);
6201	debugfs_remove_recursive(tr->dir);
6202	free_percpu(tr->trace_buffer.data);
6203	ring_buffer_free(tr->trace_buffer.buffer);
6204
6205	kfree(tr->name);
6206	kfree(tr);
6207
6208	ret = 0;
6209
6210 out_unlock:
6211	mutex_unlock(&trace_types_lock);
6212
6213	return ret;
6214}
6215
6216static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
6217{
6218	struct dentry *parent;
6219	int ret;
6220
6221	/* Paranoid: Make sure the parent is the "instances" directory */
6222	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6223	if (WARN_ON_ONCE(parent != trace_instance_dir))
6224		return -ENOENT;
6225
6226	/*
6227	 * The inode mutex is locked, but debugfs_create_dir() will also
6228	 * take the mutex. As the instances directory cannot be destroyed
6229	 * or changed in any other way, it is safe to unlock it, and
6230	 * let the dentry try. If two users try to make the same dir at
6231	 * the same time, then new_instance_create() will determine the
6232	 * winner.
6233	 */
6234	mutex_unlock(&inode->i_mutex);
6235
6236	ret = new_instance_create(dentry->d_iname);
6237
6238	mutex_lock(&inode->i_mutex);
6239
6240	return ret;
6241}
 
6242
6243static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6244{
6245	struct dentry *parent;
6246	int ret;
6247
6248	/* Paranoid: Make sure the parent is the "instances" directory */
6249	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6250	if (WARN_ON_ONCE(parent != trace_instance_dir))
6251		return -ENOENT;
6252
6253	/* The caller did a dget() on dentry */
6254	mutex_unlock(&dentry->d_inode->i_mutex);
6255
6256	/*
6257	 * The inode mutex is locked, but debugfs_create_dir() will also
6258	 * take the mutex. As the instances directory cannot be destroyed
6259	 * or changed in any other way, it is safe to unlock it, and
6260	 * let the dentry try. If two users try to remove the same dir at
6261	 * the same time, then instance_delete() will determine the
6262	 * winner.
6263	 */
6264	mutex_unlock(&inode->i_mutex);
6265
6266	ret = instance_delete(dentry->d_iname);
 
 
 
6267
6268	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6269	mutex_lock(&dentry->d_inode->i_mutex);
6270
6271	return ret;
6272}
6273
6274static const struct inode_operations instance_dir_inode_operations = {
6275	.lookup		= simple_lookup,
6276	.mkdir		= instance_mkdir,
6277	.rmdir		= instance_rmdir,
6278};
6279
6280static __init void create_trace_instances(struct dentry *d_tracer)
6281{
6282	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6283	if (WARN_ON(!trace_instance_dir))
6284		return;
6285
6286	/* Hijack the dir inode operations, to allow mkdir */
6287	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6288}
6289
6290static void
6291init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6292{
 
6293	int cpu;
6294
6295	trace_create_file("available_tracers", 0444, d_tracer,
6296			tr, &show_traces_fops);
6297
6298	trace_create_file("current_tracer", 0644, d_tracer,
6299			tr, &set_tracer_fops);
6300
6301	trace_create_file("tracing_cpumask", 0644, d_tracer,
6302			  tr, &tracing_cpumask_fops);
6303
6304	trace_create_file("trace_options", 0644, d_tracer,
6305			  tr, &tracing_iter_fops);
6306
6307	trace_create_file("trace", 0644, d_tracer,
6308			  tr, &tracing_fops);
6309
6310	trace_create_file("trace_pipe", 0444, d_tracer,
6311			  tr, &tracing_pipe_fops);
6312
6313	trace_create_file("buffer_size_kb", 0644, d_tracer,
6314			  tr, &tracing_entries_fops);
6315
6316	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6317			  tr, &tracing_total_entries_fops);
6318
6319	trace_create_file("free_buffer", 0200, d_tracer,
6320			  tr, &tracing_free_buffer_fops);
6321
6322	trace_create_file("trace_marker", 0220, d_tracer,
6323			  tr, &tracing_mark_fops);
6324
6325	trace_create_file("trace_clock", 0644, d_tracer, tr,
6326			  &trace_clock_fops);
6327
6328	trace_create_file("tracing_on", 0644, d_tracer,
6329			  tr, &rb_simple_fops);
6330
6331	if (ftrace_create_function_files(tr, d_tracer))
6332		WARN(1, "Could not allocate function filter files");
6333
6334#ifdef CONFIG_TRACER_SNAPSHOT
6335	trace_create_file("snapshot", 0644, d_tracer,
6336			  tr, &snapshot_fops);
6337#endif
6338
 
 
 
6339	for_each_tracing_cpu(cpu)
6340		tracing_init_debugfs_percpu(tr, cpu);
6341
 
6342}
6343
6344static __init int tracer_init_debugfs(void)
6345{
6346	struct dentry *d_tracer;
6347
6348	trace_access_lock_init();
6349
6350	d_tracer = tracing_init_dentry();
6351	if (!d_tracer)
6352		return 0;
6353
6354	init_tracer_debugfs(&global_trace, d_tracer);
6355
6356#ifdef CONFIG_TRACER_MAX_TRACE
6357	trace_create_file("tracing_max_latency", 0644, d_tracer,
6358			&tracing_max_latency, &tracing_max_lat_fops);
6359#endif
6360
6361	trace_create_file("tracing_thresh", 0644, d_tracer,
6362			&tracing_thresh, &tracing_max_lat_fops);
6363
6364	trace_create_file("README", 0444, d_tracer,
6365			NULL, &tracing_readme_fops);
6366
6367	trace_create_file("saved_cmdlines", 0444, d_tracer,
6368			NULL, &tracing_saved_cmdlines_fops);
6369
6370#ifdef CONFIG_DYNAMIC_FTRACE
6371	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6372			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6373#endif
6374
6375	create_trace_instances(d_tracer);
6376
6377	create_trace_options_dir(&global_trace);
6378
6379	return 0;
6380}
6381
 
 
6382static int trace_panic_handler(struct notifier_block *this,
6383			       unsigned long event, void *unused)
6384{
6385	if (ftrace_dump_on_oops)
6386		ftrace_dump(ftrace_dump_on_oops);
6387	return NOTIFY_OK;
6388}
6389
6390static struct notifier_block trace_panic_notifier = {
6391	.notifier_call  = trace_panic_handler,
6392	.next           = NULL,
6393	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
6394};
6395
6396static int trace_die_handler(struct notifier_block *self,
6397			     unsigned long val,
6398			     void *data)
6399{
6400	switch (val) {
6401	case DIE_OOPS:
6402		if (ftrace_dump_on_oops)
6403			ftrace_dump(ftrace_dump_on_oops);
6404		break;
6405	default:
6406		break;
6407	}
6408	return NOTIFY_OK;
6409}
6410
6411static struct notifier_block trace_die_notifier = {
6412	.notifier_call = trace_die_handler,
6413	.priority = 200
6414};
6415
6416/*
6417 * printk is set to max of 1024, we really don't need it that big.
6418 * Nothing should be printing 1000 characters anyway.
6419 */
6420#define TRACE_MAX_PRINT		1000
6421
6422/*
6423 * Define here KERN_TRACE so that we have one place to modify
6424 * it if we decide to change what log level the ftrace dump
6425 * should be at.
6426 */
6427#define KERN_TRACE		KERN_EMERG
6428
6429void
6430trace_printk_seq(struct trace_seq *s)
6431{
6432	/* Probably should print a warning here. */
6433	if (s->len >= TRACE_MAX_PRINT)
6434		s->len = TRACE_MAX_PRINT;
6435
6436	/* should be nul-terminated, but we are paranoid. */
6437	s->buffer[s->len] = 0;
6438
6439	printk(KERN_TRACE "%s", s->buffer);
6440
6441	trace_seq_init(s);
6442}
6443
6444void trace_init_global_iter(struct trace_iterator *iter)
6445{
6446	iter->tr = &global_trace;
6447	iter->trace = iter->tr->current_trace;
6448	iter->cpu_file = RING_BUFFER_ALL_CPUS;
6449	iter->trace_buffer = &global_trace.trace_buffer;
6450
6451	if (iter->trace && iter->trace->open)
6452		iter->trace->open(iter);
6453
6454	/* Annotate start of buffers if we had overruns */
6455	if (ring_buffer_overruns(iter->trace_buffer->buffer))
6456		iter->iter_flags |= TRACE_FILE_ANNOTATE;
6457
6458	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
6459	if (trace_clocks[iter->tr->clock_id].in_ns)
6460		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6461}
6462
6463void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6464{
6465	/* use static because iter can be a bit big for the stack */
6466	static struct trace_iterator iter;
6467	static atomic_t dump_running;
 
6468	unsigned int old_userobj;
6469	unsigned long flags;
6470	int cnt = 0, cpu;
6471
6472	/* Only allow one dump user at a time. */
6473	if (atomic_inc_return(&dump_running) != 1) {
6474		atomic_dec(&dump_running);
6475		return;
6476	}
6477
6478	/*
6479	 * Always turn off tracing when we dump.
6480	 * We don't need to show trace output of what happens
6481	 * between multiple crashes.
6482	 *
6483	 * If the user does a sysrq-z, then they can re-enable
6484	 * tracing with echo 1 > tracing_on.
6485	 */
6486	tracing_off();
6487
6488	local_irq_save(flags);
 
6489
6490	/* Simulate the iterator */
6491	trace_init_global_iter(&iter);
6492
6493	for_each_tracing_cpu(cpu) {
6494		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6495	}
6496
6497	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6498
6499	/* don't look at user memory in panic mode */
6500	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6501
6502	switch (oops_dump_mode) {
6503	case DUMP_ALL:
6504		iter.cpu_file = RING_BUFFER_ALL_CPUS;
6505		break;
6506	case DUMP_ORIG:
6507		iter.cpu_file = raw_smp_processor_id();
6508		break;
6509	case DUMP_NONE:
6510		goto out_enable;
6511	default:
6512		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6513		iter.cpu_file = RING_BUFFER_ALL_CPUS;
6514	}
6515
6516	printk(KERN_TRACE "Dumping ftrace buffer:\n");
6517
6518	/* Did function tracer already get disabled? */
6519	if (ftrace_is_dead()) {
6520		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6521		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
6522	}
6523
6524	/*
6525	 * We need to stop all tracing on all CPUs to read
6526	 * the next buffer. This is a bit expensive, but is
6527	 * not done often. We read all that we can,
6528	 * and then release the locks again.
6529	 */
6530
6531	while (!trace_empty(&iter)) {
6532
6533		if (!cnt)
6534			printk(KERN_TRACE "---------------------------------\n");
6535
6536		cnt++;
6537
6538		/* reset all but tr, trace, and overruns */
6539		memset(&iter.seq, 0,
6540		       sizeof(struct trace_iterator) -
6541		       offsetof(struct trace_iterator, seq));
6542		iter.iter_flags |= TRACE_FILE_LAT_FMT;
6543		iter.pos = -1;
6544
6545		if (trace_find_next_entry_inc(&iter) != NULL) {
6546			int ret;
6547
6548			ret = print_trace_line(&iter);
6549			if (ret != TRACE_TYPE_NO_CONSUME)
6550				trace_consume(&iter);
6551		}
6552		touch_nmi_watchdog();
6553
6554		trace_printk_seq(&iter.seq);
6555	}
6556
6557	if (!cnt)
6558		printk(KERN_TRACE "   (ftrace buffer empty)\n");
6559	else
6560		printk(KERN_TRACE "---------------------------------\n");
6561
6562 out_enable:
6563	trace_flags |= old_userobj;
6564
6565	for_each_tracing_cpu(cpu) {
6566		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6567	}
6568 	atomic_dec(&dump_running);
 
6569	local_irq_restore(flags);
6570}
6571EXPORT_SYMBOL_GPL(ftrace_dump);
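/*
 * Editor's sketch: because ftrace_dump() is exported above, a module can
 * spill the trace buffers from its own fatal-error path.  DUMP_ALL and
 * DUMP_ORIG are the enum ftrace_dump_mode values handled by the switch in
 * ftrace_dump(); the function name below is hypothetical.
 */
static void example_fatal_error_path(void)
{
	/* dump every CPU's buffer to the console, as sysrq-z would */
	ftrace_dump(DUMP_ALL);
}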
6572
6573__init static int tracer_alloc_buffers(void)
6574{
6575	int ring_buf_size;
6576	int ret = -ENOMEM;
6577
6578
6579	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6580		goto out;
6581
6582	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
6583		goto out_free_buffer_mask;
6584
6585	/* Only allocate trace_printk buffers if a trace_printk exists */
6586	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6587		/* Must be called before global_trace.buffer is allocated */
6588		trace_printk_init_buffers();
6589
6590	/* To save memory, keep the ring buffer size to its minimum */
6591	if (ring_buffer_expanded)
6592		ring_buf_size = trace_buf_size;
6593	else
6594		ring_buf_size = 1;
6595
6596	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6597	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
6598
6599	raw_spin_lock_init(&global_trace.start_lock);
6600
6601	/* Used for event triggers */
 
6602	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6603	if (!temp_buffer)
6604		goto out_free_cpumask;
 
 
 
6605
6606	/* TODO: make the number of buffers hot pluggable with CPUS */
6607	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6608		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6609		WARN_ON(1);
6610		goto out_free_temp_buffer;
6611	}
6612
6613	if (global_trace.buffer_disabled)
6614		tracing_off();
6615
6616	trace_init_cmdlines();
6617
6618	if (trace_boot_clock) {
6619		ret = tracing_set_clock(&global_trace, trace_boot_clock);
6620		if (ret < 0)
6621			pr_warning("Trace clock %s not defined, going back to default\n",
6622				   trace_boot_clock);
6623	}
6624
6625	/*
6626	 * register_tracer() might reference current_trace, so it
6627	 * needs to be set before we register anything. This is
6628	 * just a bootstrap of current_trace anyway.
6629	 */
6630	global_trace.current_trace = &nop_trace;
6631
6632	register_tracer(&nop_trace);
6633
 
 
 
6634	/* All seems OK, enable tracing */
6635	tracing_disabled = 0;
6636
6637	atomic_notifier_chain_register(&panic_notifier_list,
6638				       &trace_panic_notifier);
6639
6640	register_die_notifier(&trace_die_notifier);
6641
6642	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6643
6644	INIT_LIST_HEAD(&global_trace.systems);
6645	INIT_LIST_HEAD(&global_trace.events);
 
 
6646	list_add(&global_trace.list, &ftrace_trace_arrays);
6647
6648	while (trace_boot_options) {
6649		char *option;
6650
6651		option = strsep(&trace_boot_options, ",");
6652		trace_set_options(&global_trace, option);
6653	}
6654
6655	register_snapshot_cmd();
6656
 
 
6657	return 0;
6658
 
 
6659out_free_temp_buffer:
6660	ring_buffer_free(temp_buffer);
 
 
6661out_free_cpumask:
6662	free_percpu(global_trace.trace_buffer.data);
6663#ifdef CONFIG_TRACER_MAX_TRACE
6664	free_percpu(global_trace.max_buffer.data);
6665#endif
6666	free_cpumask_var(global_trace.tracing_cpumask);
6667out_free_buffer_mask:
6668	free_cpumask_var(tracing_buffer_mask);
6669out:
6670	return ret;
6671}
6672
6673__init static int clear_boot_tracer(void)
6674{
6675	/*
6676	 * The default bootup tracer's name buffer lives in an init section.
6677	 * This function is called as a late_initcall. If we did not
6678	 * find the boot tracer, then clear it out, to prevent
6679	 * later registration from accessing the buffer that is
6680	 * about to be freed.
6681	 */
6682	if (!default_bootup_tracer)
6683		return 0;
6684
6685	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6686	       default_bootup_tracer);
6687	default_bootup_tracer = NULL;
6688
 
 
6689	return 0;
6690}
6691
6692early_initcall(tracer_alloc_buffers);
6693fs_initcall(tracer_init_debugfs);
6694late_initcall(clear_boot_tracer);
v5.14.15
    1// SPDX-License-Identifier: GPL-2.0
    2/*
    3 * ring buffer based function tracer
    4 *
    5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
    6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
    7 *
    8 * Originally taken from the RT patch by:
    9 *    Arnaldo Carvalho de Melo <acme@redhat.com>
   10 *
   11 * Based on code from the latency_tracer, that is:
   12 *  Copyright (C) 2004-2006 Ingo Molnar
   13 *  Copyright (C) 2004 Nadia Yvette Chambers
   14 */
   15#include <linux/ring_buffer.h>
   16#include <generated/utsrelease.h>
   17#include <linux/stacktrace.h>
   18#include <linux/writeback.h>
   19#include <linux/kallsyms.h>
   20#include <linux/security.h>
   21#include <linux/seq_file.h>
   22#include <linux/notifier.h>
   23#include <linux/irqflags.h>
   24#include <linux/debugfs.h>
   25#include <linux/tracefs.h>
   26#include <linux/pagemap.h>
   27#include <linux/hardirq.h>
   28#include <linux/linkage.h>
   29#include <linux/uaccess.h>
   30#include <linux/vmalloc.h>
   31#include <linux/ftrace.h>
   32#include <linux/module.h>
   33#include <linux/percpu.h>
   34#include <linux/splice.h>
   35#include <linux/kdebug.h>
   36#include <linux/string.h>
   37#include <linux/mount.h>
   38#include <linux/rwsem.h>
   39#include <linux/slab.h>
   40#include <linux/ctype.h>
   41#include <linux/init.h>
   42#include <linux/panic_notifier.h>
   43#include <linux/poll.h>
   44#include <linux/nmi.h>
   45#include <linux/fs.h>
   46#include <linux/trace.h>
   47#include <linux/sched/clock.h>
   48#include <linux/sched/rt.h>
   49#include <linux/fsnotify.h>
   50#include <linux/irq_work.h>
   51#include <linux/workqueue.h>
   52
   53#include "trace.h"
   54#include "trace_output.h"
   55
   56/*
   57 * On boot up, the ring buffer is set to the minimum size, so that
   58 * we do not waste memory on systems that are not using tracing.
   59 */
   60bool ring_buffer_expanded;
   61
   62/*
   63 * We need to change this state when a selftest is running.
   64 * A selftest will lurk into the ring-buffer to count the
   65 * entries inserted during the selftest although some concurrent
   66 * insertions into the ring-buffer such as trace_printk could occurred
   67 * at the same time, giving false positive or negative results.
   68 */
   69static bool __read_mostly tracing_selftest_running;
   70
   71/*
    72 * If boot-time tracing (including tracers/events set up via the kernel
    73 * cmdline) is running, we do not want to run SELFTEST.
   74 */
   75bool __read_mostly tracing_selftest_disabled;
   76
   77#ifdef CONFIG_FTRACE_STARTUP_TEST
   78void __init disable_tracing_selftest(const char *reason)
   79{
   80	if (!tracing_selftest_disabled) {
   81		tracing_selftest_disabled = true;
   82		pr_info("Ftrace startup test is disabled due to %s\n", reason);
   83	}
   84}
   85#endif
   86
   87/* Pipe tracepoints to printk */
   88struct trace_iterator *tracepoint_print_iter;
   89int tracepoint_printk;
   90static bool tracepoint_printk_stop_on_boot __initdata;
   91static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
   92
   93/* For tracers that don't implement custom flags */
   94static struct tracer_opt dummy_tracer_opt[] = {
   95	{ }
   96};
   97
   98static int
   99dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
  100{
  101	return 0;
  102}
  103
  104/*
  105 * To prevent the comm cache from being overwritten when no
  106 * tracing is active, only save the comm when a trace event
  107 * occurred.
  108 */
  109static DEFINE_PER_CPU(bool, trace_taskinfo_save);
  110
  111/*
  112 * Kill all tracing for good (never come back).
  113 * It is initialized to 1 but will turn to zero if the initialization
  114 * of the tracer is successful. But that is the only place that sets
  115 * this back to zero.
  116 */
  117static int tracing_disabled = 1;
  118
 
 
  119cpumask_var_t __read_mostly	tracing_buffer_mask;
  120
  121/*
  122 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
  123 *
  124 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
  125 * is set, then ftrace_dump is called. This will output the contents
  126 * of the ftrace buffers to the console.  This is very useful for
  127 * capturing traces that lead to crashes and outputing it to a
  128 * serial console.
  129 *
  130 * It is default off, but you can enable it with either specifying
  131 * "ftrace_dump_on_oops" in the kernel command line, or setting
  132 * /proc/sys/kernel/ftrace_dump_on_oops
  133 * Set 1 if you want to dump buffers of all CPUs
  134 * Set 2 if you want to dump the buffer of the CPU that triggered oops
  135 */
  136
  137enum ftrace_dump_mode ftrace_dump_on_oops;
  138
  139/* When set, tracing will stop when a WARN*() is hit */
  140int __disable_trace_on_warning;
  141
  142#ifdef CONFIG_TRACE_EVAL_MAP_FILE
  143/* Map of enums to their values, for "eval_map" file */
  144struct trace_eval_map_head {
  145	struct module			*mod;
  146	unsigned long			length;
  147};
  148
  149union trace_eval_map_item;
  150
  151struct trace_eval_map_tail {
  152	/*
  153	 * "end" is first and points to NULL as it must be different
  154	 * than "mod" or "eval_string"
  155	 */
  156	union trace_eval_map_item	*next;
  157	const char			*end;	/* points to NULL */
  158};
  159
  160static DEFINE_MUTEX(trace_eval_mutex);
  161
  162/*
  163 * The trace_eval_maps are saved in an array with two extra elements,
  164 * one at the beginning, and one at the end. The beginning item contains
  165 * the count of the saved maps (head.length), and the module they
  166 * belong to if not built in (head.mod). The ending item contains a
  167 * pointer to the next array of saved eval_map items.
  168 */
  169union trace_eval_map_item {
  170	struct trace_eval_map		map;
  171	struct trace_eval_map_head	head;
  172	struct trace_eval_map_tail	tail;
  173};
  174
  175static union trace_eval_map_item *trace_eval_maps;
  176#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
  177
  178int tracing_set_tracer(struct trace_array *tr, const char *buf);
  179static void ftrace_trace_userstack(struct trace_array *tr,
  180				   struct trace_buffer *buffer,
  181				   unsigned int trace_ctx);
  182
  183#define MAX_TRACER_SIZE		100
  184static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
  185static char *default_bootup_tracer;
  186
  187static bool allocate_snapshot;
  188
  189static int __init set_cmdline_ftrace(char *str)
  190{
  191	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
  192	default_bootup_tracer = bootup_tracer_buf;
  193	/* We are using ftrace early, expand it */
  194	ring_buffer_expanded = true;
  195	return 1;
  196}
  197__setup("ftrace=", set_cmdline_ftrace);
  198
  199static int __init set_ftrace_dump_on_oops(char *str)
  200{
  201	if (*str++ != '=' || !*str || !strcmp("1", str)) {
  202		ftrace_dump_on_oops = DUMP_ALL;
  203		return 1;
  204	}
  205
  206	if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
  207		ftrace_dump_on_oops = DUMP_ORIG;
   208		return 1;
   209	}
   210
   211	return 0;
  212}
  213__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
  214
  215static int __init stop_trace_on_warning(char *str)
  216{
  217	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
  218		__disable_trace_on_warning = 1;
  219	return 1;
  220}
  221__setup("traceoff_on_warning", stop_trace_on_warning);
  222
  223static int __init boot_alloc_snapshot(char *str)
  224{
  225	allocate_snapshot = true;
  226	/* We also need the main ring buffer expanded */
  227	ring_buffer_expanded = true;
  228	return 1;
  229}
  230__setup("alloc_snapshot", boot_alloc_snapshot);
  231
  232
  233static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
 
  234
  235static int __init set_trace_boot_options(char *str)
  236{
  237	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
 
  238	return 0;
  239}
  240__setup("trace_options=", set_trace_boot_options);
  241
  242static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
  243static char *trace_boot_clock __initdata;
  244
  245static int __init set_trace_boot_clock(char *str)
  246{
  247	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
  248	trace_boot_clock = trace_boot_clock_buf;
  249	return 0;
  250}
  251__setup("trace_clock=", set_trace_boot_clock);
  252
  253static int __init set_tracepoint_printk(char *str)
  254{
  255	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
  256		tracepoint_printk = 1;
  257	return 1;
  258}
  259__setup("tp_printk", set_tracepoint_printk);
  260
  261static int __init set_tracepoint_printk_stop(char *str)
  262{
  263	tracepoint_printk_stop_on_boot = true;
  264	return 1;
  265}
  266__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
  267
  268unsigned long long ns2usecs(u64 nsec)
  269{
  270	nsec += 500;
  271	do_div(nsec, 1000);
  272	return nsec;
  273}
  274
  275static void
  276trace_process_export(struct trace_export *export,
  277	       struct ring_buffer_event *event, int flag)
  278{
  279	struct trace_entry *entry;
  280	unsigned int size = 0;
  281
  282	if (export->flags & flag) {
  283		entry = ring_buffer_event_data(event);
  284		size = ring_buffer_event_length(event);
  285		export->write(export, entry, size);
  286	}
  287}
  288
  289static DEFINE_MUTEX(ftrace_export_lock);
  290
  291static struct trace_export __rcu *ftrace_exports_list __read_mostly;
  292
  293static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
  294static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
  295static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
  296
  297static inline void ftrace_exports_enable(struct trace_export *export)
  298{
  299	if (export->flags & TRACE_EXPORT_FUNCTION)
  300		static_branch_inc(&trace_function_exports_enabled);
  301
  302	if (export->flags & TRACE_EXPORT_EVENT)
  303		static_branch_inc(&trace_event_exports_enabled);
  304
  305	if (export->flags & TRACE_EXPORT_MARKER)
  306		static_branch_inc(&trace_marker_exports_enabled);
  307}
  308
  309static inline void ftrace_exports_disable(struct trace_export *export)
  310{
  311	if (export->flags & TRACE_EXPORT_FUNCTION)
  312		static_branch_dec(&trace_function_exports_enabled);
  313
  314	if (export->flags & TRACE_EXPORT_EVENT)
  315		static_branch_dec(&trace_event_exports_enabled);
  316
  317	if (export->flags & TRACE_EXPORT_MARKER)
  318		static_branch_dec(&trace_marker_exports_enabled);
  319}
  320
  321static void ftrace_exports(struct ring_buffer_event *event, int flag)
  322{
  323	struct trace_export *export;
  324
  325	preempt_disable_notrace();
  326
  327	export = rcu_dereference_raw_check(ftrace_exports_list);
  328	while (export) {
  329		trace_process_export(export, event, flag);
  330		export = rcu_dereference_raw_check(export->next);
  331	}
  332
  333	preempt_enable_notrace();
  334}
  335
  336static inline void
  337add_trace_export(struct trace_export **list, struct trace_export *export)
  338{
  339	rcu_assign_pointer(export->next, *list);
  340	/*
  341	 * We are entering export into the list but another
  342	 * CPU might be walking that list. We need to make sure
  343	 * the export->next pointer is valid before another CPU sees
  344	 * the export pointer included into the list.
  345	 */
  346	rcu_assign_pointer(*list, export);
  347}
  348
  349static inline int
  350rm_trace_export(struct trace_export **list, struct trace_export *export)
  351{
  352	struct trace_export **p;
  353
  354	for (p = list; *p != NULL; p = &(*p)->next)
  355		if (*p == export)
  356			break;
  357
  358	if (*p != export)
  359		return -1;
  360
  361	rcu_assign_pointer(*p, (*p)->next);
  362
  363	return 0;
  364}
  365
  366static inline void
  367add_ftrace_export(struct trace_export **list, struct trace_export *export)
  368{
  369	ftrace_exports_enable(export);
  370
  371	add_trace_export(list, export);
  372}
  373
  374static inline int
  375rm_ftrace_export(struct trace_export **list, struct trace_export *export)
  376{
  377	int ret;
  378
  379	ret = rm_trace_export(list, export);
  380	ftrace_exports_disable(export);
  381
  382	return ret;
  383}
  384
  385int register_ftrace_export(struct trace_export *export)
  386{
  387	if (WARN_ON_ONCE(!export->write))
  388		return -1;
  389
  390	mutex_lock(&ftrace_export_lock);
  391
  392	add_ftrace_export(&ftrace_exports_list, export);
  393
  394	mutex_unlock(&ftrace_export_lock);
  395
  396	return 0;
  397}
  398EXPORT_SYMBOL_GPL(register_ftrace_export);
  399
  400int unregister_ftrace_export(struct trace_export *export)
  401{
  402	int ret;
  403
  404	mutex_lock(&ftrace_export_lock);
  405
  406	ret = rm_ftrace_export(&ftrace_exports_list, export);
  407
  408	mutex_unlock(&ftrace_export_lock);
  409
  410	return ret;
  411}
  412EXPORT_SYMBOL_GPL(unregister_ftrace_export);
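/*
 * Editor's sketch: a minimal consumer of the export list managed above.  The
 * callback signature and the TRACE_EXPORT_FUNCTION flag follow the uses in
 * this file; "my_export" and my_write() are hypothetical names.
 */
static void my_write(struct trace_export *export, const void *entry,
		     unsigned int size)
{
	/* forward the raw trace entry somewhere (device, network, ...) */
}

static struct trace_export my_export = {
	.write	= my_write,
	.flags	= TRACE_EXPORT_FUNCTION,
};

/* register_ftrace_export(&my_export); ... unregister_ftrace_export(&my_export); */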
  413
  414/* trace_flags holds trace_options default values */
  415#define TRACE_DEFAULT_FLAGS						\
  416	(FUNCTION_DEFAULT_FLAGS |					\
  417	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
  418	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
  419	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
  420	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
  421	 TRACE_ITER_HASH_PTR)
  422
  423/* trace_options that are only supported by global_trace */
  424#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
  425	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
  426
  427/* trace_flags that are default zero for instances */
  428#define ZEROED_TRACE_FLAGS \
  429	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
  430
  431/*
  432 * The global_trace is the descriptor that holds the top-level tracing
  433 * buffers for the live tracing.
  434 */
  435static struct trace_array global_trace = {
  436	.trace_flags = TRACE_DEFAULT_FLAGS,
  437};
  438
  439LIST_HEAD(ftrace_trace_arrays);
  440
  441int trace_array_get(struct trace_array *this_tr)
  442{
  443	struct trace_array *tr;
  444	int ret = -ENODEV;
  445
  446	mutex_lock(&trace_types_lock);
  447	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
  448		if (tr == this_tr) {
  449			tr->ref++;
  450			ret = 0;
  451			break;
  452		}
  453	}
  454	mutex_unlock(&trace_types_lock);
  455
  456	return ret;
  457}
  458
  459static void __trace_array_put(struct trace_array *this_tr)
  460{
  461	WARN_ON(!this_tr->ref);
  462	this_tr->ref--;
  463}
  464
  465/**
  466 * trace_array_put - Decrement the reference counter for this trace array.
  467 * @this_tr : pointer to the trace array
  468 *
  469 * NOTE: Use this when we no longer need the trace array returned by
  470 * trace_array_get_by_name(). This ensures the trace array can be later
  471 * destroyed.
  472 *
  473 */
  474void trace_array_put(struct trace_array *this_tr)
  475{
  476	if (!this_tr)
  477		return;
  478
  479	mutex_lock(&trace_types_lock);
  480	__trace_array_put(this_tr);
  481	mutex_unlock(&trace_types_lock);
  482}
  483EXPORT_SYMBOL_GPL(trace_array_put);
  484
  485int tracing_check_open_get_tr(struct trace_array *tr)
  486{
  487	int ret;
  488
  489	ret = security_locked_down(LOCKDOWN_TRACEFS);
  490	if (ret)
  491		return ret;
  492
  493	if (tracing_disabled)
  494		return -ENODEV;
  495
  496	if (tr && trace_array_get(tr) < 0)
  497		return -ENODEV;
  498
  499	return 0;
  500}
 
  501
  502int call_filter_check_discard(struct trace_event_call *call, void *rec,
  503			      struct trace_buffer *buffer,
  504			      struct ring_buffer_event *event)
  505{
  506	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
  507	    !filter_match_preds(call->filter, rec)) {
  508		__trace_event_discard_commit(buffer, event);
  509		return 1;
  510	}
  511
  512	return 0;
  513}
 
  514
  515void trace_free_pid_list(struct trace_pid_list *pid_list)
  516{
  517	vfree(pid_list->pids);
  518	kfree(pid_list);
  519}
  520
  521/**
  522 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
  523 * @filtered_pids: The list of pids to check
  524 * @search_pid: The PID to find in @filtered_pids
  525 *
  526 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
  527 */
  528bool
  529trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
  530{
  531	/*
  532	 * If pid_max changed after filtered_pids was created, we
  533	 * by default ignore all pids greater than the previous pid_max.
  534	 */
  535	if (search_pid >= filtered_pids->pid_max)
  536		return false;
  537
  538	return test_bit(search_pid, filtered_pids->pids);
  539}
  540
  541/**
  542 * trace_ignore_this_task - should a task be ignored for tracing
  543 * @filtered_pids: The list of pids to check
  544 * @filtered_no_pids: The list of pids not to be traced
  545 * @task: The task that should be ignored if not filtered
  546 *
  547 * Checks if @task should be traced or not from @filtered_pids.
  548 * Returns true if @task should *NOT* be traced.
  549 * Returns false if @task should be traced.
  550 */
  551bool
  552trace_ignore_this_task(struct trace_pid_list *filtered_pids,
  553		       struct trace_pid_list *filtered_no_pids,
  554		       struct task_struct *task)
  555{
  556	/*
  557	 * If filtered_no_pids is not empty, and the task's pid is listed
  558	 * in filtered_no_pids, then return true.
  559	 * Otherwise, if filtered_pids is empty, that means we can
  560	 * trace all tasks. If it has content, then only trace pids
  561	 * within filtered_pids.
  562	 */
  563
  564	return (filtered_pids &&
  565		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
  566		(filtered_no_pids &&
  567		 trace_find_filtered_pid(filtered_no_pids, task->pid));
  568}
  569
  570/**
  571 * trace_filter_add_remove_task - Add or remove a task from a pid_list
  572 * @pid_list: The list to modify
  573 * @self: The current task for fork or NULL for exit
  574 * @task: The task to add or remove
  575 *
  576 * If adding a task, if @self is defined, the task is only added if @self
  577 * is also included in @pid_list. This happens on fork and tasks should
  578 * only be added when the parent is listed. If @self is NULL, then the
  579 * @task pid will be removed from the list, which would happen on exit
  580 * of a task.
  581 */
  582void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
  583				  struct task_struct *self,
  584				  struct task_struct *task)
  585{
  586	if (!pid_list)
  587		return;
  588
  589	/* For forks, we only add if the forking task is listed */
  590	if (self) {
  591		if (!trace_find_filtered_pid(pid_list, self->pid))
  592			return;
  593	}
  594
  595	/* Sorry, but we don't support pid_max changing after setting */
  596	if (task->pid >= pid_list->pid_max)
  597		return;
  598
  599	/* "self" is set for forks, and NULL for exits */
  600	if (self)
  601		set_bit(task->pid, pid_list->pids);
  602	else
  603		clear_bit(task->pid, pid_list->pids);
  604}
  605
  606/**
  607 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
  608 * @pid_list: The pid list to show
  609 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
  610 * @pos: The position of the file
  611 *
  612 * This is used by the seq_file "next" operation to iterate the pids
  613 * listed in a trace_pid_list structure.
  614 *
  615 * Returns the pid+1 as we want to display pid of zero, but NULL would
  616 * stop the iteration.
  617 */
  618void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
  619{
  620	unsigned long pid = (unsigned long)v;
  621
  622	(*pos)++;
  623
  624	/* pid already is +1 of the actual previous bit */
  625	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
  626
  627	/* Return pid + 1 to allow zero to be represented */
  628	if (pid < pid_list->pid_max)
  629		return (void *)(pid + 1);
  630
  631	return NULL;
  632}
  633
  634/**
  635 * trace_pid_start - Used for seq_file to start reading pid lists
  636 * @pid_list: The pid list to show
  637 * @pos: The position of the file
  638 *
  639 * This is used by seq_file "start" operation to start the iteration
  640 * of listing pids.
  641 *
  642 * Returns the pid+1 as we want to display pid of zero, but NULL would
  643 * stop the iteration.
  644 */
  645void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
  646{
  647	unsigned long pid;
  648	loff_t l = 0;
  649
  650	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
  651	if (pid >= pid_list->pid_max)
  652		return NULL;
  653
  654	/* Return pid + 1 so that zero can be the exit value */
  655	for (pid++; pid && l < *pos;
  656	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
  657		;
  658	return (void *)pid;
  659}
  660
  661/**
  662 * trace_pid_show - show the current pid in seq_file processing
  663 * @m: The seq_file structure to write into
  664 * @v: A void pointer of the pid (+1) value to display
  665 *
  666 * Can be directly used by seq_file operations to display the current
  667 * pid value.
  668 */
  669int trace_pid_show(struct seq_file *m, void *v)
  670{
  671	unsigned long pid = (unsigned long)v - 1;
  672
  673	seq_printf(m, "%lu\n", pid);
  674	return 0;
  675}
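/*
 * Editor's sketch: how the three helpers above are meant to plug into a
 * seq_file (the "pid + 1" encoding is what lets pid 0 survive the NULL
 * checks).  "a_pid_list" and the *_sketch wrappers are hypothetical; real
 * callers fetch the pid_list from their trace_array under RCU.
 */
static struct trace_pid_list *a_pid_list;

static void *p_start_sketch(struct seq_file *m, loff_t *pos)
{
	return trace_pid_start(a_pid_list, pos);
}

static void *p_next_sketch(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(a_pid_list, v, pos);
}

static void p_stop_sketch(struct seq_file *m, void *v)
{
}

static const struct seq_operations pid_list_seq_ops_sketch = {
	.start	= p_start_sketch,
	.next	= p_next_sketch,
	.stop	= p_stop_sketch,
	.show	= trace_pid_show,
};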
  676
  677/* 128 should be much more than enough */
  678#define PID_BUF_SIZE		127
  679
  680int trace_pid_write(struct trace_pid_list *filtered_pids,
  681		    struct trace_pid_list **new_pid_list,
  682		    const char __user *ubuf, size_t cnt)
  683{
  684	struct trace_pid_list *pid_list;
  685	struct trace_parser parser;
  686	unsigned long val;
  687	int nr_pids = 0;
  688	ssize_t read = 0;
  689	ssize_t ret = 0;
  690	loff_t pos;
  691	pid_t pid;
  692
  693	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
  694		return -ENOMEM;
  695
  696	/*
  697	 * Always recreate a new array. The write is an all or nothing
  698	 * operation. Always create a new array when adding new pids by
  699	 * the user. If the operation fails, then the current list is
  700	 * not modified.
  701	 */
  702	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
  703	if (!pid_list) {
  704		trace_parser_put(&parser);
  705		return -ENOMEM;
  706	}
  707
  708	pid_list->pid_max = READ_ONCE(pid_max);
  709
  710	/* Only truncating will shrink pid_max */
  711	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
  712		pid_list->pid_max = filtered_pids->pid_max;
  713
  714	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
  715	if (!pid_list->pids) {
  716		trace_parser_put(&parser);
  717		kfree(pid_list);
  718		return -ENOMEM;
  719	}
  720
  721	if (filtered_pids) {
  722		/* copy the current bits to the new max */
  723		for_each_set_bit(pid, filtered_pids->pids,
  724				 filtered_pids->pid_max) {
  725			set_bit(pid, pid_list->pids);
  726			nr_pids++;
  727		}
  728	}
  729
  730	while (cnt > 0) {
  731
  732		pos = 0;
  733
  734		ret = trace_get_user(&parser, ubuf, cnt, &pos);
  735		if (ret < 0 || !trace_parser_loaded(&parser))
  736			break;
  737
  738		read += ret;
  739		ubuf += ret;
  740		cnt -= ret;
  741
  742		ret = -EINVAL;
  743		if (kstrtoul(parser.buffer, 0, &val))
  744			break;
  745		if (val >= pid_list->pid_max)
  746			break;
  747
  748		pid = (pid_t)val;
  749
  750		set_bit(pid, pid_list->pids);
  751		nr_pids++;
  752
  753		trace_parser_clear(&parser);
  754		ret = 0;
  755	}
  756	trace_parser_put(&parser);
  757
  758	if (ret < 0) {
  759		trace_free_pid_list(pid_list);
  760		return ret;
  761	}
  762
  763	if (!nr_pids) {
  764		/* Cleared the list of pids */
  765		trace_free_pid_list(pid_list);
  766		read = ret;
  767		pid_list = NULL;
  768	}
  769
  770	*new_pid_list = pid_list;
  771
  772	return read;
  773}
  774
  775static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
  776{
  777	u64 ts;
  778
  779	/* Early boot up does not have a buffer yet */
  780	if (!buf->buffer)
  781		return trace_clock_local();
  782
  783	ts = ring_buffer_time_stamp(buf->buffer);
  784	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
  785
  786	return ts;
  787}
  788
  789u64 ftrace_now(int cpu)
  790{
  791	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
  792}
  793
  794/**
  795 * tracing_is_enabled - Show if global_trace has been enabled
  796 *
  797 * Shows if the global trace has been enabled or not. It uses the
  798 * mirror flag "buffer_disabled" to be used in fast paths such as for
  799 * the irqsoff tracer. But it may be inaccurate due to races. If you
  800 * need to know the accurate state, use tracing_is_on() which is a little
  801 * slower, but accurate.
  802 */
  803int tracing_is_enabled(void)
  804{
  805	/*
  806	 * For quick access (irqsoff uses this in fast path), just
  807	 * return the mirror variable of the state of the ring buffer.
  808	 * It's a little racy, but we don't really care.
  809	 */
  810	smp_rmb();
  811	return !global_trace.buffer_disabled;
  812}
  813
  814/*
  815 * trace_buf_size is the size in bytes that is allocated
  816 * for a buffer. Note, the number of bytes is always rounded
  817 * to page size.
  818 *
  819 * This number is purposely set to a low number of 16384.
  820 * If the dump on oops happens, it will be much appreciated
   821 * to not have to wait for all that output. Anyway, this is
   822 * configurable at both boot time and run time.
  823 */
  824#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
  825
  826static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
  827
  828/* trace_types holds a link list of available tracers. */
  829static struct tracer		*trace_types __read_mostly;
  830
  831/*
  832 * trace_types_lock is used to protect the trace_types list.
  833 */
  834DEFINE_MUTEX(trace_types_lock);
  835
  836/*
  837 * serialize the access of the ring buffer
  838 *
  839 * ring buffer serializes readers, but it is low level protection.
   840 * The validity of the events (returned by ring_buffer_peek() ..etc)
   841 * is not protected by the ring buffer.
  842 *
   843 * The content of events may become garbage if we allow other processes
   844 * to consume these events concurrently:
  845 *   A) the page of the consumed events may become a normal page
  846 *      (not reader page) in ring buffer, and this page will be rewritten
  847 *      by events producer.
  848 *   B) The page of the consumed events may become a page for splice_read,
  849 *      and this page will be returned to system.
  850 *
   851 * These primitives allow multiple processes to access different cpu ring
   852 * buffers concurrently.
   853 *
   854 * These primitives don't distinguish read-only and read-consume access.
   855 * Multiple read-only accesses are also serialized.
  856 */
  857
  858#ifdef CONFIG_SMP
  859static DECLARE_RWSEM(all_cpu_access_lock);
  860static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
  861
  862static inline void trace_access_lock(int cpu)
  863{
  864	if (cpu == RING_BUFFER_ALL_CPUS) {
  865		/* gain it for accessing the whole ring buffer. */
  866		down_write(&all_cpu_access_lock);
  867	} else {
  868		/* gain it for accessing a cpu ring buffer. */
  869
  870		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
  871		down_read(&all_cpu_access_lock);
  872
  873		/* Secondly block other access to this @cpu ring buffer. */
  874		mutex_lock(&per_cpu(cpu_access_lock, cpu));
  875	}
  876}
  877
  878static inline void trace_access_unlock(int cpu)
  879{
  880	if (cpu == RING_BUFFER_ALL_CPUS) {
  881		up_write(&all_cpu_access_lock);
  882	} else {
  883		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
  884		up_read(&all_cpu_access_lock);
  885	}
  886}
  887
  888static inline void trace_access_lock_init(void)
  889{
  890	int cpu;
  891
  892	for_each_possible_cpu(cpu)
  893		mutex_init(&per_cpu(cpu_access_lock, cpu));
  894}
  895
  896#else
  897
  898static DEFINE_MUTEX(access_lock);
  899
  900static inline void trace_access_lock(int cpu)
  901{
  902	(void)cpu;
  903	mutex_lock(&access_lock);
  904}
  905
  906static inline void trace_access_unlock(int cpu)
  907{
  908	(void)cpu;
  909	mutex_unlock(&access_lock);
  910}
  911
  912static inline void trace_access_lock_init(void)
  913{
  914}
  915
  916#endif
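/*
 * Editor's sketch: the consumer pattern the primitives above serialize.  A
 * reader takes the lock for one cpu (or RING_BUFFER_ALL_CPUS), consumes what
 * it needs, and drops the lock; the actual ring buffer calls are elided.
 */
static void consume_cpu_buffer_sketch(int cpu_file)
{
	trace_access_lock(cpu_file);
	/* ring_buffer_consume()/ring_buffer_read_page() on cpu_file here */
	trace_access_unlock(cpu_file);
}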
  917
  918#ifdef CONFIG_STACKTRACE
  919static void __ftrace_trace_stack(struct trace_buffer *buffer,
  920				 unsigned int trace_ctx,
  921				 int skip, struct pt_regs *regs);
  922static inline void ftrace_trace_stack(struct trace_array *tr,
  923				      struct trace_buffer *buffer,
  924				      unsigned int trace_ctx,
  925				      int skip, struct pt_regs *regs);
  926
  927#else
  928static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
  929					unsigned int trace_ctx,
  930					int skip, struct pt_regs *regs)
  931{
  932}
  933static inline void ftrace_trace_stack(struct trace_array *tr,
  934				      struct trace_buffer *buffer,
  935				      unsigned long trace_ctx,
  936				      int skip, struct pt_regs *regs)
  937{
  938}
  939
  940#endif
  941
  942static __always_inline void
  943trace_event_setup(struct ring_buffer_event *event,
  944		  int type, unsigned int trace_ctx)
  945{
  946	struct trace_entry *ent = ring_buffer_event_data(event);
  947
  948	tracing_generic_entry_update(ent, type, trace_ctx);
  949}
  950
  951static __always_inline struct ring_buffer_event *
  952__trace_buffer_lock_reserve(struct trace_buffer *buffer,
  953			  int type,
  954			  unsigned long len,
  955			  unsigned int trace_ctx)
  956{
  957	struct ring_buffer_event *event;
  958
  959	event = ring_buffer_lock_reserve(buffer, len);
  960	if (event != NULL)
  961		trace_event_setup(event, type, trace_ctx);
  962
  963	return event;
  964}
  965
  966void tracer_tracing_on(struct trace_array *tr)
  967{
  968	if (tr->array_buffer.buffer)
  969		ring_buffer_record_on(tr->array_buffer.buffer);
  970	/*
  971	 * This flag is looked at when buffers haven't been allocated
  972	 * yet, or by some tracers (like irqsoff) that just want to
  973	 * know if the ring buffer has been disabled, but it can handle
  974	 * races where it gets disabled while we still do a record.
  975	 * As the check is in the fast path of the tracers, it is more
  976	 * important to be fast than accurate.
  977	 */
  978	tr->buffer_disabled = 0;
  979	/* Make the flag seen by readers */
  980	smp_wmb();
  981}
  982
  983/**
  984 * tracing_on - enable tracing buffers
  985 *
  986 * This function enables tracing buffers that may have been
  987 * disabled with tracing_off.
  988 */
  989void tracing_on(void)
  990{
  991	tracer_tracing_on(&global_trace);
  992}
  993EXPORT_SYMBOL_GPL(tracing_on);
  994
  995
  996static __always_inline void
  997__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
  998{
  999	__this_cpu_write(trace_taskinfo_save, true);
 1000
 1001	/* If this is the temp buffer, we need to commit fully */
 1002	if (this_cpu_read(trace_buffered_event) == event) {
 1003		/* Length is in event->array[0] */
 1004		ring_buffer_write(buffer, event->array[0], &event->array[1]);
 1005		/* Release the temp buffer */
 1006		this_cpu_dec(trace_buffered_event_cnt);
 1007	} else
 1008		ring_buffer_unlock_commit(buffer, event);
 1009}
 1010
 1011/**
 1012 * __trace_puts - write a constant string into the trace buffer.
 1013 * @ip:	   The address of the caller
 1014 * @str:   The constant string to write
 1015 * @size:  The size of the string.
 1016 */
 1017int __trace_puts(unsigned long ip, const char *str, int size)
 1018{
 1019	struct ring_buffer_event *event;
 1020	struct trace_buffer *buffer;
 1021	struct print_entry *entry;
 1022	unsigned int trace_ctx;
 1023	int alloc;
 1024
 1025	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
 1026		return 0;
 1027
 1028	if (unlikely(tracing_selftest_running || tracing_disabled))
 1029		return 0;
 1030
 1031	alloc = sizeof(*entry) + size + 2; /* possible \n added */
 1032
 1033	trace_ctx = tracing_gen_ctx();
 1034	buffer = global_trace.array_buffer.buffer;
 1035	ring_buffer_nest_start(buffer);
 1036	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
 1037					    trace_ctx);
 1038	if (!event) {
 1039		size = 0;
 1040		goto out;
 1041	}
 1042
 1043	entry = ring_buffer_event_data(event);
 1044	entry->ip = ip;
 1045
 1046	memcpy(&entry->buf, str, size);
 1047
 1048	/* Add a newline if necessary */
 1049	if (entry->buf[size - 1] != '\n') {
 1050		entry->buf[size] = '\n';
 1051		entry->buf[size + 1] = '\0';
 1052	} else
 1053		entry->buf[size] = '\0';
 1054
 1055	__buffer_unlock_commit(buffer, event);
 1056	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
 1057 out:
 1058	ring_buffer_nest_end(buffer);
 1059	return size;
 1060}
 1061EXPORT_SYMBOL_GPL(__trace_puts);
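
/*
 * Illustrative sketch (not part of the original file): callers normally
 * reach __trace_puts()/__trace_bputs() through the trace_puts() macro
 * rather than calling these helpers directly.  A hypothetical debugging
 * hook could look like:
 *
 *	static void my_probe(void)
 *	{
 *		trace_puts("my_probe: hit the slow path\n");
 *	}
 *
 * The string then shows up as a print entry in the trace output.  The
 * function name my_probe() is made up for the example.
 */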
 1062
 1063/**
 1064 * __trace_bputs - write the pointer to a constant string into trace buffer
 1065 * @ip:	   The address of the caller
 1066 * @str:   The constant string to write to the buffer
 1067 */
 1068int __trace_bputs(unsigned long ip, const char *str)
 1069{
 1070	struct ring_buffer_event *event;
 1071	struct trace_buffer *buffer;
 1072	struct bputs_entry *entry;
 1073	unsigned int trace_ctx;
 1074	int size = sizeof(struct bputs_entry);
 1075	int ret = 0;
 1076
 1077	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
 1078		return 0;
 1079
 1080	if (unlikely(tracing_selftest_running || tracing_disabled))
 1081		return 0;
 1082
 1083	trace_ctx = tracing_gen_ctx();
 1084	buffer = global_trace.array_buffer.buffer;
 1085
 1086	ring_buffer_nest_start(buffer);
 1087	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
 1088					    trace_ctx);
 1089	if (!event)
 1090		goto out;
 1091
 1092	entry = ring_buffer_event_data(event);
 1093	entry->ip			= ip;
 1094	entry->str			= str;
 1095
 1096	__buffer_unlock_commit(buffer, event);
 1097	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
 1098
 1099	ret = 1;
 1100 out:
 1101	ring_buffer_nest_end(buffer);
 1102	return ret;
 1103}
 1104EXPORT_SYMBOL_GPL(__trace_bputs);
 1105
 1106#ifdef CONFIG_TRACER_SNAPSHOT
 1107static void tracing_snapshot_instance_cond(struct trace_array *tr,
 1108					   void *cond_data)
 1109{
 1110	struct tracer *tracer = tr->current_trace;
 1111	unsigned long flags;
 1112
 1113	if (in_nmi()) {
 1114		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
 1115		internal_trace_puts("*** snapshot is being ignored        ***\n");
 1116		return;
 1117	}
 1118
 1119	if (!tr->allocated_snapshot) {
 1120		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
 1121		internal_trace_puts("*** stopping trace here!   ***\n");
 1122		tracing_off();
 1123		return;
 1124	}
 1125
 1126	/* Note, snapshot can not be used when the tracer uses it */
 1127	if (tracer->use_max_tr) {
 1128		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
 1129		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
 1130		return;
 1131	}
 1132
 1133	local_irq_save(flags);
 1134	update_max_tr(tr, current, smp_processor_id(), cond_data);
 1135	local_irq_restore(flags);
 1136}
 1137
 1138void tracing_snapshot_instance(struct trace_array *tr)
 1139{
 1140	tracing_snapshot_instance_cond(tr, NULL);
 1141}
 1142
 1143/**
 1144 * tracing_snapshot - take a snapshot of the current buffer.
 1145 *
 1146 * This causes a swap between the snapshot buffer and the current live
 1147 * tracing buffer. You can use this to take snapshots of the live
 1148 * trace when some condition is triggered, but continue to trace.
 1149 *
 1150 * Note, make sure to allocate the snapshot either with
 1151 * tracing_snapshot_alloc(), or manually with:
 1152 * echo 1 > /sys/kernel/debug/tracing/snapshot
 1153 *
 1154 * If the snapshot buffer is not allocated, this will stop tracing,
 1155 * basically making a permanent snapshot.
 1156 */
 1157void tracing_snapshot(void)
 1158{
 1159	struct trace_array *tr = &global_trace;
 1160
 1161	tracing_snapshot_instance(tr);
 1162}
 1163EXPORT_SYMBOL_GPL(tracing_snapshot);
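
/*
 * Illustrative sketch (not from the original file): a typical pattern is to
 * allocate the snapshot buffer once from sleepable context and then call
 * tracing_snapshot() from the hot path when something interesting happens.
 * The names my_init() and my_error_detected() are hypothetical.
 *
 *	static int __init my_init(void)
 *	{
 *		return tracing_alloc_snapshot();
 *	}
 *
 *	static void my_error_detected(void)
 *	{
 *		tracing_snapshot();
 *	}
 *
 * The snapshot swap preserves the trace leading up to the error while the
 * live buffer keeps recording.
 */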
 1164
 1165/**
 1166 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 1167 * @tr:		The tracing instance to snapshot
 1168 * @cond_data:	The data to be tested conditionally, and possibly saved
 1169 *
 1170 * This is the same as tracing_snapshot() except that the snapshot is
 1171 * conditional - the snapshot will only happen if the
 1172 * cond_snapshot.update() implementation receiving the cond_data
 1173 * returns true, which means that the trace array's cond_snapshot
 1174 * update() operation used the cond_data to determine whether the
 1175 * snapshot should be taken, and if it was, presumably saved it along
 1176 * with the snapshot.
 1177 */
 1178void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
 1179{
 1180	tracing_snapshot_instance_cond(tr, cond_data);
 1181}
 1182EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
 1183
 1184/**
 1185 * tracing_snapshot_cond_data - get the user data associated with a snapshot
 1186 * @tr:		The tracing instance
 1187 *
 1188 * When the user enables a conditional snapshot using
 1189 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 1190 * with the snapshot.  This accessor is used to retrieve it.
 1191 *
 1192 * Should not be called from cond_snapshot.update(), since it takes
 1193 * the tr->max_lock lock, which the code calling
 1194 * cond_snapshot.update() has already done.
 1195 *
 1196 * Returns the cond_data associated with the trace array's snapshot.
 1197 */
 1198void *tracing_cond_snapshot_data(struct trace_array *tr)
 1199{
 1200	void *cond_data = NULL;
 1201
 1202	arch_spin_lock(&tr->max_lock);
 1203
 1204	if (tr->cond_snapshot)
 1205		cond_data = tr->cond_snapshot->cond_data;
 1206
 1207	arch_spin_unlock(&tr->max_lock);
 1208
 1209	return cond_data;
 1210}
 1211EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
 1212
 1213static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
 1214					struct array_buffer *size_buf, int cpu_id);
 1215static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
 1216
 1217int tracing_alloc_snapshot_instance(struct trace_array *tr)
 1218{
 1219	int ret;
 1220
 1221	if (!tr->allocated_snapshot) {
 1222
 1223		/* allocate spare buffer */
 1224		ret = resize_buffer_duplicate_size(&tr->max_buffer,
 1225				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
 1226		if (ret < 0)
 1227			return ret;
 1228
 1229		tr->allocated_snapshot = true;
 1230	}
 1231
 1232	return 0;
 1233}
 1234
 1235static void free_snapshot(struct trace_array *tr)
 1236{
 1237	/*
 1238	 * We don't free the ring buffer; instead, we resize it because
 1239	 * the max_tr ring buffer has some state (e.g. ring->clock) and
 1240	 * we want to preserve it.
 1241	 */
 1242	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
 1243	set_buffer_entries(&tr->max_buffer, 1);
 1244	tracing_reset_online_cpus(&tr->max_buffer);
 1245	tr->allocated_snapshot = false;
 1246}
 1247
 1248/**
 1249 * tracing_alloc_snapshot - allocate snapshot buffer.
 1250 *
 1251 * This only allocates the snapshot buffer if it isn't already
 1252 * allocated - it doesn't also take a snapshot.
 1253 *
 1254 * This is meant to be used in cases where the snapshot buffer needs
 1255 * to be set up for events that can't sleep but need to be able to
 1256 * trigger a snapshot.
 1257 */
 1258int tracing_alloc_snapshot(void)
 1259{
 1260	struct trace_array *tr = &global_trace;
 1261	int ret;
 1262
 1263	ret = tracing_alloc_snapshot_instance(tr);
 1264	WARN_ON(ret < 0);
 1265
 1266	return ret;
 1267}
 1268EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 1269
 1270/**
 1271 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 1272 *
 1273 * This is similar to tracing_snapshot(), but it will allocate the
 1274 * snapshot buffer if it isn't already allocated. Use this only
 1275 * where it is safe to sleep, as the allocation may sleep.
 1276 *
 1277 * This causes a swap between the snapshot buffer and the current live
 1278 * tracing buffer. You can use this to take snapshots of the live
 1279 * trace when some condition is triggered, but continue to trace.
 1280 */
 1281void tracing_snapshot_alloc(void)
 1282{
 1283	int ret;
 1284
 1285	ret = tracing_alloc_snapshot();
 1286	if (ret < 0)
 1287		return;
 1288
 1289	tracing_snapshot();
 1290}
 1291EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 1292
 1293/**
 1294 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 1295 * @tr:		The tracing instance
 1296 * @cond_data:	User data to associate with the snapshot
 1297 * @update:	Implementation of the cond_snapshot update function
 1298 *
 1299 * Check whether the conditional snapshot for the given instance has
 1300 * already been enabled, or if the current tracer is already using a
 1301 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 1302 * save the cond_data and update function inside.
 1303 *
 1304 * Returns 0 if successful, error otherwise.
 1305 */
 1306int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
 1307				 cond_update_fn_t update)
 1308{
 1309	struct cond_snapshot *cond_snapshot;
 1310	int ret = 0;
 1311
 1312	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
 1313	if (!cond_snapshot)
 1314		return -ENOMEM;
 1315
 1316	cond_snapshot->cond_data = cond_data;
 1317	cond_snapshot->update = update;
 1318
 1319	mutex_lock(&trace_types_lock);
 1320
 1321	ret = tracing_alloc_snapshot_instance(tr);
 1322	if (ret)
 1323		goto fail_unlock;
 1324
 1325	if (tr->current_trace->use_max_tr) {
 1326		ret = -EBUSY;
 1327		goto fail_unlock;
 1328	}
 1329
 1330	/*
 1331	 * The cond_snapshot can only change to NULL without the
 1332	 * trace_types_lock. We don't care if we race with it going
 1333	 * to NULL, but we want to make sure that it's not set to
 1334	 * something other than NULL when we get here, which we can
 1335	 * do safely with only holding the trace_types_lock and not
 1336	 * having to take the max_lock.
 1337	 */
 1338	if (tr->cond_snapshot) {
 1339		ret = -EBUSY;
 1340		goto fail_unlock;
 1341	}
 1342
 1343	arch_spin_lock(&tr->max_lock);
 1344	tr->cond_snapshot = cond_snapshot;
 1345	arch_spin_unlock(&tr->max_lock);
 1346
 1347	mutex_unlock(&trace_types_lock);
 1348
 1349	return ret;
 1350
 1351 fail_unlock:
 1352	mutex_unlock(&trace_types_lock);
 1353	kfree(cond_snapshot);
 1354	return ret;
 1355}
 1356EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
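
/*
 * Illustrative sketch (not from the original file): a hypothetical user of
 * the conditional snapshot API, assuming the cond_update_fn_t signature
 * from trace.h.  my_threshold, my_update() and my_measured_latency() are
 * made-up names; the update callback returning true is what lets the
 * buffer swap in tracing_snapshot_cond() go ahead.
 *
 *	static unsigned long my_threshold = 500;
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		unsigned long *thresh = cond_data;
 *
 *		return my_measured_latency() > *thresh;
 *	}
 *
 *	err = tracing_snapshot_cond_enable(tr, &my_threshold, my_update);
 *	...
 *	tracing_snapshot_cond(tr, &my_threshold);
 */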
 1357
 1358/**
 1359 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 1360 * @tr:		The tracing instance
 1361 *
 1362 * Check whether the conditional snapshot for the given instance is
 1363 * enabled; if so, free the cond_snapshot associated with it,
 1364 * otherwise return -EINVAL.
 1365 *
 1366 * Returns 0 if successful, error otherwise.
 1367 */
 1368int tracing_snapshot_cond_disable(struct trace_array *tr)
 1369{
 1370	int ret = 0;
 1371
 1372	arch_spin_lock(&tr->max_lock);
 1373
 1374	if (!tr->cond_snapshot)
 1375		ret = -EINVAL;
 1376	else {
 1377		kfree(tr->cond_snapshot);
 1378		tr->cond_snapshot = NULL;
 1379	}
 1380
 1381	arch_spin_unlock(&tr->max_lock);
 1382
 1383	return ret;
 1384}
 1385EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
 1386#else
 1387void tracing_snapshot(void)
 1388{
 1389	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
 1390}
 1391EXPORT_SYMBOL_GPL(tracing_snapshot);
 1392void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
 1393{
 1394	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
 1395}
 1396EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
 1397int tracing_alloc_snapshot(void)
 1398{
 1399	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
 1400	return -ENODEV;
 1401}
 1402EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 1403void tracing_snapshot_alloc(void)
 1404{
 1405	/* Give warning */
 1406	tracing_snapshot();
 1407}
 1408EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 1409void *tracing_cond_snapshot_data(struct trace_array *tr)
 1410{
 1411	return NULL;
 1412}
 1413EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
 1414int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
 1415{
 1416	return -ENODEV;
 1417}
 1418EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
 1419int tracing_snapshot_cond_disable(struct trace_array *tr)
 1420{
 1421	return -ENODEV;
 1422}
 1423EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
 1424#endif /* CONFIG_TRACER_SNAPSHOT */
 1425
 1426void tracer_tracing_off(struct trace_array *tr)
 1427{
 1428	if (tr->array_buffer.buffer)
 1429		ring_buffer_record_off(tr->array_buffer.buffer);
 1430	/*
 1431	 * This flag is looked at when buffers haven't been allocated
 1432	 * yet, or by some tracers (like irqsoff) that just want to
 1433	 * know if the ring buffer has been disabled, but it can handle
 1434	 * races where it gets disabled while we still do a record.
 1435	 * As the check is in the fast path of the tracers, it is more
 1436	 * important to be fast than accurate.
 1437	 */
 1438	tr->buffer_disabled = 1;
 1439	/* Make the flag seen by readers */
 1440	smp_wmb();
 1441}
 1442
 1443/**
 1444 * tracing_off - turn off tracing buffers
 1445 *
 1446 * This function stops the tracing buffers from recording data.
 1447 * It does not disable any overhead the tracers themselves may
 1448 * be causing. This function simply causes all recording to
 1449 * the ring buffers to fail.
 1450 */
 1451void tracing_off(void)
 1452{
 1453	tracer_tracing_off(&global_trace);
 1454}
 1455EXPORT_SYMBOL_GPL(tracing_off);
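
/*
 * Illustrative sketch (not from the original file): freezing the trace when
 * a fault is detected keeps the events leading up to it in the buffer.  The
 * driver and function names below are hypothetical.
 *
 *	static void my_dev_handle_fault(struct my_dev *dev)
 *	{
 *		tracing_off();
 *		pr_err("my_dev: fault detected, trace frozen\n");
 *	}
 *
 * Recording can later be resumed with tracing_on() or by writing 1 to the
 * tracing_on file in tracefs.
 */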
 1456
 1457void disable_trace_on_warning(void)
 1458{
 1459	if (__disable_trace_on_warning) {
 1460		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
 1461			"Disabling tracing due to warning\n");
 1462		tracing_off();
 1463	}
 1464}
 1465
 1466/**
 1467 * tracer_tracing_is_on - show the real enabled state of the ring buffer
 1468 * @tr: the trace array to check whether the ring buffer is enabled
 1469 *
 1470 * Shows the real state of the ring buffer, i.e. whether it is enabled or not.
 1471 */
 1472bool tracer_tracing_is_on(struct trace_array *tr)
 1473{
 1474	if (tr->array_buffer.buffer)
 1475		return ring_buffer_record_is_on(tr->array_buffer.buffer);
 1476	return !tr->buffer_disabled;
 1477}
 1478
 1479/**
 1480 * tracing_is_on - show the enabled state of the ring buffers
 1481 */
 1482int tracing_is_on(void)
 1483{
 1484	return tracer_tracing_is_on(&global_trace);
 1485}
 1486EXPORT_SYMBOL_GPL(tracing_is_on);
 1487
 1488static int __init set_buf_size(char *str)
 1489{
 1490	unsigned long buf_size;
 1491
 1492	if (!str)
 1493		return 0;
 1494	buf_size = memparse(str, &str);
 1495	/* nr_entries can not be zero */
 1496	if (buf_size == 0)
 1497		return 0;
 1498	trace_buf_size = buf_size;
 1499	return 1;
 1500}
 1501__setup("trace_buf_size=", set_buf_size);
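
/*
 * Example (not in the original file): since the value is parsed with
 * memparse(), the usual size suffixes work on the kernel command line, e.g.
 *
 *	trace_buf_size=16384k
 *
 * which requests a roughly 16 MB per-CPU buffer before any tracer is
 * enabled.
 */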
 1502
 1503static int __init set_tracing_thresh(char *str)
 1504{
 1505	unsigned long threshold;
 1506	int ret;
 1507
 1508	if (!str)
 1509		return 0;
 1510	ret = kstrtoul(str, 0, &threshold);
 1511	if (ret < 0)
 1512		return 0;
 1513	tracing_thresh = threshold * 1000;
 1514	return 1;
 1515}
 1516__setup("tracing_thresh=", set_tracing_thresh);
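
/*
 * Example (not in the original file): because the parsed value is multiplied
 * by 1000 above, the command-line threshold is given in microseconds while
 * tracing_thresh itself is kept in nanoseconds, e.g.
 *
 *	tracing_thresh=100
 *
 * sets tracing_thresh to 100000 (100 microseconds).
 */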
 1517
 1518unsigned long nsecs_to_usecs(unsigned long nsecs)
 1519{
 1520	return nsecs / 1000;
 1521}
 1522
 1523/*
 1524 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 1525 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 1526 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 1527 * of strings in the order that the evals (enum) were defined.
 1528 */
 1529#undef C
 1530#define C(a, b) b
 1531
 1532/* These must match the bit positions in trace_iterator_flags */
 1533static const char *trace_options[] = {
 1534	TRACE_FLAGS
 1535	NULL
 1536};
 1537
 1538static struct {
 1539	u64 (*func)(void);
 1540	const char *name;
 1541	int in_ns;		/* is this clock in nanoseconds? */
 1542} trace_clocks[] = {
 1543	{ trace_clock_local,		"local",	1 },
 1544	{ trace_clock_global,		"global",	1 },
 1545	{ trace_clock_counter,		"counter",	0 },
 1546	{ trace_clock_jiffies,		"uptime",	0 },
 1547	{ trace_clock,			"perf",		1 },
 1548	{ ktime_get_mono_fast_ns,	"mono",		1 },
 1549	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
 1550	{ ktime_get_boot_fast_ns,	"boot",		1 },
 1551	ARCH_TRACE_CLOCKS
 1552};
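
/*
 * Example (not in the original file): the "name" strings above are what user
 * space writes to pick a clock, typically via tracefs:
 *
 *	# echo mono > /sys/kernel/tracing/trace_clock
 *
 * switches the trace timestamps over to ktime_get_mono_fast_ns().
 */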
 1553
 1554bool trace_clock_in_ns(struct trace_array *tr)
 1555{
 1556	if (trace_clocks[tr->clock_id].in_ns)
 1557		return true;
 1558
 1559	return false;
 1560}
 1561
 1562/*
 1563 * trace_parser_get_init - gets the buffer for trace parser
 1564 */
 1565int trace_parser_get_init(struct trace_parser *parser, int size)
 1566{
 1567	memset(parser, 0, sizeof(*parser));
 1568
 1569	parser->buffer = kmalloc(size, GFP_KERNEL);
 1570	if (!parser->buffer)
 1571		return 1;
 1572
 1573	parser->size = size;
 1574	return 0;
 1575}
 1576
 1577/*
 1578 * trace_parser_put - frees the buffer for trace parser
 1579 */
 1580void trace_parser_put(struct trace_parser *parser)
 1581{
 1582	kfree(parser->buffer);
 1583	parser->buffer = NULL;
 1584}
 1585
 1586/*
 1587 * trace_get_user - reads the user input string separated by space
 1588 * (matched by isspace(ch))
 1589 *
 1590 * For each string found the 'struct trace_parser' is updated,
 1591 * and the function returns.
 1592 *
 1593 * Returns number of bytes read.
 1594 *
 1595 * See kernel/trace/trace.h for 'struct trace_parser' details.
 1596 */
 1597int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 1598	size_t cnt, loff_t *ppos)
 1599{
 1600	char ch;
 1601	size_t read = 0;
 1602	ssize_t ret;
 1603
 1604	if (!*ppos)
 1605		trace_parser_clear(parser);
 1606
 1607	ret = get_user(ch, ubuf++);
 1608	if (ret)
 1609		goto out;
 1610
 1611	read++;
 1612	cnt--;
 1613
 1614	/*
 1615	 * If the parser did not finish with the last write, continue
 1616	 * reading the user input where it left off, without skipping spaces.
 1617	 */
 1618	if (!parser->cont) {
 1619		/* skip white space */
 1620		while (cnt && isspace(ch)) {
 1621			ret = get_user(ch, ubuf++);
 1622			if (ret)
 1623				goto out;
 1624			read++;
 1625			cnt--;
 1626		}
 1627
 1628		parser->idx = 0;
 1629
 1630		/* only spaces were written */
 1631		if (isspace(ch) || !ch) {
 1632			*ppos += read;
 1633			ret = read;
 1634			goto out;
 1635		}
 1636	}
 1637
 1638	/* read the non-space input */
 1639	while (cnt && !isspace(ch) && ch) {
 1640		if (parser->idx < parser->size - 1)
 1641			parser->buffer[parser->idx++] = ch;
 1642		else {
 1643			ret = -EINVAL;
 1644			goto out;
 1645		}
 1646		ret = get_user(ch, ubuf++);
 1647		if (ret)
 1648			goto out;
 1649		read++;
 1650		cnt--;
 1651	}
 1652
 1653	/* We either got finished input or we have to wait for another call. */
 1654	if (isspace(ch) || !ch) {
 1655		parser->buffer[parser->idx] = 0;
 1656		parser->cont = false;
 1657	} else if (parser->idx < parser->size - 1) {
 1658		parser->cont = true;
 1659		parser->buffer[parser->idx++] = ch;
 1660		/* Make sure the parsed string always terminates with '\0'. */
 1661		parser->buffer[parser->idx] = 0;
 1662	} else {
 1663		ret = -EINVAL;
 1664		goto out;
 1665	}
 1666
 1667	*ppos += read;
 1668	ret = read;
 1669
 1670out:
 1671	return ret;
 1672}
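
/*
 * Illustrative note (not in the original file): the loop above consumes one
 * whitespace-separated token per call.  Roughly speaking, a write such as
 *
 *	# echo "func1 func2" > set_ftrace_filter
 *
 * returns after filling parser->buffer with "func1"; user space's write()
 * loop then calls in again for "func2", which is how the filter files accept
 * a space-separated list from a single echo.
 */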
 1673
 1674/* TODO add a seq_buf_to_buffer() */
 1675static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 1676{
 1677	int len;
 1678
 1679	if (trace_seq_used(s) <= s->seq.readpos)
 1680		return -EBUSY;
 1681
 1682	len = trace_seq_used(s) - s->seq.readpos;
 1683	if (cnt > len)
 1684		cnt = len;
 1685	memcpy(buf, s->buffer + s->seq.readpos, cnt);
 1686
 1687	s->seq.readpos += cnt;
 1688	return cnt;
 1689}
 1690
 1691unsigned long __read_mostly	tracing_thresh;
 1692static const struct file_operations tracing_max_lat_fops;
 1693
 1694#ifdef LATENCY_FS_NOTIFY
 1695
 1696static struct workqueue_struct *fsnotify_wq;
 1697
 1698static void latency_fsnotify_workfn(struct work_struct *work)
 1699{
 1700	struct trace_array *tr = container_of(work, struct trace_array,
 1701					      fsnotify_work);
 1702	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
 1703}
 1704
 1705static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
 1706{
 1707	struct trace_array *tr = container_of(iwork, struct trace_array,
 1708					      fsnotify_irqwork);
 1709	queue_work(fsnotify_wq, &tr->fsnotify_work);
 1710}
 1711
 1712static void trace_create_maxlat_file(struct trace_array *tr,
 1713				     struct dentry *d_tracer)
 1714{
 1715	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
 1716	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
 1717	tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
 1718					      d_tracer, &tr->max_latency,
 1719					      &tracing_max_lat_fops);
 1720}
 1721
 1722__init static int latency_fsnotify_init(void)
 1723{
 1724	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
 1725				      WQ_UNBOUND | WQ_HIGHPRI, 0);
 1726	if (!fsnotify_wq) {
 1727		pr_err("Unable to allocate tr_max_lat_wq\n");
 1728		return -ENOMEM;
 1729	}
 1730	return 0;
 1731}
 1732
 1733late_initcall_sync(latency_fsnotify_init);
 1734
 1735void latency_fsnotify(struct trace_array *tr)
 1736{
 1737	if (!fsnotify_wq)
 1738		return;
 1739	/*
 1740	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
 1741	 * possible that we are called from __schedule() or do_idle(), which
 1742	 * could cause a deadlock.
 1743	 */
 1744	irq_work_queue(&tr->fsnotify_irqwork);
 1745}
 1746
 1747#elif defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)	\
 1748	|| defined(CONFIG_OSNOISE_TRACER)
 1749
 1750#define trace_create_maxlat_file(tr, d_tracer)				\
 1751	trace_create_file("tracing_max_latency", 0644, d_tracer,	\
 1752			  &tr->max_latency, &tracing_max_lat_fops)
 1753
 1754#else
 1755#define trace_create_maxlat_file(tr, d_tracer)	 do { } while (0)
 1756#endif
 1757
 1758#ifdef CONFIG_TRACER_MAX_TRACE
 1759/*
 1760 * Copy the new maximum trace into the separate maximum-trace
 1761 * structure. (This way the maximum trace is permanently saved
 1762 * for later retrieval via /sys/kernel/tracing/tracing_max_latency.)
 1763 */
 1764static void
 1765__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 1766{
 1767	struct array_buffer *trace_buf = &tr->array_buffer;
 1768	struct array_buffer *max_buf = &tr->max_buffer;
 1769	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
 1770	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
 1771
 1772	max_buf->cpu = cpu;
 1773	max_buf->time_start = data->preempt_timestamp;
 1774
 1775	max_data->saved_latency = tr->max_latency;
 1776	max_data->critical_start = data->critical_start;
 1777	max_data->critical_end = data->critical_end;
 1778
 1779	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 1780	max_data->pid = tsk->pid;
 1781	/*
 1782	 * If tsk == current, then use current_uid(), as that does not use
 1783	 * RCU. The irq tracer can be called out of RCU scope.
 1784	 */
 1785	if (tsk == current)
 1786		max_data->uid = current_uid();
 1787	else
 1788		max_data->uid = task_uid(tsk);
 1789
 1790	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
 1791	max_data->policy = tsk->policy;
 1792	max_data->rt_priority = tsk->rt_priority;
 1793
 1794	/* record this task's comm */
 1795	tracing_record_cmdline(tsk);
 1796	latency_fsnotify(tr);
 1797}
 1798
 1799/**
 1800 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 1801 * @tr: trace array to update
 1802 * @tsk: the task with the latency
 1803 * @cpu: The cpu that initiated the trace.
 1804 * @cond_data: User data associated with a conditional snapshot
 1805 *
 1806 * Flip the buffers between the @tr and the max_tr and record information
 1807 * about which task was the cause of this latency.
 1808 */
 1809void
 1810update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
 1811	      void *cond_data)
 1812{
 1813	if (tr->stop_count)
 1814		return;
 1815
 1816	WARN_ON_ONCE(!irqs_disabled());
 1817
 1818	if (!tr->allocated_snapshot) {
 1819		/* Only the nop tracer should hit this when disabling */
 1820		WARN_ON_ONCE(tr->current_trace != &nop_trace);
 1821		return;
 1822	}
 1823
 1824	arch_spin_lock(&tr->max_lock);
 1825
 1826	/* Inherit the recordable setting from array_buffer */
 1827	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
 1828		ring_buffer_record_on(tr->max_buffer.buffer);
 1829	else
 1830		ring_buffer_record_off(tr->max_buffer.buffer);
 1831
 1832#ifdef CONFIG_TRACER_SNAPSHOT
 1833	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
 1834		goto out_unlock;
 1835#endif
 1836	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
 1837
 1838	__update_max_tr(tr, tsk, cpu);
 1839
 1840 out_unlock:
 1841	arch_spin_unlock(&tr->max_lock);
 1842}
 1843
 1844/**
 1845 * update_max_tr_single - only copy one trace over, and reset the rest
 1846 * @tr: trace array to update
 1847 * @tsk: task with the latency
 1848 * @cpu: the cpu of the buffer to copy.
 1849 *
 1850 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 1851 */
 1852void
 1853update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 1854{
 1855	int ret;
 1856
 1857	if (tr->stop_count)
 1858		return;
 1859
 1860	WARN_ON_ONCE(!irqs_disabled());
 1861	if (!tr->allocated_snapshot) {
 1862		/* Only the nop tracer should hit this when disabling */
 1863		WARN_ON_ONCE(tr->current_trace != &nop_trace);
 1864		return;
 1865	}
 1866
 1867	arch_spin_lock(&tr->max_lock);
 1868
 1869	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
 1870
 1871	if (ret == -EBUSY) {
 1872		/*
 1873		 * We failed to swap the buffer due to a commit taking
 1874		 * place on this CPU. We fail to record, but we reset
 1875		 * the max trace buffer (no one writes directly to it)
 1876		 * and flag that it failed.
 1877		 */
 1878		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
 1879			"Failed to swap buffers due to commit in progress\n");
 1880	}
 1881
 1882	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 1883
 1884	__update_max_tr(tr, tsk, cpu);
 1885	arch_spin_unlock(&tr->max_lock);
 1886}
 1887#endif /* CONFIG_TRACER_MAX_TRACE */
 1888
 1889static int wait_on_pipe(struct trace_iterator *iter, int full)
 1890{
 1891	/* Iterators are static, they should be filled or empty */
 1892	if (trace_buffer_iter(iter, iter->cpu_file))
 1893		return 0;
 1894
 1895	return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
 1896				full);
 1897}
 1898
 1899#ifdef CONFIG_FTRACE_STARTUP_TEST
 1900static bool selftests_can_run;
 1901
 1902struct trace_selftests {
 1903	struct list_head		list;
 1904	struct tracer			*type;
 1905};
 1906
 1907static LIST_HEAD(postponed_selftests);
 1908
 1909static int save_selftest(struct tracer *type)
 1910{
 1911	struct trace_selftests *selftest;
 1912
 1913	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
 1914	if (!selftest)
 1915		return -ENOMEM;
 1916
 1917	selftest->type = type;
 1918	list_add(&selftest->list, &postponed_selftests);
 1919	return 0;
 1920}
 1921
 1922static int run_tracer_selftest(struct tracer *type)
 1923{
 1924	struct trace_array *tr = &global_trace;
 1925	struct tracer *saved_tracer = tr->current_trace;
 1926	int ret;
 1927
 1928	if (!type->selftest || tracing_selftest_disabled)
 1929		return 0;
 1930
 1931	/*
 1932	 * If a tracer registers early in boot up (before scheduling is
 1933	 * initialized and such), then do not run its selftests yet.
 1934	 * Instead, run it a little later in the boot process.
 1935	 */
 1936	if (!selftests_can_run)
 1937		return save_selftest(type);
 1938
 1939	if (!tracing_is_on()) {
 1940		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
 1941			type->name);
 1942		return 0;
 1943	}
 1944
 1945	/*
 1946	 * Run a selftest on this tracer.
 1947	 * Here we reset the trace buffer, and set the current
 1948	 * tracer to be this tracer. The tracer can then run some
 1949	 * internal tracing to verify that everything is in order.
 1950	 * If we fail, we do not register this tracer.
 1951	 */
 1952	tracing_reset_online_cpus(&tr->array_buffer);
 1953
 1954	tr->current_trace = type;
 1955
 1956#ifdef CONFIG_TRACER_MAX_TRACE
 1957	if (type->use_max_tr) {
 1958		/* If we expanded the buffers, make sure the max is expanded too */
 1959		if (ring_buffer_expanded)
 1960			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
 1961					   RING_BUFFER_ALL_CPUS);
 1962		tr->allocated_snapshot = true;
 1963	}
 1964#endif
 1965
 1966	/* the test is responsible for initializing and enabling */
 1967	pr_info("Testing tracer %s: ", type->name);
 1968	ret = type->selftest(type, tr);
 1969	/* the test is responsible for resetting too */
 1970	tr->current_trace = saved_tracer;
 1971	if (ret) {
 1972		printk(KERN_CONT "FAILED!\n");
 1973		/* Add the warning after printing 'FAILED' */
 1974		WARN_ON(1);
 1975		return -1;
 1976	}
 1977	/* Only reset on passing, to avoid touching corrupted buffers */
 1978	tracing_reset_online_cpus(&tr->array_buffer);
 1979
 1980#ifdef CONFIG_TRACER_MAX_TRACE
 1981	if (type->use_max_tr) {
 1982		tr->allocated_snapshot = false;
 1983
 1984		/* Shrink the max buffer again */
 1985		if (ring_buffer_expanded)
 1986			ring_buffer_resize(tr->max_buffer.buffer, 1,
 1987					   RING_BUFFER_ALL_CPUS);
 1988	}
 1989#endif
 1990
 1991	printk(KERN_CONT "PASSED\n");
 1992	return 0;
 1993}
 1994
 1995static __init int init_trace_selftests(void)
 1996{
 1997	struct trace_selftests *p, *n;
 1998	struct tracer *t, **last;
 1999	int ret;
 2000
 2001	selftests_can_run = true;
 2002
 2003	mutex_lock(&trace_types_lock);
 2004
 2005	if (list_empty(&postponed_selftests))
 2006		goto out;
 2007
 2008	pr_info("Running postponed tracer tests:\n");
 2009
 2010	tracing_selftest_running = true;
 2011	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
 2012		/* This loop can take minutes when sanitizers are enabled, so
 2013		 * let's make sure we allow RCU processing.
 2014		 */
 2015		cond_resched();
 2016		ret = run_tracer_selftest(p->type);
 2017		/* If the test fails, then warn and remove from available_tracers */
 2018		if (ret < 0) {
 2019			WARN(1, "tracer: %s failed selftest, disabling\n",
 2020			     p->type->name);
 2021			last = &trace_types;
 2022			for (t = trace_types; t; t = t->next) {
 2023				if (t == p->type) {
 2024					*last = t->next;
 2025					break;
 2026				}
 2027				last = &t->next;
 2028			}
 2029		}
 2030		list_del(&p->list);
 2031		kfree(p);
 2032	}
 2033	tracing_selftest_running = false;
 2034
 2035 out:
 2036	mutex_unlock(&trace_types_lock);
 2037
 2038	return 0;
 2039}
 2040core_initcall(init_trace_selftests);
 2041#else
 2042static inline int run_tracer_selftest(struct tracer *type)
 2043{
 2044	return 0;
 2045}
 2046#endif /* CONFIG_FTRACE_STARTUP_TEST */
 2047
 2048static void add_tracer_options(struct trace_array *tr, struct tracer *t);
 2049
 2050static void __init apply_trace_boot_options(void);
 2051
 2052/**
 2053 * register_tracer - register a tracer with the ftrace system.
 2054 * @type: the plugin for the tracer
 2055 *
 2056 * Register a new plugin tracer.
 2057 */
 2058int __init register_tracer(struct tracer *type)
 2059{
 2060	struct tracer *t;
 2061	int ret = 0;
 2062
 2063	if (!type->name) {
 2064		pr_info("Tracer must have a name\n");
 2065		return -1;
 2066	}
 2067
 2068	if (strlen(type->name) >= MAX_TRACER_SIZE) {
 2069		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
 2070		return -1;
 2071	}
 2072
 2073	if (security_locked_down(LOCKDOWN_TRACEFS)) {
 2074		pr_warn("Can not register tracer %s due to lockdown\n",
 2075			   type->name);
 2076		return -EPERM;
 2077	}
 2078
 2079	mutex_lock(&trace_types_lock);
 2080
 2081	tracing_selftest_running = true;
 2082
 2083	for (t = trace_types; t; t = t->next) {
 2084		if (strcmp(type->name, t->name) == 0) {
 2085			/* already found */
 2086			pr_info("Tracer %s already registered\n",
 2087				type->name);
 2088			ret = -1;
 2089			goto out;
 2090		}
 2091	}
 2092
 2093	if (!type->set_flag)
 2094		type->set_flag = &dummy_set_flag;
 2095	if (!type->flags) {
 2096		/* allocate a dummy tracer_flags */
 2097		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
 2098		if (!type->flags) {
 2099			ret = -ENOMEM;
 2100			goto out;
 2101		}
 2102		type->flags->val = 0;
 2103		type->flags->opts = dummy_tracer_opt;
 2104	} else
 2105		if (!type->flags->opts)
 2106			type->flags->opts = dummy_tracer_opt;
 2107
 2108	/* store the tracer for __set_tracer_option */
 2109	type->flags->trace = type;
 2110
 2111	ret = run_tracer_selftest(type);
 2112	if (ret < 0)
 2113		goto out;
 2114
 2115	type->next = trace_types;
 2116	trace_types = type;
 2117	add_tracer_options(&global_trace, type);
 2118
 2119 out:
 2120	tracing_selftest_running = false;
 2121	mutex_unlock(&trace_types_lock);
 2122
 2123	if (ret || !default_bootup_tracer)
 2124		goto out_unlock;
 2125
 2126	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
 2127		goto out_unlock;
 2128
 2129	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
 2130	/* Do we want this tracer to start on bootup? */
 2131	tracing_set_tracer(&global_trace, type->name);
 2132	default_bootup_tracer = NULL;
 2133
 2134	apply_trace_boot_options();
 2135
 2136	/* Disable other selftests, since they would break this running tracer. */
 2137	disable_tracing_selftest("running a tracer");
 2138
 2139 out_unlock:
 2140	return ret;
 2141}
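
/*
 * Illustrative sketch (not in the original file): a minimal tracer
 * registration.  The names are hypothetical; real tracers usually also
 * provide flags, a selftest and print callbacks.
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static int __init my_tracer_register(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(my_tracer_register);
 *
 * Because register_tracer() is __init, this only works for built-in code
 * registering during boot.
 */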
 2142
 2143static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
 2144{
 2145	struct trace_buffer *buffer = buf->buffer;
 2146
 2147	if (!buffer)
 2148		return;
 2149
 2150	ring_buffer_record_disable(buffer);
 2151
 2152	/* Make sure all commits have finished */
 2153	synchronize_rcu();
 2154	ring_buffer_reset_cpu(buffer, cpu);
 2155
 2156	ring_buffer_record_enable(buffer);
 2157}
 2158
 2159void tracing_reset_online_cpus(struct array_buffer *buf)
 2160{
 2161	struct trace_buffer *buffer = buf->buffer;
 2162
 2163	if (!buffer)
 2164		return;
 2165
 2166	ring_buffer_record_disable(buffer);
 2167
 2168	/* Make sure all commits have finished */
 2169	synchronize_rcu();
 2170
 2171	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
 2172
 2173	ring_buffer_reset_online_cpus(buffer);
 2174
 2175	ring_buffer_record_enable(buffer);
 2176}
 2177
 2178/* Must have trace_types_lock held */
 2179void tracing_reset_all_online_cpus(void)
 2180{
 2181	struct trace_array *tr;
 2182
 2183	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 2184		if (!tr->clear_trace)
 2185			continue;
 2186		tr->clear_trace = false;
 2187		tracing_reset_online_cpus(&tr->array_buffer);
 2188#ifdef CONFIG_TRACER_MAX_TRACE
 2189		tracing_reset_online_cpus(&tr->max_buffer);
 2190#endif
 2191	}
 2192}
 2193
 2194/*
 2195 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
 2196 * is the tgid last observed corresponding to pid=i.
 2197 */
 2198static int *tgid_map;
 2199
 2200/* The maximum valid index into tgid_map. */
 2201static size_t tgid_map_max;
 2202
 2203#define SAVED_CMDLINES_DEFAULT 128
 2204#define NO_CMDLINE_MAP UINT_MAX
 2205static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 2206struct saved_cmdlines_buffer {
 2207	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 2208	unsigned *map_cmdline_to_pid;
 2209	unsigned cmdline_num;
 2210	int cmdline_idx;
 2211	char *saved_cmdlines;
 2212};
 2213static struct saved_cmdlines_buffer *savedcmd;
 2214
 2215static inline char *get_saved_cmdlines(int idx)
 2216{
 2217	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
 2218}
 2219
 2220static inline void set_cmdline(int idx, const char *cmdline)
 2221{
 2222	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
 2223}
 2224
 2225static int allocate_cmdlines_buffer(unsigned int val,
 2226				    struct saved_cmdlines_buffer *s)
 2227{
 2228	s->map_cmdline_to_pid = kmalloc_array(val,
 2229					      sizeof(*s->map_cmdline_to_pid),
 2230					      GFP_KERNEL);
 2231	if (!s->map_cmdline_to_pid)
 2232		return -ENOMEM;
 2233
 2234	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
 2235	if (!s->saved_cmdlines) {
 2236		kfree(s->map_cmdline_to_pid);
 2237		return -ENOMEM;
 2238	}
 2239
 2240	s->cmdline_idx = 0;
 2241	s->cmdline_num = val;
 2242	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
 2243	       sizeof(s->map_pid_to_cmdline));
 2244	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
 2245	       val * sizeof(*s->map_cmdline_to_pid));
 2246
 2247	return 0;
 2248}
 2249
 2250static int trace_create_savedcmd(void)
 2251{
 2252	int ret;
 2253
 2254	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
 2255	if (!savedcmd)
 2256		return -ENOMEM;
 2257
 2258	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
 2259	if (ret < 0) {
 2260		kfree(savedcmd);
 2261		savedcmd = NULL;
 2262		return -ENOMEM;
 2263	}
 2264
 2265	return 0;
 2266}
 2267
 2268int is_tracing_stopped(void)
 2269{
 2270	return global_trace.stop_count;
 2271}
 2272
 2273/**
 2274 * tracing_start - quick start of the tracer
 2275 *
 2276 * If tracing is enabled but was stopped by tracing_stop,
 2277 * this will start the tracer back up.
 2278 */
 2279void tracing_start(void)
 2280{
 2281	struct trace_buffer *buffer;
 2282	unsigned long flags;
 2283
 2284	if (tracing_disabled)
 2285		return;
 2286
 2287	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
 2288	if (--global_trace.stop_count) {
 2289		if (global_trace.stop_count < 0) {
 2290			/* Someone screwed up their debugging */
 2291			WARN_ON_ONCE(1);
 2292			global_trace.stop_count = 0;
 2293		}
 2294		goto out;
 2295	}
 2296
 2297	/* Prevent the buffers from switching */
 2298	arch_spin_lock(&global_trace.max_lock);
 2299
 2300	buffer = global_trace.array_buffer.buffer;
 2301	if (buffer)
 2302		ring_buffer_record_enable(buffer);
 2303
 2304#ifdef CONFIG_TRACER_MAX_TRACE
 2305	buffer = global_trace.max_buffer.buffer;
 2306	if (buffer)
 2307		ring_buffer_record_enable(buffer);
 2308#endif
 2309
 2310	arch_spin_unlock(&global_trace.max_lock);
 2311
 2312 out:
 2313	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
 2314}
 2315
 2316static void tracing_start_tr(struct trace_array *tr)
 2317{
 2318	struct trace_buffer *buffer;
 2319	unsigned long flags;
 2320
 2321	if (tracing_disabled)
 2322		return;
 2323
 2324	/* If global, we need to also start the max tracer */
 2325	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 2326		return tracing_start();
 2327
 2328	raw_spin_lock_irqsave(&tr->start_lock, flags);
 2329
 2330	if (--tr->stop_count) {
 2331		if (tr->stop_count < 0) {
 2332			/* Someone screwed up their debugging */
 2333			WARN_ON_ONCE(1);
 2334			tr->stop_count = 0;
 2335		}
 2336		goto out;
 2337	}
 2338
 2339	buffer = tr->array_buffer.buffer;
 2340	if (buffer)
 2341		ring_buffer_record_enable(buffer);
 2342
 2343 out:
 2344	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 2345}
 2346
 2347/**
 2348 * tracing_stop - quick stop of the tracer
 2349 *
 2350 * Light weight way to stop tracing. Use in conjunction with
 2351 * tracing_start.
 2352 */
 2353void tracing_stop(void)
 2354{
 2355	struct trace_buffer *buffer;
 2356	unsigned long flags;
 2357
 2358	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
 2359	if (global_trace.stop_count++)
 2360		goto out;
 2361
 2362	/* Prevent the buffers from switching */
 2363	arch_spin_lock(&global_trace.max_lock);
 2364
 2365	buffer = global_trace.array_buffer.buffer;
 2366	if (buffer)
 2367		ring_buffer_record_disable(buffer);
 2368
 2369#ifdef CONFIG_TRACER_MAX_TRACE
 2370	buffer = global_trace.max_buffer.buffer;
 2371	if (buffer)
 2372		ring_buffer_record_disable(buffer);
 2373#endif
 2374
 2375	arch_spin_unlock(&global_trace.max_lock);
 2376
 2377 out:
 2378	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
 2379}
 2380
 2381static void tracing_stop_tr(struct trace_array *tr)
 2382{
 2383	struct trace_buffer *buffer;
 2384	unsigned long flags;
 2385
 2386	/* If global, we need to also stop the max tracer */
 2387	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 2388		return tracing_stop();
 2389
 2390	raw_spin_lock_irqsave(&tr->start_lock, flags);
 2391	if (tr->stop_count++)
 2392		goto out;
 2393
 2394	buffer = tr->array_buffer.buffer;
 2395	if (buffer)
 2396		ring_buffer_record_disable(buffer);
 2397
 2398 out:
 2399	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 2400}
 2401
 2402static int trace_save_cmdline(struct task_struct *tsk)
 2403{
 2404	unsigned tpid, idx;
 2405
 2406	/* treat recording of idle task as a success */
 2407	if (!tsk->pid)
 2408		return 1;
 2409
 2410	tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
 2411
 2412	/*
 2413	 * It's not the end of the world if we don't get
 2414	 * the lock, but we also don't want to spin
 2415	 * nor do we want to disable interrupts,
 2416	 * so if we miss here, then better luck next time.
 2417	 */
 2418	if (!arch_spin_trylock(&trace_cmdline_lock))
 2419		return 0;
 2420
 2421	idx = savedcmd->map_pid_to_cmdline[tpid];
 2422	if (idx == NO_CMDLINE_MAP) {
 2423		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
 2424
 2425		savedcmd->map_pid_to_cmdline[tpid] = idx;
 2426		savedcmd->cmdline_idx = idx;
 2427	}
 2428
 2429	savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
 2430	set_cmdline(idx, tsk->comm);
 2431
 2432	arch_spin_unlock(&trace_cmdline_lock);
 2433
 2434	return 1;
 2435}
 2436
 2437static void __trace_find_cmdline(int pid, char comm[])
 2438{
 2439	unsigned map;
 2440	int tpid;
 2441
 2442	if (!pid) {
 2443		strcpy(comm, "<idle>");
 2444		return;
 2445	}
 2446
 2447	if (WARN_ON_ONCE(pid < 0)) {
 2448		strcpy(comm, "<XXX>");
 2449		return;
 2450	}
 2451
 2452	tpid = pid & (PID_MAX_DEFAULT - 1);
 2453	map = savedcmd->map_pid_to_cmdline[tpid];
 2454	if (map != NO_CMDLINE_MAP) {
 2455		tpid = savedcmd->map_cmdline_to_pid[map];
 2456		if (tpid == pid) {
 2457			strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
 2458			return;
 2459		}
 2460	}
 2461	strcpy(comm, "<...>");
 2462}
 2463
 2464void trace_find_cmdline(int pid, char comm[])
 2465{
 2466	preempt_disable();
 2467	arch_spin_lock(&trace_cmdline_lock);
 2468
 2469	__trace_find_cmdline(pid, comm);
 2470
 2471	arch_spin_unlock(&trace_cmdline_lock);
 2472	preempt_enable();
 2473}
 2474
 2475static int *trace_find_tgid_ptr(int pid)
 2476{
 2477	/*
 2478	 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
 2479	 * if we observe a non-NULL tgid_map then we also observe the correct
 2480	 * tgid_map_max.
 2481	 */
 2482	int *map = smp_load_acquire(&tgid_map);
 2483
 2484	if (unlikely(!map || pid > tgid_map_max))
 2485		return NULL;
 2486
 2487	return &map[pid];
 2488}
 2489
 2490int trace_find_tgid(int pid)
 2491{
 2492	int *ptr = trace_find_tgid_ptr(pid);
 2493
 2494	return ptr ? *ptr : 0;
 2495}
 2496
 2497static int trace_save_tgid(struct task_struct *tsk)
 2498{
 2499	int *ptr;
 2500
 2501	/* treat recording of idle task as a success */
 2502	if (!tsk->pid)
 2503		return 1;
 2504
 2505	ptr = trace_find_tgid_ptr(tsk->pid);
 2506	if (!ptr)
 2507		return 0;
 2508
 2509	*ptr = tsk->tgid;
 2510	return 1;
 2511}
 2512
 2513static bool tracing_record_taskinfo_skip(int flags)
 2514{
 2515	if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
 2516		return true;
 2517	if (!__this_cpu_read(trace_taskinfo_save))
 2518		return true;
 2519	return false;
 2520}
 2521
 2522/**
 2523 * tracing_record_taskinfo - record the task info of a task
 2524 *
 2525 * @task:  task to record
 2526 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2527 *         TRACE_RECORD_TGID for recording tgid
 2528 */
 2529void tracing_record_taskinfo(struct task_struct *task, int flags)
 2530{
 2531	bool done;
 2532
 2533	if (tracing_record_taskinfo_skip(flags))
 2534		return;
 2535
 2536	/*
 2537	 * Record as much task information as possible. If some fail, continue
 2538	 * to try to record the others.
 2539	 */
 2540	done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
 2541	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
 2542
 2543	/* If recording any information failed, retry again soon. */
 2544	if (!done)
 2545		return;
 2546
 2547	__this_cpu_write(trace_taskinfo_save, false);
 2548}
 2549
 2550/**
 2551 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
 2552 *
 2553 * @prev: previous task during sched_switch
 2554 * @next: next task during sched_switch
 2555 * @flags: TRACE_RECORD_CMDLINE for recording comm
 2556 *         TRACE_RECORD_TGID for recording tgid
 2557 */
 2558void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
 2559					  struct task_struct *next, int flags)
 2560{
 2561	bool done;
 2562
 2563	if (tracing_record_taskinfo_skip(flags))
 2564		return;
 2565
 2566	/*
 2567	 * Record as much task information as possible. If some fail, continue
 2568	 * to try to record the others.
 2569	 */
 2570	done  = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
 2571	done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
 2572	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
 2573	done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
 2574
 2575	/* If recording any information failed, retry again soon. */
 2576	if (!done)
 2577		return;
 2578
 2579	__this_cpu_write(trace_taskinfo_save, false);
 2580}
 2581
 2582/* Helpers to record a specific task information */
 2583void tracing_record_cmdline(struct task_struct *task)
 2584{
 2585	tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
 2586}
 2587
 2588void tracing_record_tgid(struct task_struct *task)
 2589{
 2590	tracing_record_taskinfo(task, TRACE_RECORD_TGID);
 2591}
 2592
 2593/*
 2594 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
 2595 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
 2596 * simplifies those functions and keeps them in sync.
 2597 */
 2598enum print_line_t trace_handle_return(struct trace_seq *s)
 2599{
 2600	return trace_seq_has_overflowed(s) ?
 2601		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
 2602}
 2603EXPORT_SYMBOL_GPL(trace_handle_return);
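
/*
 * Illustrative sketch (not in the original file): an event's output callback
 * typically ends with this helper so seq-buffer overflow is reported
 * consistently.  The event and function names below are hypothetical.
 *
 *	static enum print_line_t my_event_print(struct trace_iterator *iter,
 *						int flags,
 *						struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "my_event: something happened\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */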
 2604
 2605unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
 2606{
 2607	unsigned int trace_flags = irqs_status;
 2608	unsigned int pc;
 2609
 2610	pc = preempt_count();
 2611
 2612	if (pc & NMI_MASK)
 2613		trace_flags |= TRACE_FLAG_NMI;
 2614	if (pc & HARDIRQ_MASK)
 2615		trace_flags |= TRACE_FLAG_HARDIRQ;
 2616	if (in_serving_softirq())
 2617		trace_flags |= TRACE_FLAG_SOFTIRQ;
 2618
 2619	if (tif_need_resched())
 2620		trace_flags |= TRACE_FLAG_NEED_RESCHED;
 2621	if (test_preempt_need_resched())
 2622		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
 2623	return (trace_flags << 16) | (pc & 0xff);
 2624}
 2625
 2626struct ring_buffer_event *
 2627trace_buffer_lock_reserve(struct trace_buffer *buffer,
 2628			  int type,
 2629			  unsigned long len,
 2630			  unsigned int trace_ctx)
 2631{
 2632	return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
 2633}
 2634
 2635DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
 2636DEFINE_PER_CPU(int, trace_buffered_event_cnt);
 2637static int trace_buffered_event_ref;
 2638
 2639/**
 2640 * trace_buffered_event_enable - enable buffering events
 2641 *
 2642 * When events are being filtered, it is quicker to use a temporary
 2643 * buffer to write the event data into if there's a likely chance
 2644 * that it will not be committed. The discard of the ring buffer
 2645 * is not as fast as committing, and is much slower than copying
 2646 * a commit.
 2647 *
 2648 * When an event is to be filtered, allocate per-CPU buffers to
 2649 * write the event data into; if the event is filtered and discarded
 2650 * it is simply dropped, otherwise the entire data is committed
 2651 * in one shot.
 2652 */
 2653void trace_buffered_event_enable(void)
 2654{
 2655	struct ring_buffer_event *event;
 2656	struct page *page;
 2657	int cpu;
 2658
 2659	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
 2660
 2661	if (trace_buffered_event_ref++)
 2662		return;
 2663
 2664	for_each_tracing_cpu(cpu) {
 2665		page = alloc_pages_node(cpu_to_node(cpu),
 2666					GFP_KERNEL | __GFP_NORETRY, 0);
 2667		if (!page)
 2668			goto failed;
 2669
 2670		event = page_address(page);
 2671		memset(event, 0, sizeof(*event));
 2672
 2673		per_cpu(trace_buffered_event, cpu) = event;
 2674
 2675		preempt_disable();
 2676		if (cpu == smp_processor_id() &&
 2677		    __this_cpu_read(trace_buffered_event) !=
 2678		    per_cpu(trace_buffered_event, cpu))
 2679			WARN_ON_ONCE(1);
 2680		preempt_enable();
 2681	}
 2682
 2683	return;
 2684 failed:
 2685	trace_buffered_event_disable();
 2686}
 2687
 2688static void enable_trace_buffered_event(void *data)
 2689{
 2690	/* Probably not needed, but do it anyway */
 2691	smp_rmb();
 2692	this_cpu_dec(trace_buffered_event_cnt);
 2693}
 2694
 2695static void disable_trace_buffered_event(void *data)
 2696{
 2697	this_cpu_inc(trace_buffered_event_cnt);
 2698}
 2699
 2700/**
 2701 * trace_buffered_event_disable - disable buffering events
 2702 *
 2703 * When a filter is removed, it is faster to not use the buffered
 2704 * events, and to commit directly into the ring buffer. Free up
 2705 * the temp buffers when there are no more users. This requires
 2706 * special synchronization with current events.
 2707 */
 2708void trace_buffered_event_disable(void)
 2709{
 2710	int cpu;
 2711
 2712	WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
 2713
 2714	if (WARN_ON_ONCE(!trace_buffered_event_ref))
 2715		return;
 2716
 2717	if (--trace_buffered_event_ref)
 2718		return;
 2719
 2720	preempt_disable();
 2721	/* For each CPU, set the buffer as used. */
 2722	smp_call_function_many(tracing_buffer_mask,
 2723			       disable_trace_buffered_event, NULL, 1);
 2724	preempt_enable();
 2725
 2726	/* Wait for all current users to finish */
 2727	synchronize_rcu();
 2728
 2729	for_each_tracing_cpu(cpu) {
 2730		free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
 2731		per_cpu(trace_buffered_event, cpu) = NULL;
 2732	}
 2733	/*
 2734	 * Make sure trace_buffered_event is NULL before clearing
 2735	 * trace_buffered_event_cnt.
 2736	 */
 2737	smp_wmb();
 2738
 2739	preempt_disable();
 2740	/* Do the work on each cpu */
 2741	smp_call_function_many(tracing_buffer_mask,
 2742			       enable_trace_buffered_event, NULL, 1);
 2743	preempt_enable();
 2744}
 2745
 2746static struct trace_buffer *temp_buffer;
 2747
 2748struct ring_buffer_event *
 2749trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
 2750			  struct trace_event_file *trace_file,
 2751			  int type, unsigned long len,
 2752			  unsigned int trace_ctx)
 2753{
 2754	struct ring_buffer_event *entry;
 2755	struct trace_array *tr = trace_file->tr;
 2756	int val;
 2757
 2758	*current_rb = tr->array_buffer.buffer;
 2759
 2760	if (!tr->no_filter_buffering_ref &&
 2761	    (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
 2762	    (entry = this_cpu_read(trace_buffered_event))) {
 2763		/*
 2764		 * Filtering is on, so try to use the per cpu buffer first.
 2765		 * This buffer will simulate a ring_buffer_event,
 2766		 * where the type_len is zero and the array[0] will
 2767		 * hold the full length.
 2768		 * (see include/linux/ring_buffer.h for details on
 2769		 *  how the ring_buffer_event is structured).
 2770		 *
 2771		 * Using a temp buffer during filtering and copying it
 2772		 * on a matched filter is quicker than writing directly
 2773		 * into the ring buffer and then discarding it when
 2774		 * it doesn't match. That is because the discard
 2775		 * requires several atomic operations to get right.
 2776		 * Copying on match and doing nothing on a failed match
 2777		 * is still quicker than no copy on match, but having
 2778		 * to discard out of the ring buffer on a failed match.
 2779		 */
 2780		int max_len = PAGE_SIZE - struct_size(entry, array, 1);
 2781
 2782		val = this_cpu_inc_return(trace_buffered_event_cnt);
 2783
 2784		/*
 2785		 * Preemption is disabled, but interrupts and NMIs
 2786		 * can still come in now. If that happens after
 2787		 * the above increment, then it will have to go
 2788		 * back to the old method of allocating the event
 2789		 * on the ring buffer, and if the filter fails, it
 2790		 * will have to call ring_buffer_discard_commit()
 2791		 * to remove it.
 2792		 *
 2793		 * Need to also check the unlikely case that the
 2794		 * length is bigger than the temp buffer size.
 2795		 * If that happens, then the reserve is pretty much
 2796		 * guaranteed to fail, as the ring buffer currently
 2797		 * only allows events less than a page. But that may
 2798		 * change in the future, so let the ring buffer reserve
 2799		 * handle the failure in that case.
 2800		 */
 2801		if (val == 1 && likely(len <= max_len)) {
 2802			trace_event_setup(entry, type, trace_ctx);
 2803			entry->array[0] = len;
 2804			return entry;
 2805		}
 2806		this_cpu_dec(trace_buffered_event_cnt);
 2807	}
 2808
 2809	entry = __trace_buffer_lock_reserve(*current_rb, type, len,
 2810					    trace_ctx);
 2811	/*
 2812	 * If tracing is off, but we have triggers enabled,
 2813	 * we still need to look at the event data. Use the temp_buffer
 2814	 * to store the trace event for the trigger to use. It's recursion
 2815	 * safe and will not be recorded anywhere.
 2816	 */
 2817	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
 2818		*current_rb = temp_buffer;
 2819		entry = __trace_buffer_lock_reserve(*current_rb, type, len,
 2820						    trace_ctx);
 2821	}
 2822	return entry;
 2823}
 2824EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
 2825
 2826static DEFINE_SPINLOCK(tracepoint_iter_lock);
 2827static DEFINE_MUTEX(tracepoint_printk_mutex);
 2828
 2829static void output_printk(struct trace_event_buffer *fbuffer)
 2830{
 2831	struct trace_event_call *event_call;
 2832	struct trace_event_file *file;
 2833	struct trace_event *event;
 2834	unsigned long flags;
 2835	struct trace_iterator *iter = tracepoint_print_iter;
 2836
 2837	/* We should never get here if iter is NULL */
 2838	if (WARN_ON_ONCE(!iter))
 2839		return;
 2840
 2841	event_call = fbuffer->trace_file->event_call;
 2842	if (!event_call || !event_call->event.funcs ||
 2843	    !event_call->event.funcs->trace)
 2844		return;
 2845
 2846	file = fbuffer->trace_file;
 2847	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
 2848	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
 2849	     !filter_match_preds(file->filter, fbuffer->entry)))
 2850		return;
 2851
 2852	event = &fbuffer->trace_file->event_call->event;
 2853
 2854	spin_lock_irqsave(&tracepoint_iter_lock, flags);
 2855	trace_seq_init(&iter->seq);
 2856	iter->ent = fbuffer->entry;
 2857	event_call->event.funcs->trace(iter, 0, event);
 2858	trace_seq_putc(&iter->seq, 0);
 2859	printk("%s", iter->seq.buffer);
 2860
 2861	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
 2862}
 2863
 2864int tracepoint_printk_sysctl(struct ctl_table *table, int write,
 2865			     void *buffer, size_t *lenp,
 2866			     loff_t *ppos)
 2867{
 2868	int save_tracepoint_printk;
 2869	int ret;
 2870
 2871	mutex_lock(&tracepoint_printk_mutex);
 2872	save_tracepoint_printk = tracepoint_printk;
 2873
 2874	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 2875
 2876	/*
 2877	 * This will force exiting early, as tracepoint_printk
  2878	 * is always zero when tracepoint_print_iter is not allocated.
 2879	 */
 2880	if (!tracepoint_print_iter)
 2881		tracepoint_printk = 0;
 2882
 2883	if (save_tracepoint_printk == tracepoint_printk)
 2884		goto out;
 2885
 2886	if (tracepoint_printk)
 2887		static_key_enable(&tracepoint_printk_key.key);
 2888	else
 2889		static_key_disable(&tracepoint_printk_key.key);
 2890
 2891 out:
 2892	mutex_unlock(&tracepoint_printk_mutex);
 2893
 2894	return ret;
 2895}
 2896
 2897void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 2898{
 2899	enum event_trigger_type tt = ETT_NONE;
 2900	struct trace_event_file *file = fbuffer->trace_file;
 2901
 2902	if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
 2903			fbuffer->entry, &tt))
 2904		goto discard;
 2905
 2906	if (static_key_false(&tracepoint_printk_key.key))
 2907		output_printk(fbuffer);
 2908
 2909	if (static_branch_unlikely(&trace_event_exports_enabled))
 2910		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
 2911
 2912	trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
 2913			fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
 2914
 2915discard:
 2916	if (tt)
 2917		event_triggers_post_call(file, tt);
 2918
 2919}
 2920EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 2921
 2922/*
 2923 * Skip 3:
 2924 *
 2925 *   trace_buffer_unlock_commit_regs()
 2926 *   trace_event_buffer_commit()
 2927 *   trace_event_raw_event_xxx()
 2928 */
 2929# define STACK_SKIP 3
 2930
 2931void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 2932				     struct trace_buffer *buffer,
 2933				     struct ring_buffer_event *event,
 2934				     unsigned int trace_ctx,
 2935				     struct pt_regs *regs)
 2936{
 2937	__buffer_unlock_commit(buffer, event);
 2938
 2939	/*
 2940	 * If regs is not set, then skip the necessary functions.
 2941	 * Note, we can still get here via blktrace, wakeup tracer
 2942	 * and mmiotrace, but that's ok if they lose a function or
 2943	 * two. They are not that meaningful.
 2944	 */
 2945	ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
 2946	ftrace_trace_userstack(tr, buffer, trace_ctx);
 2947}
 2948
 2949/*
 2950 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 2951 */
 2952void
 2953trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 2954				   struct ring_buffer_event *event)
 2955{
 2956	__buffer_unlock_commit(buffer, event);
 2957}
 2958
 2959void
 2960trace_function(struct trace_array *tr, unsigned long ip, unsigned long
 2961	       parent_ip, unsigned int trace_ctx)
 2962{
 2963	struct trace_event_call *call = &event_function;
 2964	struct trace_buffer *buffer = tr->array_buffer.buffer;
 2965	struct ring_buffer_event *event;
 2966	struct ftrace_entry *entry;
 2967
 2968	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 2969					    trace_ctx);
 2970	if (!event)
 2971		return;
 2972	entry	= ring_buffer_event_data(event);
 2973	entry->ip			= ip;
 2974	entry->parent_ip		= parent_ip;
 2975
 2976	if (!call_filter_check_discard(call, entry, buffer, event)) {
 2977		if (static_branch_unlikely(&trace_function_exports_enabled))
 2978			ftrace_exports(event, TRACE_EXPORT_FUNCTION);
 2979		__buffer_unlock_commit(buffer, event);
 2980	}
 2981}
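
/*
 * Editor's illustration (not part of this file): how a caller would
 * typically pair tracing_gen_ctx() with trace_function() to record a
 * single TRACE_FN entry, mirroring what the function tracer callbacks do.
 * The function name is hypothetical.
 */
#if 0
static void example_record_function_call(struct trace_array *tr,
					 unsigned long ip,
					 unsigned long parent_ip)
{
	unsigned int trace_ctx = tracing_gen_ctx();

	/* Writes one ftrace_entry {ip, parent_ip} into tr's ring buffer */
	trace_function(tr, ip, parent_ip, trace_ctx);
}
#endif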
 2982
 2983#ifdef CONFIG_STACKTRACE
 2984
 2985/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
 2986#define FTRACE_KSTACK_NESTING	4
 2987
 2988#define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)
 2989
 2990struct ftrace_stack {
 2991	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
 2992};
 2993
 2994
 2995struct ftrace_stacks {
 2996	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
 2997};
 2998
 2999static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
 3000static DEFINE_PER_CPU(int, ftrace_stack_reserve);
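
/*
 * Editor's note (worked example, assuming 4 KiB pages and 8-byte longs):
 * FTRACE_KSTACK_ENTRIES = 4096 / 4 = 1024 slots per nesting level, so one
 * struct ftrace_stack is 1024 * 8 = 8 KiB and the per-CPU struct
 * ftrace_stacks (4 nesting levels) is 32 KiB.
 */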
 3001
 3002static void __ftrace_trace_stack(struct trace_buffer *buffer,
 3003				 unsigned int trace_ctx,
 3004				 int skip, struct pt_regs *regs)
 3005{
 3006	struct trace_event_call *call = &event_kernel_stack;
 3007	struct ring_buffer_event *event;
 3008	unsigned int size, nr_entries;
 3009	struct ftrace_stack *fstack;
 3010	struct stack_entry *entry;
 3011	int stackidx;
 3012
 3013	/*
  3014	 * Add one, for this function and the call to stack_trace_save().
 3015	 * If regs is set, then these functions will not be in the way.
 3016	 */
 3017#ifndef CONFIG_UNWINDER_ORC
 3018	if (!regs)
 3019		skip++;
 3020#endif
 3021
 3022	preempt_disable_notrace();
 3023
 3024	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
 3025
 3026	/* This should never happen. If it does, yell once and skip */
 3027	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
 3028		goto out;
 3029
 3030	/*
 3031	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
 3032	 * interrupt will either see the value pre increment or post
 3033	 * increment. If the interrupt happens pre increment it will have
 3034	 * restored the counter when it returns.  We just need a barrier to
 3035	 * keep gcc from moving things around.
 3036	 */
 3037	barrier();
 3038
 3039	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
 3040	size = ARRAY_SIZE(fstack->calls);
 3041
 3042	if (regs) {
 3043		nr_entries = stack_trace_save_regs(regs, fstack->calls,
 3044						   size, skip);
 3045	} else {
 3046		nr_entries = stack_trace_save(fstack->calls, size, skip);
 3047	}
 3048
 3049	size = nr_entries * sizeof(unsigned long);
 3050	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
 3051				    (sizeof(*entry) - sizeof(entry->caller)) + size,
 3052				    trace_ctx);
 3053	if (!event)
 3054		goto out;
 3055	entry = ring_buffer_event_data(event);
 3056
 3057	memcpy(&entry->caller, fstack->calls, size);
 3058	entry->size = nr_entries;
 3059
 3060	if (!call_filter_check_discard(call, entry, buffer, event))
 3061		__buffer_unlock_commit(buffer, event);
 3062
 3063 out:
 3064	/* Again, don't let gcc optimize things here */
 3065	barrier();
 3066	__this_cpu_dec(ftrace_stack_reserve);
 3067	preempt_enable_notrace();
 3068
 3069}
 3070
 3071static inline void ftrace_trace_stack(struct trace_array *tr,
 3072				      struct trace_buffer *buffer,
 3073				      unsigned int trace_ctx,
 3074				      int skip, struct pt_regs *regs)
 3075{
 3076	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
 3077		return;
 3078
 3079	__ftrace_trace_stack(buffer, trace_ctx, skip, regs);
 3080}
 3081
 3082void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
 3083		   int skip)
 3084{
 3085	struct trace_buffer *buffer = tr->array_buffer.buffer;
 3086
 3087	if (rcu_is_watching()) {
 3088		__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
 3089		return;
 3090	}
 3091
 3092	/*
 3093	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
 3094	 * but if the above rcu_is_watching() failed, then the NMI
 3095	 * triggered someplace critical, and rcu_irq_enter() should
 3096	 * not be called from NMI.
 3097	 */
 3098	if (unlikely(in_nmi()))
 3099		return;
 3100
 3101	rcu_irq_enter_irqson();
 3102	__ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
 3103	rcu_irq_exit_irqson();
 3104}
 3105
 3106/**
 3107 * trace_dump_stack - record a stack back trace in the trace buffer
 3108 * @skip: Number of functions to skip (helper handlers)
 3109 */
 3110void trace_dump_stack(int skip)
 3111{
 3112	if (tracing_disabled || tracing_selftest_running)
 3113		return;
 3114
 3115#ifndef CONFIG_UNWINDER_ORC
 3116	/* Skip 1 to skip this function. */
 3117	skip++;
 3118#endif
 3119	__ftrace_trace_stack(global_trace.array_buffer.buffer,
 3120			     tracing_gen_ctx(), skip, NULL);
 3121}
 3122EXPORT_SYMBOL_GPL(trace_dump_stack);
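
/*
 * Editor's illustration (not part of this file): recording the caller's
 * kernel stack into the global trace buffer from arbitrary kernel code.
 * The wrapper name is hypothetical.
 */
#if 0
static void example_record_backtrace(void)
{
	/* skip = 0: don't hide any extra frames beyond the helper itself */
	trace_dump_stack(0);
}
#endif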
 3123
 3124#ifdef CONFIG_USER_STACKTRACE_SUPPORT
 3125static DEFINE_PER_CPU(int, user_stack_count);
 3126
 3127static void
 3128ftrace_trace_userstack(struct trace_array *tr,
 3129		       struct trace_buffer *buffer, unsigned int trace_ctx)
 3130{
 3131	struct trace_event_call *call = &event_user_stack;
 3132	struct ring_buffer_event *event;
 3133	struct userstack_entry *entry;
 3134
 3135	if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
 3136		return;
 3137
 3138	/*
  3139	 * NMIs can not handle page faults, even with fixups.
  3140	 * Saving the user stack can (and often does) fault.
 3141	 */
 3142	if (unlikely(in_nmi()))
 3143		return;
 3144
 3145	/*
 3146	 * prevent recursion, since the user stack tracing may
 3147	 * trigger other kernel events.
 3148	 */
 3149	preempt_disable();
 3150	if (__this_cpu_read(user_stack_count))
 3151		goto out;
 3152
 3153	__this_cpu_inc(user_stack_count);
 3154
 3155	event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 3156					    sizeof(*entry), trace_ctx);
 3157	if (!event)
 3158		goto out_drop_count;
 3159	entry	= ring_buffer_event_data(event);
 3160
 3161	entry->tgid		= current->tgid;
 3162	memset(&entry->caller, 0, sizeof(entry->caller));
 3163
 3164	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
 3165	if (!call_filter_check_discard(call, entry, buffer, event))
 3166		__buffer_unlock_commit(buffer, event);
 3167
 3168 out_drop_count:
 3169	__this_cpu_dec(user_stack_count);
 3170 out:
 3171	preempt_enable();
 3172}
 3173#else /* CONFIG_USER_STACKTRACE_SUPPORT */
 3174static void ftrace_trace_userstack(struct trace_array *tr,
 3175				   struct trace_buffer *buffer,
 3176				   unsigned int trace_ctx)
 3177{
 3178}
 3179#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
 3180
 3181#endif /* CONFIG_STACKTRACE */
 3182
 3183static inline void
 3184func_repeats_set_delta_ts(struct func_repeats_entry *entry,
 3185			  unsigned long long delta)
 3186{
 3187	entry->bottom_delta_ts = delta & U32_MAX;
 3188	entry->top_delta_ts = (delta >> 32);
 3189}
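
/*
 * Editor's illustration (not part of this file): reassembling the 64-bit
 * delta that func_repeats_set_delta_ts() splits across the two 32-bit
 * fields. The read side in the output code does the equivalent; the helper
 * name here is hypothetical.
 */
#if 0
static inline u64 example_func_repeats_delta(struct func_repeats_entry *entry)
{
	return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
}
#endif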
 3190
 3191void trace_last_func_repeats(struct trace_array *tr,
 3192			     struct trace_func_repeats *last_info,
 3193			     unsigned int trace_ctx)
 3194{
 3195	struct trace_buffer *buffer = tr->array_buffer.buffer;
 3196	struct func_repeats_entry *entry;
 3197	struct ring_buffer_event *event;
 3198	u64 delta;
 3199
 3200	event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
 3201					    sizeof(*entry), trace_ctx);
 3202	if (!event)
 3203		return;
 3204
 3205	delta = ring_buffer_event_time_stamp(buffer, event) -
 3206		last_info->ts_last_call;
 3207
 3208	entry = ring_buffer_event_data(event);
 3209	entry->ip = last_info->ip;
 3210	entry->parent_ip = last_info->parent_ip;
 3211	entry->count = last_info->count;
 3212	func_repeats_set_delta_ts(entry, delta);
 3213
 3214	__buffer_unlock_commit(buffer, event);
 3215}
 3216
 3217/* created for use with alloc_percpu */
 3218struct trace_buffer_struct {
 3219	int nesting;
 3220	char buffer[4][TRACE_BUF_SIZE];
 3221};
 3222
 3223static struct trace_buffer_struct *trace_percpu_buffer;
 3224
 3225/*
 3226 * This allows for lockless recording.  If we're nested too deeply, then
 3227 * this returns NULL.
 3228 */
 3229static char *get_trace_buf(void)
 3230{
 3231	struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
 3232
 3233	if (!buffer || buffer->nesting >= 4)
 3234		return NULL;
 3235
 3236	buffer->nesting++;
 3237
 3238	/* Interrupts must see nesting incremented before we use the buffer */
 3239	barrier();
 3240	return &buffer->buffer[buffer->nesting - 1][0];
 3241}
 3242
 3243static void put_trace_buf(void)
 3244{
 3245	/* Don't let the decrement of nesting leak before this */
 3246	barrier();
 3247	this_cpu_dec(trace_percpu_buffer->nesting);
 3248}
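
/*
 * Editor's illustration (not part of this file): the usage pattern for the
 * per-CPU trace_printk buffers, as followed by trace_vbprintk() below --
 * disable preemption, grab a nesting slot, use it, then release it. The
 * function name is hypothetical.
 */
#if 0
static void example_use_trace_buf(void)
{
	char *tbuffer;

	preempt_disable_notrace();
	tbuffer = get_trace_buf();
	if (tbuffer) {
		/* ... format at most TRACE_BUF_SIZE bytes into tbuffer ... */
		put_trace_buf();
	}
	preempt_enable_notrace();
}
#endif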
 3249
 3250static int alloc_percpu_trace_buffer(void)
 3251{
 3252	struct trace_buffer_struct *buffers;
 3253
 3254	if (trace_percpu_buffer)
 3255		return 0;
 3256
 3257	buffers = alloc_percpu(struct trace_buffer_struct);
 3258	if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
 3259		return -ENOMEM;
 3260
 3261	trace_percpu_buffer = buffers;
 3262	return 0;
 3263}
 3264
 3265static int buffers_allocated;
 3266
 3267void trace_printk_init_buffers(void)
 3268{
 3269	if (buffers_allocated)
 3270		return;
 3271
 3272	if (alloc_percpu_trace_buffer())
 3273		return;
 3274
 3275	/* trace_printk() is for debug use only. Don't use it in production. */
 3276
 3277	pr_warn("\n");
 3278	pr_warn("**********************************************************\n");
 3279	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
 3280	pr_warn("**                                                      **\n");
 3281	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
 3282	pr_warn("**                                                      **\n");
 3283	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
 3284	pr_warn("** unsafe for production use.                           **\n");
 3285	pr_warn("**                                                      **\n");
 3286	pr_warn("** If you see this message and you are not debugging    **\n");
 3287	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
 3288	pr_warn("**                                                      **\n");
 3289	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
 3290	pr_warn("**********************************************************\n");
 3291
 3292	/* Expand the buffers to set size */
 3293	tracing_update_buffers();
 3294
 3295	buffers_allocated = 1;
 3296
 3297	/*
 3298	 * trace_printk_init_buffers() can be called by modules.
 3299	 * If that happens, then we need to start cmdline recording
  3300	 * directly here. If the global_trace.buffer is already
  3301	 * allocated at this point, then this was called by module code.
 3302	 */
 3303	if (global_trace.array_buffer.buffer)
 3304		tracing_start_cmdline_record();
 3305}
 3306EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
 3307
 3308void trace_printk_start_comm(void)
 3309{
 3310	/* Start tracing comms if trace printk is set */
 3311	if (!buffers_allocated)
 3312		return;
 3313	tracing_start_cmdline_record();
 3314}
 3315
 3316static void trace_printk_start_stop_comm(int enabled)
 3317{
 3318	if (!buffers_allocated)
 3319		return;
 3320
 3321	if (enabled)
 3322		tracing_start_cmdline_record();
 3323	else
 3324		tracing_stop_cmdline_record();
 3325}
 3326
 3327/**
 3328 * trace_vbprintk - write binary msg to tracing buffer
 3329 * @ip:    The address of the caller
 3330 * @fmt:   The string format to write to the buffer
 3331 * @args:  Arguments for @fmt
 3332 */
 3333int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 3334{
 3335	struct trace_event_call *call = &event_bprint;
 3336	struct ring_buffer_event *event;
 3337	struct trace_buffer *buffer;
 3338	struct trace_array *tr = &global_trace;
 3339	struct bprint_entry *entry;
 3340	unsigned int trace_ctx;
 3341	char *tbuffer;
 3342	int len = 0, size;
 3343
 3344	if (unlikely(tracing_selftest_running || tracing_disabled))
 3345		return 0;
 3346
 3347	/* Don't pollute graph traces with trace_vprintk internals */
 3348	pause_graph_tracing();
 3349
 3350	trace_ctx = tracing_gen_ctx();
 3351	preempt_disable_notrace();
 3352
 3353	tbuffer = get_trace_buf();
 3354	if (!tbuffer) {
 3355		len = 0;
 3356		goto out_nobuffer;
 3357	}
 3358
 3359	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
 3360
 3361	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
 3362		goto out_put;
 3363
 3364	size = sizeof(*entry) + sizeof(u32) * len;
 3365	buffer = tr->array_buffer.buffer;
 3366	ring_buffer_nest_start(buffer);
 3367	event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
 3368					    trace_ctx);
 3369	if (!event)
 3370		goto out;
 3371	entry = ring_buffer_event_data(event);
 3372	entry->ip			= ip;
 3373	entry->fmt			= fmt;
 3374
 3375	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 3376	if (!call_filter_check_discard(call, entry, buffer, event)) {
 3377		__buffer_unlock_commit(buffer, event);
 3378		ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
 3379	}
 3380
 3381out:
 3382	ring_buffer_nest_end(buffer);
 3383out_put:
 3384	put_trace_buf();
 3385
 3386out_nobuffer:
 3387	preempt_enable_notrace();
 3388	unpause_graph_tracing();
 3389
 3390	return len;
 3391}
 3392EXPORT_SYMBOL_GPL(trace_vbprintk);
 3393
 3394__printf(3, 0)
 3395static int
 3396__trace_array_vprintk(struct trace_buffer *buffer,
 3397		      unsigned long ip, const char *fmt, va_list args)
 3398{
 3399	struct trace_event_call *call = &event_print;
 3400	struct ring_buffer_event *event;
 3401	int len = 0, size;
 3402	struct print_entry *entry;
 3403	unsigned int trace_ctx;
 3404	char *tbuffer;
 3405
 3406	if (tracing_disabled || tracing_selftest_running)
 3407		return 0;
 3408
 3409	/* Don't pollute graph traces with trace_vprintk internals */
 3410	pause_graph_tracing();
 3411
 3412	trace_ctx = tracing_gen_ctx();
 3413	preempt_disable_notrace();
 3414
 3415
 3416	tbuffer = get_trace_buf();
 3417	if (!tbuffer) {
 3418		len = 0;
 3419		goto out_nobuffer;
 3420	}
 3421
 3422	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 3423
 3424	size = sizeof(*entry) + len + 1;
 3425	ring_buffer_nest_start(buffer);
 3426	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 3427					    trace_ctx);
 3428	if (!event)
 3429		goto out;
 3430	entry = ring_buffer_event_data(event);
 3431	entry->ip = ip;
 3432
 3433	memcpy(&entry->buf, tbuffer, len + 1);
 3434	if (!call_filter_check_discard(call, entry, buffer, event)) {
 3435		__buffer_unlock_commit(buffer, event);
 3436		ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
 3437	}
 3438
 3439out:
 3440	ring_buffer_nest_end(buffer);
 3441	put_trace_buf();
 3442
 3443out_nobuffer:
 3444	preempt_enable_notrace();
 3445	unpause_graph_tracing();
 3446
 3447	return len;
 3448}
 3449
 3450__printf(3, 0)
 3451int trace_array_vprintk(struct trace_array *tr,
 3452			unsigned long ip, const char *fmt, va_list args)
 3453{
 3454	return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
 3455}
 3456
 3457/**
 3458 * trace_array_printk - Print a message to a specific instance
 3459 * @tr: The instance trace_array descriptor
 3460 * @ip: The instruction pointer that this is called from.
 3461 * @fmt: The format to print (printf format)
 3462 *
  3463 * If a subsystem sets up its own instance, it has the right to
  3464 * printk strings into its tracing instance buffer using this
  3465 * function. Note, this function will not write into the top level
  3466 * buffer (use trace_printk() for that), as the top level buffer
  3467 * should only contain events that can be individually disabled.
  3468 * trace_printk() is only used for debugging a kernel, and should
  3469 * never be incorporated into normal use.
 3470 *
 3471 * trace_array_printk() can be used, as it will not add noise to the
 3472 * top level tracing buffer.
 3473 *
 3474 * Note, trace_array_init_printk() must be called on @tr before this
 3475 * can be used.
 3476 */
 3477__printf(3, 0)
 3478int trace_array_printk(struct trace_array *tr,
 3479		       unsigned long ip, const char *fmt, ...)
 3480{
 3481	int ret;
 3482	va_list ap;
 3483
 3484	if (!tr)
 3485		return -ENOENT;
 3486
 3487	/* This is only allowed for created instances */
 3488	if (tr == &global_trace)
 3489		return 0;
 3490
 3491	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
 3492		return 0;
 3493
 3494	va_start(ap, fmt);
 3495	ret = trace_array_vprintk(tr, ip, fmt, ap);
 3496	va_end(ap);
 3497	return ret;
 3498}
 3499EXPORT_SYMBOL_GPL(trace_array_printk);
 3500
 3501/**
 3502 * trace_array_init_printk - Initialize buffers for trace_array_printk()
 3503 * @tr: The trace array to initialize the buffers for
 3504 *
  3505 * As trace_array_printk() only writes into instances, calls to it are
  3506 * OK to have in the kernel (unlike trace_printk()). This needs to be called
 3507 * before trace_array_printk() can be used on a trace_array.
 3508 */
 3509int trace_array_init_printk(struct trace_array *tr)
 3510{
 3511	if (!tr)
 3512		return -ENOENT;
 3513
 3514	/* This is only allowed for created instances */
 3515	if (tr == &global_trace)
 3516		return -EINVAL;
 3517
 3518	return alloc_percpu_trace_buffer();
 3519}
 3520EXPORT_SYMBOL_GPL(trace_array_init_printk);
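
/*
 * Editor's illustration (not part of this file): how a module might print
 * into its own tracing instance, per the kerneldoc above. The instance
 * name "sample" is made up, and trace_array_get_by_name()/trace_array_put()
 * are assumed to be available to the caller.
 */
#if 0
static void example_instance_printk(void)
{
	struct trace_array *tr;

	tr = trace_array_get_by_name("sample");
	if (!tr)
		return;

	/* Must be called before trace_array_printk() on this instance */
	if (trace_array_init_printk(tr))
		goto out;

	trace_array_printk(tr, _THIS_IP_, "hello from %s\n", "sample");
out:
	trace_array_put(tr);
}
#endif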
 3521
 3522__printf(3, 4)
 3523int trace_array_printk_buf(struct trace_buffer *buffer,
 3524			   unsigned long ip, const char *fmt, ...)
 3525{
 3526	int ret;
 3527	va_list ap;
 3528
 3529	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
 3530		return 0;
 3531
 3532	va_start(ap, fmt);
 3533	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
 3534	va_end(ap);
 3535	return ret;
 3536}
 3537
 3538__printf(2, 0)
 3539int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 3540{
 3541	return trace_array_vprintk(&global_trace, ip, fmt, args);
 3542}
 3543EXPORT_SYMBOL_GPL(trace_vprintk);
 3544
 3545static void trace_iterator_increment(struct trace_iterator *iter)
 3546{
 3547	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
 3548
 3549	iter->idx++;
 3550	if (buf_iter)
 3551		ring_buffer_iter_advance(buf_iter);
 3552}
 3553
 3554static struct trace_entry *
 3555peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 3556		unsigned long *lost_events)
 3557{
 3558	struct ring_buffer_event *event;
 3559	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
 3560
 3561	if (buf_iter) {
 3562		event = ring_buffer_iter_peek(buf_iter, ts);
 3563		if (lost_events)
 3564			*lost_events = ring_buffer_iter_dropped(buf_iter) ?
 3565				(unsigned long)-1 : 0;
 3566	} else {
 3567		event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
 3568					 lost_events);
 3569	}
 3570
 3571	if (event) {
 3572		iter->ent_size = ring_buffer_event_length(event);
 3573		return ring_buffer_event_data(event);
 3574	}
 3575	iter->ent_size = 0;
 3576	return NULL;
 3577}
 3578
 3579static struct trace_entry *
 3580__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
 3581		  unsigned long *missing_events, u64 *ent_ts)
 3582{
 3583	struct trace_buffer *buffer = iter->array_buffer->buffer;
 3584	struct trace_entry *ent, *next = NULL;
 3585	unsigned long lost_events = 0, next_lost = 0;
 3586	int cpu_file = iter->cpu_file;
 3587	u64 next_ts = 0, ts;
 3588	int next_cpu = -1;
 3589	int next_size = 0;
 3590	int cpu;
 3591
 3592	/*
  3593	 * If we are in a per_cpu trace file, don't bother iterating over
  3594	 * all CPUs; peek at that CPU directly.
 3595	 */
 3596	if (cpu_file > RING_BUFFER_ALL_CPUS) {
 3597		if (ring_buffer_empty_cpu(buffer, cpu_file))
 3598			return NULL;
 3599		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
 3600		if (ent_cpu)
 3601			*ent_cpu = cpu_file;
 3602
 3603		return ent;
 3604	}
 3605
 3606	for_each_tracing_cpu(cpu) {
 3607
 3608		if (ring_buffer_empty_cpu(buffer, cpu))
 3609			continue;
 3610
 3611		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
 3612
 3613		/*
 3614		 * Pick the entry with the smallest timestamp:
 3615		 */
 3616		if (ent && (!next || ts < next_ts)) {
 3617			next = ent;
 3618			next_cpu = cpu;
 3619			next_ts = ts;
 3620			next_lost = lost_events;
 3621			next_size = iter->ent_size;
 3622		}
 3623	}
 3624
 3625	iter->ent_size = next_size;
 3626
 3627	if (ent_cpu)
 3628		*ent_cpu = next_cpu;
 3629
 3630	if (ent_ts)
 3631		*ent_ts = next_ts;
 3632
 3633	if (missing_events)
 3634		*missing_events = next_lost;
 3635
 3636	return next;
 3637}
 3638
 3639#define STATIC_FMT_BUF_SIZE	128
 3640static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
 3641
 3642static char *trace_iter_expand_format(struct trace_iterator *iter)
 3643{
 3644	char *tmp;
 3645
 3646	/*
 3647	 * iter->tr is NULL when used with tp_printk, which makes
 3648	 * this get called where it is not safe to call krealloc().
 3649	 */
 3650	if (!iter->tr || iter->fmt == static_fmt_buf)
 3651		return NULL;
 3652
 3653	tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
 3654		       GFP_KERNEL);
 3655	if (tmp) {
 3656		iter->fmt_size += STATIC_FMT_BUF_SIZE;
 3657		iter->fmt = tmp;
 3658	}
 3659
 3660	return tmp;
 3661}
 3662
 3663/* Returns true if the string is safe to dereference from an event */
 3664static bool trace_safe_str(struct trace_iterator *iter, const char *str)
 3665{
 3666	unsigned long addr = (unsigned long)str;
 3667	struct trace_event *trace_event;
 3668	struct trace_event_call *event;
 3669
 3670	/* OK if part of the event data */
 3671	if ((addr >= (unsigned long)iter->ent) &&
 3672	    (addr < (unsigned long)iter->ent + iter->ent_size))
 3673		return true;
 3674
 3675	/* OK if part of the temp seq buffer */
 3676	if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
 3677	    (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
 3678		return true;
 3679
 3680	/* Core rodata can not be freed */
 3681	if (is_kernel_rodata(addr))
 3682		return true;
 3683
 3684	if (trace_is_tracepoint_string(str))
 3685		return true;
 3686
 3687	/*
 3688	 * Now this could be a module event, referencing core module
 3689	 * data, which is OK.
 3690	 */
 3691	if (!iter->ent)
 3692		return false;
 3693
 3694	trace_event = ftrace_find_event(iter->ent->type);
 3695	if (!trace_event)
 3696		return false;
 3697
 3698	event = container_of(trace_event, struct trace_event_call, event);
 3699	if (!event->mod)
 3700		return false;
 3701
 3702	/* Would rather have rodata, but this will suffice */
 3703	if (within_module_core(addr, event->mod))
 3704		return true;
 3705
 3706	return false;
 3707}
 3708
 3709static const char *show_buffer(struct trace_seq *s)
 3710{
 3711	struct seq_buf *seq = &s->seq;
 3712
 3713	seq_buf_terminate(seq);
 3714
 3715	return seq->buffer;
 3716}
 3717
 3718static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
 3719
 3720static int test_can_verify_check(const char *fmt, ...)
 3721{
 3722	char buf[16];
 3723	va_list ap;
 3724	int ret;
 3725
 3726	/*
  3727	 * The verifier depends on vsnprintf() modifying the va_list
  3728	 * passed to it, where it is sent as a reference. Some architectures
  3729	 * (like x86_32) pass it by value, which means that vsnprintf()
 3730	 * does not modify the va_list passed to it, and the verifier
 3731	 * would then need to be able to understand all the values that
 3732	 * vsnprintf can use. If it is passed by value, then the verifier
 3733	 * is disabled.
 3734	 */
 3735	va_start(ap, fmt);
 3736	vsnprintf(buf, 16, "%d", ap);
 3737	ret = va_arg(ap, int);
 3738	va_end(ap);
 3739
 3740	return ret;
 3741}
 3742
 3743static void test_can_verify(void)
 3744{
 3745	if (!test_can_verify_check("%d %d", 0, 1)) {
 3746		pr_info("trace event string verifier disabled\n");
 3747		static_branch_inc(&trace_no_verify);
 3748	}
 3749}
 3750
 3751/**
 3752 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
 3753 * @iter: The iterator that holds the seq buffer and the event being printed
 3754 * @fmt: The format used to print the event
 3755 * @ap: The va_list holding the data to print from @fmt.
 3756 *
 3757 * This writes the data into the @iter->seq buffer using the data from
 3758 * @fmt and @ap. If the format has a %s, then the source of the string
 3759 * is examined to make sure it is safe to print, otherwise it will
 3760 * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
 3761 * pointer.
 3762 */
 3763void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
 3764			 va_list ap)
 3765{
 3766	const char *p = fmt;
 3767	const char *str;
 3768	int i, j;
 3769
 3770	if (WARN_ON_ONCE(!fmt))
 3771		return;
 3772
 3773	if (static_branch_unlikely(&trace_no_verify))
 3774		goto print;
 3775
 3776	/* Don't bother checking when doing a ftrace_dump() */
 3777	if (iter->fmt == static_fmt_buf)
 3778		goto print;
 3779
 3780	while (*p) {
 3781		bool star = false;
 3782		int len = 0;
 3783
 3784		j = 0;
 3785
 3786		/* We only care about %s and variants */
 3787		for (i = 0; p[i]; i++) {
 3788			if (i + 1 >= iter->fmt_size) {
 3789				/*
 3790				 * If we can't expand the copy buffer,
 3791				 * just print it.
 3792				 */
 3793				if (!trace_iter_expand_format(iter))
 3794					goto print;
 3795			}
 3796
 3797			if (p[i] == '\\' && p[i+1]) {
 3798				i++;
 3799				continue;
 3800			}
 3801			if (p[i] == '%') {
 3802				/* Need to test cases like %08.*s */
 3803				for (j = 1; p[i+j]; j++) {
 3804					if (isdigit(p[i+j]) ||
 3805					    p[i+j] == '.')
 3806						continue;
 3807					if (p[i+j] == '*') {
 3808						star = true;
 3809						continue;
 3810					}
 3811					break;
 3812				}
 3813				if (p[i+j] == 's')
 3814					break;
 3815				star = false;
 3816			}
 3817			j = 0;
 3818		}
 3819		/* If no %s found then just print normally */
 3820		if (!p[i])
 3821			break;
 3822
 3823		/* Copy up to the %s, and print that */
 3824		strncpy(iter->fmt, p, i);
 3825		iter->fmt[i] = '\0';
 3826		trace_seq_vprintf(&iter->seq, iter->fmt, ap);
 3827
 3828		if (star)
 3829			len = va_arg(ap, int);
 3830
 3831		/* The ap now points to the string data of the %s */
 3832		str = va_arg(ap, const char *);
 3833
 3834		/*
 3835		 * If you hit this warning, it is likely that the
 3836		 * trace event in question used %s on a string that
 3837		 * was saved at the time of the event, but may not be
 3838		 * around when the trace is read. Use __string(),
 3839		 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
 3840		 * instead. See samples/trace_events/trace-events-sample.h
 3841		 * for reference.
 3842		 */
 3843		if (WARN_ONCE(!trace_safe_str(iter, str),
 3844			      "fmt: '%s' current_buffer: '%s'",
 3845			      fmt, show_buffer(&iter->seq))) {
 3846			int ret;
 3847
 3848			/* Try to safely read the string */
 3849			if (star) {
 3850				if (len + 1 > iter->fmt_size)
 3851					len = iter->fmt_size - 1;
 3852				if (len < 0)
 3853					len = 0;
 3854				ret = copy_from_kernel_nofault(iter->fmt, str, len);
 3855				iter->fmt[len] = 0;
 3856				star = false;
 3857			} else {
 3858				ret = strncpy_from_kernel_nofault(iter->fmt, str,
 3859								  iter->fmt_size);
 3860			}
 3861			if (ret < 0)
 3862				trace_seq_printf(&iter->seq, "(0x%px)", str);
 3863			else
 3864				trace_seq_printf(&iter->seq, "(0x%px:%s)",
 3865						 str, iter->fmt);
 3866			str = "[UNSAFE-MEMORY]";
 3867			strcpy(iter->fmt, "%s");
 3868		} else {
 3869			strncpy(iter->fmt, p + i, j + 1);
 3870			iter->fmt[j+1] = '\0';
 3871		}
 3872		if (star)
 3873			trace_seq_printf(&iter->seq, iter->fmt, len, str);
 3874		else
 3875			trace_seq_printf(&iter->seq, iter->fmt, str);
 3876
 3877		p += i + j + 1;
 3878	}
 3879 print:
 3880	if (*p)
 3881		trace_seq_vprintf(&iter->seq, p, ap);
 3882}
 3883
 3884const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
 3885{
 3886	const char *p, *new_fmt;
 3887	char *q;
 3888
 3889	if (WARN_ON_ONCE(!fmt))
 3890		return fmt;
 3891
 3892	if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
 3893		return fmt;
 3894
 3895	p = fmt;
 3896	new_fmt = q = iter->fmt;
 3897	while (*p) {
 3898		if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
 3899			if (!trace_iter_expand_format(iter))
 3900				return fmt;
 3901
 3902			q += iter->fmt - new_fmt;
 3903			new_fmt = iter->fmt;
 3904		}
 3905
 3906		*q++ = *p++;
 3907
 3908		/* Replace %p with %px */
 3909		if (p[-1] == '%') {
 3910			if (p[0] == '%') {
 3911				*q++ = *p++;
 3912			} else if (p[0] == 'p' && !isalnum(p[1])) {
 3913				*q++ = *p++;
 3914				*q++ = 'x';
 3915			}
 3916		}
 3917	}
 3918	*q = '\0';
 3919
 3920	return new_fmt;
 3921}
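
/*
 * Editor's note (worked example of the rewrite above): with hash-ptr
 * disabled, a format such as
 *     "addr=%p name=%ps literal=%%p"
 * is rewritten to
 *     "addr=%px name=%ps literal=%%p"
 * -- only a bare %p (not followed by an alphanumeric extension and not
 * part of an escaped %%) gains the 'x'.
 */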
 3922
 3923#define STATIC_TEMP_BUF_SIZE	128
 3924static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
 3925
 3926/* Find the next real entry, without updating the iterator itself */
 3927struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 3928					  int *ent_cpu, u64 *ent_ts)
 3929{
 3930	/* __find_next_entry will reset ent_size */
 3931	int ent_size = iter->ent_size;
 3932	struct trace_entry *entry;
 3933
 3934	/*
 3935	 * If called from ftrace_dump(), then the iter->temp buffer
 3936	 * will be the static_temp_buf and not created from kmalloc.
 3937	 * If the entry size is greater than the buffer, we can
 3938	 * not save it. Just return NULL in that case. This is only
 3939	 * used to add markers when two consecutive events' time
 3940	 * stamps have a large delta. See trace_print_lat_context()
 3941	 */
 3942	if (iter->temp == static_temp_buf &&
 3943	    STATIC_TEMP_BUF_SIZE < ent_size)
 3944		return NULL;
 3945
 3946	/*
 3947	 * The __find_next_entry() may call peek_next_entry(), which may
 3948	 * call ring_buffer_peek() that may make the contents of iter->ent
 3949	 * undefined. Need to copy iter->ent now.
 3950	 */
 3951	if (iter->ent && iter->ent != iter->temp) {
 3952		if ((!iter->temp || iter->temp_size < iter->ent_size) &&
 3953		    !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
 3954			void *temp;
 3955			temp = kmalloc(iter->ent_size, GFP_KERNEL);
 3956			if (!temp)
 3957				return NULL;
 3958			kfree(iter->temp);
 3959			iter->temp = temp;
 3960			iter->temp_size = iter->ent_size;
 3961		}
 3962		memcpy(iter->temp, iter->ent, iter->ent_size);
 3963		iter->ent = iter->temp;
 3964	}
 3965	entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
 3966	/* Put back the original ent_size */
 3967	iter->ent_size = ent_size;
 3968
 3969	return entry;
 3970}
 3971
 3972/* Find the next real entry, and increment the iterator to the next entry */
 3973void *trace_find_next_entry_inc(struct trace_iterator *iter)
 3974{
 3975	iter->ent = __find_next_entry(iter, &iter->cpu,
 3976				      &iter->lost_events, &iter->ts);
 3977
 3978	if (iter->ent)
 3979		trace_iterator_increment(iter);
 3980
 3981	return iter->ent ? iter : NULL;
 3982}
 3983
 3984static void trace_consume(struct trace_iterator *iter)
 3985{
 3986	ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
 3987			    &iter->lost_events);
 3988}
 3989
 3990static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 3991{
 3992	struct trace_iterator *iter = m->private;
 3993	int i = (int)*pos;
 3994	void *ent;
 3995
 3996	WARN_ON_ONCE(iter->leftover);
 3997
 3998	(*pos)++;
 3999
 4000	/* can't go backwards */
 4001	if (iter->idx > i)
 4002		return NULL;
 4003
 4004	if (iter->idx < 0)
 4005		ent = trace_find_next_entry_inc(iter);
 4006	else
 4007		ent = iter;
 4008
 4009	while (ent && iter->idx < i)
 4010		ent = trace_find_next_entry_inc(iter);
 4011
 4012	iter->pos = *pos;
 4013
 4014	return ent;
 4015}
 4016
 4017void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 4018{
 4019	struct ring_buffer_iter *buf_iter;
 4020	unsigned long entries = 0;
 4021	u64 ts;
 4022
 4023	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
 4024
 4025	buf_iter = trace_buffer_iter(iter, cpu);
 4026	if (!buf_iter)
 4027		return;
 4028
 4029	ring_buffer_iter_reset(buf_iter);
 4030
 4031	/*
  4032	 * With the max latency tracers, it is possible that a reset
  4033	 * never took place on a CPU. This is evidenced by the
  4034	 * timestamp being before the start of the buffer.
 4035	 */
 4036	while (ring_buffer_iter_peek(buf_iter, &ts)) {
 4037		if (ts >= iter->array_buffer->time_start)
 4038			break;
 4039		entries++;
 4040		ring_buffer_iter_advance(buf_iter);
 4041	}
 4042
 4043	per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
 4044}
 4045
 4046/*
  4047 * The current tracer is copied to avoid taking a global lock
  4048 * all around.
 4049 */
 4050static void *s_start(struct seq_file *m, loff_t *pos)
 4051{
 4052	struct trace_iterator *iter = m->private;
 4053	struct trace_array *tr = iter->tr;
 4054	int cpu_file = iter->cpu_file;
 4055	void *p = NULL;
 4056	loff_t l = 0;
 4057	int cpu;
 4058
 4059	/*
  4060	 * Copy the tracer to avoid using a global lock all around.
  4061	 * iter->trace is a copy of current_trace; the pointer to the
  4062	 * name may be compared instead of using strcmp(), as iter->trace->name
 4063	 * will point to the same string as current_trace->name.
 4064	 */
 4065	mutex_lock(&trace_types_lock);
 4066	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
 4067		*iter->trace = *tr->current_trace;
 4068	mutex_unlock(&trace_types_lock);
 4069
 4070#ifdef CONFIG_TRACER_MAX_TRACE
 4071	if (iter->snapshot && iter->trace->use_max_tr)
 4072		return ERR_PTR(-EBUSY);
 4073#endif
 4074
 4075	if (*pos != iter->pos) {
 4076		iter->ent = NULL;
 4077		iter->cpu = 0;
 4078		iter->idx = -1;
 4079
 4080		if (cpu_file == RING_BUFFER_ALL_CPUS) {
 4081			for_each_tracing_cpu(cpu)
 4082				tracing_iter_reset(iter, cpu);
 4083		} else
 4084			tracing_iter_reset(iter, cpu_file);
 4085
 4086		iter->leftover = 0;
 4087		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 4088			;
 4089
 4090	} else {
 4091		/*
 4092		 * If we overflowed the seq_file before, then we want
 4093		 * to just reuse the trace_seq buffer again.
 4094		 */
 4095		if (iter->leftover)
 4096			p = iter;
 4097		else {
 4098			l = *pos - 1;
 4099			p = s_next(m, p, &l);
 4100		}
 4101	}
 4102
 4103	trace_event_read_lock();
 4104	trace_access_lock(cpu_file);
 4105	return p;
 4106}
 4107
 4108static void s_stop(struct seq_file *m, void *p)
 4109{
 4110	struct trace_iterator *iter = m->private;
 4111
 4112#ifdef CONFIG_TRACER_MAX_TRACE
 4113	if (iter->snapshot && iter->trace->use_max_tr)
 4114		return;
 4115#endif
 4116
 4117	trace_access_unlock(iter->cpu_file);
 4118	trace_event_read_unlock();
 4119}
 4120
 4121static void
 4122get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
 4123		      unsigned long *entries, int cpu)
 4124{
 4125	unsigned long count;
 4126
 4127	count = ring_buffer_entries_cpu(buf->buffer, cpu);
 4128	/*
 4129	 * If this buffer has skipped entries, then we hold all
 4130	 * entries for the trace and we need to ignore the
 4131	 * ones before the time stamp.
 4132	 */
 4133	if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
 4134		count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
 4135		/* total is the same as the entries */
 4136		*total = count;
 4137	} else
 4138		*total = count +
 4139			ring_buffer_overrun_cpu(buf->buffer, cpu);
 4140	*entries = count;
 4141}
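
/*
 * Editor's note (worked example for the accounting above): if a CPU wrote
 * 1500 events but its buffer only retained 1000, ring_buffer_entries_cpu()
 * reports 1000 and ring_buffer_overrun_cpu() reports 500 overwritten
 * events, so *entries = 1000 and *total = 1500 (unless the skipped_entries
 * path applies).
 */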
 4142
 4143static void
 4144get_total_entries(struct array_buffer *buf,
 4145		  unsigned long *total, unsigned long *entries)
 4146{
 4147	unsigned long t, e;
 4148	int cpu;
 4149
 4150	*total = 0;
 4151	*entries = 0;
 4152
 4153	for_each_tracing_cpu(cpu) {
 4154		get_total_entries_cpu(buf, &t, &e, cpu);
 4155		*total += t;
 4156		*entries += e;
 4157	}
 4158}
 4159
 4160unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
 4161{
 4162	unsigned long total, entries;
 4163
 4164	if (!tr)
 4165		tr = &global_trace;
 4166
 4167	get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
 4168
 4169	return entries;
 4170}
 4171
 4172unsigned long trace_total_entries(struct trace_array *tr)
 4173{
 4174	unsigned long total, entries;
 4175
 4176	if (!tr)
 4177		tr = &global_trace;
 4178
 4179	get_total_entries(&tr->array_buffer, &total, &entries);
 4180
 4181	return entries;
 4182}
 4183
 4184static void print_lat_help_header(struct seq_file *m)
 4185{
 4186	seq_puts(m, "#                    _------=> CPU#            \n"
 4187		    "#                   / _-----=> irqs-off        \n"
 4188		    "#                  | / _----=> need-resched    \n"
 4189		    "#                  || / _---=> hardirq/softirq \n"
 4190		    "#                  ||| / _--=> preempt-depth   \n"
 4191		    "#                  |||| /     delay            \n"
 4192		    "#  cmd     pid     ||||| time  |   caller      \n"
 4193		    "#     \\   /        |||||  \\    |   /         \n");
 4194}
 4195
 4196static void print_event_info(struct array_buffer *buf, struct seq_file *m)
 4197{
 4198	unsigned long total;
 4199	unsigned long entries;
 4200
 4201	get_total_entries(buf, &total, &entries);
 4202	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
 4203		   entries, total, num_online_cpus());
 4204	seq_puts(m, "#\n");
 4205}
 4206
 4207static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
 4208				   unsigned int flags)
 4209{
 4210	bool tgid = flags & TRACE_ITER_RECORD_TGID;
 4211
 4212	print_event_info(buf, m);
 4213
 4214	seq_printf(m, "#           TASK-PID    %s CPU#     TIMESTAMP  FUNCTION\n", tgid ? "   TGID   " : "");
 4215	seq_printf(m, "#              | |      %s   |         |         |\n",      tgid ? "     |    " : "");
 4216}
 4217
 4218static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
 4219				       unsigned int flags)
 4220{
 4221	bool tgid = flags & TRACE_ITER_RECORD_TGID;
 4222	const char *space = "            ";
 4223	int prec = tgid ? 12 : 2;
 4224
 4225	print_event_info(buf, m);
 4226
 4227	seq_printf(m, "#                            %.*s  _-----=> irqs-off\n", prec, space);
 4228	seq_printf(m, "#                            %.*s / _----=> need-resched\n", prec, space);
 4229	seq_printf(m, "#                            %.*s| / _---=> hardirq/softirq\n", prec, space);
 4230	seq_printf(m, "#                            %.*s|| / _--=> preempt-depth\n", prec, space);
 4231	seq_printf(m, "#                            %.*s||| /     delay\n", prec, space);
 4232	seq_printf(m, "#           TASK-PID  %.*s CPU#  ||||   TIMESTAMP  FUNCTION\n", prec, "     TGID   ");
 4233	seq_printf(m, "#              | |    %.*s   |   ||||      |         |\n", prec, "       |    ");
 4234}
 4235
 4236void
 4237print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 4238{
 4239	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
 4240	struct array_buffer *buf = iter->array_buffer;
 4241	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
 4242	struct tracer *type = iter->trace;
 4243	unsigned long entries;
 4244	unsigned long total;
 4245	const char *name = "preemption";
 4246
 4247	name = type->name;
 4248
 4249	get_total_entries(buf, &total, &entries);
 4250
 4251	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
 4252		   name, UTS_RELEASE);
 4253	seq_puts(m, "# -----------------------------------"
 4254		 "---------------------------------\n");
 4255	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
 4256		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
 4257		   nsecs_to_usecs(data->saved_latency),
 4258		   entries,
 4259		   total,
 4260		   buf->cpu,
 4261#if defined(CONFIG_PREEMPT_NONE)
 4262		   "server",
 4263#elif defined(CONFIG_PREEMPT_VOLUNTARY)
 4264		   "desktop",
 4265#elif defined(CONFIG_PREEMPT)
 4266		   "preempt",
 4267#elif defined(CONFIG_PREEMPT_RT)
 4268		   "preempt_rt",
 4269#else
 4270		   "unknown",
 4271#endif
 4272		   /* These are reserved for later use */
 4273		   0, 0, 0, 0);
 4274#ifdef CONFIG_SMP
 4275	seq_printf(m, " #P:%d)\n", num_online_cpus());
 4276#else
 4277	seq_puts(m, ")\n");
 4278#endif
 4279	seq_puts(m, "#    -----------------\n");
 4280	seq_printf(m, "#    | task: %.16s-%d "
 4281		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
 4282		   data->comm, data->pid,
 4283		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
 4284		   data->policy, data->rt_priority);
 4285	seq_puts(m, "#    -----------------\n");
 4286
 4287	if (data->critical_start) {
 4288		seq_puts(m, "#  => started at: ");
 4289		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
 4290		trace_print_seq(m, &iter->seq);
 4291		seq_puts(m, "\n#  => ended at:   ");
 4292		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
 4293		trace_print_seq(m, &iter->seq);
 4294		seq_puts(m, "\n#\n");
 4295	}
 4296
 4297	seq_puts(m, "#\n");
 4298}
 4299
 4300static void test_cpu_buff_start(struct trace_iterator *iter)
 4301{
 4302	struct trace_seq *s = &iter->seq;
 4303	struct trace_array *tr = iter->tr;
 4304
 4305	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
 4306		return;
 4307
 4308	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
 4309		return;
 4310
 4311	if (cpumask_available(iter->started) &&
 4312	    cpumask_test_cpu(iter->cpu, iter->started))
 4313		return;
 4314
 4315	if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
 4316		return;
 4317
 4318	if (cpumask_available(iter->started))
 4319		cpumask_set_cpu(iter->cpu, iter->started);
 4320
 4321	/* Don't print started cpu buffer for the first entry of the trace */
 4322	if (iter->idx > 1)
 4323		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
 4324				iter->cpu);
 4325}
 4326
 4327static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 4328{
 4329	struct trace_array *tr = iter->tr;
 4330	struct trace_seq *s = &iter->seq;
 4331	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
 4332	struct trace_entry *entry;
 4333	struct trace_event *event;
 4334
 4335	entry = iter->ent;
 4336
 4337	test_cpu_buff_start(iter);
 4338
 4339	event = ftrace_find_event(entry->type);
 4340
 4341	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 4342		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
 4343			trace_print_lat_context(iter);
 4344		else
 4345			trace_print_context(iter);
 4346	}
 4347
 4348	if (trace_seq_has_overflowed(s))
 4349		return TRACE_TYPE_PARTIAL_LINE;
 4350
 4351	if (event)
 4352		return event->funcs->trace(iter, sym_flags, event);
 4353
 4354	trace_seq_printf(s, "Unknown type %d\n", entry->type);
 4355
 4356	return trace_handle_return(s);
 4357}
 4358
 4359static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 4360{
 4361	struct trace_array *tr = iter->tr;
 4362	struct trace_seq *s = &iter->seq;
 4363	struct trace_entry *entry;
 4364	struct trace_event *event;
 4365
 4366	entry = iter->ent;
 4367
 4368	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
 4369		trace_seq_printf(s, "%d %d %llu ",
 4370				 entry->pid, iter->cpu, iter->ts);
 4371
 4372	if (trace_seq_has_overflowed(s))
 4373		return TRACE_TYPE_PARTIAL_LINE;
 4374
 4375	event = ftrace_find_event(entry->type);
 4376	if (event)
 4377		return event->funcs->raw(iter, 0, event);
 4378
 4379	trace_seq_printf(s, "%d ?\n", entry->type);
 4380
 4381	return trace_handle_return(s);
 4382}
 4383
 4384static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 4385{
 4386	struct trace_array *tr = iter->tr;
 4387	struct trace_seq *s = &iter->seq;
 4388	unsigned char newline = '\n';
 4389	struct trace_entry *entry;
 4390	struct trace_event *event;
 4391
 4392	entry = iter->ent;
 4393
 4394	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 4395		SEQ_PUT_HEX_FIELD(s, entry->pid);
 4396		SEQ_PUT_HEX_FIELD(s, iter->cpu);
 4397		SEQ_PUT_HEX_FIELD(s, iter->ts);
 4398		if (trace_seq_has_overflowed(s))
 4399			return TRACE_TYPE_PARTIAL_LINE;
 4400	}
 4401
 4402	event = ftrace_find_event(entry->type);
 4403	if (event) {
 4404		enum print_line_t ret = event->funcs->hex(iter, 0, event);
 4405		if (ret != TRACE_TYPE_HANDLED)
 4406			return ret;
 4407	}
 4408
 4409	SEQ_PUT_FIELD(s, newline);
 4410
 4411	return trace_handle_return(s);
 4412}
 4413
 4414static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 4415{
 4416	struct trace_array *tr = iter->tr;
 4417	struct trace_seq *s = &iter->seq;
 4418	struct trace_entry *entry;
 4419	struct trace_event *event;
 4420
 4421	entry = iter->ent;
 4422
 4423	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 4424		SEQ_PUT_FIELD(s, entry->pid);
 4425		SEQ_PUT_FIELD(s, iter->cpu);
 4426		SEQ_PUT_FIELD(s, iter->ts);
 4427		if (trace_seq_has_overflowed(s))
 4428			return TRACE_TYPE_PARTIAL_LINE;
 4429	}
 4430
 4431	event = ftrace_find_event(entry->type);
 4432	return event ? event->funcs->binary(iter, 0, event) :
 4433		TRACE_TYPE_HANDLED;
 4434}
 4435
 4436int trace_empty(struct trace_iterator *iter)
 4437{
 4438	struct ring_buffer_iter *buf_iter;
 4439	int cpu;
 4440
 4441	/* If we are looking at one CPU buffer, only check that one */
 4442	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
 4443		cpu = iter->cpu_file;
 4444		buf_iter = trace_buffer_iter(iter, cpu);
 4445		if (buf_iter) {
 4446			if (!ring_buffer_iter_empty(buf_iter))
 4447				return 0;
 4448		} else {
 4449			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
 4450				return 0;
 4451		}
 4452		return 1;
 4453	}
 4454
 4455	for_each_tracing_cpu(cpu) {
 4456		buf_iter = trace_buffer_iter(iter, cpu);
 4457		if (buf_iter) {
 4458			if (!ring_buffer_iter_empty(buf_iter))
 4459				return 0;
 4460		} else {
 4461			if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
 4462				return 0;
 4463		}
 4464	}
 4465
 4466	return 1;
 4467}
 4468
 4469/*  Called with trace_event_read_lock() held. */
 4470enum print_line_t print_trace_line(struct trace_iterator *iter)
 4471{
 4472	struct trace_array *tr = iter->tr;
 4473	unsigned long trace_flags = tr->trace_flags;
 4474	enum print_line_t ret;
 4475
 4476	if (iter->lost_events) {
 4477		if (iter->lost_events == (unsigned long)-1)
 4478			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
 4479					 iter->cpu);
 4480		else
 4481			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
 4482					 iter->cpu, iter->lost_events);
 4483		if (trace_seq_has_overflowed(&iter->seq))
 4484			return TRACE_TYPE_PARTIAL_LINE;
 4485	}
 4486
 4487	if (iter->trace && iter->trace->print_line) {
 4488		ret = iter->trace->print_line(iter);
 4489		if (ret != TRACE_TYPE_UNHANDLED)
 4490			return ret;
 4491	}
 4492
 4493	if (iter->ent->type == TRACE_BPUTS &&
 4494			trace_flags & TRACE_ITER_PRINTK &&
 4495			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
 4496		return trace_print_bputs_msg_only(iter);
 4497
 4498	if (iter->ent->type == TRACE_BPRINT &&
 4499			trace_flags & TRACE_ITER_PRINTK &&
 4500			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
 4501		return trace_print_bprintk_msg_only(iter);
 4502
 4503	if (iter->ent->type == TRACE_PRINT &&
 4504			trace_flags & TRACE_ITER_PRINTK &&
 4505			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
 4506		return trace_print_printk_msg_only(iter);
 4507
 4508	if (trace_flags & TRACE_ITER_BIN)
 4509		return print_bin_fmt(iter);
 4510
 4511	if (trace_flags & TRACE_ITER_HEX)
 4512		return print_hex_fmt(iter);
 4513
 4514	if (trace_flags & TRACE_ITER_RAW)
 4515		return print_raw_fmt(iter);
 4516
 4517	return print_trace_fmt(iter);
 4518}
 4519
 4520void trace_latency_header(struct seq_file *m)
 4521{
 4522	struct trace_iterator *iter = m->private;
 4523	struct trace_array *tr = iter->tr;
 4524
 4525	/* print nothing if the buffers are empty */
 4526	if (trace_empty(iter))
 4527		return;
 4528
 4529	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
 4530		print_trace_header(m, iter);
 4531
 4532	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
 4533		print_lat_help_header(m);
 4534}
 4535
 4536void trace_default_header(struct seq_file *m)
 4537{
 4538	struct trace_iterator *iter = m->private;
 4539	struct trace_array *tr = iter->tr;
 4540	unsigned long trace_flags = tr->trace_flags;
 4541
 4542	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
 4543		return;
 4544
 4545	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
 4546		/* print nothing if the buffers are empty */
 4547		if (trace_empty(iter))
 4548			return;
 4549		print_trace_header(m, iter);
 4550		if (!(trace_flags & TRACE_ITER_VERBOSE))
 4551			print_lat_help_header(m);
 4552	} else {
 4553		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
 4554			if (trace_flags & TRACE_ITER_IRQ_INFO)
 4555				print_func_help_header_irq(iter->array_buffer,
 4556							   m, trace_flags);
 4557			else
 4558				print_func_help_header(iter->array_buffer, m,
 4559						       trace_flags);
 4560		}
 4561	}
 4562}
 4563
 4564static void test_ftrace_alive(struct seq_file *m)
 4565{
 4566	if (!ftrace_is_dead())
 4567		return;
 4568	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
 4569		    "#          MAY BE MISSING FUNCTION EVENTS\n");
 4570}
 4571
 4572#ifdef CONFIG_TRACER_MAX_TRACE
 4573static void show_snapshot_main_help(struct seq_file *m)
 4574{
 4575	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
 4576		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
 4577		    "#                      Takes a snapshot of the main buffer.\n"
 4578		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
  4579		    "#                      (Doesn't have to be '2'; works with any number that\n"
 4580		    "#                       is not a '0' or '1')\n");
 4581}
 4582
 4583static void show_snapshot_percpu_help(struct seq_file *m)
 4584{
 4585	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
 4586#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
 4587	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
 4588		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
 4589#else
 4590	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
 4591		    "#                     Must use main snapshot file to allocate.\n");
 4592#endif
 4593	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
  4594		    "#                      (Doesn't have to be '2'; works with any number that\n"
 4595		    "#                       is not a '0' or '1')\n");
 4596}
 4597
 4598static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
 4599{
 4600	if (iter->tr->allocated_snapshot)
 4601		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
 4602	else
 4603		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
 4604
 4605	seq_puts(m, "# Snapshot commands:\n");
 4606	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
 4607		show_snapshot_main_help(m);
 4608	else
 4609		show_snapshot_percpu_help(m);
 4610}
 4611#else
 4612/* Should never be called */
 4613static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
 4614#endif
 4615
 4616static int s_show(struct seq_file *m, void *v)
 4617{
 4618	struct trace_iterator *iter = v;
 4619	int ret;
 4620
 4621	if (iter->ent == NULL) {
 4622		if (iter->tr) {
 4623			seq_printf(m, "# tracer: %s\n", iter->trace->name);
 4624			seq_puts(m, "#\n");
 4625			test_ftrace_alive(m);
 4626		}
 4627		if (iter->snapshot && trace_empty(iter))
 4628			print_snapshot_help(m, iter);
 4629		else if (iter->trace && iter->trace->print_header)
 4630			iter->trace->print_header(m);
 4631		else
 4632			trace_default_header(m);
 4633
 4634	} else if (iter->leftover) {
 4635		/*
 4636		 * If we filled the seq_file buffer earlier, we
 4637		 * want to just show it now.
 4638		 */
 4639		ret = trace_print_seq(m, &iter->seq);
 4640
 4641		/* ret should this time be zero, but you never know */
 4642		iter->leftover = ret;
 4643
 4644	} else {
 4645		print_trace_line(iter);
 4646		ret = trace_print_seq(m, &iter->seq);
 4647		/*
 4648		 * If we overflow the seq_file buffer, then it will
 4649		 * ask us for this data again at start up.
 4650		 * Use that instead.
 4651		 *  ret is 0 if seq_file write succeeded.
 4652		 *        -1 otherwise.
 4653		 */
 4654		iter->leftover = ret;
 4655	}
 4656
 4657	return 0;
 4658}
 4659
 4660/*
 4661 * Should be used after trace_array_get(), trace_types_lock
 4662 * ensures that i_cdev was already initialized.
 4663 */
 4664static inline int tracing_get_cpu(struct inode *inode)
 4665{
 4666	if (inode->i_cdev) /* See trace_create_cpu_file() */
 4667		return (long)inode->i_cdev - 1;
 4668	return RING_BUFFER_ALL_CPUS;
 4669}
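
/*
 * Editor's illustration (not part of this file): the inverse mapping
 * implied by tracing_get_cpu() -- the per-CPU file creation side would
 * store cpu + 1 in i_cdev, so a NULL i_cdev (the top-level files) maps to
 * RING_BUFFER_ALL_CPUS. This is only a sketch; see trace_create_cpu_file()
 * for the real code. The helper name is hypothetical.
 */
#if 0
static void example_set_cpu_on_inode(struct inode *inode, long cpu)
{
	inode->i_cdev = (void *)(cpu + 1);
}
#endif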
 4670
 4671static const struct seq_operations tracer_seq_ops = {
 4672	.start		= s_start,
 4673	.next		= s_next,
 4674	.stop		= s_stop,
 4675	.show		= s_show,
 4676};
 4677
 4678static struct trace_iterator *
 4679__tracing_open(struct inode *inode, struct file *file, bool snapshot)
 4680{
 4681	struct trace_array *tr = inode->i_private;
 4682	struct trace_iterator *iter;
 4683	int cpu;
 4684
 4685	if (tracing_disabled)
 4686		return ERR_PTR(-ENODEV);
 4687
 4688	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
 4689	if (!iter)
 4690		return ERR_PTR(-ENOMEM);
 4691
 4692	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
 4693				    GFP_KERNEL);
 4694	if (!iter->buffer_iter)
 4695		goto release;
 4696
 4697	/*
 4698	 * trace_find_next_entry() may need to save off iter->ent.
 4699	 * It will place it into the iter->temp buffer. As most
  4700	 * events are less than 128 bytes, allocate a buffer of that size.
 4701	 * If one is greater, then trace_find_next_entry() will
 4702	 * allocate a new buffer to adjust for the bigger iter->ent.
 4703	 * It's not critical if it fails to get allocated here.
 4704	 */
 4705	iter->temp = kmalloc(128, GFP_KERNEL);
 4706	if (iter->temp)
 4707		iter->temp_size = 128;
 4708
 4709	/*
 4710	 * trace_event_printf() may need to modify given format
 4711	 * string to replace %p with %px so that it shows real address
  4712	 * instead of a hash value. However, that is only needed for event
  4713	 * tracing; other tracers may not need it. Defer the allocation
  4714	 * until it is needed.
 4715	 */
 4716	iter->fmt = NULL;
 4717	iter->fmt_size = 0;
 4718
 4719	/*
 4720	 * We make a copy of the current tracer to avoid concurrent
 4721	 * changes on it while we are reading.
 4722	 */
 4723	mutex_lock(&trace_types_lock);
 4724	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
 4725	if (!iter->trace)
 4726		goto fail;
 4727
 4728	*iter->trace = *tr->current_trace;
 4729
 4730	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
 4731		goto fail;
 4732
 4733	iter->tr = tr;
 4734
 4735#ifdef CONFIG_TRACER_MAX_TRACE
 4736	/* Currently only the top directory has a snapshot */
 4737	if (tr->current_trace->print_max || snapshot)
 4738		iter->array_buffer = &tr->max_buffer;
 4739	else
 4740#endif
 4741		iter->array_buffer = &tr->array_buffer;
 4742	iter->snapshot = snapshot;
 4743	iter->pos = -1;
 4744	iter->cpu_file = tracing_get_cpu(inode);
 4745	mutex_init(&iter->mutex);
 4746
 4747	/* Notify the tracer early; before we stop tracing. */
 4748	if (iter->trace->open)
 4749		iter->trace->open(iter);
 4750
 4751	/* Annotate start of buffers if we had overruns */
 4752	if (ring_buffer_overruns(iter->array_buffer->buffer))
 4753		iter->iter_flags |= TRACE_FILE_ANNOTATE;
 4754
 4755	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
 4756	if (trace_clocks[tr->clock_id].in_ns)
 4757		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 4758
 4759	/*
 4760	 * If pause-on-trace is enabled, then stop the trace while
 4761	 * dumping, unless this is the "snapshot" file
 4762	 */
 4763	if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
 4764		tracing_stop_tr(tr);
 4765
 4766	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
 4767		for_each_tracing_cpu(cpu) {
 4768			iter->buffer_iter[cpu] =
 4769				ring_buffer_read_prepare(iter->array_buffer->buffer,
 4770							 cpu, GFP_KERNEL);
 4771		}
 4772		ring_buffer_read_prepare_sync();
 4773		for_each_tracing_cpu(cpu) {
 4774			ring_buffer_read_start(iter->buffer_iter[cpu]);
 4775			tracing_iter_reset(iter, cpu);
 4776		}
 4777	} else {
 4778		cpu = iter->cpu_file;
 4779		iter->buffer_iter[cpu] =
 4780			ring_buffer_read_prepare(iter->array_buffer->buffer,
 4781						 cpu, GFP_KERNEL);
 4782		ring_buffer_read_prepare_sync();
 4783		ring_buffer_read_start(iter->buffer_iter[cpu]);
 4784		tracing_iter_reset(iter, cpu);
 4785	}
 4786
 4787	mutex_unlock(&trace_types_lock);
 4788
 4789	return iter;
 4790
 4791 fail:
 4792	mutex_unlock(&trace_types_lock);
 4793	kfree(iter->trace);
 4794	kfree(iter->temp);
 4795	kfree(iter->buffer_iter);
 4796release:
 4797	seq_release_private(inode, file);
 4798	return ERR_PTR(-ENOMEM);
 4799}
 4800
 4801int tracing_open_generic(struct inode *inode, struct file *filp)
 4802{
 4803	int ret;
 4804
 4805	ret = tracing_check_open_get_tr(NULL);
 4806	if (ret)
 4807		return ret;
 4808
 4809	filp->private_data = inode->i_private;
 4810	return 0;
 4811}
 4812
 4813bool tracing_is_disabled(void)
 4814{
  4815	return tracing_disabled ? true : false;
 4816}
 4817
 4818/*
 4819 * Open and update trace_array ref count.
 4820 * Must have the current trace_array passed to it.
 4821 */
 4822int tracing_open_generic_tr(struct inode *inode, struct file *filp)
 4823{
 4824	struct trace_array *tr = inode->i_private;
 4825	int ret;
 4826
 4827	ret = tracing_check_open_get_tr(tr);
 4828	if (ret)
 4829		return ret;
 4830
 4831	filp->private_data = inode->i_private;
 4832
 4833	return 0;
 4834}
 4835
 4836static int tracing_release(struct inode *inode, struct file *file)
 4837{
 4838	struct trace_array *tr = inode->i_private;
 4839	struct seq_file *m = file->private_data;
 4840	struct trace_iterator *iter;
 4841	int cpu;
 4842
 4843	if (!(file->f_mode & FMODE_READ)) {
 4844		trace_array_put(tr);
 4845		return 0;
 4846	}
 4847
 4848	/* Writes do not use seq_file */
 4849	iter = m->private;
 4850	mutex_lock(&trace_types_lock);
 4851
 4852	for_each_tracing_cpu(cpu) {
 4853		if (iter->buffer_iter[cpu])
 4854			ring_buffer_read_finish(iter->buffer_iter[cpu]);
 4855	}
 4856
 4857	if (iter->trace && iter->trace->close)
 4858		iter->trace->close(iter);
 4859
 4860	if (!iter->snapshot && tr->stop_count)
 4861		/* reenable tracing if it was previously enabled */
 4862		tracing_start_tr(tr);
 4863
 4864	__trace_array_put(tr);
 4865
 4866	mutex_unlock(&trace_types_lock);
 4867
 4868	mutex_destroy(&iter->mutex);
 4869	free_cpumask_var(iter->started);
 4870	kfree(iter->fmt);
 4871	kfree(iter->temp);
 4872	kfree(iter->trace);
 4873	kfree(iter->buffer_iter);
 4874	seq_release_private(inode, file);
 4875
 4876	return 0;
 4877}
 4878
 4879static int tracing_release_generic_tr(struct inode *inode, struct file *file)
 4880{
 4881	struct trace_array *tr = inode->i_private;
 4882
 4883	trace_array_put(tr);
 4884	return 0;
 4885}
 4886
 4887static int tracing_single_release_tr(struct inode *inode, struct file *file)
 4888{
 4889	struct trace_array *tr = inode->i_private;
 4890
 4891	trace_array_put(tr);
 4892
 4893	return single_release(inode, file);
 4894}
 4895
 4896static int tracing_open(struct inode *inode, struct file *file)
 4897{
 4898	struct trace_array *tr = inode->i_private;
 4899	struct trace_iterator *iter;
 4900	int ret;
 4901
 4902	ret = tracing_check_open_get_tr(tr);
 4903	if (ret)
 4904		return ret;
 4905
 4906	/* If this file was open for write, then erase contents */
 4907	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 4908		int cpu = tracing_get_cpu(inode);
 4909		struct array_buffer *trace_buf = &tr->array_buffer;
 4910
 4911#ifdef CONFIG_TRACER_MAX_TRACE
 4912		if (tr->current_trace->print_max)
 4913			trace_buf = &tr->max_buffer;
 4914#endif
 4915
 4916		if (cpu == RING_BUFFER_ALL_CPUS)
 4917			tracing_reset_online_cpus(trace_buf);
 4918		else
 4919			tracing_reset_cpu(trace_buf, cpu);
 4920	}
 4921
 4922	if (file->f_mode & FMODE_READ) {
 4923		iter = __tracing_open(inode, file, false);
 4924		if (IS_ERR(iter))
 4925			ret = PTR_ERR(iter);
 4926		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 4927			iter->iter_flags |= TRACE_FILE_LAT_FMT;
 4928	}
 4929
 4930	if (ret < 0)
 4931		trace_array_put(tr);
 4932
 4933	return ret;
 4934}
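/*
 * A rough usage sketch of the open-for-write path above: opening with
 * O_TRUNC, e.g.
 *
 *   # echo > trace
 *   # echo > per_cpu/cpu0/trace
 *
 * clears the whole buffer or just CPU 0's portion before any read happens.
 */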
 4935
 4936/*
 4937 * Some tracers are not suitable for instance buffers.
 4938 * A tracer is always available for the global array (toplevel)
 4939 * or if it explicitly states that it is.
 4940 */
 4941static bool
 4942trace_ok_for_array(struct tracer *t, struct trace_array *tr)
 4943{
 4944	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
 4945}
 4946
 4947/* Find the next tracer that this trace array may use */
 4948static struct tracer *
 4949get_tracer_for_array(struct trace_array *tr, struct tracer *t)
 4950{
 4951	while (t && !trace_ok_for_array(t, tr))
 4952		t = t->next;
 4953
 4954	return t;
 4955}
 4956
 4957static void *
 4958t_next(struct seq_file *m, void *v, loff_t *pos)
 4959{
 4960	struct trace_array *tr = m->private;
 4961	struct tracer *t = v;
 4962
 4963	(*pos)++;
 4964
 4965	if (t)
 4966		t = get_tracer_for_array(tr, t->next);
 4967
 4968	return t;
 4969}
 4970
 4971static void *t_start(struct seq_file *m, loff_t *pos)
 4972{
 4973	struct trace_array *tr = m->private;
 4974	struct tracer *t;
 4975	loff_t l = 0;
 4976
 4977	mutex_lock(&trace_types_lock);
 4978
 4979	t = get_tracer_for_array(tr, trace_types);
 4980	for (; t && l < *pos; t = t_next(m, t, &l))
 4981			;
 4982
 4983	return t;
 4984}
 4985
 4986static void t_stop(struct seq_file *m, void *p)
 4987{
 4988	mutex_unlock(&trace_types_lock);
 4989}
 4990
 4991static int t_show(struct seq_file *m, void *v)
 4992{
 4993	struct tracer *t = v;
 4994
 4995	if (!t)
 4996		return 0;
 4997
 4998	seq_puts(m, t->name);
 4999	if (t->next)
 5000		seq_putc(m, ' ');
 5001	else
 5002		seq_putc(m, '\n');
 5003
 5004	return 0;
 5005}
 5006
 5007static const struct seq_operations show_traces_seq_ops = {
 5008	.start		= t_start,
 5009	.next		= t_next,
 5010	.stop		= t_stop,
 5011	.show		= t_show,
 5012};
 5013
 5014static int show_traces_open(struct inode *inode, struct file *file)
 5015{
 5016	struct trace_array *tr = inode->i_private;
 5017	struct seq_file *m;
 5018	int ret;
 5019
 5020	ret = tracing_check_open_get_tr(tr);
 5021	if (ret)
 5022		return ret;
 5023
 5024	ret = seq_open(file, &show_traces_seq_ops);
 5025	if (ret) {
 5026		trace_array_put(tr);
 5027		return ret;
 5028	}
 5029
 5030	m = file->private_data;
 5031	m->private = tr;
 5032
 5033	return 0;
 5034}
 5035
 5036static int show_traces_release(struct inode *inode, struct file *file)
 5037{
 5038	struct trace_array *tr = inode->i_private;
 5039
 5040	trace_array_put(tr);
 5041	return seq_release(inode, file);
 5042}
 5043
 5044static ssize_t
 5045tracing_write_stub(struct file *filp, const char __user *ubuf,
 5046		   size_t count, loff_t *ppos)
 5047{
 5048	return count;
 5049}
 5050
 5051loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
 5052{
 5053	int ret;
 5054
 5055	if (file->f_mode & FMODE_READ)
 5056		ret = seq_lseek(file, offset, whence);
 5057	else
 5058		file->f_pos = ret = 0;
 5059
 5060	return ret;
 5061}
 5062
 5063static const struct file_operations tracing_fops = {
 5064	.open		= tracing_open,
 5065	.read		= seq_read,
 5066	.write		= tracing_write_stub,
 5067	.llseek		= tracing_lseek,
 5068	.release	= tracing_release,
 5069};
 5070
 5071static const struct file_operations show_traces_fops = {
 5072	.open		= show_traces_open,
 5073	.read		= seq_read,
 5074	.llseek		= seq_lseek,
 5075	.release	= show_traces_release,
 5076};
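/*
 * Informally, this backs the "available_tracers" file: t_show() emits each
 * tracer name separated by spaces, so a read might return something like
 * "function_graph function nop", depending on which tracers are configured
 * in.
 */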
 5077
 5078static ssize_t
 5079tracing_cpumask_read(struct file *filp, char __user *ubuf,
 5080		     size_t count, loff_t *ppos)
 5081{
 5082	struct trace_array *tr = file_inode(filp)->i_private;
 5083	char *mask_str;
 5084	int len;
 5085
 5086	len = snprintf(NULL, 0, "%*pb\n",
 5087		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
 5088	mask_str = kmalloc(len, GFP_KERNEL);
 5089	if (!mask_str)
 5090		return -ENOMEM;
 5091
 5092	len = snprintf(mask_str, len, "%*pb\n",
 5093		       cpumask_pr_args(tr->tracing_cpumask));
 5094	if (len >= count) {
 5095		count = -EINVAL;
 5096		goto out_err;
 5097	}
 5098	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
 5099
 5100out_err:
 5101	kfree(mask_str);
 5102
 5103	return count;
 5104}
 5105
 5106int tracing_set_cpumask(struct trace_array *tr,
 5107			cpumask_var_t tracing_cpumask_new)
 5108{
 5109	int cpu;
 5110
 5111	if (!tr)
 5112		return -EINVAL;
 5113
 5114	local_irq_disable();
 5115	arch_spin_lock(&tr->max_lock);
 5116	for_each_tracing_cpu(cpu) {
 5117		/*
 5118		 * Increase/decrease the disabled counter if we are
 5119		 * about to flip a bit in the cpumask:
 5120		 */
 5121		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 5122				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 5123			atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
 5124			ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
 5125		}
 5126		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 5127				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 5128			atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
 5129			ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
 5130		}
 5131	}
 5132	arch_spin_unlock(&tr->max_lock);
 5133	local_irq_enable();
 5134
 5135	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
 5136
 5137	return 0;
 5138}
 5139
 5140static ssize_t
 5141tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 5142		      size_t count, loff_t *ppos)
 5143{
 5144	struct trace_array *tr = file_inode(filp)->i_private;
 5145	cpumask_var_t tracing_cpumask_new;
 5146	int err;
 5147
 5148	if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
 5149		return -ENOMEM;
 5150
 5151	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
 5152	if (err)
 5153		goto err_free;
 5154
 5155	err = tracing_set_cpumask(tr, tracing_cpumask_new);
 5156	if (err)
 5157		goto err_free;
 5158
 5159	free_cpumask_var(tracing_cpumask_new);
 5160
 5161	return count;
 5162
 5163err_free:
 5164	free_cpumask_var(tracing_cpumask_new);
 5165
 5166	return err;
 5167}
 5168
 5169static const struct file_operations tracing_cpumask_fops = {
 5170	.open		= tracing_open_generic_tr,
 5171	.read		= tracing_cpumask_read,
 5172	.write		= tracing_cpumask_write,
 5173	.release	= tracing_release_generic_tr,
 5174	.llseek		= generic_file_llseek,
 5175};
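/*
 * Usage sketch for the "tracing_cpumask" file handled above: the mask is
 * printed and parsed as a hex cpumask, so for instance
 *
 *   # echo 3 > tracing_cpumask
 *
 * should restrict recording to CPUs 0 and 1, disabling the ring buffer on
 * the CPUs that were removed from the mask.
 */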
 5176
 5177static int tracing_trace_options_show(struct seq_file *m, void *v)
 5178{
 5179	struct tracer_opt *trace_opts;
 5180	struct trace_array *tr = m->private;
 5181	u32 tracer_flags;
 5182	int i;
 5183
 5184	mutex_lock(&trace_types_lock);
 5185	tracer_flags = tr->current_trace->flags->val;
 5186	trace_opts = tr->current_trace->flags->opts;
 5187
 5188	for (i = 0; trace_options[i]; i++) {
 5189		if (tr->trace_flags & (1 << i))
 5190			seq_printf(m, "%s\n", trace_options[i]);
 5191		else
 5192			seq_printf(m, "no%s\n", trace_options[i]);
 5193	}
 5194
 5195	for (i = 0; trace_opts[i].name; i++) {
 5196		if (tracer_flags & trace_opts[i].bit)
 5197			seq_printf(m, "%s\n", trace_opts[i].name);
 5198		else
 5199			seq_printf(m, "no%s\n", trace_opts[i].name);
 5200	}
 5201	mutex_unlock(&trace_types_lock);
 5202
 5203	return 0;
 5204}
 5205
 5206static int __set_tracer_option(struct trace_array *tr,
 5207			       struct tracer_flags *tracer_flags,
 5208			       struct tracer_opt *opts, int neg)
 5209{
 5210	struct tracer *trace = tracer_flags->trace;
 5211	int ret;
 5212
 5213	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
 5214	if (ret)
 5215		return ret;
 5216
 5217	if (neg)
 5218		tracer_flags->val &= ~opts->bit;
 5219	else
 5220		tracer_flags->val |= opts->bit;
 5221	return 0;
 5222}
 5223
 5224/* Try to assign a tracer specific option */
 5225static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
 5226{
 5227	struct tracer *trace = tr->current_trace;
 5228	struct tracer_flags *tracer_flags = trace->flags;
 5229	struct tracer_opt *opts = NULL;
 5230	int i;
 5231
 5232	for (i = 0; tracer_flags->opts[i].name; i++) {
 5233		opts = &tracer_flags->opts[i];
 5234
 5235		if (strcmp(cmp, opts->name) == 0)
 5236			return __set_tracer_option(tr, trace->flags, opts, neg);
 5237	}
 5238
 5239	return -EINVAL;
 5240}
 5241
 5242/* Some tracers require overwrite to stay enabled */
 5243int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
 5244{
 5245	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
 5246		return -1;
 5247
 5248	return 0;
 5249}
 5250
 5251int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 5252{
 5253	int *map;
 5254
 5255	if ((mask == TRACE_ITER_RECORD_TGID) ||
 5256	    (mask == TRACE_ITER_RECORD_CMD))
 5257		lockdep_assert_held(&event_mutex);
 5258
 5259	/* do nothing if flag is already set */
 5260	if (!!(tr->trace_flags & mask) == !!enabled)
 5261		return 0;
 5262
 5263	/* Give the tracer a chance to approve the change */
 5264	if (tr->current_trace->flag_changed)
 5265		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
 5266			return -EINVAL;
 5267
 5268	if (enabled)
 5269		tr->trace_flags |= mask;
 5270	else
 5271		tr->trace_flags &= ~mask;
 5272
 5273	if (mask == TRACE_ITER_RECORD_CMD)
 5274		trace_event_enable_cmd_record(enabled);
 5275
 5276	if (mask == TRACE_ITER_RECORD_TGID) {
 5277		if (!tgid_map) {
 5278			tgid_map_max = pid_max;
 5279			map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
 5280				       GFP_KERNEL);
 5281
 5282			/*
 5283			 * Pairs with smp_load_acquire() in
 5284			 * trace_find_tgid_ptr() to ensure that if it observes
 5285			 * the tgid_map we just allocated then it also observes
 5286			 * the corresponding tgid_map_max value.
 5287			 */
 5288			smp_store_release(&tgid_map, map);
 5289		}
 5290		if (!tgid_map) {
 5291			tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
 5292			return -ENOMEM;
 5293		}
 5294
 5295		trace_event_enable_tgid_record(enabled);
 5296	}
 5297
 5298	if (mask == TRACE_ITER_EVENT_FORK)
 5299		trace_event_follow_fork(tr, enabled);
 5300
 5301	if (mask == TRACE_ITER_FUNC_FORK)
 5302		ftrace_pid_follow_fork(tr, enabled);
 5303
 5304	if (mask == TRACE_ITER_OVERWRITE) {
 5305		ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
 5306#ifdef CONFIG_TRACER_MAX_TRACE
 5307		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
 5308#endif
 5309	}
 5310
 5311	if (mask == TRACE_ITER_PRINTK) {
 5312		trace_printk_start_stop_comm(enabled);
 5313		trace_printk_control(enabled);
 5314	}
 5315
 5316	return 0;
 5317}
 5318
 5319int trace_set_options(struct trace_array *tr, char *option)
 5320{
 5321	char *cmp;
 5322	int neg = 0;
 5323	int ret;
 5324	size_t orig_len = strlen(option);
 5325	int len;
 5326
 5327	cmp = strstrip(option);
 5328
 5329	len = str_has_prefix(cmp, "no");
 5330	if (len)
 5331		neg = 1;
 5332
 5333	cmp += len;
 5334
 5335	mutex_lock(&event_mutex);
 5336	mutex_lock(&trace_types_lock);
 5337
 5338	ret = match_string(trace_options, -1, cmp);
 5339	/* If no option could be set, test the specific tracer options */
 5340	if (ret < 0)
 5341		ret = set_tracer_option(tr, cmp, neg);
 5342	else
 5343		ret = set_tracer_flag(tr, 1 << ret, !neg);
 5344
 5345	mutex_unlock(&trace_types_lock);
 5346	mutex_unlock(&event_mutex);
 5347
 5348	/*
 5349	 * If the first trailing whitespace is replaced with '\0' by strstrip,
 5350	 * turn it back into a space.
 5351	 */
 5352	if (orig_len > strlen(option))
 5353		option[strlen(option)] = ' ';
 5354
 5355	return ret;
 5356}
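/*
 * Informal examples of the option strings parsed above (also accepted by
 * the trace_options file and the trace_options= boot parameter):
 *
 *   # echo noprint-parent > trace_options
 *   # echo overwrite > trace_options
 *
 * A "no" prefix clears the flag; names that are not generic options fall
 * through to the current tracer's private options.
 */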
 5357
 5358static void __init apply_trace_boot_options(void)
 5359{
 5360	char *buf = trace_boot_options_buf;
 5361	char *option;
 5362
 5363	while (true) {
 5364		option = strsep(&buf, ",");
 5365
 5366		if (!option)
 5367			break;
 5368
 5369		if (*option)
 5370			trace_set_options(&global_trace, option);
 5371
 5372		/* Put back the comma to allow this to be called again */
 5373		if (buf)
 5374			*(buf - 1) = ',';
 5375	}
 5376}
 5377
 5378static ssize_t
 5379tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 5380			size_t cnt, loff_t *ppos)
 5381{
 5382	struct seq_file *m = filp->private_data;
 5383	struct trace_array *tr = m->private;
 5384	char buf[64];
 5385	int ret;
 5386
 5387	if (cnt >= sizeof(buf))
 5388		return -EINVAL;
 5389
 5390	if (copy_from_user(buf, ubuf, cnt))
 5391		return -EFAULT;
 5392
 5393	buf[cnt] = 0;
 5394
 5395	ret = trace_set_options(tr, buf);
 5396	if (ret < 0)
 5397		return ret;
 5398
 5399	*ppos += cnt;
 5400
 5401	return cnt;
 5402}
 5403
 5404static int tracing_trace_options_open(struct inode *inode, struct file *file)
 5405{
 5406	struct trace_array *tr = inode->i_private;
 5407	int ret;
 5408
 5409	ret = tracing_check_open_get_tr(tr);
 5410	if (ret)
 5411		return ret;
 5412
 5413	ret = single_open(file, tracing_trace_options_show, inode->i_private);
 5414	if (ret < 0)
 5415		trace_array_put(tr);
 5416
 5417	return ret;
 5418}
 5419
 5420static const struct file_operations tracing_iter_fops = {
 5421	.open		= tracing_trace_options_open,
 5422	.read		= seq_read,
 5423	.llseek		= seq_lseek,
 5424	.release	= tracing_single_release_tr,
 5425	.write		= tracing_trace_options_write,
 5426};
 5427
 5428static const char readme_msg[] =
 5429	"tracing mini-HOWTO:\n\n"
 5430	"# echo 0 > tracing_on : quick way to disable tracing\n"
 5431	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
 5432	" Important files:\n"
 5433	"  trace\t\t\t- The static contents of the buffer\n"
  5434	"\t\t\t  To clear the buffer, write into this file: echo > trace\n"
 5435	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
 5436	"  current_tracer\t- function and latency tracers\n"
 5437	"  available_tracers\t- list of configured tracers for current_tracer\n"
 5438	"  error_log\t- error log for failed commands (that support it)\n"
 5439	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
 5440	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
  5441	"  trace_clock\t\t- change the clock used to order events\n"
 5442	"       local:   Per cpu clock but may not be synced across CPUs\n"
 5443	"      global:   Synced across CPUs but slows tracing down.\n"
 5444	"     counter:   Not a clock, but just an increment\n"
 5445	"      uptime:   Jiffy counter from time of boot\n"
 5446	"        perf:   Same clock that perf events use\n"
 5447#ifdef CONFIG_X86_64
 5448	"     x86-tsc:   TSC cycle counter\n"
 5449#endif
  5450	"\n  timestamp_mode\t- view the mode used to timestamp events\n"
 5451	"       delta:   Delta difference against a buffer-wide timestamp\n"
 5452	"    absolute:   Absolute (standalone) timestamp\n"
  5453	"\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
  5454	"\n  trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
 5455	"  tracing_cpumask\t- Limit which CPUs to trace\n"
 5456	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
 5457	"\t\t\t  Remove sub-buffer with rmdir\n"
 5458	"  trace_options\t\t- Set format or modify how tracing happens\n"
 5459	"\t\t\t  Disable an option by prefixing 'no' to the\n"
 5460	"\t\t\t  option name\n"
  5461	"  saved_cmdlines_size\t- echo the number of comm-pid pairs to store in here\n"
 5462#ifdef CONFIG_DYNAMIC_FTRACE
 5463	"\n  available_filter_functions - list of functions that can be filtered on\n"
 5464	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
 5465	"\t\t\t  functions\n"
 5466	"\t     accepts: func_full_name or glob-matching-pattern\n"
 5467	"\t     modules: Can select a group via module\n"
 5468	"\t      Format: :mod:<module-name>\n"
 5469	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
 5470	"\t    triggers: a command to perform when function is hit\n"
 5471	"\t      Format: <function>:<trigger>[:count]\n"
 5472	"\t     trigger: traceon, traceoff\n"
 5473	"\t\t      enable_event:<system>:<event>\n"
 5474	"\t\t      disable_event:<system>:<event>\n"
 5475#ifdef CONFIG_STACKTRACE
 5476	"\t\t      stacktrace\n"
 5477#endif
 5478#ifdef CONFIG_TRACER_SNAPSHOT
 5479	"\t\t      snapshot\n"
 5480#endif
 5481	"\t\t      dump\n"
 5482	"\t\t      cpudump\n"
 5483	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
 5484	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
 5485	"\t     The first one will disable tracing every time do_fault is hit\n"
 5486	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
  5487	"\t       The first time do_trap is hit and it disables tracing, the\n"
 5488	"\t       counter will decrement to 2. If tracing is already disabled,\n"
 5489	"\t       the counter will not decrement. It only decrements when the\n"
 5490	"\t       trigger did work\n"
 5491	"\t     To remove trigger without count:\n"
  5492	"\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
 5493	"\t     To remove trigger with a count:\n"
  5494	"\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
 5495	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
 5496	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
 5497	"\t    modules: Can select a group via module command :mod:\n"
 5498	"\t    Does not accept triggers\n"
 5499#endif /* CONFIG_DYNAMIC_FTRACE */
 5500#ifdef CONFIG_FUNCTION_TRACER
 5501	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
 5502	"\t\t    (function)\n"
 5503	"  set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
 5504	"\t\t    (function)\n"
 5505#endif
 5506#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 5507	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
 5508	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
 5509	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
 5510#endif
 5511#ifdef CONFIG_TRACER_SNAPSHOT
 5512	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
 5513	"\t\t\t  snapshot buffer. Read the contents for more\n"
 5514	"\t\t\t  information\n"
 5515#endif
 5516#ifdef CONFIG_STACK_TRACER
 5517	"  stack_trace\t\t- Shows the max stack trace when active\n"
 5518	"  stack_max_size\t- Shows current max stack size that was traced\n"
 5519	"\t\t\t  Write into this file to reset the max size (trigger a\n"
 5520	"\t\t\t  new trace)\n"
 5521#ifdef CONFIG_DYNAMIC_FTRACE
 5522	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
 5523	"\t\t\t  traces\n"
 5524#endif
 5525#endif /* CONFIG_STACK_TRACER */
 5526#ifdef CONFIG_DYNAMIC_EVENTS
 5527	"  dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
 5528	"\t\t\t  Write into this file to define/undefine new trace events.\n"
 5529#endif
 5530#ifdef CONFIG_KPROBE_EVENTS
 5531	"  kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
 5532	"\t\t\t  Write into this file to define/undefine new trace events.\n"
 5533#endif
 5534#ifdef CONFIG_UPROBE_EVENTS
 5535	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
 5536	"\t\t\t  Write into this file to define/undefine new trace events.\n"
 5537#endif
 5538#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
 5539	"\t  accepts: event-definitions (one definition per line)\n"
 5540	"\t   Format: p[:[<group>/]<event>] <place> [<args>]\n"
 5541	"\t           r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
 5542#ifdef CONFIG_HIST_TRIGGERS
 5543	"\t           s:[synthetic/]<event> <field> [<field>]\n"
 5544#endif
 5545	"\t           -:[<group>/]<event>\n"
 5546#ifdef CONFIG_KPROBE_EVENTS
 5547	"\t    place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
  5548	"\t    place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
 5549#endif
 5550#ifdef CONFIG_UPROBE_EVENTS
  5551	"\t    place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
 5552#endif
 5553	"\t     args: <name>=fetcharg[:type]\n"
 5554	"\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
 5555#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
 5556	"\t           $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
 5557#else
 5558	"\t           $stack<index>, $stack, $retval, $comm,\n"
 5559#endif
 5560	"\t           +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
 5561	"\t     type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
 5562	"\t           b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
 5563	"\t           <type>\\[<array-size>\\]\n"
 5564#ifdef CONFIG_HIST_TRIGGERS
 5565	"\t    field: <stype> <name>;\n"
 5566	"\t    stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
 5567	"\t           [unsigned] char/int/long\n"
 5568#endif
 5569#endif
 5570	"  events/\t\t- Directory containing all trace event subsystems:\n"
 5571	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
 5572	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
 5573	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
 5574	"\t\t\t  events\n"
 5575	"      filter\t\t- If set, only events passing filter are traced\n"
 5576	"  events/<system>/<event>/\t- Directory containing control files for\n"
 5577	"\t\t\t  <event>:\n"
 5578	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
 5579	"      filter\t\t- If set, only events passing filter are traced\n"
 5580	"      trigger\t\t- If set, a command to perform when event is hit\n"
 5581	"\t    Format: <trigger>[:count][if <filter>]\n"
 5582	"\t   trigger: traceon, traceoff\n"
 5583	"\t            enable_event:<system>:<event>\n"
 5584	"\t            disable_event:<system>:<event>\n"
 5585#ifdef CONFIG_HIST_TRIGGERS
 5586	"\t            enable_hist:<system>:<event>\n"
 5587	"\t            disable_hist:<system>:<event>\n"
 5588#endif
 5589#ifdef CONFIG_STACKTRACE
 5590	"\t\t    stacktrace\n"
 5591#endif
 5592#ifdef CONFIG_TRACER_SNAPSHOT
 5593	"\t\t    snapshot\n"
 5594#endif
 5595#ifdef CONFIG_HIST_TRIGGERS
 5596	"\t\t    hist (see below)\n"
 5597#endif
 5598	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
 5599	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
 5600	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
 5601	"\t                  events/block/block_unplug/trigger\n"
 5602	"\t   The first disables tracing every time block_unplug is hit.\n"
 5603	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
 5604	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
 5605	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
 5606	"\t   Like function triggers, the counter is only decremented if it\n"
 5607	"\t    enabled or disabled tracing.\n"
 5608	"\t   To remove a trigger without a count:\n"
  5609	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
 5610	"\t   To remove a trigger with a count:\n"
  5611	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
 5612	"\t   Filters can be ignored when removing a trigger.\n"
 5613#ifdef CONFIG_HIST_TRIGGERS
 5614	"      hist trigger\t- If set, event hits are aggregated into a hash table\n"
 5615	"\t    Format: hist:keys=<field1[,field2,...]>\n"
 5616	"\t            [:values=<field1[,field2,...]>]\n"
 5617	"\t            [:sort=<field1[,field2,...]>]\n"
 5618	"\t            [:size=#entries]\n"
 5619	"\t            [:pause][:continue][:clear]\n"
 5620	"\t            [:name=histname1]\n"
 5621	"\t            [:<handler>.<action>]\n"
 5622	"\t            [if <filter>]\n\n"
 5623	"\t    Note, special fields can be used as well:\n"
 5624	"\t            common_timestamp - to record current timestamp\n"
 5625	"\t            common_cpu - to record the CPU the event happened on\n"
 5626	"\n"
 5627	"\t    When a matching event is hit, an entry is added to a hash\n"
 5628	"\t    table using the key(s) and value(s) named, and the value of a\n"
 5629	"\t    sum called 'hitcount' is incremented.  Keys and values\n"
 5630	"\t    correspond to fields in the event's format description.  Keys\n"
 5631	"\t    can be any field, or the special string 'stacktrace'.\n"
 5632	"\t    Compound keys consisting of up to two fields can be specified\n"
 5633	"\t    by the 'keys' keyword.  Values must correspond to numeric\n"
 5634	"\t    fields.  Sort keys consisting of up to two fields can be\n"
 5635	"\t    specified using the 'sort' keyword.  The sort direction can\n"
 5636	"\t    be modified by appending '.descending' or '.ascending' to a\n"
 5637	"\t    sort field.  The 'size' parameter can be used to specify more\n"
 5638	"\t    or fewer than the default 2048 entries for the hashtable size.\n"
 5639	"\t    If a hist trigger is given a name using the 'name' parameter,\n"
 5640	"\t    its histogram data will be shared with other triggers of the\n"
 5641	"\t    same name, and trigger hits will update this common data.\n\n"
 5642	"\t    Reading the 'hist' file for the event will dump the hash\n"
 5643	"\t    table in its entirety to stdout.  If there are multiple hist\n"
 5644	"\t    triggers attached to an event, there will be a table for each\n"
 5645	"\t    trigger in the output.  The table displayed for a named\n"
 5646	"\t    trigger will be the same as any other instance having the\n"
 5647	"\t    same name.  The default format used to display a given field\n"
 5648	"\t    can be modified by appending any of the following modifiers\n"
 5649	"\t    to the field name, as applicable:\n\n"
 5650	"\t            .hex        display a number as a hex value\n"
 5651	"\t            .sym        display an address as a symbol\n"
 5652	"\t            .sym-offset display an address as a symbol and offset\n"
 5653	"\t            .execname   display a common_pid as a program name\n"
 5654	"\t            .syscall    display a syscall id as a syscall name\n"
 5655	"\t            .log2       display log2 value rather than raw number\n"
 5656	"\t            .usecs      display a common_timestamp in microseconds\n\n"
 5657	"\t    The 'pause' parameter can be used to pause an existing hist\n"
 5658	"\t    trigger or to start a hist trigger but not log any events\n"
 5659	"\t    until told to do so.  'continue' can be used to start or\n"
 5660	"\t    restart a paused hist trigger.\n\n"
 5661	"\t    The 'clear' parameter will clear the contents of a running\n"
 5662	"\t    hist trigger and leave its current paused/active state\n"
 5663	"\t    unchanged.\n\n"
 5664	"\t    The enable_hist and disable_hist triggers can be used to\n"
 5665	"\t    have one event conditionally start and stop another event's\n"
 5666	"\t    already-attached hist trigger.  The syntax is analogous to\n"
 5667	"\t    the enable_event and disable_event triggers.\n\n"
 5668	"\t    Hist trigger handlers and actions are executed whenever a\n"
  5669	"\t    histogram entry is added or updated.  They take the form:\n\n"
 5670	"\t        <handler>.<action>\n\n"
 5671	"\t    The available handlers are:\n\n"
 5672	"\t        onmatch(matching.event)  - invoke on addition or update\n"
 5673	"\t        onmax(var)               - invoke if var exceeds current max\n"
 5674	"\t        onchange(var)            - invoke action if var changes\n\n"
 5675	"\t    The available actions are:\n\n"
 5676	"\t        trace(<synthetic_event>,param list)  - generate synthetic event\n"
 5677	"\t        save(field,...)                      - save current event fields\n"
 5678#ifdef CONFIG_TRACER_SNAPSHOT
 5679	"\t        snapshot()                           - snapshot the trace buffer\n\n"
 5680#endif
 5681#ifdef CONFIG_SYNTH_EVENTS
 5682	"  events/synthetic_events\t- Create/append/remove/show synthetic events\n"
 5683	"\t  Write into this file to define/undefine new synthetic events.\n"
 5684	"\t     example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
 5685#endif
 5686#endif
 5687;
 5688
 5689static ssize_t
 5690tracing_readme_read(struct file *filp, char __user *ubuf,
 5691		       size_t cnt, loff_t *ppos)
 5692{
 5693	return simple_read_from_buffer(ubuf, cnt, ppos,
 5694					readme_msg, strlen(readme_msg));
 5695}
 5696
 5697static const struct file_operations tracing_readme_fops = {
 5698	.open		= tracing_open_generic,
 5699	.read		= tracing_readme_read,
 5700	.llseek		= generic_file_llseek,
 5701};
 5702
 5703static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
 5704{
 5705	int pid = ++(*pos);
 5706
 5707	return trace_find_tgid_ptr(pid);
 5708}
 5709
 5710static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
 5711{
 5712	int pid = *pos;
 5713
 5714	return trace_find_tgid_ptr(pid);
 5715}
 5716
 5717static void saved_tgids_stop(struct seq_file *m, void *v)
 5718{
 5719}
 5720
 5721static int saved_tgids_show(struct seq_file *m, void *v)
 5722{
 5723	int *entry = (int *)v;
 5724	int pid = entry - tgid_map;
 5725	int tgid = *entry;
 5726
 5727	if (tgid == 0)
 5728		return SEQ_SKIP;
 5729
 5730	seq_printf(m, "%d %d\n", pid, tgid);
 5731	return 0;
 5732}
 5733
 5734static const struct seq_operations tracing_saved_tgids_seq_ops = {
 5735	.start		= saved_tgids_start,
 5736	.stop		= saved_tgids_stop,
 5737	.next		= saved_tgids_next,
 5738	.show		= saved_tgids_show,
 5739};
 5740
 5741static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
 5742{
 5743	int ret;
 5744
 5745	ret = tracing_check_open_get_tr(NULL);
 5746	if (ret)
 5747		return ret;
 5748
 5749	return seq_open(filp, &tracing_saved_tgids_seq_ops);
 5750}
 5751
 5752
 5753static const struct file_operations tracing_saved_tgids_fops = {
 5754	.open		= tracing_saved_tgids_open,
 5755	.read		= seq_read,
 5756	.llseek		= seq_lseek,
 5757	.release	= seq_release,
 5758};
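/*
 * Loose example of the "saved_tgids" output produced above: once the
 * record-tgid option is set, each line is a "<pid> <tgid>" pair, e.g.
 *
 *   1337 1335
 *   1338 1335
 *
 * (the values here are made up for illustration).
 */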
 5759
 5760static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
 5761{
 5762	unsigned int *ptr = v;
 5763
 5764	if (*pos || m->count)
 5765		ptr++;
 5766
 5767	(*pos)++;
 5768
 5769	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
 5770	     ptr++) {
 5771		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
 5772			continue;
 5773
 5774		return ptr;
 5775	}
 5776
 5777	return NULL;
 5778}
 5779
 5780static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
 5781{
 5782	void *v;
 5783	loff_t l = 0;
 5784
 5785	preempt_disable();
 5786	arch_spin_lock(&trace_cmdline_lock);
 5787
 5788	v = &savedcmd->map_cmdline_to_pid[0];
 5789	while (l <= *pos) {
 5790		v = saved_cmdlines_next(m, v, &l);
 5791		if (!v)
 5792			return NULL;
 5793	}
 5794
 5795	return v;
 5796}
 5797
 5798static void saved_cmdlines_stop(struct seq_file *m, void *v)
 5799{
 5800	arch_spin_unlock(&trace_cmdline_lock);
 5801	preempt_enable();
 5802}
 5803
 5804static int saved_cmdlines_show(struct seq_file *m, void *v)
 5805{
 5806	char buf[TASK_COMM_LEN];
 5807	unsigned int *pid = v;
 5808
 5809	__trace_find_cmdline(*pid, buf);
 5810	seq_printf(m, "%d %s\n", *pid, buf);
 5811	return 0;
 5812}
 5813
 5814static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
 5815	.start		= saved_cmdlines_start,
 5816	.next		= saved_cmdlines_next,
 5817	.stop		= saved_cmdlines_stop,
 5818	.show		= saved_cmdlines_show,
 5819};
 5820
 5821static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
 5822{
 5823	int ret;
 5824
 5825	ret = tracing_check_open_get_tr(NULL);
 5826	if (ret)
 5827		return ret;
 5828
 5829	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
 5830}
 5831
 5832static const struct file_operations tracing_saved_cmdlines_fops = {
 5833	.open		= tracing_saved_cmdlines_open,
 5834	.read		= seq_read,
 5835	.llseek		= seq_lseek,
 5836	.release	= seq_release,
 5837};
 5838
 5839static ssize_t
 5840tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
 5841				 size_t cnt, loff_t *ppos)
 5842{
 5843	char buf[64];
 5844	int r;
 5845
 5846	arch_spin_lock(&trace_cmdline_lock);
 5847	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
 5848	arch_spin_unlock(&trace_cmdline_lock);
 5849
 5850	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 5851}
 5852
 5853static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
 5854{
 5855	kfree(s->saved_cmdlines);
 5856	kfree(s->map_cmdline_to_pid);
 5857	kfree(s);
 5858}
 5859
 5860static int tracing_resize_saved_cmdlines(unsigned int val)
 5861{
 5862	struct saved_cmdlines_buffer *s, *savedcmd_temp;
 5863
 5864	s = kmalloc(sizeof(*s), GFP_KERNEL);
 5865	if (!s)
 5866		return -ENOMEM;
 5867
 5868	if (allocate_cmdlines_buffer(val, s) < 0) {
 5869		kfree(s);
 5870		return -ENOMEM;
 5871	}
 5872
 5873	arch_spin_lock(&trace_cmdline_lock);
 5874	savedcmd_temp = savedcmd;
 5875	savedcmd = s;
 5876	arch_spin_unlock(&trace_cmdline_lock);
 5877	free_saved_cmdlines_buffer(savedcmd_temp);
 5878
 5879	return 0;
 5880}
 5881
 5882static ssize_t
 5883tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
 5884				  size_t cnt, loff_t *ppos)
 5885{
 5886	unsigned long val;
 5887	int ret;
 5888
 5889	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 5890	if (ret)
 5891		return ret;
 5892
  5893	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
 5894	if (!val || val > PID_MAX_DEFAULT)
 5895		return -EINVAL;
 5896
 5897	ret = tracing_resize_saved_cmdlines((unsigned int)val);
 5898	if (ret < 0)
 5899		return ret;
 5900
 5901	*ppos += cnt;
 5902
 5903	return cnt;
 5904}
 5905
 5906static const struct file_operations tracing_saved_cmdlines_size_fops = {
 5907	.open		= tracing_open_generic,
 5908	.read		= tracing_saved_cmdlines_size_read,
 5909	.write		= tracing_saved_cmdlines_size_write,
 5910};
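/*
 * Usage sketch: the saved_cmdlines_size file sizes the pid->comm cache used
 * to resolve task names in the trace output.  For example,
 *
 *   # echo 1024 > saved_cmdlines_size
 *
 * would reallocate the cache to hold 1024 entries (the default is much
 * smaller), at the cost of a little more memory.
 */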
 5911
 5912#ifdef CONFIG_TRACE_EVAL_MAP_FILE
 5913static union trace_eval_map_item *
 5914update_eval_map(union trace_eval_map_item *ptr)
 5915{
 5916	if (!ptr->map.eval_string) {
 5917		if (ptr->tail.next) {
 5918			ptr = ptr->tail.next;
 5919			/* Set ptr to the next real item (skip head) */
 5920			ptr++;
 5921		} else
 5922			return NULL;
 5923	}
 5924	return ptr;
 5925}
 5926
 5927static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
 5928{
 5929	union trace_eval_map_item *ptr = v;
 5930
 5931	/*
 5932	 * Paranoid! If ptr points to end, we don't want to increment past it.
 5933	 * This really should never happen.
 5934	 */
 5935	(*pos)++;
 5936	ptr = update_eval_map(ptr);
 5937	if (WARN_ON_ONCE(!ptr))
 5938		return NULL;
 5939
 5940	ptr++;
 5941	ptr = update_eval_map(ptr);
 5942
 5943	return ptr;
 5944}
 5945
 5946static void *eval_map_start(struct seq_file *m, loff_t *pos)
 5947{
 5948	union trace_eval_map_item *v;
 5949	loff_t l = 0;
 5950
 5951	mutex_lock(&trace_eval_mutex);
 5952
 5953	v = trace_eval_maps;
 5954	if (v)
 5955		v++;
 5956
 5957	while (v && l < *pos) {
 5958		v = eval_map_next(m, v, &l);
 5959	}
 5960
 5961	return v;
 5962}
 5963
 5964static void eval_map_stop(struct seq_file *m, void *v)
 5965{
 5966	mutex_unlock(&trace_eval_mutex);
 5967}
 5968
 5969static int eval_map_show(struct seq_file *m, void *v)
 5970{
 5971	union trace_eval_map_item *ptr = v;
 5972
 5973	seq_printf(m, "%s %ld (%s)\n",
 5974		   ptr->map.eval_string, ptr->map.eval_value,
 5975		   ptr->map.system);
 5976
 5977	return 0;
 5978}
 5979
 5980static const struct seq_operations tracing_eval_map_seq_ops = {
 5981	.start		= eval_map_start,
 5982	.next		= eval_map_next,
 5983	.stop		= eval_map_stop,
 5984	.show		= eval_map_show,
 5985};
 5986
 5987static int tracing_eval_map_open(struct inode *inode, struct file *filp)
 5988{
 5989	int ret;
 5990
 5991	ret = tracing_check_open_get_tr(NULL);
 5992	if (ret)
 5993		return ret;
 5994
 5995	return seq_open(filp, &tracing_eval_map_seq_ops);
 5996}
 5997
 5998static const struct file_operations tracing_eval_map_fops = {
 5999	.open		= tracing_eval_map_open,
 6000	.read		= seq_read,
 6001	.llseek		= seq_lseek,
 6002	.release	= seq_release,
 6003};
 6004
 6005static inline union trace_eval_map_item *
 6006trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
 6007{
 6008	/* Return tail of array given the head */
 6009	return ptr + ptr->head.length + 1;
 6010}
 6011
 6012static void
 6013trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
 6014			   int len)
 6015{
 6016	struct trace_eval_map **stop;
 6017	struct trace_eval_map **map;
 6018	union trace_eval_map_item *map_array;
 6019	union trace_eval_map_item *ptr;
 6020
 6021	stop = start + len;
 6022
 6023	/*
 6024	 * The trace_eval_maps contains the map plus a head and tail item,
 6025	 * where the head holds the module and length of array, and the
 6026	 * tail holds a pointer to the next list.
 6027	 */
 6028	map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
 6029	if (!map_array) {
 6030		pr_warn("Unable to allocate trace eval mapping\n");
 6031		return;
 6032	}
 6033
 6034	mutex_lock(&trace_eval_mutex);
 6035
 6036	if (!trace_eval_maps)
 6037		trace_eval_maps = map_array;
 6038	else {
 6039		ptr = trace_eval_maps;
 6040		for (;;) {
 6041			ptr = trace_eval_jmp_to_tail(ptr);
 6042			if (!ptr->tail.next)
 6043				break;
 6044			ptr = ptr->tail.next;
 6045
 6046		}
 6047		ptr->tail.next = map_array;
 6048	}
 6049	map_array->head.mod = mod;
 6050	map_array->head.length = len;
 6051	map_array++;
 6052
 6053	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
 6054		map_array->map = **map;
 6055		map_array++;
 6056	}
 6057	memset(map_array, 0, sizeof(*map_array));
 6058
 6059	mutex_unlock(&trace_eval_mutex);
 6060}
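/*
 * Rough picture of the map_array built above (one allocation per module):
 *
 *   [ head: mod, length ][ map 0 ] ... [ map len-1 ][ tail: next or NULL ]
 *
 * trace_eval_jmp_to_tail() skips from the head over "length" map entries to
 * reach the tail, which chains to the next module's array, if any.
 */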
 6061
 6062static void trace_create_eval_file(struct dentry *d_tracer)
 6063{
 6064	trace_create_file("eval_map", 0444, d_tracer,
 6065			  NULL, &tracing_eval_map_fops);
 6066}
 6067
 6068#else /* CONFIG_TRACE_EVAL_MAP_FILE */
 6069static inline void trace_create_eval_file(struct dentry *d_tracer) { }
 6070static inline void trace_insert_eval_map_file(struct module *mod,
 6071			      struct trace_eval_map **start, int len) { }
 6072#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
 6073
 6074static void trace_insert_eval_map(struct module *mod,
 6075				  struct trace_eval_map **start, int len)
 6076{
 6077	struct trace_eval_map **map;
 6078
 6079	if (len <= 0)
 6080		return;
 6081
 6082	map = start;
 6083
 6084	trace_event_eval_update(map, len);
 6085
 6086	trace_insert_eval_map_file(mod, start, len);
 6087}
 6088
 6089static ssize_t
 6090tracing_set_trace_read(struct file *filp, char __user *ubuf,
 6091		       size_t cnt, loff_t *ppos)
 6092{
 6093	struct trace_array *tr = filp->private_data;
 6094	char buf[MAX_TRACER_SIZE+2];
 6095	int r;
 6096
 6097	mutex_lock(&trace_types_lock);
 6098	r = sprintf(buf, "%s\n", tr->current_trace->name);
 6099	mutex_unlock(&trace_types_lock);
 6100
 6101	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 6102}
 6103
 6104int tracer_init(struct tracer *t, struct trace_array *tr)
 6105{
 6106	tracing_reset_online_cpus(&tr->array_buffer);
 6107	return t->init(tr);
 6108}
 6109
 6110static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
 6111{
 6112	int cpu;
 6113
 6114	for_each_tracing_cpu(cpu)
 6115		per_cpu_ptr(buf->data, cpu)->entries = val;
 6116}
 6117
 6118#ifdef CONFIG_TRACER_MAX_TRACE
  6119/* resize @trace_buf's buffer to the size of @size_buf's entries */
 6120static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
 6121					struct array_buffer *size_buf, int cpu_id)
 6122{
 6123	int cpu, ret = 0;
 6124
 6125	if (cpu_id == RING_BUFFER_ALL_CPUS) {
 6126		for_each_tracing_cpu(cpu) {
 6127			ret = ring_buffer_resize(trace_buf->buffer,
 6128				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
 6129			if (ret < 0)
 6130				break;
 6131			per_cpu_ptr(trace_buf->data, cpu)->entries =
 6132				per_cpu_ptr(size_buf->data, cpu)->entries;
 6133		}
 6134	} else {
 6135		ret = ring_buffer_resize(trace_buf->buffer,
 6136				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
 6137		if (ret == 0)
 6138			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
 6139				per_cpu_ptr(size_buf->data, cpu_id)->entries;
 6140	}
 6141
 6142	return ret;
 6143}
 6144#endif /* CONFIG_TRACER_MAX_TRACE */
 6145
 6146static int __tracing_resize_ring_buffer(struct trace_array *tr,
 6147					unsigned long size, int cpu)
 6148{
 6149	int ret;
 6150
 6151	/*
 6152	 * If kernel or user changes the size of the ring buffer
 6153	 * we use the size that was given, and we can forget about
 6154	 * expanding it later.
 6155	 */
 6156	ring_buffer_expanded = true;
 6157
 6158	/* May be called before buffers are initialized */
 6159	if (!tr->array_buffer.buffer)
 6160		return 0;
 6161
 6162	ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
 6163	if (ret < 0)
 6164		return ret;
 6165
 6166#ifdef CONFIG_TRACER_MAX_TRACE
 6167	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
 6168	    !tr->current_trace->use_max_tr)
 6169		goto out;
 6170
 6171	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
 6172	if (ret < 0) {
 6173		int r = resize_buffer_duplicate_size(&tr->array_buffer,
 6174						     &tr->array_buffer, cpu);
 6175		if (r < 0) {
 6176			/*
 6177			 * AARGH! We are left with different
 6178			 * size max buffer!!!!
 6179			 * The max buffer is our "snapshot" buffer.
 6180			 * When a tracer needs a snapshot (one of the
 6181			 * latency tracers), it swaps the max buffer
  6182			 * with the saved snapshot. We succeeded in
  6183			 * updating the size of the main buffer, but failed to
 6184			 * update the size of the max buffer. But when we tried
 6185			 * to reset the main buffer to the original size, we
 6186			 * failed there too. This is very unlikely to
 6187			 * happen, but if it does, warn and kill all
 6188			 * tracing.
 6189			 */
 6190			WARN_ON(1);
 6191			tracing_disabled = 1;
 6192		}
 6193		return ret;
 6194	}
 6195
 6196	if (cpu == RING_BUFFER_ALL_CPUS)
 6197		set_buffer_entries(&tr->max_buffer, size);
 6198	else
 6199		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
 6200
 6201 out:
 6202#endif /* CONFIG_TRACER_MAX_TRACE */
 6203
 6204	if (cpu == RING_BUFFER_ALL_CPUS)
 6205		set_buffer_entries(&tr->array_buffer, size);
 6206	else
 6207		per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
 6208
 6209	return ret;
 6210}
 6211
 6212ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
 6213				  unsigned long size, int cpu_id)
 6214{
 6215	int ret;
 6216
 6217	mutex_lock(&trace_types_lock);
 6218
 6219	if (cpu_id != RING_BUFFER_ALL_CPUS) {
  6220		/* make sure this cpu is enabled in the mask */
 6221		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
 6222			ret = -EINVAL;
 6223			goto out;
 6224		}
 6225	}
 6226
 6227	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
 6228	if (ret < 0)
 6229		ret = -ENOMEM;
 6230
 6231out:
 6232	mutex_unlock(&trace_types_lock);
 6233
 6234	return ret;
 6235}
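/*
 * Informally, this is the path behind buffer_size_kb.  Writes there are
 * converted to bytes by the caller before reaching this function, e.g.
 *
 *   # echo 8192 > buffer_size_kb               (all CPUs, roughly 8 MB each)
 *   # echo 4096 > per_cpu/cpu1/buffer_size_kb  (only CPU 1)
 *
 * The ring buffer may round the requested size up to a whole number of
 * pages.
 */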
 6236
 6237
 6238/**
 6239 * tracing_update_buffers - used by tracing facility to expand ring buffers
 6240 *
  6241 * To save memory when tracing is configured in but never used, the
  6242 * ring buffers are initially set to a minimum size. Once a user
  6243 * starts to use the tracing facility, they need to grow to their
  6244 * default size.
 6245 *
 6246 * This function is to be called when a tracer is about to be used.
 6247 */
 6248int tracing_update_buffers(void)
 6249{
 6250	int ret = 0;
 6251
 6252	mutex_lock(&trace_types_lock);
 6253	if (!ring_buffer_expanded)
 6254		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
 6255						RING_BUFFER_ALL_CPUS);
 6256	mutex_unlock(&trace_types_lock);
 6257
 6258	return ret;
 6259}
 6260
 6261struct trace_option_dentry;
 6262
 6263static void
 6264create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
 6265
 6266/*
 6267 * Used to clear out the tracer before deletion of an instance.
 6268 * Must have trace_types_lock held.
 6269 */
 6270static void tracing_set_nop(struct trace_array *tr)
 6271{
 6272	if (tr->current_trace == &nop_trace)
 6273		return;
 6274	
 6275	tr->current_trace->enabled--;
 6276
 6277	if (tr->current_trace->reset)
 6278		tr->current_trace->reset(tr);
 6279
 6280	tr->current_trace = &nop_trace;
 6281}
 6282
 6283static void add_tracer_options(struct trace_array *tr, struct tracer *t)
 6284{
 6285	/* Only enable if the directory has been created already. */
 6286	if (!tr->dir)
 6287		return;
 6288
 6289	create_trace_option_files(tr, t);
 6290}
 6291
 6292int tracing_set_tracer(struct trace_array *tr, const char *buf)
 6293{
 6294	struct tracer *t;
 6295#ifdef CONFIG_TRACER_MAX_TRACE
 6296	bool had_max_tr;
 6297#endif
 6298	int ret = 0;
 6299
 6300	mutex_lock(&trace_types_lock);
 6301
 6302	if (!ring_buffer_expanded) {
 6303		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
 6304						RING_BUFFER_ALL_CPUS);
 6305		if (ret < 0)
 6306			goto out;
 6307		ret = 0;
 6308	}
 6309
 6310	for (t = trace_types; t; t = t->next) {
 6311		if (strcmp(t->name, buf) == 0)
 6312			break;
 6313	}
 6314	if (!t) {
 6315		ret = -EINVAL;
 6316		goto out;
 6317	}
 6318	if (t == tr->current_trace)
 6319		goto out;
 6320
 6321#ifdef CONFIG_TRACER_SNAPSHOT
 6322	if (t->use_max_tr) {
 6323		arch_spin_lock(&tr->max_lock);
 6324		if (tr->cond_snapshot)
 6325			ret = -EBUSY;
 6326		arch_spin_unlock(&tr->max_lock);
 6327		if (ret)
 6328			goto out;
 6329	}
 6330#endif
 6331	/* Some tracers won't work on kernel command line */
 6332	if (system_state < SYSTEM_RUNNING && t->noboot) {
 6333		pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
 6334			t->name);
 6335		goto out;
 6336	}
 6337
 6338	/* Some tracers are only allowed for the top level buffer */
 6339	if (!trace_ok_for_array(t, tr)) {
 6340		ret = -EINVAL;
 6341		goto out;
 6342	}
 6343
 6344	/* If trace pipe files are being read, we can't change the tracer */
 6345	if (tr->trace_ref) {
 6346		ret = -EBUSY;
 6347		goto out;
 6348	}
 6349
 6350	trace_branch_disable();
 6351
 6352	tr->current_trace->enabled--;
 6353
 6354	if (tr->current_trace->reset)
 6355		tr->current_trace->reset(tr);
 6356
 6357	/* Current trace needs to be nop_trace before synchronize_rcu */
 6358	tr->current_trace = &nop_trace;
 6359
 6360#ifdef CONFIG_TRACER_MAX_TRACE
 6361	had_max_tr = tr->allocated_snapshot;
 6362
 6363	if (had_max_tr && !t->use_max_tr) {
 6364		/*
 6365		 * We need to make sure that the update_max_tr sees that
 6366		 * current_trace changed to nop_trace to keep it from
 6367		 * swapping the buffers after we resize it.
 6368		 * The update_max_tr is called from interrupts disabled
  6369		 * so a synchronize_rcu() is sufficient.
 6370		 */
 6371		synchronize_rcu();
 6372		free_snapshot(tr);
 6373	}
 6374#endif
 6375
 6376#ifdef CONFIG_TRACER_MAX_TRACE
 6377	if (t->use_max_tr && !had_max_tr) {
 6378		ret = tracing_alloc_snapshot_instance(tr);
 6379		if (ret < 0)
 6380			goto out;
 6381	}
 6382#endif
 6383
 6384	if (t->init) {
 6385		ret = tracer_init(t, tr);
 6386		if (ret)
 6387			goto out;
 6388	}
 6389
 6390	tr->current_trace = t;
 6391	tr->current_trace->enabled++;
 6392	trace_branch_enable(tr);
 6393 out:
 6394	mutex_unlock(&trace_types_lock);
 6395
 6396	return ret;
 6397}
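/*
 * Typical use of the setter above, via the current_tracer file:
 *
 *   # echo function_graph > current_tracer
 *   # echo nop > current_tracer
 *
 * The first selects a tracer by name (growing the ring buffer first if it is
 * still at its boot-time minimum); the second switches back to nop,
 * effectively turning tracing by a tracer off.
 */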
 6398
 6399static ssize_t
 6400tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 6401			size_t cnt, loff_t *ppos)
 6402{
 6403	struct trace_array *tr = filp->private_data;
 6404	char buf[MAX_TRACER_SIZE+1];
 6405	int i;
 6406	size_t ret;
 6407	int err;
 6408
 6409	ret = cnt;
 6410
 6411	if (cnt > MAX_TRACER_SIZE)
 6412		cnt = MAX_TRACER_SIZE;
 6413
 6414	if (copy_from_user(buf, ubuf, cnt))
 6415		return -EFAULT;
 6416
 6417	buf[cnt] = 0;
 6418
  6419	/* strip trailing whitespace. */
 6420	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
 6421		buf[i] = 0;
 6422
 6423	err = tracing_set_tracer(tr, buf);
 6424	if (err)
 6425		return err;
 6426
 6427	*ppos += ret;
 6428
 6429	return ret;
 6430}
 6431
 6432static ssize_t
 6433tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
 6434		   size_t cnt, loff_t *ppos)
 6435{
 6436	char buf[64];
 6437	int r;
 6438
 6439	r = snprintf(buf, sizeof(buf), "%ld\n",
 6440		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
 6441	if (r > sizeof(buf))
 6442		r = sizeof(buf);
 6443	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 6444}
 6445
 6446static ssize_t
 6447tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
 6448		    size_t cnt, loff_t *ppos)
 6449{
 6450	unsigned long val;
 6451	int ret;
 6452
 6453	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 6454	if (ret)
 6455		return ret;
 6456
 6457	*ptr = val * 1000;
 6458
 6459	return cnt;
 6460}
 6461
 6462static ssize_t
 6463tracing_thresh_read(struct file *filp, char __user *ubuf,
 6464		    size_t cnt, loff_t *ppos)
 6465{
 6466	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
 6467}
 6468
 6469static ssize_t
 6470tracing_thresh_write(struct file *filp, const char __user *ubuf,
 6471		     size_t cnt, loff_t *ppos)
 6472{
 6473	struct trace_array *tr = filp->private_data;
 6474	int ret;
 6475
 6476	mutex_lock(&trace_types_lock);
 6477	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
 6478	if (ret < 0)
 6479		goto out;
 6480
 6481	if (tr->current_trace->update_thresh) {
 6482		ret = tr->current_trace->update_thresh(tr);
 6483		if (ret < 0)
 6484			goto out;
 6485	}
 6486
 6487	ret = cnt;
 6488out:
 6489	mutex_unlock(&trace_types_lock);
 6490
 6491	return ret;
 6492}
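/*
 * Note on units: tracing_thresh (and the max latency files below) are read
 * and written in microseconds but stored internally in nanoseconds, hence
 * the "* 1000" in tracing_nsecs_write().  For example,
 *
 *   # echo 100 > tracing_thresh
 *
 * asks tracers that honor the threshold to record only events slower than
 * roughly 100 microseconds.
 */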
 6493
 6494#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
 6495
 6496static ssize_t
 6497tracing_max_lat_read(struct file *filp, char __user *ubuf,
 6498		     size_t cnt, loff_t *ppos)
 6499{
 6500	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
 6501}
 6502
 6503static ssize_t
 6504tracing_max_lat_write(struct file *filp, const char __user *ubuf,
 6505		      size_t cnt, loff_t *ppos)
 6506{
 6507	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
 6508}
 6509
 6510#endif
 6511
 6512static int tracing_open_pipe(struct inode *inode, struct file *filp)
 6513{
 6514	struct trace_array *tr = inode->i_private;
 6515	struct trace_iterator *iter;
 6516	int ret;
 6517
 6518	ret = tracing_check_open_get_tr(tr);
 6519	if (ret)
 6520		return ret;
 6521
 6522	mutex_lock(&trace_types_lock);
 6523
 6524	/* create a buffer to store the information to pass to userspace */
 6525	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 6526	if (!iter) {
 6527		ret = -ENOMEM;
 6528		__trace_array_put(tr);
 6529		goto out;
 6530	}
 6531
 6532	trace_seq_init(&iter->seq);
 6533	iter->trace = tr->current_trace;
 6534
 6535	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
 6536		ret = -ENOMEM;
 6537		goto fail;
 6538	}
 6539
 6540	/* trace pipe does not show start of buffer */
 6541	cpumask_setall(iter->started);
 6542
 6543	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 6544		iter->iter_flags |= TRACE_FILE_LAT_FMT;
 6545
 6546	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
 6547	if (trace_clocks[tr->clock_id].in_ns)
 6548		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 6549
 6550	iter->tr = tr;
 6551	iter->array_buffer = &tr->array_buffer;
 6552	iter->cpu_file = tracing_get_cpu(inode);
 6553	mutex_init(&iter->mutex);
 6554	filp->private_data = iter;
 6555
 6556	if (iter->trace->pipe_open)
 6557		iter->trace->pipe_open(iter);
 6558
 6559	nonseekable_open(inode, filp);
 6560
 6561	tr->trace_ref++;
 6562out:
 6563	mutex_unlock(&trace_types_lock);
 6564	return ret;
 6565
 6566fail:
 6567	kfree(iter);
 6568	__trace_array_put(tr);
 6569	mutex_unlock(&trace_types_lock);
 6570	return ret;
 6571}
 6572
 6573static int tracing_release_pipe(struct inode *inode, struct file *file)
 6574{
 6575	struct trace_iterator *iter = file->private_data;
 6576	struct trace_array *tr = inode->i_private;
 6577
 6578	mutex_lock(&trace_types_lock);
 6579
 6580	tr->trace_ref--;
 6581
 6582	if (iter->trace->pipe_close)
 6583		iter->trace->pipe_close(iter);
 6584
 6585	mutex_unlock(&trace_types_lock);
 6586
 6587	free_cpumask_var(iter->started);
 6588	mutex_destroy(&iter->mutex);
 6589	kfree(iter);
 6590
 6591	trace_array_put(tr);
 6592
 6593	return 0;
 6594}
 6595
 6596static __poll_t
 6597trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
 6598{
 6599	struct trace_array *tr = iter->tr;
 6600
 6601	/* Iterators are static, they should be filled or empty */
 6602	if (trace_buffer_iter(iter, iter->cpu_file))
 6603		return EPOLLIN | EPOLLRDNORM;
 6604
 6605	if (tr->trace_flags & TRACE_ITER_BLOCK)
 6606		/*
 6607		 * Always select as readable when in blocking mode
 6608		 */
 6609		return EPOLLIN | EPOLLRDNORM;
 6610	else
 6611		return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
 6612					     filp, poll_table);
 6613}
 6614
 6615static __poll_t
 6616tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 6617{
 6618	struct trace_iterator *iter = filp->private_data;
 6619
 6620	return trace_poll(iter, filp, poll_table);
 6621}
 6622
 6623/* Must be called with iter->mutex held. */
 6624static int tracing_wait_pipe(struct file *filp)
 6625{
 6626	struct trace_iterator *iter = filp->private_data;
 6627	int ret;
 6628
 6629	while (trace_empty(iter)) {
 6630
 6631		if ((filp->f_flags & O_NONBLOCK)) {
 6632			return -EAGAIN;
 6633		}
 6634
 6635		/*
 6636		 * We block until we read something and tracing is disabled.
 6637		 * We still block if tracing is disabled, but we have never
 6638		 * read anything. This allows a user to cat this file, and
 6639		 * then enable tracing. But after we have read something,
 6640		 * we give an EOF when tracing is again disabled.
 6641		 *
 6642		 * iter->pos will be 0 if we haven't read anything.
 6643		 */
 6644		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
 6645			break;
 6646
 6647		mutex_unlock(&iter->mutex);
 6648
 6649		ret = wait_on_pipe(iter, 0);
 6650
 6651		mutex_lock(&iter->mutex);
 6652
 6653		if (ret)
 6654			return ret;
 6655	}
 6656
 6657	return 1;
 6658}
 6659
 6660/*
 6661 * Consumer reader.
 6662 */
 6663static ssize_t
 6664tracing_read_pipe(struct file *filp, char __user *ubuf,
 6665		  size_t cnt, loff_t *ppos)
 6666{
 6667	struct trace_iterator *iter = filp->private_data;
 6668	ssize_t sret;
 6669
 6670	/*
 6671	 * Avoid more than one consumer on a single file descriptor
 6672	 * This is just a matter of traces coherency, the ring buffer itself
 6673	 * is protected.
 6674	 */
 6675	mutex_lock(&iter->mutex);
 6676
 6677	/* return any leftover data */
 6678	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
 6679	if (sret != -EBUSY)
 6680		goto out;
 6681
 6682	trace_seq_init(&iter->seq);
 6683
 6684	if (iter->trace->read) {
 6685		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
 6686		if (sret)
 6687			goto out;
 6688	}
 6689
 6690waitagain:
 6691	sret = tracing_wait_pipe(filp);
 6692	if (sret <= 0)
 6693		goto out;
 6694
 6695	/* stop when tracing is finished */
 6696	if (trace_empty(iter)) {
 6697		sret = 0;
 6698		goto out;
 6699	}
 6700
 6701	if (cnt >= PAGE_SIZE)
 6702		cnt = PAGE_SIZE - 1;
 6703
 6704	/* reset all but tr, trace, and overruns */
 6705	memset(&iter->seq, 0,
 6706	       sizeof(struct trace_iterator) -
 6707	       offsetof(struct trace_iterator, seq));
 6708	cpumask_clear(iter->started);
 6709	trace_seq_init(&iter->seq);
 6710	iter->pos = -1;
 6711
 6712	trace_event_read_lock();
 6713	trace_access_lock(iter->cpu_file);
 6714	while (trace_find_next_entry_inc(iter) != NULL) {
 6715		enum print_line_t ret;
 6716		int save_len = iter->seq.seq.len;
 6717
 6718		ret = print_trace_line(iter);
 6719		if (ret == TRACE_TYPE_PARTIAL_LINE) {
 6720			/* don't print partial lines */
 6721			iter->seq.seq.len = save_len;
 6722			break;
 6723		}
 6724		if (ret != TRACE_TYPE_NO_CONSUME)
 6725			trace_consume(iter);
 6726
 6727		if (trace_seq_used(&iter->seq) >= cnt)
 6728			break;
 6729
 6730		/*
 6731		 * Setting the full flag means we reached the trace_seq buffer
 6732		 * size and we should leave by partial output condition above.
 6733		 * One of the trace_seq_* functions is not used properly.
 6734		 */
 6735		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
 6736			  iter->ent->type);
 6737	}
 6738	trace_access_unlock(iter->cpu_file);
 6739	trace_event_read_unlock();
 6740
 6741	/* Now copy what we have to the user */
 6742	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
 6743	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
 6744		trace_seq_init(&iter->seq);
 6745
 6746	/*
 6747	 * If there was nothing to send to user, in spite of consuming trace
 6748	 * entries, go back to wait for more entries.
 6749	 */
 6750	if (sret == -EBUSY)
 6751		goto waitagain;
 6752
 6753out:
 6754	mutex_unlock(&iter->mutex);
 6755
 6756	return sret;
 6757}
 6758
 6759static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 6760				     unsigned int idx)
 6761{
 6762	__free_page(spd->pages[idx]);
 6763}
 6764
 6765static size_t
 6766tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 6767{
 6768	size_t count;
 6769	int save_len;
 6770	int ret;
 6771
 6772	/* Seq buffer is page-sized, exactly what we need. */
 6773	for (;;) {
 6774		save_len = iter->seq.seq.len;
 6775		ret = print_trace_line(iter);
 6776
 6777		if (trace_seq_has_overflowed(&iter->seq)) {
 6778			iter->seq.seq.len = save_len;
 6779			break;
 6780		}
 6781
 6782		/*
 6783		 * This should not be hit, because it should only
 6784		 * be set if the iter->seq overflowed. But check it
 6785		 * anyway to be safe.
 6786		 */
 6787		if (ret == TRACE_TYPE_PARTIAL_LINE) {
 6788			iter->seq.seq.len = save_len;
 6789			break;
 6790		}
 6791
 6792		count = trace_seq_used(&iter->seq) - save_len;
 6793		if (rem < count) {
 6794			rem = 0;
 6795			iter->seq.seq.len = save_len;
 6796			break;
 6797		}
 6798
 6799		if (ret != TRACE_TYPE_NO_CONSUME)
 6800			trace_consume(iter);
 6801		rem -= count;
 6802		if (!trace_find_next_entry_inc(iter))	{
 6803			rem = 0;
 6804			iter->ent = NULL;
 6805			break;
 6806		}
 6807	}
 6808
 6809	return rem;
 6810}
 6811
 6812static ssize_t tracing_splice_read_pipe(struct file *filp,
 6813					loff_t *ppos,
 6814					struct pipe_inode_info *pipe,
 6815					size_t len,
 6816					unsigned int flags)
 6817{
 6818	struct page *pages_def[PIPE_DEF_BUFFERS];
 6819	struct partial_page partial_def[PIPE_DEF_BUFFERS];
 6820	struct trace_iterator *iter = filp->private_data;
 6821	struct splice_pipe_desc spd = {
 6822		.pages		= pages_def,
 6823		.partial	= partial_def,
 6824		.nr_pages	= 0, /* This gets updated below. */
 6825		.nr_pages_max	= PIPE_DEF_BUFFERS,
 6826		.ops		= &default_pipe_buf_ops,
 6827		.spd_release	= tracing_spd_release_pipe,
 6828	};
 6829	ssize_t ret;
 6830	size_t rem;
 6831	unsigned int i;
 6832
 6833	if (splice_grow_spd(pipe, &spd))
 6834		return -ENOMEM;
 6835
 6836	mutex_lock(&iter->mutex);
 6837
 6838	if (iter->trace->splice_read) {
 6839		ret = iter->trace->splice_read(iter, filp,
 6840					       ppos, pipe, len, flags);
 6841		if (ret)
 6842			goto out_err;
 6843	}
 6844
 6845	ret = tracing_wait_pipe(filp);
 6846	if (ret <= 0)
 6847		goto out_err;
 6848
 6849	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
 6850		ret = -EFAULT;
 6851		goto out_err;
 6852	}
 6853
 6854	trace_event_read_lock();
 6855	trace_access_lock(iter->cpu_file);
 6856
 6857	/* Fill as many pages as possible. */
 6858	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
 6859		spd.pages[i] = alloc_page(GFP_KERNEL);
 6860		if (!spd.pages[i])
 6861			break;
 6862
 6863		rem = tracing_fill_pipe_page(rem, iter);
 6864
 6865		/* Copy the data into the page, so we can start over. */
 6866		ret = trace_seq_to_buffer(&iter->seq,
 6867					  page_address(spd.pages[i]),
 6868					  trace_seq_used(&iter->seq));
 6869		if (ret < 0) {
 6870			__free_page(spd.pages[i]);
 6871			break;
 6872		}
 6873		spd.partial[i].offset = 0;
 6874		spd.partial[i].len = trace_seq_used(&iter->seq);
 6875
 6876		trace_seq_init(&iter->seq);
 6877	}
 6878
 6879	trace_access_unlock(iter->cpu_file);
 6880	trace_event_read_unlock();
 6881	mutex_unlock(&iter->mutex);
 6882
 6883	spd.nr_pages = i;
 6884
 6885	if (i)
 6886		ret = splice_to_pipe(pipe, &spd);
 6887	else
 6888		ret = 0;
 6889out:
 6890	splice_shrink_spd(&spd);
 6891	return ret;
 6892
 6893out_err:
 6894	mutex_unlock(&iter->mutex);
 6895	goto out;
 6896}
 6897
 6898static ssize_t
 6899tracing_entries_read(struct file *filp, char __user *ubuf,
 6900		     size_t cnt, loff_t *ppos)
 6901{
 6902	struct inode *inode = file_inode(filp);
 6903	struct trace_array *tr = inode->i_private;
 6904	int cpu = tracing_get_cpu(inode);
 6905	char buf[64];
 6906	int r = 0;
 6907	ssize_t ret;
 6908
 6909	mutex_lock(&trace_types_lock);
 6910
 6911	if (cpu == RING_BUFFER_ALL_CPUS) {
 6912		int cpu, buf_size_same;
 6913		unsigned long size;
 6914
 6915		size = 0;
 6916		buf_size_same = 1;
 6917		/* check if all cpu sizes are same */
 6918		for_each_tracing_cpu(cpu) {
 6919			/* fill in the size from first enabled cpu */
 6920			if (size == 0)
 6921				size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
 6922			if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
 6923				buf_size_same = 0;
 6924				break;
 6925			}
 6926		}
 6927
 6928		if (buf_size_same) {
 6929			if (!ring_buffer_expanded)
 6930				r = sprintf(buf, "%lu (expanded: %lu)\n",
 6931					    size >> 10,
 6932					    trace_buf_size >> 10);
 6933			else
 6934				r = sprintf(buf, "%lu\n", size >> 10);
 6935		} else
 6936			r = sprintf(buf, "X\n");
 6937	} else
 6938		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
 6939
 6940	mutex_unlock(&trace_types_lock);
 6941
 6942	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 6943	return ret;
 6944}
 6945
 6946static ssize_t
 6947tracing_entries_write(struct file *filp, const char __user *ubuf,
 6948		      size_t cnt, loff_t *ppos)
 6949{
 6950	struct inode *inode = file_inode(filp);
 6951	struct trace_array *tr = inode->i_private;
 6952	unsigned long val;
 6953	int ret;
 6954
 6955	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 6956	if (ret)
 6957		return ret;
 6958
 6959	/* must have at least 1 entry */
 6960	if (!val)
 6961		return -EINVAL;
 6962
 6963	/* value is in KB */
 6964	val <<= 10;
 6965	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
 6966	if (ret < 0)
 6967		return ret;
 6968
 6969	*ppos += cnt;
 6970
 6971	return cnt;
 6972}
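/*
 * Illustrative usage: these handlers sit behind the "buffer_size_kb"
 * files (see also the per-cpu variant created in
 * tracing_init_tracefs_percpu() below), so resizing every per-CPU ring
 * buffer to 4 MB is
 *
 *	# echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * The value is taken in KB (shifted left by 10 above) and must be nonzero.
 */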
 6973
 6974static ssize_t
 6975tracing_total_entries_read(struct file *filp, char __user *ubuf,
 6976				size_t cnt, loff_t *ppos)
 6977{
 6978	struct trace_array *tr = filp->private_data;
 6979	char buf[64];
 6980	int r, cpu;
 6981	unsigned long size = 0, expanded_size = 0;
 6982
 6983	mutex_lock(&trace_types_lock);
 6984	for_each_tracing_cpu(cpu) {
 6985		size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
 6986		if (!ring_buffer_expanded)
 6987			expanded_size += trace_buf_size >> 10;
 6988	}
 6989	if (ring_buffer_expanded)
 6990		r = sprintf(buf, "%lu\n", size);
 6991	else
 6992		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
 6993	mutex_unlock(&trace_types_lock);
 6994
 6995	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 6996}
 6997
 6998static ssize_t
 6999tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
 7000			  size_t cnt, loff_t *ppos)
 7001{
 7002	/*
 7003	 * There is no need to read what the user has written, this function
 7004	 * is just to make sure that there is no error when "echo" is used
 7005	 */
 7006
 7007	*ppos += cnt;
 7008
 7009	return cnt;
 7010}
 7011
 7012static int
 7013tracing_free_buffer_release(struct inode *inode, struct file *filp)
 7014{
 7015	struct trace_array *tr = inode->i_private;
 7016
 7017	/* disable tracing ? */
 7018	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
 7019		tracer_tracing_off(tr);
 7020	/* resize the ring buffer to 0 */
 7021	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
 7022
 7023	trace_array_put(tr);
 7024
 7025	return 0;
 7026}
 7027
 7028static ssize_t
 7029tracing_mark_write(struct file *filp, const char __user *ubuf,
 7030					size_t cnt, loff_t *fpos)
 7031{
 7032	struct trace_array *tr = filp->private_data;
 7033	struct ring_buffer_event *event;
 7034	enum event_trigger_type tt = ETT_NONE;
 7035	struct trace_buffer *buffer;
 7036	struct print_entry *entry;
 7037	ssize_t written;
 7038	int size;
 7039	int len;
 7040
 7041/* Used in tracing_mark_raw_write() as well */
 7042#define FAULTED_STR "<faulted>"
 7043#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
 7044
 7045	if (tracing_disabled)
 7046		return -EINVAL;
 7047
 7048	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
 7049		return -EINVAL;
 7050
 7051	if (cnt > TRACE_BUF_SIZE)
 7052		cnt = TRACE_BUF_SIZE;
 7053
 7054	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 7055
 7056	size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
 7057
 7058	/* If less than "<faulted>", then make sure we can still add that */
 7059	if (cnt < FAULTED_SIZE)
 7060		size += FAULTED_SIZE - cnt;
 7061
 7062	buffer = tr->array_buffer.buffer;
 7063	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
 7064					    tracing_gen_ctx());
 7065	if (unlikely(!event))
 7066		/* Ring buffer disabled, return as if not open for write */
 7067		return -EBADF;
 7068
 7069	entry = ring_buffer_event_data(event);
 7070	entry->ip = _THIS_IP_;
 7071
 7072	len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
 7073	if (len) {
 7074		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
 7075		cnt = FAULTED_SIZE;
 7076		written = -EFAULT;
 7077	} else
 7078		written = cnt;
 7079
 7080	if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
 7081		/* do not add \n before testing triggers, but add \0 */
 7082		entry->buf[cnt] = '\0';
 7083		tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
 7084	}
 7085
 7086	if (entry->buf[cnt - 1] != '\n') {
 7087		entry->buf[cnt] = '\n';
 7088		entry->buf[cnt + 1] = '\0';
 7089	} else
 7090		entry->buf[cnt] = '\0';
 7091
 7092	if (static_branch_unlikely(&trace_marker_exports_enabled))
 7093		ftrace_exports(event, TRACE_EXPORT_MARKER);
 7094	__buffer_unlock_commit(buffer, event);
 7095
 7096	if (tt)
 7097		event_triggers_post_call(tr->trace_marker_file, tt);
 7098
 7099	if (written > 0)
 7100		*fpos += written;
 7101
 7102	return written;
 7103}
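/*
 * Illustrative usage (this handler backs the "trace_marker" file): a
 * userspace annotation is just
 *
 *	# echo "hello world" > /sys/kernel/tracing/trace_marker
 *
 * which lands in the ring buffer as a TRACE_PRINT entry.  If the copy
 * from userspace faults, the literal "<faulted>" string is recorded
 * instead and -EFAULT is returned to the writer.
 */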
 7104
 7105/* Limit it for now to 3K (including tag) */
 7106#define RAW_DATA_MAX_SIZE (1024*3)
 7107
 7108static ssize_t
 7109tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
 7110					size_t cnt, loff_t *fpos)
 7111{
 7112	struct trace_array *tr = filp->private_data;
 7113	struct ring_buffer_event *event;
 7114	struct trace_buffer *buffer;
 7115	struct raw_data_entry *entry;
 7116	ssize_t written;
 7117	int size;
 7118	int len;
 7119
 7120#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
 7121
 7122	if (tracing_disabled)
 7123		return -EINVAL;
 7124
 7125	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
 7126		return -EINVAL;
 7127
 7128	/* The marker must at least have a tag id */
 7129	if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
 7130		return -EINVAL;
 7131
 7132	if (cnt > TRACE_BUF_SIZE)
 7133		cnt = TRACE_BUF_SIZE;
 7134
 7135	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
 7136
 7137	size = sizeof(*entry) + cnt;
 7138	if (cnt < FAULT_SIZE_ID)
 7139		size += FAULT_SIZE_ID - cnt;
 7140
 7141	buffer = tr->array_buffer.buffer;
 7142	event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
 7143					    tracing_gen_ctx());
 7144	if (!event)
 7145		/* Ring buffer disabled, return as if not open for write */
 7146		return -EBADF;
 7147
 7148	entry = ring_buffer_event_data(event);
 7149
 7150	len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
 7151	if (len) {
 7152		entry->id = -1;
 7153		memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
 7154		written = -EFAULT;
 7155	} else
 7156		written = cnt;
 7157
 7158	__buffer_unlock_commit(buffer, event);
 7159
 7160	if (written > 0)
 7161		*fpos += written;
 7162
 7163	return written;
 7164}
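/*
 * Illustrative usage (this handler backs the "trace_marker_raw" file):
 * the payload must start with a 4-byte tag id, so a minimal userspace
 * writer might look like
 *
 *	struct { unsigned int id; char data[8]; } rec = { 42, "payload" };
 *	write(fd, &rec, sizeof(rec));
 *
 * Anything smaller than sizeof(unsigned int) or larger than
 * RAW_DATA_MAX_SIZE (3K including the tag) is rejected with -EINVAL.
 */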
 7165
 7166static int tracing_clock_show(struct seq_file *m, void *v)
 7167{
 7168	struct trace_array *tr = m->private;
 7169	int i;
 7170
 7171	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
 7172		seq_printf(m,
 7173			"%s%s%s%s", i ? " " : "",
 7174			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
 7175			i == tr->clock_id ? "]" : "");
 7176	seq_putc(m, '\n');
 7177
 7178	return 0;
 7179}
 7180
 7181int tracing_set_clock(struct trace_array *tr, const char *clockstr)
 7182{
 7183	int i;
 7184
 7185	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
 7186		if (strcmp(trace_clocks[i].name, clockstr) == 0)
 7187			break;
 7188	}
 7189	if (i == ARRAY_SIZE(trace_clocks))
 7190		return -EINVAL;
 7191
 7192	mutex_lock(&trace_types_lock);
 7193
 7194	tr->clock_id = i;
 7195
 7196	ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
 7197
 7198	/*
 7199	 * New clock may not be consistent with the previous clock.
 7200	 * Reset the buffer so that it doesn't have incomparable timestamps.
 7201	 */
 7202	tracing_reset_online_cpus(&tr->array_buffer);
 7203
 7204#ifdef CONFIG_TRACER_MAX_TRACE
 7205	if (tr->max_buffer.buffer)
 7206		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
 7207	tracing_reset_online_cpus(&tr->max_buffer);
 7208#endif
 7209
 7210	mutex_unlock(&trace_types_lock);
 7211
 7212	return 0;
 7213}
 7214
 7215static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 7216				   size_t cnt, loff_t *fpos)
 7217{
 7218	struct seq_file *m = filp->private_data;
 7219	struct trace_array *tr = m->private;
 7220	char buf[64];
 7221	const char *clockstr;
 7222	int ret;
 7223
 7224	if (cnt >= sizeof(buf))
 7225		return -EINVAL;
 7226
 7227	if (copy_from_user(buf, ubuf, cnt))
 7228		return -EFAULT;
 7229
 7230	buf[cnt] = 0;
 7231
 7232	clockstr = strstrip(buf);
 7233
 7234	ret = tracing_set_clock(tr, clockstr);
 7235	if (ret)
 7236		return ret;
 7237
 7238	*fpos += cnt;
 7239
 7240	return cnt;
 7241}
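/*
 * Illustrative usage of the "trace_clock" file these handlers implement:
 *
 *	# cat trace_clock	(the clock in use is shown in [brackets])
 *	# echo mono > trace_clock
 *
 * Switching clocks resets the existing buffers, since timestamps taken
 * with the old clock would not be comparable with the new one.
 */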
 7242
 7243static int tracing_clock_open(struct inode *inode, struct file *file)
 7244{
 7245	struct trace_array *tr = inode->i_private;
 7246	int ret;
 7247
 7248	ret = tracing_check_open_get_tr(tr);
 7249	if (ret)
 7250		return ret;
 7251
 7252	ret = single_open(file, tracing_clock_show, inode->i_private);
 7253	if (ret < 0)
 7254		trace_array_put(tr);
 7255
 7256	return ret;
 7257}
 7258
 7259static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
 7260{
 7261	struct trace_array *tr = m->private;
 7262
 7263	mutex_lock(&trace_types_lock);
 7264
 7265	if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
 7266		seq_puts(m, "delta [absolute]\n");
 7267	else
 7268		seq_puts(m, "[delta] absolute\n");
 7269
 7270	mutex_unlock(&trace_types_lock);
 7271
 7272	return 0;
 7273}
 7274
 7275static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
 7276{
 7277	struct trace_array *tr = inode->i_private;
 7278	int ret;
 7279
 7280	ret = tracing_check_open_get_tr(tr);
 7281	if (ret)
 7282		return ret;
 7283
 7284	ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
 7285	if (ret < 0)
 7286		trace_array_put(tr);
 7287
 7288	return ret;
 7289}
 7290
 7291u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
 7292{
 7293	if (rbe == this_cpu_read(trace_buffered_event))
 7294		return ring_buffer_time_stamp(buffer);
 7295
 7296	return ring_buffer_event_time_stamp(buffer, rbe);
 7297}
 7298
 7299/*
 7300 * Set or disable using the per CPU trace_buffered_event when possible.
 7301 */
 7302int tracing_set_filter_buffering(struct trace_array *tr, bool set)
 7303{
 7304	int ret = 0;
 7305
 7306	mutex_lock(&trace_types_lock);
 7307
 7308	if (set && tr->no_filter_buffering_ref++)
 7309		goto out;
 7310
 7311	if (!set) {
 7312		if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
 7313			ret = -EINVAL;
 7314			goto out;
 7315		}
 7316
 7317		--tr->no_filter_buffering_ref;
 7318	}
 7319 out:
 7320	mutex_unlock(&trace_types_lock);
 7321
 7322	return ret;
 7323}
 7324
 7325struct ftrace_buffer_info {
 7326	struct trace_iterator	iter;
 7327	void			*spare;
 7328	unsigned int		spare_cpu;
 7329	unsigned int		read;
 7330};
 7331
 7332#ifdef CONFIG_TRACER_SNAPSHOT
 7333static int tracing_snapshot_open(struct inode *inode, struct file *file)
 7334{
 7335	struct trace_array *tr = inode->i_private;
 7336	struct trace_iterator *iter;
 7337	struct seq_file *m;
 7338	int ret;
 7339
 7340	ret = tracing_check_open_get_tr(tr);
 7341	if (ret)
 7342		return ret;
 7343
 7344	if (file->f_mode & FMODE_READ) {
 7345		iter = __tracing_open(inode, file, true);
 7346		if (IS_ERR(iter))
 7347			ret = PTR_ERR(iter);
 7348	} else {
 7349		/* Writes still need the seq_file to hold the private data */
 7350		ret = -ENOMEM;
 7351		m = kzalloc(sizeof(*m), GFP_KERNEL);
 7352		if (!m)
 7353			goto out;
 7354		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
 7355		if (!iter) {
 7356			kfree(m);
 7357			goto out;
 7358		}
 7359		ret = 0;
 7360
 7361		iter->tr = tr;
 7362		iter->array_buffer = &tr->max_buffer;
 7363		iter->cpu_file = tracing_get_cpu(inode);
 7364		m->private = iter;
 7365		file->private_data = m;
 7366	}
 7367out:
 7368	if (ret < 0)
 7369		trace_array_put(tr);
 7370
 7371	return ret;
 7372}
 7373
 7374static ssize_t
 7375tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 7376		       loff_t *ppos)
 7377{
 7378	struct seq_file *m = filp->private_data;
 7379	struct trace_iterator *iter = m->private;
 7380	struct trace_array *tr = iter->tr;
 7381	unsigned long val;
 7382	int ret;
 7383
 7384	ret = tracing_update_buffers();
 7385	if (ret < 0)
 7386		return ret;
 7387
 7388	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 7389	if (ret)
 7390		return ret;
 7391
 7392	mutex_lock(&trace_types_lock);
 7393
 7394	if (tr->current_trace->use_max_tr) {
 7395		ret = -EBUSY;
 7396		goto out;
 7397	}
 7398
 7399	arch_spin_lock(&tr->max_lock);
 7400	if (tr->cond_snapshot)
 7401		ret = -EBUSY;
 7402	arch_spin_unlock(&tr->max_lock);
 7403	if (ret)
 7404		goto out;
 7405
 7406	switch (val) {
 7407	case 0:
 7408		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
 7409			ret = -EINVAL;
 7410			break;
 7411		}
 7412		if (tr->allocated_snapshot)
 7413			free_snapshot(tr);
 7414		break;
 7415	case 1:
 7416/* Only allow per-cpu swap if the ring buffer supports it */
 7417#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
 7418		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
 7419			ret = -EINVAL;
 7420			break;
 7421		}
 7422#endif
 7423		if (tr->allocated_snapshot)
 7424			ret = resize_buffer_duplicate_size(&tr->max_buffer,
 7425					&tr->array_buffer, iter->cpu_file);
 7426		else
 7427			ret = tracing_alloc_snapshot_instance(tr);
 7428		if (ret < 0)
 7429			break;
 7430		local_irq_disable();
 7431		/* Now, we're going to swap */
 7432		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
 7433			update_max_tr(tr, current, smp_processor_id(), NULL);
 7434		else
 7435			update_max_tr_single(tr, current, iter->cpu_file);
 7436		local_irq_enable();
 7437		break;
 7438	default:
 7439		if (tr->allocated_snapshot) {
 7440			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
 7441				tracing_reset_online_cpus(&tr->max_buffer);
 7442			else
 7443				tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
 7444		}
 7445		break;
 7446	}
 7447
 7448	if (ret >= 0) {
 7449		*ppos += cnt;
 7450		ret = cnt;
 7451	}
 7452out:
 7453	mutex_unlock(&trace_types_lock);
 7454	return ret;
 7455}
 7456
 7457static int tracing_snapshot_release(struct inode *inode, struct file *file)
 7458{
 7459	struct seq_file *m = file->private_data;
 7460	int ret;
 7461
 7462	ret = tracing_release(inode, file);
 7463
 7464	if (file->f_mode & FMODE_READ)
 7465		return ret;
 7466
 7467	/* If write only, the seq_file is just a stub */
 7468	if (m)
 7469		kfree(m->private);
 7470	kfree(m);
 7471
 7472	return 0;
 7473}
 7474
 7475static int tracing_buffers_open(struct inode *inode, struct file *filp);
 7476static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
 7477				    size_t count, loff_t *ppos);
 7478static int tracing_buffers_release(struct inode *inode, struct file *file);
 7479static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 7480		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
 7481
 7482static int snapshot_raw_open(struct inode *inode, struct file *filp)
 7483{
 7484	struct ftrace_buffer_info *info;
 7485	int ret;
 7486
 7487	/* The following checks for tracefs lockdown */
 7488	ret = tracing_buffers_open(inode, filp);
 7489	if (ret < 0)
 7490		return ret;
 7491
 7492	info = filp->private_data;
 7493
 7494	if (info->iter.trace->use_max_tr) {
 7495		tracing_buffers_release(inode, filp);
 7496		return -EBUSY;
 7497	}
 7498
 7499	info->iter.snapshot = true;
 7500	info->iter.array_buffer = &info->iter.tr->max_buffer;
 7501
 7502	return ret;
 7503}
 7504
 7505#endif /* CONFIG_TRACER_SNAPSHOT */
 7506
 7507
 7508static const struct file_operations tracing_thresh_fops = {
 7509	.open		= tracing_open_generic,
 7510	.read		= tracing_thresh_read,
 7511	.write		= tracing_thresh_write,
 7512	.llseek		= generic_file_llseek,
 7513};
 7514
 7515#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
 7516static const struct file_operations tracing_max_lat_fops = {
 7517	.open		= tracing_open_generic,
 7518	.read		= tracing_max_lat_read,
 7519	.write		= tracing_max_lat_write,
 7520	.llseek		= generic_file_llseek,
 7521};
 7522#endif
 7523
 7524static const struct file_operations set_tracer_fops = {
 7525	.open		= tracing_open_generic,
 7526	.read		= tracing_set_trace_read,
 7527	.write		= tracing_set_trace_write,
 7528	.llseek		= generic_file_llseek,
 7529};
 7530
 7531static const struct file_operations tracing_pipe_fops = {
 7532	.open		= tracing_open_pipe,
 7533	.poll		= tracing_poll_pipe,
 7534	.read		= tracing_read_pipe,
 7535	.splice_read	= tracing_splice_read_pipe,
 7536	.release	= tracing_release_pipe,
 7537	.llseek		= no_llseek,
 7538};
 7539
 7540static const struct file_operations tracing_entries_fops = {
 7541	.open		= tracing_open_generic_tr,
 7542	.read		= tracing_entries_read,
 7543	.write		= tracing_entries_write,
 7544	.llseek		= generic_file_llseek,
 7545	.release	= tracing_release_generic_tr,
 7546};
 7547
 7548static const struct file_operations tracing_total_entries_fops = {
 7549	.open		= tracing_open_generic_tr,
 7550	.read		= tracing_total_entries_read,
 7551	.llseek		= generic_file_llseek,
 7552	.release	= tracing_release_generic_tr,
 7553};
 7554
 7555static const struct file_operations tracing_free_buffer_fops = {
 7556	.open		= tracing_open_generic_tr,
 7557	.write		= tracing_free_buffer_write,
 7558	.release	= tracing_free_buffer_release,
 7559};
 7560
 7561static const struct file_operations tracing_mark_fops = {
 7562	.open		= tracing_open_generic_tr,
 7563	.write		= tracing_mark_write,
 7564	.llseek		= generic_file_llseek,
 7565	.release	= tracing_release_generic_tr,
 7566};
 7567
 7568static const struct file_operations tracing_mark_raw_fops = {
 7569	.open		= tracing_open_generic_tr,
 7570	.write		= tracing_mark_raw_write,
 7571	.llseek		= generic_file_llseek,
 7572	.release	= tracing_release_generic_tr,
 7573};
 7574
 7575static const struct file_operations trace_clock_fops = {
 7576	.open		= tracing_clock_open,
 7577	.read		= seq_read,
 7578	.llseek		= seq_lseek,
 7579	.release	= tracing_single_release_tr,
 7580	.write		= tracing_clock_write,
 7581};
 7582
 7583static const struct file_operations trace_time_stamp_mode_fops = {
 7584	.open		= tracing_time_stamp_mode_open,
 7585	.read		= seq_read,
 7586	.llseek		= seq_lseek,
 7587	.release	= tracing_single_release_tr,
 7588};
 7589
 7590#ifdef CONFIG_TRACER_SNAPSHOT
 7591static const struct file_operations snapshot_fops = {
 7592	.open		= tracing_snapshot_open,
 7593	.read		= seq_read,
 7594	.write		= tracing_snapshot_write,
 7595	.llseek		= tracing_lseek,
 7596	.release	= tracing_snapshot_release,
 7597};
 7598
 7599static const struct file_operations snapshot_raw_fops = {
 7600	.open		= snapshot_raw_open,
 7601	.read		= tracing_buffers_read,
 7602	.release	= tracing_buffers_release,
 7603	.splice_read	= tracing_buffers_splice_read,
 7604	.llseek		= no_llseek,
 7605};
 7606
 7607#endif /* CONFIG_TRACER_SNAPSHOT */
 7608
 7609/*
 7610 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
 7611 * @filp: The active open file structure
 7612 * @ubuf: The userspace provided buffer containing the value to write
 7613 * @cnt: The maximum number of bytes to write
 7614 * @ppos: The current "file" position
 7615 *
 7616 * This function implements the write interface for a struct trace_min_max_param.
 7617 * The filp->private_data must point to a trace_min_max_param structure that
 7618 * defines where to write the value, the min and the max acceptable values,
 7619 * and a lock to protect the write.
 7620 */
 7621static ssize_t
 7622trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
 7623{
 7624	struct trace_min_max_param *param = filp->private_data;
 7625	u64 val;
 7626	int err;
 7627
 7628	if (!param)
 7629		return -EFAULT;
 7630
 7631	err = kstrtoull_from_user(ubuf, cnt, 10, &val);
 7632	if (err)
 7633		return err;
 7634
 7635	if (param->lock)
 7636		mutex_lock(param->lock);
 7637
 7638	if (param->min && val < *param->min)
 7639		err = -EINVAL;
 7640
 7641	if (param->max && val > *param->max)
 7642		err = -EINVAL;
 7643
 7644	if (!err)
 7645		*param->val = val;
 7646
 7647	if (param->lock)
 7648		mutex_unlock(param->lock);
 7649
 7650	if (err)
 7651		return err;
 7652
 7653	return cnt;
 7654}
 7655
 7656/*
 7657 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
 7658 * @filp: The active open file structure
 7659 * @ubuf: The userspace provided buffer to read value into
 7660 * @cnt: The maximum number of bytes to read
 7661 * @ppos: The current "file" position
 7662 *
 7663 * This function implements the read interface for a struct trace_min_max_param.
 7664 * The filp->private_data must point to a trace_min_max_param struct with valid
 7665 * data.
 7666 */
 7667static ssize_t
 7668trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 7669{
 7670	struct trace_min_max_param *param = filp->private_data;
 7671	char buf[U64_STR_SIZE];
 7672	int len;
 7673	u64 val;
 7674
 7675	if (!param)
 7676		return -EFAULT;
 7677
 7678	val = *param->val;
 7679
 7680	if (cnt > sizeof(buf))
 7681		cnt = sizeof(buf);
 7682
 7683	len = snprintf(buf, sizeof(buf), "%llu\n", val);
 7684
 7685	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
 7686}
 7687
 7688const struct file_operations trace_min_max_fops = {
 7689	.open		= tracing_open_generic,
 7690	.read		= trace_min_max_read,
 7691	.write		= trace_min_max_write,
 7692};
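/*
 * Sketch of how trace_min_max_fops is meant to be consumed.  The names
 * below are made up for illustration; only the fields dereferenced above
 * (lock, val, min, max) are assumed to exist in struct trace_min_max_param:
 *
 *	static u64 my_val, my_min = 1, my_max = 100;
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_mutex,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", 0644, parent, &my_param,
 *			  &trace_min_max_fops);
 *
 * A NULL lock skips locking, a NULL min or max skips that bound, and
 * writes outside [*min, *max] are rejected with -EINVAL.
 */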
 7693
 7694#define TRACING_LOG_ERRS_MAX	8
 7695#define TRACING_LOG_LOC_MAX	128
 7696
 7697#define CMD_PREFIX "  Command: "
 7698
 7699struct err_info {
 7700	const char	**errs;	/* ptr to loc-specific array of err strings */
 7701	u8		type;	/* index into errs -> specific err string */
 7702	u8		pos;	/* MAX_FILTER_STR_VAL = 256 */
 7703	u64		ts;
 7704};
 7705
 7706struct tracing_log_err {
 7707	struct list_head	list;
 7708	struct err_info		info;
 7709	char			loc[TRACING_LOG_LOC_MAX]; /* err location */
 7710	char			cmd[MAX_FILTER_STR_VAL]; /* what caused err */
 7711};
 7712
 7713static DEFINE_MUTEX(tracing_err_log_lock);
 7714
 7715static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
 7716{
 7717	struct tracing_log_err *err;
 7718
 7719	if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
 7720		err = kzalloc(sizeof(*err), GFP_KERNEL);
 7721		if (!err)
 7722			err = ERR_PTR(-ENOMEM);
 7723		tr->n_err_log_entries++;
 7724
 7725		return err;
 7726	}
 7727
 7728	err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
 7729	list_del(&err->list);
 7730
 7731	return err;
 7732}
 7733
 7734/**
 7735 * err_pos - find the position of a string within a command for error careting
 7736 * @cmd: The tracing command that caused the error
 7737 * @str: The string to position the caret at within @cmd
 7738 *
 7739 * Finds the position of the first occurrence of @str within @cmd.  The
 7740 * return value can be passed to tracing_log_err() for caret placement
 7741 * within @cmd.
 7742 *
 7743 * Returns the index within @cmd of the first occurrence of @str or 0
 7744 * if @str was not found.
 7745 */
 7746unsigned int err_pos(char *cmd, const char *str)
 7747{
 7748	char *found;
 7749
 7750	if (WARN_ON(!strlen(cmd)))
 7751		return 0;
 7752
 7753	found = strstr(cmd, str);
 7754	if (found)
 7755		return found - cmd;
 7756
 7757	return 0;
 7758}
 7759
 7760/**
 7761 * tracing_log_err - write an error to the tracing error log
 7762 * @tr: The associated trace array for the error (NULL for top level array)
 7763 * @loc: A string describing where the error occurred
 7764 * @cmd: The tracing command that caused the error
 7765 * @errs: The array of loc-specific static error strings
 7766 * @type: The index into errs[], which produces the specific static err string
 7767 * @pos: The position the caret should be placed in the cmd
 7768 *
 7769 * Writes an error into tracing/error_log of the form:
 7770 *
 7771 * <loc>: error: <text>
 7772 *   Command: <cmd>
 7773 *              ^
 7774 *
 7775 * tracing/error_log is a small log file containing the last
 7776 * TRACING_LOG_ERRS_MAX errors (8).  Memory for errors isn't allocated
 7777 * unless there has been a tracing error, and the error log can be
 7778 * cleared and have its memory freed by writing the empty string in
 7779 * truncation mode to it i.e. echo > tracing/error_log.
 7780 *
 7781 * NOTE: the @errs array along with the @type param are used to
 7782 * produce a static error string - this string is not copied and saved
 7783 * when the error is logged - only a pointer to it is saved.  See
 7784 * existing callers for examples of how static strings are typically
 7785 * defined for use with tracing_log_err().
 7786 */
 7787void tracing_log_err(struct trace_array *tr,
 7788		     const char *loc, const char *cmd,
 7789		     const char **errs, u8 type, u8 pos)
 7790{
 7791	struct tracing_log_err *err;
 7792
 7793	if (!tr)
 7794		tr = &global_trace;
 7795
 7796	mutex_lock(&tracing_err_log_lock);
 7797	err = get_tracing_log_err(tr);
 7798	if (PTR_ERR(err) == -ENOMEM) {
 7799		mutex_unlock(&tracing_err_log_lock);
 7800		return;
 7801	}
 7802
 7803	snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
 7804	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
 7805
 7806	err->info.errs = errs;
 7807	err->info.type = type;
 7808	err->info.pos = pos;
 7809	err->info.ts = local_clock();
 7810
 7811	list_add_tail(&err->list, &tr->err_log);
 7812	mutex_unlock(&tracing_err_log_lock);
 7813}
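/*
 * Illustrative caller (hypothetical names): a command parser that hits an
 * unknown keyword in "cmd" would typically report it as
 *
 *	static const char *my_errs[] = { "Unknown keyword" };
 *
 *	tracing_log_err(tr, "my_subsys", cmd, my_errs, 0,
 *			err_pos(cmd, bad_token));
 *
 * which shows up in tracing/error_log with the caret placed under the
 * first occurrence of bad_token in the echoed command.
 */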
 7814
 7815static void clear_tracing_err_log(struct trace_array *tr)
 7816{
 7817	struct tracing_log_err *err, *next;
 7818
 7819	mutex_lock(&tracing_err_log_lock);
 7820	list_for_each_entry_safe(err, next, &tr->err_log, list) {
 7821		list_del(&err->list);
 7822		kfree(err);
 7823	}
 7824
 7825	tr->n_err_log_entries = 0;
 7826	mutex_unlock(&tracing_err_log_lock);
 7827}
 7828
 7829static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
 7830{
 7831	struct trace_array *tr = m->private;
 7832
 7833	mutex_lock(&tracing_err_log_lock);
 7834
 7835	return seq_list_start(&tr->err_log, *pos);
 7836}
 7837
 7838static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
 7839{
 7840	struct trace_array *tr = m->private;
 7841
 7842	return seq_list_next(v, &tr->err_log, pos);
 7843}
 7844
 7845static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
 7846{
 7847	mutex_unlock(&tracing_err_log_lock);
 7848}
 7849
 7850static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
 7851{
 7852	u8 i;
 7853
 7854	for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
 7855		seq_putc(m, ' ');
 7856	for (i = 0; i < pos; i++)
 7857		seq_putc(m, ' ');
 7858	seq_puts(m, "^\n");
 7859}
 7860
 7861static int tracing_err_log_seq_show(struct seq_file *m, void *v)
 7862{
 7863	struct tracing_log_err *err = v;
 7864
 7865	if (err) {
 7866		const char *err_text = err->info.errs[err->info.type];
 7867		u64 sec = err->info.ts;
 7868		u32 nsec;
 7869
 7870		nsec = do_div(sec, NSEC_PER_SEC);
 7871		seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
 7872			   err->loc, err_text);
 7873		seq_printf(m, "%s", err->cmd);
 7874		tracing_err_log_show_pos(m, err->info.pos);
 7875	}
 7876
 7877	return 0;
 7878}
 7879
 7880static const struct seq_operations tracing_err_log_seq_ops = {
 7881	.start  = tracing_err_log_seq_start,
 7882	.next   = tracing_err_log_seq_next,
 7883	.stop   = tracing_err_log_seq_stop,
 7884	.show   = tracing_err_log_seq_show
 7885};
 7886
 7887static int tracing_err_log_open(struct inode *inode, struct file *file)
 7888{
 7889	struct trace_array *tr = inode->i_private;
 7890	int ret = 0;
 7891
 7892	ret = tracing_check_open_get_tr(tr);
 7893	if (ret)
 7894		return ret;
 7895
 7896	/* If this file was opened for write, then erase contents */
 7897	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
 7898		clear_tracing_err_log(tr);
 7899
 7900	if (file->f_mode & FMODE_READ) {
 7901		ret = seq_open(file, &tracing_err_log_seq_ops);
 7902		if (!ret) {
 7903			struct seq_file *m = file->private_data;
 7904			m->private = tr;
 7905		} else {
 7906			trace_array_put(tr);
 7907		}
 7908	}
 7909	return ret;
 7910}
 7911
 7912static ssize_t tracing_err_log_write(struct file *file,
 7913				     const char __user *buffer,
 7914				     size_t count, loff_t *ppos)
 7915{
 7916	return count;
 7917}
 7918
 7919static int tracing_err_log_release(struct inode *inode, struct file *file)
 7920{
 7921	struct trace_array *tr = inode->i_private;
 7922
 7923	trace_array_put(tr);
 7924
 7925	if (file->f_mode & FMODE_READ)
 7926		seq_release(inode, file);
 7927
 7928	return 0;
 7929}
 7930
 7931static const struct file_operations tracing_err_log_fops = {
 7932	.open           = tracing_err_log_open,
 7933	.write		= tracing_err_log_write,
 7934	.read           = seq_read,
 7935	.llseek         = seq_lseek,
 7936	.release        = tracing_err_log_release,
 7937};
 7938
 7939static int tracing_buffers_open(struct inode *inode, struct file *filp)
 7940{
 7941	struct trace_array *tr = inode->i_private;
 7942	struct ftrace_buffer_info *info;
 7943	int ret;
 7944
 7945	ret = tracing_check_open_get_tr(tr);
 7946	if (ret)
 7947		return ret;
 7948
 7949	info = kvzalloc(sizeof(*info), GFP_KERNEL);
 7950	if (!info) {
 7951		trace_array_put(tr);
 7952		return -ENOMEM;
 7953	}
 7954
 7955	mutex_lock(&trace_types_lock);
 7956
 7957	info->iter.tr		= tr;
 7958	info->iter.cpu_file	= tracing_get_cpu(inode);
 7959	info->iter.trace	= tr->current_trace;
 7960	info->iter.array_buffer = &tr->array_buffer;
 7961	info->spare		= NULL;
 7962	/* Force reading ring buffer for first read */
 7963	info->read		= (unsigned int)-1;
 7964
 7965	filp->private_data = info;
 7966
 7967	tr->trace_ref++;
 7968
 7969	mutex_unlock(&trace_types_lock);
 7970
 7971	ret = nonseekable_open(inode, filp);
 7972	if (ret < 0)
 7973		trace_array_put(tr);
 7974
 7975	return ret;
 7976}
 7977
 7978static __poll_t
 7979tracing_buffers_poll(struct file *filp, poll_table *poll_table)
 7980{
 7981	struct ftrace_buffer_info *info = filp->private_data;
 7982	struct trace_iterator *iter = &info->iter;
 7983
 7984	return trace_poll(iter, filp, poll_table);
 7985}
 7986
 7987static ssize_t
 7988tracing_buffers_read(struct file *filp, char __user *ubuf,
 7989		     size_t count, loff_t *ppos)
 7990{
 7991	struct ftrace_buffer_info *info = filp->private_data;
 7992	struct trace_iterator *iter = &info->iter;
 7993	ssize_t ret = 0;
 7994	ssize_t size;
 7995
 7996	if (!count)
 7997		return 0;
 7998
 7999#ifdef CONFIG_TRACER_MAX_TRACE
 8000	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
 8001		return -EBUSY;
 8002#endif
 8003
 8004	if (!info->spare) {
 8005		info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
 8006							  iter->cpu_file);
 8007		if (IS_ERR(info->spare)) {
 8008			ret = PTR_ERR(info->spare);
 8009			info->spare = NULL;
 8010		} else {
 8011			info->spare_cpu = iter->cpu_file;
 8012		}
 8013	}
 8014	if (!info->spare)
 8015		return ret;
 8016
 8017	/* Do we have previous read data to read? */
 8018	if (info->read < PAGE_SIZE)
 8019		goto read;
 8020
 8021 again:
 8022	trace_access_lock(iter->cpu_file);
 8023	ret = ring_buffer_read_page(iter->array_buffer->buffer,
 8024				    &info->spare,
 8025				    count,
 8026				    iter->cpu_file, 0);
 8027	trace_access_unlock(iter->cpu_file);
 8028
 8029	if (ret < 0) {
 8030		if (trace_empty(iter)) {
 8031			if ((filp->f_flags & O_NONBLOCK))
 8032				return -EAGAIN;
 8033
 8034			ret = wait_on_pipe(iter, 0);
 8035			if (ret)
 8036				return ret;
 8037
 8038			goto again;
 8039		}
 8040		return 0;
 8041	}
 8042
 8043	info->read = 0;
 8044 read:
 8045	size = PAGE_SIZE - info->read;
 8046	if (size > count)
 8047		size = count;
 8048
 8049	ret = copy_to_user(ubuf, info->spare + info->read, size);
 8050	if (ret == size)
 8051		return -EFAULT;
 8052
 8053	size -= ret;
 8054
 8055	*ppos += size;
 8056	info->read += size;
 8057
 8058	return size;
 8059}
 8060
 8061static int tracing_buffers_release(struct inode *inode, struct file *file)
 8062{
 8063	struct ftrace_buffer_info *info = file->private_data;
 8064	struct trace_iterator *iter = &info->iter;
 8065
 8066	mutex_lock(&trace_types_lock);
 8067
 8068	iter->tr->trace_ref--;
 8069
 8070	__trace_array_put(iter->tr);
 8071
 8072	if (info->spare)
 8073		ring_buffer_free_read_page(iter->array_buffer->buffer,
 8074					   info->spare_cpu, info->spare);
 8075	kvfree(info);
 8076
 8077	mutex_unlock(&trace_types_lock);
 8078
 8079	return 0;
 8080}
 8081
 8082struct buffer_ref {
 8083	struct trace_buffer	*buffer;
 8084	void			*page;
 8085	int			cpu;
 8086	refcount_t		refcount;
 8087};
 8088
 8089static void buffer_ref_release(struct buffer_ref *ref)
 8090{
 8091	if (!refcount_dec_and_test(&ref->refcount))
 8092		return;
 8093	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
 8094	kfree(ref);
 8095}
 8096
 8097static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
 8098				    struct pipe_buffer *buf)
 8099{
 8100	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 8101
 8102	buffer_ref_release(ref);
 8103	buf->private = 0;
 8104}
 8105
 8106static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 8107				struct pipe_buffer *buf)
 8108{
 8109	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 8110
 8111	if (refcount_read(&ref->refcount) > INT_MAX/2)
 8112		return false;
 8113
 8114	refcount_inc(&ref->refcount);
 8115	return true;
 8116}
 8117
 8118/* Pipe buffer operations for a buffer. */
 8119static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 8120	.release		= buffer_pipe_buf_release,
 8121	.get			= buffer_pipe_buf_get,
 8122};
 8123
 8124/*
 8125 * Callback from splice_to_pipe(), if we need to release some pages
 8126 * at the end of the spd in case we error'ed out in filling the pipe.
 8127 */
 8128static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 8129{
 8130	struct buffer_ref *ref =
 8131		(struct buffer_ref *)spd->partial[i].private;
 8132
 8133	buffer_ref_release(ref);
 8134	spd->partial[i].private = 0;
 8135}
 8136
 8137static ssize_t
 8138tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 8139			    struct pipe_inode_info *pipe, size_t len,
 8140			    unsigned int flags)
 8141{
 8142	struct ftrace_buffer_info *info = file->private_data;
 8143	struct trace_iterator *iter = &info->iter;
 8144	struct partial_page partial_def[PIPE_DEF_BUFFERS];
 8145	struct page *pages_def[PIPE_DEF_BUFFERS];
 8146	struct splice_pipe_desc spd = {
 8147		.pages		= pages_def,
 8148		.partial	= partial_def,
 8149		.nr_pages_max	= PIPE_DEF_BUFFERS,
 8150		.ops		= &buffer_pipe_buf_ops,
 8151		.spd_release	= buffer_spd_release,
 8152	};
 8153	struct buffer_ref *ref;
 8154	int entries, i;
 8155	ssize_t ret = 0;
 8156
 8157#ifdef CONFIG_TRACER_MAX_TRACE
 8158	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
 8159		return -EBUSY;
 8160#endif
 8161
 8162	if (*ppos & (PAGE_SIZE - 1))
 8163		return -EINVAL;
 8164
 8165	if (len & (PAGE_SIZE - 1)) {
 8166		if (len < PAGE_SIZE)
 8167			return -EINVAL;
 8168		len &= PAGE_MASK;
 8169	}
 8170
 8171	if (splice_grow_spd(pipe, &spd))
 8172		return -ENOMEM;
 8173
 8174 again:
 8175	trace_access_lock(iter->cpu_file);
 8176	entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
 8177
 8178	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
 8179		struct page *page;
 8180		int r;
 8181
 8182		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
 8183		if (!ref) {
 8184			ret = -ENOMEM;
 8185			break;
 8186		}
 8187
 8188		refcount_set(&ref->refcount, 1);
 8189		ref->buffer = iter->array_buffer->buffer;
 8190		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
 8191		if (IS_ERR(ref->page)) {
 8192			ret = PTR_ERR(ref->page);
 8193			ref->page = NULL;
 8194			kfree(ref);
 8195			break;
 8196		}
 8197		ref->cpu = iter->cpu_file;
 8198
 8199		r = ring_buffer_read_page(ref->buffer, &ref->page,
 8200					  len, iter->cpu_file, 1);
 8201		if (r < 0) {
 8202			ring_buffer_free_read_page(ref->buffer, ref->cpu,
 8203						   ref->page);
 8204			kfree(ref);
 8205			break;
 8206		}
 8207
 8208		page = virt_to_page(ref->page);
 8209
 8210		spd.pages[i] = page;
 8211		spd.partial[i].len = PAGE_SIZE;
 8212		spd.partial[i].offset = 0;
 8213		spd.partial[i].private = (unsigned long)ref;
 8214		spd.nr_pages++;
 8215		*ppos += PAGE_SIZE;
 8216
 8217		entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
 8218	}
 8219
 8220	trace_access_unlock(iter->cpu_file);
 8221	spd.nr_pages = i;
 8222
 8223	/* did we read anything? */
 8224	if (!spd.nr_pages) {
 8225		if (ret)
 8226			goto out;
 8227
 8228		ret = -EAGAIN;
 8229		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
 8230			goto out;
 8231
 8232		ret = wait_on_pipe(iter, iter->tr->buffer_percent);
 8233		if (ret)
 8234			goto out;
 8235
 8236		goto again;
 8237	}
 8238
 8239	ret = splice_to_pipe(pipe, &spd);
 8240out:
 8241	splice_shrink_spd(&spd);
 8242
 8243	return ret;
 8244}
 8245
 8246static const struct file_operations tracing_buffers_fops = {
 8247	.open		= tracing_buffers_open,
 8248	.read		= tracing_buffers_read,
 8249	.poll		= tracing_buffers_poll,
 8250	.release	= tracing_buffers_release,
 8251	.splice_read	= tracing_buffers_splice_read,
 8252	.llseek		= no_llseek,
 8253};
 8254
 8255static ssize_t
 8256tracing_stats_read(struct file *filp, char __user *ubuf,
 8257		   size_t count, loff_t *ppos)
 8258{
 8259	struct inode *inode = file_inode(filp);
 8260	struct trace_array *tr = inode->i_private;
 8261	struct array_buffer *trace_buf = &tr->array_buffer;
 8262	int cpu = tracing_get_cpu(inode);
 8263	struct trace_seq *s;
 8264	unsigned long cnt;
 8265	unsigned long long t;
 8266	unsigned long usec_rem;
 8267
 8268	s = kmalloc(sizeof(*s), GFP_KERNEL);
 8269	if (!s)
 8270		return -ENOMEM;
 8271
 8272	trace_seq_init(s);
 8273
 8274	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
 8275	trace_seq_printf(s, "entries: %ld\n", cnt);
 8276
 8277	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
 8278	trace_seq_printf(s, "overrun: %ld\n", cnt);
 8279
 8280	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
 8281	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
 8282
 8283	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
 8284	trace_seq_printf(s, "bytes: %ld\n", cnt);
 8285
 8286	if (trace_clocks[tr->clock_id].in_ns) {
 8287		/* local or global for trace_clock */
 8288		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
 8289		usec_rem = do_div(t, USEC_PER_SEC);
 8290		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
 8291								t, usec_rem);
 8292
 8293		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
 8294		usec_rem = do_div(t, USEC_PER_SEC);
 8295		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
 8296	} else {
 8297		/* counter or tsc mode for trace_clock */
 8298		trace_seq_printf(s, "oldest event ts: %llu\n",
 8299				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
 8300
 8301		trace_seq_printf(s, "now ts: %llu\n",
 8302				ring_buffer_time_stamp(trace_buf->buffer));
 8303	}
 8304
 8305	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
 8306	trace_seq_printf(s, "dropped events: %ld\n", cnt);
 8307
 8308	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
 8309	trace_seq_printf(s, "read events: %ld\n", cnt);
 8310
 8311	count = simple_read_from_buffer(ubuf, count, ppos,
 8312					s->buffer, trace_seq_used(s));
 8313
 8314	kfree(s);
 8315
 8316	return count;
 8317}
 8318
 8319static const struct file_operations tracing_stats_fops = {
 8320	.open		= tracing_open_generic_tr,
 8321	.read		= tracing_stats_read,
 8322	.llseek		= generic_file_llseek,
 8323	.release	= tracing_release_generic_tr,
 8324};
 8325
 8326#ifdef CONFIG_DYNAMIC_FTRACE
 8327
 8328static ssize_t
 8329tracing_read_dyn_info(struct file *filp, char __user *ubuf,
 8330		  size_t cnt, loff_t *ppos)
 8331{
 8332	ssize_t ret;
 8333	char *buf;
 8334	int r;
 8335
 8336	/* 256 should be plenty to hold the amount needed */
 8337	buf = kmalloc(256, GFP_KERNEL);
 8338	if (!buf)
 8339		return -ENOMEM;
 8340
 8341	r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
 8342		      ftrace_update_tot_cnt,
 8343		      ftrace_number_of_pages,
 8344		      ftrace_number_of_groups);
 8345
 8346	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 8347	kfree(buf);
 8348	return ret;
 8349}
 8350
 8351static const struct file_operations tracing_dyn_info_fops = {
 8352	.open		= tracing_open_generic,
 8353	.read		= tracing_read_dyn_info,
 8354	.llseek		= generic_file_llseek,
 8355};
 8356#endif /* CONFIG_DYNAMIC_FTRACE */
 8357
 8358#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
 8359static void
 8360ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
 8361		struct trace_array *tr, struct ftrace_probe_ops *ops,
 8362		void *data)
 8363{
 8364	tracing_snapshot_instance(tr);
 8365}
 8366
 8367static void
 8368ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
 8369		      struct trace_array *tr, struct ftrace_probe_ops *ops,
 8370		      void *data)
 8371{
 8372	struct ftrace_func_mapper *mapper = data;
 8373	long *count = NULL;
 8374
 8375	if (mapper)
 8376		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 8377
 8378	if (count) {
 8379
 8380		if (*count <= 0)
 8381			return;
 8382
 8383		(*count)--;
 8384	}
 8385
 8386	tracing_snapshot_instance(tr);
 8387}
 8388
 8389static int
 8390ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
 8391		      struct ftrace_probe_ops *ops, void *data)
 8392{
 8393	struct ftrace_func_mapper *mapper = data;
 8394	long *count = NULL;
 8395
 8396	seq_printf(m, "%ps:", (void *)ip);
 8397
 8398	seq_puts(m, "snapshot");
 8399
 8400	if (mapper)
 8401		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 8402
 8403	if (count)
 8404		seq_printf(m, ":count=%ld\n", *count);
 8405	else
 8406		seq_puts(m, ":unlimited\n");
 8407
 8408	return 0;
 8409}
 8410
 8411static int
 8412ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
 8413		     unsigned long ip, void *init_data, void **data)
 8414{
 8415	struct ftrace_func_mapper *mapper = *data;
 8416
 8417	if (!mapper) {
 8418		mapper = allocate_ftrace_func_mapper();
 8419		if (!mapper)
 8420			return -ENOMEM;
 8421		*data = mapper;
 8422	}
 8423
 8424	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
 8425}
 8426
 8427static void
 8428ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
 8429		     unsigned long ip, void *data)
 8430{
 8431	struct ftrace_func_mapper *mapper = data;
 8432
 8433	if (!ip) {
 8434		if (!mapper)
 8435			return;
 8436		free_ftrace_func_mapper(mapper, NULL);
 8437		return;
 8438	}
 8439
 8440	ftrace_func_mapper_remove_ip(mapper, ip);
 8441}
 8442
 8443static struct ftrace_probe_ops snapshot_probe_ops = {
 8444	.func			= ftrace_snapshot,
 8445	.print			= ftrace_snapshot_print,
 8446};
 8447
 8448static struct ftrace_probe_ops snapshot_count_probe_ops = {
 8449	.func			= ftrace_count_snapshot,
 8450	.print			= ftrace_snapshot_print,
 8451	.init			= ftrace_snapshot_init,
 8452	.free			= ftrace_snapshot_free,
 8453};
 8454
 8455static int
 8456ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
 8457			       char *glob, char *cmd, char *param, int enable)
 8458{
 8459	struct ftrace_probe_ops *ops;
 8460	void *count = (void *)-1;
 8461	char *number;
 8462	int ret;
 8463
 8464	if (!tr)
 8465		return -ENODEV;
 8466
 8467	/* hash funcs only work with set_ftrace_filter */
 8468	if (!enable)
 8469		return -EINVAL;
 8470
 8471	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
 8472
 8473	if (glob[0] == '!')
 8474		return unregister_ftrace_function_probe_func(glob+1, tr, ops);
 8475
 8476	if (!param)
 8477		goto out_reg;
 8478
 8479	number = strsep(&param, ":");
 8480
 8481	if (!strlen(number))
 8482		goto out_reg;
 8483
 8484	/*
 8485	 * We use the callback data field (which is a pointer)
 8486	 * as our counter.
 8487	 */
 8488	ret = kstrtoul(number, 0, (unsigned long *)&count);
 8489	if (ret)
 8490		return ret;
 8491
 8492 out_reg:
 8493	ret = tracing_alloc_snapshot_instance(tr);
 8494	if (ret < 0)
 8495		goto out;
 8496
 8497	ret = register_ftrace_function_probe(glob, tr, ops, count);
 8498
 8499 out:
 8500	return ret < 0 ? ret : 0;
 8501}
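/*
 * Illustrative usage of the "snapshot" command registered below, using
 * the set_ftrace_filter syntax:
 *
 *	# echo 'do_sys_open:snapshot' > set_ftrace_filter
 *	# echo 'do_sys_open:snapshot:3' > set_ftrace_filter
 *	# echo '!do_sys_open:snapshot' > set_ftrace_filter
 *
 * The first form takes a snapshot on every hit, the ":3" form only on the
 * first three hits (the count parsed above), and the leading '!'
 * unregisters the probe again.
 */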
 8502
 8503static struct ftrace_func_command ftrace_snapshot_cmd = {
 8504	.name			= "snapshot",
 8505	.func			= ftrace_trace_snapshot_callback,
 8506};
 8507
 8508static __init int register_snapshot_cmd(void)
 8509{
 8510	return register_ftrace_command(&ftrace_snapshot_cmd);
 8511}
 8512#else
 8513static inline __init int register_snapshot_cmd(void) { return 0; }
 8514#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
 8515
 8516static struct dentry *tracing_get_dentry(struct trace_array *tr)
 8517{
 8518	if (WARN_ON(!tr->dir))
 8519		return ERR_PTR(-ENODEV);
 8520
 8521	/* Top directory uses NULL as the parent */
 8522	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
 8523		return NULL;
 8524
 8525	/* All sub buffers have a descriptor */
 8526	return tr->dir;
 8527}
 8528
 8529static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
 8530{
 8531	struct dentry *d_tracer;
 8532
 8533	if (tr->percpu_dir)
 8534		return tr->percpu_dir;
 8535
 8536	d_tracer = tracing_get_dentry(tr);
 8537	if (IS_ERR(d_tracer))
 8538		return NULL;
 8539
 8540	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
 8541
 8542	MEM_FAIL(!tr->percpu_dir,
 8543		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
 8544
 8545	return tr->percpu_dir;
 8546}
 8547
 8548static struct dentry *
 8549trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
 8550		      void *data, long cpu, const struct file_operations *fops)
 8551{
 8552	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
 8553
 8554	if (ret) /* See tracing_get_cpu() */
 8555		d_inode(ret)->i_cdev = (void *)(cpu + 1);
 8556	return ret;
 8557}
 8558
 8559static void
 8560tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
 8561{
 8562	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
 8563	struct dentry *d_cpu;
 8564	char cpu_dir[30]; /* 30 characters should be more than enough */
 8565
 8566	if (!d_percpu)
 8567		return;
 8568
 8569	snprintf(cpu_dir, 30, "cpu%ld", cpu);
 8570	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
 8571	if (!d_cpu) {
 8572		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
 8573		return;
 8574	}
 8575
 8576	/* per cpu trace_pipe */
 8577	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
 8578				tr, cpu, &tracing_pipe_fops);
 8579
 8580	/* per cpu trace */
 8581	trace_create_cpu_file("trace", 0644, d_cpu,
 8582				tr, cpu, &tracing_fops);
 8583
 8584	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
 8585				tr, cpu, &tracing_buffers_fops);
 8586
 8587	trace_create_cpu_file("stats", 0444, d_cpu,
 8588				tr, cpu, &tracing_stats_fops);
 8589
 8590	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
 8591				tr, cpu, &tracing_entries_fops);
 8592
 8593#ifdef CONFIG_TRACER_SNAPSHOT
 8594	trace_create_cpu_file("snapshot", 0644, d_cpu,
 8595				tr, cpu, &snapshot_fops);
 8596
 8597	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
 8598				tr, cpu, &snapshot_raw_fops);
 8599#endif
 8600}
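
/*
 * For example, for CPU 0 the files created above show up as:
 *
 *   per_cpu/cpu0/trace_pipe
 *   per_cpu/cpu0/trace
 *   per_cpu/cpu0/trace_pipe_raw
 *   per_cpu/cpu0/stats
 *   per_cpu/cpu0/buffer_size_kb
 *   per_cpu/cpu0/snapshot       (CONFIG_TRACER_SNAPSHOT only)
 *   per_cpu/cpu0/snapshot_raw   (CONFIG_TRACER_SNAPSHOT only)
 */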
 8601
 8602#ifdef CONFIG_FTRACE_SELFTEST
 8603/* Let selftest have access to static functions in this file */
 8604#include "trace_selftest.c"
 8605#endif
 8606
 8607static ssize_t
 8608trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
 8609			loff_t *ppos)
 8610{
 8611	struct trace_option_dentry *topt = filp->private_data;
 8612	char *buf;
 8613
 8614	if (topt->flags->val & topt->opt->bit)
 8615		buf = "1\n";
 8616	else
 8617		buf = "0\n";
 8618
 8619	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
 8620}
 8621
 8622static ssize_t
 8623trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
 8624			 loff_t *ppos)
 8625{
 8626	struct trace_option_dentry *topt = filp->private_data;
 8627	unsigned long val;
 8628	int ret;
 8629
 8630	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 8631	if (ret)
 8632		return ret;
 8633
 8634	if (val != 0 && val != 1)
 8635		return -EINVAL;
 8636
 8637	if (!!(topt->flags->val & topt->opt->bit) != val) {
 8638		mutex_lock(&trace_types_lock);
 8639		ret = __set_tracer_option(topt->tr, topt->flags,
 8640					  topt->opt, !val);
 8641		mutex_unlock(&trace_types_lock);
 8642		if (ret)
 8643			return ret;
 8644	}
 8645
 8646	*ppos += cnt;
 8647
 8648	return cnt;
 8649}
 8650
 8651
 8652static const struct file_operations trace_options_fops = {
 8653	.open = tracing_open_generic,
 8654	.read = trace_options_read,
 8655	.write = trace_options_write,
 8656	.llseek	= generic_file_llseek,
 8657};
 8658
 8659/*
 8660 * In order to pass in both the trace_array descriptor as well as the index
 8661 * to the flag that the trace option file represents, the trace_array
 8662 * has a character array of trace_flags_index[], which holds the index
 8663 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 8664 * The address of this character array is passed to the flag option file
 8665 * read/write callbacks.
 8666 *
 8667 * In order to extract both the index and the trace_array descriptor,
 8668 * get_tr_index() uses the following algorithm.
 8669 *
 8670 *   idx = *ptr;
 8671 *
 8672 * This works because the pointer holds the address of an index element
 8673 * whose value equals its own position (remember, index[1] == 1).
 8674 *
 8675 * Then, to get the trace_array descriptor, subtracting that index
 8676 * from the pointer gives the start of the index array itself:
 8677 *
 8678 *   ptr - idx == &index[0]
 8679 *
 8680 * Then a simple container_of() from that pointer gets us to the
 8681 * trace_array descriptor.
 8682 */
 8683static void get_tr_index(void *data, struct trace_array **ptr,
 8684			 unsigned int *pindex)
 8685{
 8686	*pindex = *(unsigned char *)data;
 8687
 8688	*ptr = container_of(data - *pindex, struct trace_array,
 8689			    trace_flags_index);
 8690}
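
/*
 * A worked example with made-up addresses: if trace_flags_index[] starts
 * at 0x1000 inside a trace_array, the option file for bit 3 is handed
 * data == 0x1003.  Then *data == 3, data - 3 == 0x1000 == &index[0], and
 * container_of() on that address recovers the enclosing trace_array.
 */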
 8691
 8692static ssize_t
 8693trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
 8694			loff_t *ppos)
 8695{
 8696	void *tr_index = filp->private_data;
 8697	struct trace_array *tr;
 8698	unsigned int index;
 8699	char *buf;
 8700
 8701	get_tr_index(tr_index, &tr, &index);
 8702
 8703	if (tr->trace_flags & (1 << index))
 8704		buf = "1\n";
 8705	else
 8706		buf = "0\n";
 8707
 8708	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
 8709}
 8710
 8711static ssize_t
 8712trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 8713			 loff_t *ppos)
 8714{
 8715	void *tr_index = filp->private_data;
 8716	struct trace_array *tr;
 8717	unsigned int index;
 8718	unsigned long val;
 8719	int ret;
 8720
 8721	get_tr_index(tr_index, &tr, &index);
 8722
 8723	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 8724	if (ret)
 8725		return ret;
 8726
 8727	if (val != 0 && val != 1)
 8728		return -EINVAL;
 8729
 8730	mutex_lock(&event_mutex);
 8731	mutex_lock(&trace_types_lock);
 8732	ret = set_tracer_flag(tr, 1 << index, val);
 8733	mutex_unlock(&trace_types_lock);
 8734	mutex_unlock(&event_mutex);
 8735
 8736	if (ret < 0)
 8737		return ret;
 8738
 8739	*ppos += cnt;
 8740
 8741	return cnt;
 8742}
 8743
 8744static const struct file_operations trace_options_core_fops = {
 8745	.open = tracing_open_generic,
 8746	.read = trace_options_core_read,
 8747	.write = trace_options_core_write,
 8748	.llseek = generic_file_llseek,
 8749};
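
/*
 * Example usage of the resulting option files (a sketch, assuming tracefs
 * is mounted at /sys/kernel/tracing; "overwrite" is one of the core
 * trace options):
 *
 *   cat options/overwrite        # read the current setting ("0" or "1")
 *   echo 0 > options/overwrite   # clear the flag
 *   echo 1 > options/overwrite   # set the flag
 */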
 8750
 8751struct dentry *trace_create_file(const char *name,
 8752				 umode_t mode,
 8753				 struct dentry *parent,
 8754				 void *data,
 8755				 const struct file_operations *fops)
 8756{
 8757	struct dentry *ret;
 8758
 8759	ret = tracefs_create_file(name, mode, parent, data, fops);
 8760	if (!ret)
 8761		pr_warn("Could not create tracefs '%s' entry\n", name);
 8762
 8763	return ret;
 8764}
 8765
 8766
 8767static struct dentry *trace_options_init_dentry(struct trace_array *tr)
 8768{
 8769	struct dentry *d_tracer;
 8770
 8771	if (tr->options)
 8772		return tr->options;
 8773
 8774	d_tracer = tracing_get_dentry(tr);
 8775	if (IS_ERR(d_tracer))
 8776		return NULL;
 8777
 8778	tr->options = tracefs_create_dir("options", d_tracer);
 8779	if (!tr->options) {
 8780		pr_warn("Could not create tracefs directory 'options'\n");
 8781		return NULL;
 8782	}
 8783
 8784	return tr->options;
 8785}
 8786
 8787static void
 8788create_trace_option_file(struct trace_array *tr,
 8789			 struct trace_option_dentry *topt,
 8790			 struct tracer_flags *flags,
 8791			 struct tracer_opt *opt)
 8792{
 8793	struct dentry *t_options;
 8794
 8795	t_options = trace_options_init_dentry(tr);
 8796	if (!t_options)
 8797		return;
 8798
 8799	topt->flags = flags;
 8800	topt->opt = opt;
 8801	topt->tr = tr;
 8802
 8803	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
 8804				    &trace_options_fops);
 8805
 8806}
 8807
 8808static void
 8809create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
 8810{
 8811	struct trace_option_dentry *topts;
 8812	struct trace_options *tr_topts;
 8813	struct tracer_flags *flags;
 8814	struct tracer_opt *opts;
 8815	int cnt;
 8816	int i;
 8817
 8818	if (!tracer)
 8819		return;
 8820
 8821	flags = tracer->flags;
 8822
 8823	if (!flags || !flags->opts)
 8824		return;
 8825
 8826	/*
 8827	 * If this is an instance, only create flags for tracers
 8828	 * the instance may have.
 8829	 */
 8830	if (!trace_ok_for_array(tracer, tr))
 8831		return;
 8832
 8833	for (i = 0; i < tr->nr_topts; i++) {
 8834		/* Make sure there are no duplicate flags. */
 8835		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
 8836			return;
 8837	}
 8838
 8839	opts = flags->opts;
 8840
 8841	for (cnt = 0; opts[cnt].name; cnt++)
 8842		;
 8843
 8844	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
 8845	if (!topts)
 8846		return;
 8847
 8848	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
 8849			    GFP_KERNEL);
 8850	if (!tr_topts) {
 8851		kfree(topts);
 8852		return;
 8853	}
 8854
 8855	tr->topts = tr_topts;
 8856	tr->topts[tr->nr_topts].tracer = tracer;
 8857	tr->topts[tr->nr_topts].topts = topts;
 8858	tr->nr_topts++;
 8859
 8860	for (cnt = 0; opts[cnt].name; cnt++) {
 8861		create_trace_option_file(tr, &topts[cnt], flags,
 8862					 &opts[cnt]);
 8863		MEM_FAIL(topts[cnt].entry == NULL,
 8864			  "Failed to create trace option: %s",
 8865			  opts[cnt].name);
 8866	}
 8867}
 8868
 8869static struct dentry *
 8870create_trace_option_core_file(struct trace_array *tr,
 8871			      const char *option, long index)
 8872{
 8873	struct dentry *t_options;
 8874
 8875	t_options = trace_options_init_dentry(tr);
 8876	if (!t_options)
 8877		return NULL;
 8878
 8879	return trace_create_file(option, 0644, t_options,
 8880				 (void *)&tr->trace_flags_index[index],
 8881				 &trace_options_core_fops);
 8882}
 8883
 8884static void create_trace_options_dir(struct trace_array *tr)
 8885{
 8886	struct dentry *t_options;
 8887	bool top_level = tr == &global_trace;
 8888	int i;
 8889
 8890	t_options = trace_options_init_dentry(tr);
 8891	if (!t_options)
 8892		return;
 8893
 8894	for (i = 0; trace_options[i]; i++) {
 8895		if (top_level ||
 8896		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
 8897			create_trace_option_core_file(tr, trace_options[i], i);
 8898	}
 8899}
 8900
 8901static ssize_t
 8902rb_simple_read(struct file *filp, char __user *ubuf,
 8903	       size_t cnt, loff_t *ppos)
 8904{
 8905	struct trace_array *tr = filp->private_data;
 8906	char buf[64];
 8907	int r;
 8908
 8909	r = tracer_tracing_is_on(tr);
 8910	r = sprintf(buf, "%d\n", r);
 8911
 8912	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 8913}
 8914
 8915static ssize_t
 8916rb_simple_write(struct file *filp, const char __user *ubuf,
 8917		size_t cnt, loff_t *ppos)
 8918{
 8919	struct trace_array *tr = filp->private_data;
 8920	struct trace_buffer *buffer = tr->array_buffer.buffer;
 8921	unsigned long val;
 8922	int ret;
 8923
 8924	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 8925	if (ret)
 8926		return ret;
 8927
 8928	if (buffer) {
 8929		mutex_lock(&trace_types_lock);
 8930		if (!!val == tracer_tracing_is_on(tr)) {
 8931			val = 0; /* do nothing */
 8932		} else if (val) {
 8933			tracer_tracing_on(tr);
 8934			if (tr->current_trace->start)
 8935				tr->current_trace->start(tr);
 8936		} else {
 8937			tracer_tracing_off(tr);
 8938			if (tr->current_trace->stop)
 8939				tr->current_trace->stop(tr);
 8940		}
 8941		mutex_unlock(&trace_types_lock);
 8942	}
 8943
 8944	(*ppos)++;
 8945
 8946	return cnt;
 8947}
 8948
 8949static const struct file_operations rb_simple_fops = {
 8950	.open		= tracing_open_generic_tr,
 8951	.read		= rb_simple_read,
 8952	.write		= rb_simple_write,
 8953	.release	= tracing_release_generic_tr,
 8954	.llseek		= default_llseek,
 8955};
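
/*
 * rb_simple_fops backs the "tracing_on" file created in
 * init_tracer_tracefs() below.  Typical usage (assuming tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *   echo 0 > tracing_on   # stop writing to the ring buffer
 *   echo 1 > tracing_on   # resume writing
 *   cat tracing_on        # report the current state
 */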
 8956
 8957static ssize_t
 8958buffer_percent_read(struct file *filp, char __user *ubuf,
 8959		    size_t cnt, loff_t *ppos)
 8960{
 8961	struct trace_array *tr = filp->private_data;
 8962	char buf[64];
 8963	int r;
 8964
 8965	r = tr->buffer_percent;
 8966	r = sprintf(buf, "%d\n", r);
 8967
 8968	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 8969}
 8970
 8971static ssize_t
 8972buffer_percent_write(struct file *filp, const char __user *ubuf,
 8973		     size_t cnt, loff_t *ppos)
 8974{
 8975	struct trace_array *tr = filp->private_data;
 8976	unsigned long val;
 8977	int ret;
 8978
 8979	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
 8980	if (ret)
 8981		return ret;
 8982
 8983	if (val > 100)
 8984		return -EINVAL;
 8985
 8986	if (!val)
 8987		val = 1;
 8988
 8989	tr->buffer_percent = val;
 8990
 8991	(*ppos)++;
 8992
 8993	return cnt;
 8994}
 8995
 8996static const struct file_operations buffer_percent_fops = {
 8997	.open		= tracing_open_generic_tr,
 8998	.read		= buffer_percent_read,
 8999	.write		= buffer_percent_write,
 9000	.release	= tracing_release_generic_tr,
 9001	.llseek		= default_llseek,
 9002};
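
/*
 * A rough sketch of the "buffer_percent" interface implemented above
 * (buffer_percent_write() clamps the value to 1..100):
 *
 *   echo 50 > buffer_percent   # readers blocking on the buffer (e.g. on
 *                              # trace_pipe_raw) are not woken until the
 *                              # buffer is roughly half full
 */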
 9003
 9004static struct dentry *trace_instance_dir;
 9005
 9006static void
 9007init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 9008
 9009static int
 9010allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
 9011{
 9012	enum ring_buffer_flags rb_flags;
 9013
 9014	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
 9015
 9016	buf->tr = tr;
 9017
 9018	buf->buffer = ring_buffer_alloc(size, rb_flags);
 9019	if (!buf->buffer)
 9020		return -ENOMEM;
 9021
 9022	buf->data = alloc_percpu(struct trace_array_cpu);
 9023	if (!buf->data) {
 9024		ring_buffer_free(buf->buffer);
 9025		buf->buffer = NULL;
 9026		return -ENOMEM;
 9027	}
 9028
 9029	/* Allocate the first page for all buffers */
 9030	set_buffer_entries(buf,
 9031			   ring_buffer_size(buf->buffer, 0));
 9032
 9033	return 0;
 9034}
 9035
 9036static int allocate_trace_buffers(struct trace_array *tr, int size)
 9037{
 9038	int ret;
 9039
 9040	ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
 9041	if (ret)
 9042		return ret;
 9043
 9044#ifdef CONFIG_TRACER_MAX_TRACE
 9045	ret = allocate_trace_buffer(tr, &tr->max_buffer,
 9046				    allocate_snapshot ? size : 1);
 9047	if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
 9048		ring_buffer_free(tr->array_buffer.buffer);
 9049		tr->array_buffer.buffer = NULL;
 9050		free_percpu(tr->array_buffer.data);
 9051		tr->array_buffer.data = NULL;
 9052		return -ENOMEM;
 9053	}
 9054	tr->allocated_snapshot = allocate_snapshot;
 9055
 9056	/*
 9057	 * Only the top level trace array gets its snapshot allocated
 9058	 * from the kernel command line.
 9059	 */
 9060	allocate_snapshot = false;
 9061#endif
 9062
 9063	return 0;
 9064}
 9065
 9066static void free_trace_buffer(struct array_buffer *buf)
 9067{
 9068	if (buf->buffer) {
 9069		ring_buffer_free(buf->buffer);
 9070		buf->buffer = NULL;
 9071		free_percpu(buf->data);
 9072		buf->data = NULL;
 9073	}
 9074}
 9075
 9076static void free_trace_buffers(struct trace_array *tr)
 9077{
 9078	if (!tr)
 9079		return;
 9080
 9081	free_trace_buffer(&tr->array_buffer);
 9082
 9083#ifdef CONFIG_TRACER_MAX_TRACE
 9084	free_trace_buffer(&tr->max_buffer);
 9085#endif
 9086}
 9087
 9088static void init_trace_flags_index(struct trace_array *tr)
 9089{
 9090	int i;
 9091
 9092	/* Used by the trace options files */
 9093	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
 9094		tr->trace_flags_index[i] = i;
 9095}
 9096
 9097static void __update_tracer_options(struct trace_array *tr)
 9098{
 9099	struct tracer *t;
 9100
 9101	for (t = trace_types; t; t = t->next)
 9102		add_tracer_options(tr, t);
 9103}
 9104
 9105static void update_tracer_options(struct trace_array *tr)
 9106{
 9107	mutex_lock(&trace_types_lock);
 9108	__update_tracer_options(tr);
 9109	mutex_unlock(&trace_types_lock);
 9110}
 9111
 9112/* Must have trace_types_lock held */
 9113struct trace_array *trace_array_find(const char *instance)
 9114{
 9115	struct trace_array *tr, *found = NULL;
 9116
 9117	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 9118		if (tr->name && strcmp(tr->name, instance) == 0) {
 9119			found = tr;
 9120			break;
 9121		}
 9122	}
 9123
 9124	return found;
 9125}
 9126
 9127struct trace_array *trace_array_find_get(const char *instance)
 9128{
 9129	struct trace_array *tr;
 9130
 9131	mutex_lock(&trace_types_lock);
 9132	tr = trace_array_find(instance);
 9133	if (tr)
 9134		tr->ref++;
 9135	mutex_unlock(&trace_types_lock);
 9136
 9137	return tr;
 9138}
 9139
 9140static int trace_array_create_dir(struct trace_array *tr)
 9141{
 9142	int ret;
 9143
 9144	tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
 9145	if (!tr->dir)
 9146		return -EINVAL;
 9147
 9148	ret = event_trace_add_tracer(tr->dir, tr);
 9149	if (ret) {
 9150		tracefs_remove(tr->dir);
 9151		return ret;
 9152	}
 9153
 9154	init_tracer_tracefs(tr, tr->dir);
 9155	__update_tracer_options(tr);
 9156
 9157	return ret;
 9158}
 9159
 9160static struct trace_array *trace_array_create(const char *name)
 9161{
 9162	struct trace_array *tr;
 9163	int ret;
 9164
 9165	ret = -ENOMEM;
 9166	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
 9167	if (!tr)
 9168		return ERR_PTR(ret);
 9169
 9170	tr->name = kstrdup(name, GFP_KERNEL);
 9171	if (!tr->name)
 9172		goto out_free_tr;
 9173
 9174	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
 9175		goto out_free_tr;
 9176
 9177	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
 9178
 9179	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
 9180
 9181	raw_spin_lock_init(&tr->start_lock);
 9182
 9183	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 9184
 9185	tr->current_trace = &nop_trace;
 9186
 9187	INIT_LIST_HEAD(&tr->systems);
 9188	INIT_LIST_HEAD(&tr->events);
 9189	INIT_LIST_HEAD(&tr->hist_vars);
 9190	INIT_LIST_HEAD(&tr->err_log);
 9191
 9192	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
 9193		goto out_free_tr;
 9194
 9195	if (ftrace_allocate_ftrace_ops(tr) < 0)
 9196		goto out_free_tr;
 9197
 9198	ftrace_init_trace_array(tr);
 9199
 9200	init_trace_flags_index(tr);
 9201
 9202	if (trace_instance_dir) {
 9203		ret = trace_array_create_dir(tr);
 9204		if (ret)
 9205			goto out_free_tr;
 9206	} else
 9207		__trace_early_add_events(tr);
 9208
 9209	list_add(&tr->list, &ftrace_trace_arrays);
 9210
 9211	tr->ref++;
 9212
 9213	return tr;
 9214
 9215 out_free_tr:
 9216	ftrace_free_ftrace_ops(tr);
 9217	free_trace_buffers(tr);
 9218	free_cpumask_var(tr->tracing_cpumask);
 9219	kfree(tr->name);
 9220	kfree(tr);
 9221
 9222	return ERR_PTR(ret);
 9223}
 9224
 9225static int instance_mkdir(const char *name)
 9226{
 9227	struct trace_array *tr;
 9228	int ret;
 9229
 9230	mutex_lock(&event_mutex);
 9231	mutex_lock(&trace_types_lock);
 9232
 9233	ret = -EEXIST;
 9234	if (trace_array_find(name))
 9235		goto out_unlock;
 9236
 9237	tr = trace_array_create(name);
 9238
 9239	ret = PTR_ERR_OR_ZERO(tr);
 9240
 9241out_unlock:
 9242	mutex_unlock(&trace_types_lock);
 9243	mutex_unlock(&event_mutex);
 9244	return ret;
 9245}
 9246
 9247/**
 9248 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
 9249 * @name: The name of the trace array to be looked up/created.
 9250 *
 9251 * Returns a pointer to the trace array with the given name,
 9252 * or NULL if it cannot be created.
 9253 *
 9254 * NOTE: This function increments the reference counter associated with the
 9255 * trace array returned. This makes sure it cannot be freed while in use.
 9256 * Use trace_array_put() once the trace array is no longer needed.
 9257 * If the trace_array is to be freed, trace_array_destroy() needs to
 9258 * be called after the trace_array_put(), or simply let user space delete
 9259 * it from the tracefs instances directory. But until the
 9260 * trace_array_put() is called, user space can not delete it.
 9261 *
 9262 */
 9263struct trace_array *trace_array_get_by_name(const char *name)
 9264{
 9265	struct trace_array *tr;
 9266
 9267	mutex_lock(&event_mutex);
 9268	mutex_lock(&trace_types_lock);
 9269
 9270	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 9271		if (tr->name && strcmp(tr->name, name) == 0)
 9272			goto out_unlock;
 9273	}
 9274
 9275	tr = trace_array_create(name);
 9276
 9277	if (IS_ERR(tr))
 9278		tr = NULL;
 9279out_unlock:
 9280	if (tr)
 9281		tr->ref++;
 9282
 9283	mutex_unlock(&trace_types_lock);
 9284	mutex_unlock(&event_mutex);
 9285	return tr;
 9286}
 9287EXPORT_SYMBOL_GPL(trace_array_get_by_name);
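
/*
 * A minimal in-kernel usage sketch (hypothetical module code, error
 * handling trimmed), following the reference rules described in the
 * comment above:
 *
 *   struct trace_array *tr;
 *
 *   tr = trace_array_get_by_name("my_instance");
 *   if (!tr)
 *           return -ENOMEM;
 *   ...
 *   trace_array_put(tr);       // drop the reference taken above
 *   trace_array_destroy(tr);   // then remove the instance, if desired
 */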
 9288
 9289static int __remove_instance(struct trace_array *tr)
 9290{
 9291	int i;
 9292
 9293	/* Reference counter for a newly created trace array = 1. */
 9294	if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
 9295		return -EBUSY;
 9296
 9297	list_del(&tr->list);
 9298
 9299	/* Disable all the flags that were enabled coming in */
 9300	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
 9301		if ((1 << i) & ZEROED_TRACE_FLAGS)
 9302			set_tracer_flag(tr, 1 << i, 0);
 9303	}
 9304
 9305	tracing_set_nop(tr);
 9306	clear_ftrace_function_probes(tr);
 9307	event_trace_del_tracer(tr);
 9308	ftrace_clear_pids(tr);
 9309	ftrace_destroy_function_files(tr);
 9310	tracefs_remove(tr->dir);
 9311	free_percpu(tr->last_func_repeats);
 9312	free_trace_buffers(tr);
 9313
 9314	for (i = 0; i < tr->nr_topts; i++) {
 9315		kfree(tr->topts[i].topts);
 9316	}
 9317	kfree(tr->topts);
 9318
 9319	free_cpumask_var(tr->tracing_cpumask);
 9320	kfree(tr->name);
 9321	kfree(tr);
 9322
 9323	return 0;
 9324}
 9325
 9326int trace_array_destroy(struct trace_array *this_tr)
 9327{
 9328	struct trace_array *tr;
 9329	int ret;
 9330
 9331	if (!this_tr)
 9332		return -EINVAL;
 9333
 9334	mutex_lock(&event_mutex);
 9335	mutex_lock(&trace_types_lock);
 9336
 9337	ret = -ENODEV;
 9338
 9339	/* Making sure trace array exists before destroying it. */
 9340	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 9341		if (tr == this_tr) {
 9342			ret = __remove_instance(tr);
 9343			break;
 9344		}
 9345	}
 9346
 9347	mutex_unlock(&trace_types_lock);
 9348	mutex_unlock(&event_mutex);
 9349
 9350	return ret;
 9351}
 9352EXPORT_SYMBOL_GPL(trace_array_destroy);
 9353
 9354static int instance_rmdir(const char *name)
 9355{
 9356	struct trace_array *tr;
 9357	int ret;
 9358
 9359	mutex_lock(&event_mutex);
 9360	mutex_lock(&trace_types_lock);
 9361
 9362	ret = -ENODEV;
 9363	tr = trace_array_find(name);
 9364	if (tr)
 9365		ret = __remove_instance(tr);
 9366
 9367	mutex_unlock(&trace_types_lock);
 9368	mutex_unlock(&event_mutex);
 9369
 9370	return ret;
 9371}
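
/*
 * instance_mkdir() and instance_rmdir() above are invoked when user space
 * creates or removes an instance directory, e.g. (assuming tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *   mkdir /sys/kernel/tracing/instances/foo   # calls instance_mkdir("foo")
 *   rmdir /sys/kernel/tracing/instances/foo   # calls instance_rmdir("foo")
 */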
 9372
 9373static __init void create_trace_instances(struct dentry *d_tracer)
 9374{
 9375	struct trace_array *tr;
 9376
 9377	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
 9378							 instance_mkdir,
 9379							 instance_rmdir);
 9380	if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
 9381		return;
 9382
 9383	mutex_lock(&event_mutex);
 9384	mutex_lock(&trace_types_lock);
 9385
 9386	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 9387		if (!tr->name)
 9388			continue;
 9389		if (MEM_FAIL(trace_array_create_dir(tr) < 0,
 9390			     "Failed to create instance directory\n"))
 9391			break;
 9392	}
 9393
 9394	mutex_unlock(&trace_types_lock);
 9395	mutex_unlock(&event_mutex);
 9396}
 9397
 9398static void
 9399init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 9400{
 9401	struct trace_event_file *file;
 9402	int cpu;
 9403
 9404	trace_create_file("available_tracers", 0444, d_tracer,
 9405			tr, &show_traces_fops);
 9406
 9407	trace_create_file("current_tracer", 0644, d_tracer,
 9408			tr, &set_tracer_fops);
 9409
 9410	trace_create_file("tracing_cpumask", 0644, d_tracer,
 9411			  tr, &tracing_cpumask_fops);
 9412
 9413	trace_create_file("trace_options", 0644, d_tracer,
 9414			  tr, &tracing_iter_fops);
 9415
 9416	trace_create_file("trace", 0644, d_tracer,
 9417			  tr, &tracing_fops);
 9418
 9419	trace_create_file("trace_pipe", 0444, d_tracer,
 9420			  tr, &tracing_pipe_fops);
 9421
 9422	trace_create_file("buffer_size_kb", 0644, d_tracer,
 9423			  tr, &tracing_entries_fops);
 9424
 9425	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
 9426			  tr, &tracing_total_entries_fops);
 9427
 9428	trace_create_file("free_buffer", 0200, d_tracer,
 9429			  tr, &tracing_free_buffer_fops);
 9430
 9431	trace_create_file("trace_marker", 0220, d_tracer,
 9432			  tr, &tracing_mark_fops);
 9433
 9434	file = __find_event_file(tr, "ftrace", "print");
 9435	if (file && file->dir)
 9436		trace_create_file("trigger", 0644, file->dir, file,
 9437				  &event_trigger_fops);
 9438	tr->trace_marker_file = file;
 9439
 9440	trace_create_file("trace_marker_raw", 0220, d_tracer,
 9441			  tr, &tracing_mark_raw_fops);
 9442
 9443	trace_create_file("trace_clock", 0644, d_tracer, tr,
 9444			  &trace_clock_fops);
 9445
 9446	trace_create_file("tracing_on", 0644, d_tracer,
 9447			  tr, &rb_simple_fops);
 9448
 9449	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
 9450			  &trace_time_stamp_mode_fops);
 9451
 9452	tr->buffer_percent = 50;
 9453
 9454	trace_create_file("buffer_percent", 0444, d_tracer,
 9455			tr, &buffer_percent_fops);
 9456
 9457	create_trace_options_dir(tr);
 9458
 9459	trace_create_maxlat_file(tr, d_tracer);
 9460
 9461	if (ftrace_create_function_files(tr, d_tracer))
 9462		MEM_FAIL(1, "Could not allocate function filter files");
 9463
 9464#ifdef CONFIG_TRACER_SNAPSHOT
 9465	trace_create_file("snapshot", 0644, d_tracer,
 9466			  tr, &snapshot_fops);
 9467#endif
 9468
 9469	trace_create_file("error_log", 0644, d_tracer,
 9470			  tr, &tracing_err_log_fops);
 9471
 9472	for_each_tracing_cpu(cpu)
 9473		tracing_init_tracefs_percpu(tr, cpu);
 9474
 9475	ftrace_init_tracefs(tr, d_tracer);
 9476}
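
/*
 * After init_tracer_tracefs() runs, each trace array directory contains
 * roughly the following (an abridged listing; some entries depend on the
 * kernel configuration):
 *
 *   available_tracers  current_tracer  trace        trace_pipe   trace_marker
 *   trace_options      options/        tracing_on   trace_clock  error_log
 *   buffer_size_kb     buffer_percent  per_cpu/     snapshot
 */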
 9477
 9478static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
 9479{
 9480	struct vfsmount *mnt;
 9481	struct file_system_type *type;
 9482
 9483	/*
 9484	 * To maintain backward compatibility for tools that mount
 9485	 * debugfs to get to the tracing facility, tracefs is automatically
 9486	 * mounted to the debugfs/tracing directory.
 9487	 */
 9488	type = get_fs_type("tracefs");
 9489	if (!type)
 9490		return NULL;
 9491	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
 9492	put_filesystem(type);
 9493	if (IS_ERR(mnt))
 9494		return NULL;
 9495	mntget(mnt);
 9496
 9497	return mnt;
 9498}
 9499
 9500/**
 9501 * tracing_init_dentry - initialize top level trace array
 9502 *
 9503 * This is called when creating files or directories in the tracing
 9504 * directory. It is called via fs_initcall() by any of the boot up code
 9505 * and returns 0 on success, or a negative error if tracing is unavailable.
 9506 */
 9507int tracing_init_dentry(void)
 9508{
 9509	struct trace_array *tr = &global_trace;
 9510
 9511	if (security_locked_down(LOCKDOWN_TRACEFS)) {
 9512		pr_warn("Tracing disabled due to lockdown\n");
 9513		return -EPERM;
 9514	}
 9515
 9516	/* The top level trace array uses NULL as parent */
 9517	if (tr->dir)
 9518		return 0;
 9519
 9520	if (WARN_ON(!tracefs_initialized()))
 9521		return -ENODEV;
 9522
 9523	/*
 9524	 * As there may still be users that expect the tracing
 9525	 * files to exist in debugfs/tracing, we must automount
 9526	 * the tracefs file system there, so older tools still
 9527	 * work with the newer kernel.
 9528	 */
 9529	tr->dir = debugfs_create_automount("tracing", NULL,
 9530					   trace_automount, NULL);
 9531
 9532	return 0;
 9533}
 9534
 9535extern struct trace_eval_map *__start_ftrace_eval_maps[];
 9536extern struct trace_eval_map *__stop_ftrace_eval_maps[];
 9537
 9538static struct workqueue_struct *eval_map_wq __initdata;
 9539static struct work_struct eval_map_work __initdata;
 9540
 9541static void __init eval_map_work_func(struct work_struct *work)
 9542{
 9543	int len;
 9544
 9545	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
 9546	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
 9547}
 9548
 9549static int __init trace_eval_init(void)
 9550{
 9551	INIT_WORK(&eval_map_work, eval_map_work_func);
 9552
 9553	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
 9554	if (!eval_map_wq) {
 9555		pr_err("Unable to allocate eval_map_wq\n");
 9556		/* Do work here */
 9557		eval_map_work_func(&eval_map_work);
 9558		return -ENOMEM;
 9559	}
 9560
 9561	queue_work(eval_map_wq, &eval_map_work);
 9562	return 0;
 9563}
 9564
 9565static int __init trace_eval_sync(void)
 9566{
 9567	/* Make sure the eval map updates are finished */
 9568	if (eval_map_wq)
 9569		destroy_workqueue(eval_map_wq);
 9570	return 0;
 9571}
 9572
 9573late_initcall_sync(trace_eval_sync);
 9574
 9575
 9576#ifdef CONFIG_MODULES
 9577static void trace_module_add_evals(struct module *mod)
 9578{
 9579	if (!mod->num_trace_evals)
 9580		return;
 9581
 9582	/*
 9583	 * Modules with bad taint do not have events created;
 9584	 * do not bother with enums either.
 9585	 */
 9586	if (trace_module_has_bad_taint(mod))
 9587		return;
 9588
 9589	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
 9590}
 9591
 9592#ifdef CONFIG_TRACE_EVAL_MAP_FILE
 9593static void trace_module_remove_evals(struct module *mod)
 9594{
 9595	union trace_eval_map_item *map;
 9596	union trace_eval_map_item **last = &trace_eval_maps;
 9597
 9598	if (!mod->num_trace_evals)
 9599		return;
 9600
 9601	mutex_lock(&trace_eval_mutex);
 9602
 9603	map = trace_eval_maps;
 9604
 9605	while (map) {
 9606		if (map->head.mod == mod)
 9607			break;
 9608		map = trace_eval_jmp_to_tail(map);
 9609		last = &map->tail.next;
 9610		map = map->tail.next;
 9611	}
 9612	if (!map)
 9613		goto out;
 9614
 9615	*last = trace_eval_jmp_to_tail(map)->tail.next;
 9616	kfree(map);
 9617 out:
 9618	mutex_unlock(&trace_eval_mutex);
 9619}
 9620#else
 9621static inline void trace_module_remove_evals(struct module *mod) { }
 9622#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
 9623
 9624static int trace_module_notify(struct notifier_block *self,
 9625			       unsigned long val, void *data)
 9626{
 9627	struct module *mod = data;
 9628
 9629	switch (val) {
 9630	case MODULE_STATE_COMING:
 9631		trace_module_add_evals(mod);
 9632		break;
 9633	case MODULE_STATE_GOING:
 9634		trace_module_remove_evals(mod);
 9635		break;
 9636	}
 9637
 9638	return NOTIFY_OK;
 9639}
 9640
 9641static struct notifier_block trace_module_nb = {
 9642	.notifier_call = trace_module_notify,
 9643	.priority = 0,
 9644};
 9645#endif /* CONFIG_MODULES */
 9646
 9647static __init int tracer_init_tracefs(void)
 9648{
 9649	int ret;
 9650
 9651	trace_access_lock_init();
 9652
 9653	ret = tracing_init_dentry();
 9654	if (ret)
 9655		return 0;
 9656
 9657	event_trace_init();
 9658
 9659	init_tracer_tracefs(&global_trace, NULL);
 9660	ftrace_init_tracefs_toplevel(&global_trace, NULL);
 9661
 9662	trace_create_file("tracing_thresh", 0644, NULL,
 9663			&global_trace, &tracing_thresh_fops);
 9664
 9665	trace_create_file("README", 0444, NULL,
 9666			NULL, &tracing_readme_fops);
 9667
 9668	trace_create_file("saved_cmdlines", 0444, NULL,
 9669			NULL, &tracing_saved_cmdlines_fops);
 9670
 9671	trace_create_file("saved_cmdlines_size", 0644, NULL,
 9672			  NULL, &tracing_saved_cmdlines_size_fops);
 9673
 9674	trace_create_file("saved_tgids", 0444, NULL,
 9675			NULL, &tracing_saved_tgids_fops);
 9676
 9677	trace_eval_init();
 9678
 9679	trace_create_eval_file(NULL);
 9680
 9681#ifdef CONFIG_MODULES
 9682	register_module_notifier(&trace_module_nb);
 9683#endif
 9684
 9685#ifdef CONFIG_DYNAMIC_FTRACE
 9686	trace_create_file("dyn_ftrace_total_info", 0444, NULL,
 9687			NULL, &tracing_dyn_info_fops);
 9688#endif
 9689
 9690	create_trace_instances(NULL);
 9691
 9692	update_tracer_options(&global_trace);
 9693
 9694	return 0;
 9695}
 9696
 9697fs_initcall(tracer_init_tracefs);
 9698
 9699static int trace_panic_handler(struct notifier_block *this,
 9700			       unsigned long event, void *unused)
 9701{
 9702	if (ftrace_dump_on_oops)
 9703		ftrace_dump(ftrace_dump_on_oops);
 9704	return NOTIFY_OK;
 9705}
 9706
 9707static struct notifier_block trace_panic_notifier = {
 9708	.notifier_call  = trace_panic_handler,
 9709	.next           = NULL,
 9710	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
 9711};
 9712
 9713static int trace_die_handler(struct notifier_block *self,
 9714			     unsigned long val,
 9715			     void *data)
 9716{
 9717	switch (val) {
 9718	case DIE_OOPS:
 9719		if (ftrace_dump_on_oops)
 9720			ftrace_dump(ftrace_dump_on_oops);
 9721		break;
 9722	default:
 9723		break;
 9724	}
 9725	return NOTIFY_OK;
 9726}
 9727
 9728static struct notifier_block trace_die_notifier = {
 9729	.notifier_call = trace_die_handler,
 9730	.priority = 200
 9731};
 9732
 9733/*
 9734 * printk is limited to a max of 1024; we really don't need it that big.
 9735 * Nothing should be printing 1000 characters anyway.
 9736 */
 9737#define TRACE_MAX_PRINT		1000
 9738
 9739/*
 9740 * Define KERN_TRACE here so that we have one place to modify
 9741 * it if we decide to change what log level the ftrace dump
 9742 * should be at.
 9743 */
 9744#define KERN_TRACE		KERN_EMERG
 9745
 9746void
 9747trace_printk_seq(struct trace_seq *s)
 9748{
 9749	/* Probably should print a warning here. */
 9750	if (s->seq.len >= TRACE_MAX_PRINT)
 9751		s->seq.len = TRACE_MAX_PRINT;
 9752
 9753	/*
 9754	 * More paranoid code. Although the buffer size is set to
 9755	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
 9756	 * an extra layer of protection.
 9757	 */
 9758	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
 9759		s->seq.len = s->seq.size - 1;
 9760
 9761	/* Should be NUL terminated, but we are paranoid. */
 9762	s->buffer[s->seq.len] = 0;
 9763
 9764	printk(KERN_TRACE "%s", s->buffer);
 9765
 9766	trace_seq_init(s);
 9767}
 9768
 9769void trace_init_global_iter(struct trace_iterator *iter)
 9770{
 9771	iter->tr = &global_trace;
 9772	iter->trace = iter->tr->current_trace;
 9773	iter->cpu_file = RING_BUFFER_ALL_CPUS;
 9774	iter->array_buffer = &global_trace.array_buffer;
 9775
 9776	if (iter->trace && iter->trace->open)
 9777		iter->trace->open(iter);
 9778
 9779	/* Annotate start of buffers if we had overruns */
 9780	if (ring_buffer_overruns(iter->array_buffer->buffer))
 9781		iter->iter_flags |= TRACE_FILE_ANNOTATE;
 9782
 9783	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
 9784	if (trace_clocks[iter->tr->clock_id].in_ns)
 9785		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 9786}
 9787
 9788void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 9789{
 9790	/* use static because iter can be a bit big for the stack */
 9791	static struct trace_iterator iter;
 9792	static atomic_t dump_running;
 9793	struct trace_array *tr = &global_trace;
 9794	unsigned int old_userobj;
 9795	unsigned long flags;
 9796	int cnt = 0, cpu;
 9797
 9798	/* Only allow one dump user at a time. */
 9799	if (atomic_inc_return(&dump_running) != 1) {
 9800		atomic_dec(&dump_running);
 9801		return;
 9802	}
 9803
 9804	/*
 9805	 * Always turn off tracing when we dump.
 9806	 * We don't need to show trace output of what happens
 9807	 * between multiple crashes.
 9808	 *
 9809	 * If the user does a sysrq-z, then they can re-enable
 9810	 * tracing with echo 1 > tracing_on.
 9811	 */
 9812	tracing_off();
 9813
 9814	local_irq_save(flags);
 9815	printk_nmi_direct_enter();
 9816
 9817	/* Simulate the iterator */
 9818	trace_init_global_iter(&iter);
 9819	/* Can not use kmalloc for iter.temp and iter.fmt */
 9820	iter.temp = static_temp_buf;
 9821	iter.temp_size = STATIC_TEMP_BUF_SIZE;
 9822	iter.fmt = static_fmt_buf;
 9823	iter.fmt_size = STATIC_FMT_BUF_SIZE;
 9824
 9825	for_each_tracing_cpu(cpu) {
 9826		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
 9827	}
 9828
 9829	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
 9830
 9831	/* don't look at user memory in panic mode */
 9832	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 9833
 9834	switch (oops_dump_mode) {
 9835	case DUMP_ALL:
 9836		iter.cpu_file = RING_BUFFER_ALL_CPUS;
 9837		break;
 9838	case DUMP_ORIG:
 9839		iter.cpu_file = raw_smp_processor_id();
 9840		break;
 9841	case DUMP_NONE:
 9842		goto out_enable;
 9843	default:
 9844		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
 9845		iter.cpu_file = RING_BUFFER_ALL_CPUS;
 9846	}
 9847
 9848	printk(KERN_TRACE "Dumping ftrace buffer:\n");
 9849
 9850	/* Did function tracer already get disabled? */
 9851	if (ftrace_is_dead()) {
 9852		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
 9853		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
 9854	}
 9855
 9856	/*
 9857	 * We need to stop all tracing on all CPUs to read
 9858	 * the next buffer. This is a bit expensive, but is
 9859	 * not done often. We read everything we can,
 9860	 * and then release the locks again.
 9861	 */
 9862
 9863	while (!trace_empty(&iter)) {
 9864
 9865		if (!cnt)
 9866			printk(KERN_TRACE "---------------------------------\n");
 9867
 9868		cnt++;
 9869
 9870		trace_iterator_reset(&iter);
 9871		iter.iter_flags |= TRACE_FILE_LAT_FMT;
 9872
 9873		if (trace_find_next_entry_inc(&iter) != NULL) {
 9874			int ret;
 9875
 9876			ret = print_trace_line(&iter);
 9877			if (ret != TRACE_TYPE_NO_CONSUME)
 9878				trace_consume(&iter);
 9879		}
 9880		touch_nmi_watchdog();
 9881
 9882		trace_printk_seq(&iter.seq);
 9883	}
 9884
 9885	if (!cnt)
 9886		printk(KERN_TRACE "   (ftrace buffer empty)\n");
 9887	else
 9888		printk(KERN_TRACE "---------------------------------\n");
 9889
 9890 out_enable:
 9891	tr->trace_flags |= old_userobj;
 9892
 9893	for_each_tracing_cpu(cpu) {
 9894		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
 9895	}
 9896	atomic_dec(&dump_running);
 9897	printk_nmi_direct_exit();
 9898	local_irq_restore(flags);
 9899}
 9900EXPORT_SYMBOL_GPL(ftrace_dump);
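
/*
 * ftrace_dump() is normally reached through the panic/die notifiers
 * registered in tracer_alloc_buffers().  Example ways to arm it (assuming
 * the usual ftrace_dump_on_oops boot parameter and sysctl):
 *
 *   ftrace_dump_on_oops                                # kernel boot parameter
 *   echo 1 > /proc/sys/kernel/ftrace_dump_on_oops      # sysctl at run time
 */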
 9901
 9902#define WRITE_BUFSIZE  4096
 9903
 9904ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
 9905				size_t count, loff_t *ppos,
 9906				int (*createfn)(const char *))
 9907{
 9908	char *kbuf, *buf, *tmp;
 9909	int ret = 0;
 9910	size_t done = 0;
 9911	size_t size;
 9912
 9913	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
 9914	if (!kbuf)
 9915		return -ENOMEM;
 9916
 9917	while (done < count) {
 9918		size = count - done;
 9919
 9920		if (size >= WRITE_BUFSIZE)
 9921			size = WRITE_BUFSIZE - 1;
 9922
 9923		if (copy_from_user(kbuf, buffer + done, size)) {
 9924			ret = -EFAULT;
 9925			goto out;
 9926		}
 9927		kbuf[size] = '\0';
 9928		buf = kbuf;
 9929		do {
 9930			tmp = strchr(buf, '\n');
 9931			if (tmp) {
 9932				*tmp = '\0';
 9933				size = tmp - buf + 1;
 9934			} else {
 9935				size = strlen(buf);
 9936				if (done + size < count) {
 9937					if (buf != kbuf)
 9938						break;
 9939					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
 9940					pr_warn("Line length is too long: Should be less than %d\n",
 9941						WRITE_BUFSIZE - 2);
 9942					ret = -EINVAL;
 9943					goto out;
 9944				}
 9945			}
 9946			done += size;
 9947
 9948			/* Remove comments */
 9949			tmp = strchr(buf, '#');
 9950
 9951			if (tmp)
 9952				*tmp = '\0';
 9953
 9954			ret = createfn(buf);
 9955			if (ret)
 9956				goto out;
 9957			buf += size;
 9958
 9959		} while (done < count);
 9960	}
 9961	ret = done;
 9962
 9963out:
 9964	kfree(kbuf);
 9965
 9966	return ret;
 9967}
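
/*
 * trace_parse_run_command() is the line-oriented write helper used by the
 * dynamic event files.  A sketch against kprobe_events, one of its users
 * (assuming CONFIG_KPROBE_EVENTS):
 *
 *   echo 'p:my_open do_sys_open' > kprobe_events   # each line -> createfn()
 *   echo '-:my_open' >> kprobe_events              # another line, removes it
 *
 * Anything after a '#' on a line is stripped before createfn() is called.
 */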
 9968
 9969__init static int tracer_alloc_buffers(void)
 9970{
 9971	int ring_buf_size;
 9972	int ret = -ENOMEM;
 9973
 9974
 9975	if (security_locked_down(LOCKDOWN_TRACEFS)) {
 9976		pr_warn("Tracing disabled due to lockdown\n");
 9977		return -EPERM;
 9978	}
 9979
 9980	/*
 9981	 * Make sure we don't accidentally add more trace options
 9982	 * than we have bits for.
 9983	 */
 9984	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
 9985
 9986	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
 9987		goto out;
 9988
 9989	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
 9990		goto out_free_buffer_mask;
 9991
 9992	/* Only allocate trace_printk buffers if a trace_printk exists */
 9993	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
 9994		/* Must be called before global_trace.buffer is allocated */
 9995		trace_printk_init_buffers();
 9996
 9997	/* To save memory, keep the ring buffer size to its minimum */
 9998	if (ring_buffer_expanded)
 9999		ring_buf_size = trace_buf_size;
10000	else
10001		ring_buf_size = 1;
10002
10003	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
10004	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
10005
10006	raw_spin_lock_init(&global_trace.start_lock);
10007
10008	/*
10009	 * The prepare callback allocates some memory for the ring buffer. We
10010	 * don't free the buffer if the CPU goes down. If we were to free
10011	 * the buffer, then the user would lose any trace that was in the
10012	 * buffer. The memory will be removed once the "instance" is removed.
10013	 */
10014	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
10015				      "trace/RB:prepare", trace_rb_cpu_prepare,
10016				      NULL);
10017	if (ret < 0)
10018		goto out_free_cpumask;
10019	/* Used for event triggers */
10020	ret = -ENOMEM;
10021	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
10022	if (!temp_buffer)
10023		goto out_rm_hp_state;
10024
10025	if (trace_create_savedcmd() < 0)
10026		goto out_free_temp_buffer;
10027
10028	/* TODO: make the number of buffers hot pluggable with CPUS */
10029	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
10030		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
10031		goto out_free_savedcmd;
10032	}
10033
10034	if (global_trace.buffer_disabled)
10035		tracing_off();
10036
10037	if (trace_boot_clock) {
10038		ret = tracing_set_clock(&global_trace, trace_boot_clock);
10039		if (ret < 0)
10040			pr_warn("Trace clock %s not defined, going back to default\n",
10041				trace_boot_clock);
10042	}
10043
10044	/*
10045	 * register_tracer() might reference current_trace, so it
10046	 * needs to be set before we register anything. This is
10047	 * just a bootstrap of current_trace anyway.
10048	 */
10049	global_trace.current_trace = &nop_trace;
10050
10051	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
10052
10053	ftrace_init_global_array_ops(&global_trace);
10054
10055	init_trace_flags_index(&global_trace);
10056
10057	register_tracer(&nop_trace);
10058
10059	/* Function tracing may start here (via kernel command line) */
10060	init_function_trace();
10061
10062	/* All seems OK, enable tracing */
10063	tracing_disabled = 0;
10064
10065	atomic_notifier_chain_register(&panic_notifier_list,
10066				       &trace_panic_notifier);
10067
10068	register_die_notifier(&trace_die_notifier);
10069
10070	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
10071
10072	INIT_LIST_HEAD(&global_trace.systems);
10073	INIT_LIST_HEAD(&global_trace.events);
10074	INIT_LIST_HEAD(&global_trace.hist_vars);
10075	INIT_LIST_HEAD(&global_trace.err_log);
10076	list_add(&global_trace.list, &ftrace_trace_arrays);
10077
10078	apply_trace_boot_options();
10079
10080	register_snapshot_cmd();
10081
10082	test_can_verify();
10083
10084	return 0;
10085
10086out_free_savedcmd:
10087	free_saved_cmdlines_buffer(savedcmd);
10088out_free_temp_buffer:
10089	ring_buffer_free(temp_buffer);
10090out_rm_hp_state:
10091	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
10092out_free_cpumask:
10093	free_cpumask_var(global_trace.tracing_cpumask);
10094out_free_buffer_mask:
10095	free_cpumask_var(tracing_buffer_mask);
10096out:
10097	return ret;
10098}
10099
10100void __init early_trace_init(void)
10101{
10102	if (tracepoint_printk) {
10103		tracepoint_print_iter =
10104			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
10105		if (MEM_FAIL(!tracepoint_print_iter,
10106			     "Failed to allocate trace iterator\n"))
10107			tracepoint_printk = 0;
10108		else
10109			static_key_enable(&tracepoint_printk_key.key);
10110	}
10111	tracer_alloc_buffers();
10112}
10113
10114void __init trace_init(void)
10115{
10116	trace_event_init();
10117}
10118
10119__init static void clear_boot_tracer(void)
10120{
10121	/*
10122	 * The default boot-up tracer string lives in an init section.
10123	 * This function is called at late init. If we did not
10124	 * find the boot tracer by then, clear the pointer to prevent
10125	 * later registrations from accessing the init memory that is
10126	 * about to be freed.
10127	 */
10128	if (!default_bootup_tracer)
10129		return;
10130
10131	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
10132	       default_bootup_tracer);
10133	default_bootup_tracer = NULL;
10134}
10135
10136#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
10137__init static void tracing_set_default_clock(void)
10138{
10139	/* sched_clock_stable() is determined in late_initcall */
10140	if (!trace_boot_clock && !sched_clock_stable()) {
10141		if (security_locked_down(LOCKDOWN_TRACEFS)) {
10142			pr_warn("Can not set tracing clock due to lockdown\n");
10143			return;
10144		}
10145
10146		printk(KERN_WARNING
10147		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
10148		       "If you want to keep using the local clock, then add:\n"
10149		       "  \"trace_clock=local\"\n"
10150		       "on the kernel command line\n");
10151		tracing_set_clock(&global_trace, "global");
10152	}
10153}
10154#else
10155static inline void tracing_set_default_clock(void) { }
10156#endif
10157
10158__init static int late_trace_init(void)
10159{
10160	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
10161		static_key_disable(&tracepoint_printk_key.key);
10162		tracepoint_printk = 0;
10163	}
10164
10165	tracing_set_default_clock();
10166	clear_boot_tracer();
10167	return 0;
10168}
10169
10170late_initcall_sync(late_trace_init);