   1/*
   2 * ring buffer based function tracer
   3 *
   4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
   5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
   6 *
   7 * Originally taken from the RT patch by:
   8 *    Arnaldo Carvalho de Melo <acme@redhat.com>
   9 *
  10 * Based on code from the latency_tracer, that is:
  11 *  Copyright (C) 2004-2006 Ingo Molnar
  12 *  Copyright (C) 2004 Nadia Yvette Chambers
  13 */
  14#include <linux/ring_buffer.h>
  15#include <generated/utsrelease.h>
  16#include <linux/stacktrace.h>
  17#include <linux/writeback.h>
  18#include <linux/kallsyms.h>
  19#include <linux/seq_file.h>
  20#include <linux/notifier.h>
  21#include <linux/irqflags.h>
   22#include <linux/debugfs.h>
   23#include <linux/pagemap.h>
  24#include <linux/hardirq.h>
  25#include <linux/linkage.h>
  26#include <linux/uaccess.h>
  27#include <linux/kprobes.h>
  28#include <linux/ftrace.h>
  29#include <linux/module.h>
  30#include <linux/percpu.h>
  31#include <linux/splice.h>
  32#include <linux/kdebug.h>
   33#include <linux/string.h>
   34#include <linux/rwsem.h>
  35#include <linux/slab.h>
  36#include <linux/ctype.h>
  37#include <linux/init.h>
  38#include <linux/poll.h>
  39#include <linux/nmi.h>
  40#include <linux/fs.h>
  41#include <linux/sched/rt.h>
  42
  43#include "trace.h"
  44#include "trace_output.h"
  45
  46/*
  47 * On boot up, the ring buffer is set to the minimum size, so that
  48 * we do not waste memory on systems that are not using tracing.
  49 */
  50bool ring_buffer_expanded;
  51
  52/*
  53 * We need to change this state when a selftest is running.
   54 * A selftest will look into the ring buffer to count the
   55 * entries inserted during the selftest, although concurrent
   56 * insertions into the ring buffer, such as trace_printk(), could
   57 * occur at the same time, giving false positive or negative results.
  58 */
  59static bool __read_mostly tracing_selftest_running;
  60
  61/*
  62 * If a tracer is running, we do not want to run SELFTEST.
  63 */
  64bool __read_mostly tracing_selftest_disabled;
   65
  66/* For tracers that don't implement custom flags */
  67static struct tracer_opt dummy_tracer_opt[] = {
  68	{ }
  69};
  70
  71static struct tracer_flags dummy_tracer_flags = {
  72	.val = 0,
  73	.opts = dummy_tracer_opt
  74};
  75
  76static int
  77dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
  78{
  79	return 0;
  80}
  81
  82/*
  83 * To prevent the comm cache from being overwritten when no
  84 * tracing is active, only save the comm when a trace event
   85 * occurs.
  86 */
  87static DEFINE_PER_CPU(bool, trace_cmdline_save);
  88
  89/*
  90 * Kill all tracing for good (never come back).
   91 * It is initialized to 1, and is set back to zero only if the
   92 * initialization of the tracer succeeds; that is the only place
   93 * that ever clears it.
  94 */
  95static int tracing_disabled = 1;
  96
  97DEFINE_PER_CPU(int, ftrace_cpu_disabled);
  98
  99cpumask_var_t __read_mostly	tracing_buffer_mask;
 100
 101/*
 102 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 103 *
  104 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
  105 * is set, then ftrace_dump() is called. This will output the contents
  106 * of the ftrace buffers to the console.  This is very useful for
  107 * capturing traces that lead to crashes and outputting them to a
  108 * serial console.
  109 *
  110 * It is off by default, but can be enabled either by specifying
  111 * "ftrace_dump_on_oops" on the kernel command line, or by setting
  112 * /proc/sys/kernel/ftrace_dump_on_oops.
  113 * Set it to 1 to dump the buffers of all CPUs.
  114 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 115 */
 116
 117enum ftrace_dump_mode ftrace_dump_on_oops;
 118
 119/* When set, tracing will stop when a WARN*() is hit */
 120int __disable_trace_on_warning;
  121
 122static int tracing_set_tracer(struct trace_array *tr, const char *buf);
 123
 124#define MAX_TRACER_SIZE		100
 125static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 126static char *default_bootup_tracer;
 127
 128static bool allocate_snapshot;
 129
 130static int __init set_cmdline_ftrace(char *str)
 131{
 132	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 133	default_bootup_tracer = bootup_tracer_buf;
 134	/* We are using ftrace early, expand it */
 135	ring_buffer_expanded = true;
 136	return 1;
 137}
 138__setup("ftrace=", set_cmdline_ftrace);
 139
 140static int __init set_ftrace_dump_on_oops(char *str)
 141{
 142	if (*str++ != '=' || !*str) {
 143		ftrace_dump_on_oops = DUMP_ALL;
 144		return 1;
 145	}
 146
 147	if (!strcmp("orig_cpu", str)) {
 148		ftrace_dump_on_oops = DUMP_ORIG;
  149		return 1;
  150	}
  151
  152	return 0;
 153}
 154__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
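/*
 * Illustrative usage (an editorial sketch, not in the original file),
 * matching the parsing above: on the kernel command line,
 *
 *	ftrace_dump_on_oops		dump the buffers of all CPUs (DUMP_ALL)
 *	ftrace_dump_on_oops=orig_cpu	dump only the oopsing CPU (DUMP_ORIG)
 *
 * The same modes (values 1 and 2) can be selected at run time via
 * /proc/sys/kernel/ftrace_dump_on_oops.
 */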
 155
 156static int __init stop_trace_on_warning(char *str)
 157{
  158	__disable_trace_on_warning = 1;
  159	return 1;
 160}
 161__setup("traceoff_on_warning=", stop_trace_on_warning);
 162
 163static int __init boot_alloc_snapshot(char *str)
 164{
 165	allocate_snapshot = true;
 166	/* We also need the main ring buffer expanded */
 167	ring_buffer_expanded = true;
 168	return 1;
 169}
 170__setup("alloc_snapshot", boot_alloc_snapshot);
 171
 172
 173static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
 174static char *trace_boot_options __initdata;
 175
 176static int __init set_trace_boot_options(char *str)
 177{
 178	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
 179	trace_boot_options = trace_boot_options_buf;
 180	return 0;
 181}
 182__setup("trace_options=", set_trace_boot_options);
 183
 184static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
 185static char *trace_boot_clock __initdata;
 186
 187static int __init set_trace_boot_clock(char *str)
 188{
 189	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
 190	trace_boot_clock = trace_boot_clock_buf;
 191	return 0;
 192}
 193__setup("trace_clock=", set_trace_boot_clock);
  194
  195
 196unsigned long long ns2usecs(cycle_t nsec)
 197{
 198	nsec += 500;
 199	do_div(nsec, 1000);
 200	return nsec;
 201}
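/*
 * Worked example (illustration only): the "+ 500" makes the division
 * round to the nearest microsecond instead of truncating:
 *
 *	ns2usecs(1499) -> (1499 + 500) / 1000 = 1
 *	ns2usecs(1500) -> (1500 + 500) / 1000 = 2
 *
 * Contrast nsecs_to_usecs() further down, which truncates.
 */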
  202
 203/*
 204 * The global_trace is the descriptor that holds the tracing
 205 * buffers for the live tracing. For each CPU, it contains
  206 * a linked list of pages that store the trace entries. The
  207 * page descriptors of the pages in memory are used to hold
  208 * this linked list, by linking the lru item of each page
  209 * descriptor to the other pages in that CPU's buffer.
 210 *
 211 * For each active CPU there is a data field that holds the
 212 * pages for the buffer for that CPU. Each CPU has the same number
 213 * of pages allocated for its buffer.
 214 */
  215static struct trace_array	global_trace;
  216
 217LIST_HEAD(ftrace_trace_arrays);
 218
 219int trace_array_get(struct trace_array *this_tr)
 220{
 221	struct trace_array *tr;
 222	int ret = -ENODEV;
 223
 224	mutex_lock(&trace_types_lock);
 225	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 226		if (tr == this_tr) {
 227			tr->ref++;
 228			ret = 0;
 229			break;
 230		}
 231	}
 232	mutex_unlock(&trace_types_lock);
 233
 234	return ret;
 235}
 236
 237static void __trace_array_put(struct trace_array *this_tr)
 238{
 239	WARN_ON(!this_tr->ref);
 240	this_tr->ref--;
 241}
 242
 243void trace_array_put(struct trace_array *this_tr)
 244{
 245	mutex_lock(&trace_types_lock);
 246	__trace_array_put(this_tr);
 247	mutex_unlock(&trace_types_lock);
 248}
 249
 250int filter_check_discard(struct ftrace_event_file *file, void *rec,
 251			 struct ring_buffer *buffer,
 252			 struct ring_buffer_event *event)
 253{
 254	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
 255	    !filter_match_preds(file->filter, rec)) {
 256		ring_buffer_discard_commit(buffer, event);
 257		return 1;
 258	}
 259
 260	return 0;
 261}
 262EXPORT_SYMBOL_GPL(filter_check_discard);
 263
 264int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
 265			      struct ring_buffer *buffer,
 266			      struct ring_buffer_event *event)
 267{
 268	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
 269	    !filter_match_preds(call->filter, rec)) {
 270		ring_buffer_discard_commit(buffer, event);
 271		return 1;
 272	}
 273
 274	return 0;
 275}
 276EXPORT_SYMBOL_GPL(call_filter_check_discard);
 277
 278cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 279{
 280	u64 ts;
 281
 282	/* Early boot up does not have a buffer yet */
 283	if (!buf->buffer)
 284		return trace_clock_local();
 285
 286	ts = ring_buffer_time_stamp(buf->buffer, cpu);
 287	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
 288
 289	return ts;
 290}
 291
 292cycle_t ftrace_now(int cpu)
 293{
 294	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
 295}
 296
 297/**
  298 * tracing_is_enabled - Show if global_trace has been enabled
  299 *
  300 * Shows if the global trace has been enabled or not. It uses the
  301 * mirror flag "buffer_disabled" so it can be used in fast paths such
  302 * as the irqsoff tracer. But it may be inaccurate due to races. If you
  303 * need to know the accurate state, use tracing_is_on(), which is a
  304 * little slower but accurate.
 305 */
 306int tracing_is_enabled(void)
 307{
 308	/*
 309	 * For quick access (irqsoff uses this in fast path), just
 310	 * return the mirror variable of the state of the ring buffer.
 311	 * It's a little racy, but we don't really care.
 312	 */
 313	smp_rmb();
 314	return !global_trace.buffer_disabled;
 315}
 316
 317/*
 318 * trace_buf_size is the size in bytes that is allocated
 319 * for a buffer. Note, the number of bytes is always rounded
 320 * to page size.
 321 *
  322 * This number is purposely set to a low value of 16384 entries.
  323 * If a dump on oops happens, not having to wait for an enormous
  324 * amount of output is much appreciated. In any case, the size is
  325 * configurable at both boot time and run time.
 326 */
 327#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
 328
 329static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 330
  331/* trace_types holds a linked list of available tracers. */
 332static struct tracer		*trace_types __read_mostly;
 333
 334/*
 335 * trace_types_lock is used to protect the trace_types list.
 336 */
 337DEFINE_MUTEX(trace_types_lock);
 338
 339/*
  340 * Serialize access to the ring buffer.
  341 *
  342 * The ring buffer serializes readers, but that is only low level
  343 * protection. The validity of the events (returned by
  344 * ring_buffer_peek() etc.) is not protected by the ring buffer.
  345 *
  346 * The content of events may become garbage if we allow another process
  347 * to consume these events concurrently:
  348 *   A) the page of the consumed events may become a normal page
  349 *      (not a reader page) in the ring buffer, and this page will be
  350 *      rewritten by the event producer.
  351 *   B) the page of the consumed events may become a page for
  352 *      splice_read, and this page will be returned to the system.
  353 *
  354 * These primitives allow multiple processes to access different
  355 * per-cpu ring buffers concurrently.
  356 *
  357 * These primitives don't distinguish read-only and read-consume access.
  358 * Multiple read-only accesses are also serialized.
 359 */
 360
 361#ifdef CONFIG_SMP
 362static DECLARE_RWSEM(all_cpu_access_lock);
 363static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
 364
 365static inline void trace_access_lock(int cpu)
 366{
 367	if (cpu == RING_BUFFER_ALL_CPUS) {
 368		/* gain it for accessing the whole ring buffer. */
 369		down_write(&all_cpu_access_lock);
 370	} else {
 371		/* gain it for accessing a cpu ring buffer. */
 372
 373		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
 374		down_read(&all_cpu_access_lock);
 375
 376		/* Secondly block other access to this @cpu ring buffer. */
 377		mutex_lock(&per_cpu(cpu_access_lock, cpu));
 378	}
 379}
 380
 381static inline void trace_access_unlock(int cpu)
 382{
 383	if (cpu == RING_BUFFER_ALL_CPUS) {
 384		up_write(&all_cpu_access_lock);
 385	} else {
 386		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
 387		up_read(&all_cpu_access_lock);
 388	}
 389}
 390
 391static inline void trace_access_lock_init(void)
 392{
 393	int cpu;
 394
 395	for_each_possible_cpu(cpu)
 396		mutex_init(&per_cpu(cpu_access_lock, cpu));
 397}
 398
 399#else
 400
 401static DEFINE_MUTEX(access_lock);
 402
 403static inline void trace_access_lock(int cpu)
 404{
 405	(void)cpu;
 406	mutex_lock(&access_lock);
 407}
 408
 409static inline void trace_access_unlock(int cpu)
 410{
 411	(void)cpu;
 412	mutex_unlock(&access_lock);
 413}
 414
 415static inline void trace_access_lock_init(void)
 416{
 417}
 418
 419#endif
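/*
 * Minimal usage sketch (illustration only, not part of the original
 * source): single-cpu readers take the rwsem for read plus that cpu's
 * mutex, so they can run in parallel on different cpus, while a
 * RING_BUFFER_ALL_CPUS reader takes the rwsem for write and excludes
 * everyone:
 *
 *	trace_access_lock(cpu);
 *	... consume events from that cpu's ring buffer ...
 *	trace_access_unlock(cpu);
 *
 *	trace_access_lock(RING_BUFFER_ALL_CPUS);
 *	... walk every cpu's buffer exclusively ...
 *	trace_access_unlock(RING_BUFFER_ALL_CPUS);
 */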
 420
 421/* trace_flags holds trace_options default values */
 422unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 423	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
 424	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
  425	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
  426
 427static void tracer_tracing_on(struct trace_array *tr)
 428{
 429	if (tr->trace_buffer.buffer)
 430		ring_buffer_record_on(tr->trace_buffer.buffer);
 431	/*
 432	 * This flag is looked at when buffers haven't been allocated
 433	 * yet, or by some tracers (like irqsoff), that just want to
 434	 * know if the ring buffer has been disabled, but it can handle
 435	 * races of where it gets disabled but we still do a record.
 436	 * As the check is in the fast path of the tracers, it is more
 437	 * important to be fast than accurate.
 438	 */
 439	tr->buffer_disabled = 0;
 440	/* Make the flag seen by readers */
 441	smp_wmb();
 442}
 443
 444/**
 445 * tracing_on - enable tracing buffers
 446 *
 447 * This function enables tracing buffers that may have been
 448 * disabled with tracing_off.
 449 */
 450void tracing_on(void)
 451{
 452	tracer_tracing_on(&global_trace);
 453}
 454EXPORT_SYMBOL_GPL(tracing_on);
 455
 456/**
 457 * __trace_puts - write a constant string into the trace buffer.
 458 * @ip:	   The address of the caller
 459 * @str:   The constant string to write
 460 * @size:  The size of the string.
 461 */
 462int __trace_puts(unsigned long ip, const char *str, int size)
 463{
 464	struct ring_buffer_event *event;
 465	struct ring_buffer *buffer;
 466	struct print_entry *entry;
 467	unsigned long irq_flags;
  468	int alloc;
  469
 470	if (unlikely(tracing_selftest_running || tracing_disabled))
 471		return 0;
 472
 473	alloc = sizeof(*entry) + size + 2; /* possible \n added */
 474
 475	local_save_flags(irq_flags);
 476	buffer = global_trace.trace_buffer.buffer;
  477	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
 478					  irq_flags, preempt_count());
 479	if (!event)
 480		return 0;
 481
 482	entry = ring_buffer_event_data(event);
 483	entry->ip = ip;
 484
 485	memcpy(&entry->buf, str, size);
 486
 487	/* Add a newline if necessary */
 488	if (entry->buf[size - 1] != '\n') {
 489		entry->buf[size] = '\n';
 490		entry->buf[size + 1] = '\0';
 491	} else
 492		entry->buf[size] = '\0';
 493
  494	__buffer_unlock_commit(buffer, event);
  495
 496	return size;
 497}
 498EXPORT_SYMBOL_GPL(__trace_puts);
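/*
 * Example call (illustration only): record a string literal, letting
 * __trace_puts() append the '\n' since the reservation leaves room:
 *
 *	__trace_puts(_THIS_IP_, "hello trace", 11);
 *
 * In normal use this is reached via the trace_puts() macro, which
 * supplies the caller's address and the string length itself.
 */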
 499
 500/**
 501 * __trace_bputs - write the pointer to a constant string into trace buffer
 502 * @ip:	   The address of the caller
  503 * @str:   The constant string to write to the buffer
 504 */
 505int __trace_bputs(unsigned long ip, const char *str)
 506{
 507	struct ring_buffer_event *event;
 508	struct ring_buffer *buffer;
 509	struct bputs_entry *entry;
 510	unsigned long irq_flags;
  511	int size = sizeof(struct bputs_entry);
  512
 513	if (unlikely(tracing_selftest_running || tracing_disabled))
 514		return 0;
 515
 516	local_save_flags(irq_flags);
 517	buffer = global_trace.trace_buffer.buffer;
 518	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
 519					  irq_flags, preempt_count());
 520	if (!event)
 521		return 0;
 522
 523	entry = ring_buffer_event_data(event);
 524	entry->ip			= ip;
 525	entry->str			= str;
 526
  527	__buffer_unlock_commit(buffer, event);
  528
 529	return 1;
 530}
 531EXPORT_SYMBOL_GPL(__trace_bputs);
 532
 533#ifdef CONFIG_TRACER_SNAPSHOT
 534/**
  535 * tracing_snapshot - take a snapshot of the current buffer.
 536 *
 537 * This causes a swap between the snapshot buffer and the current live
 538 * tracing buffer. You can use this to take snapshots of the live
 539 * trace when some condition is triggered, but continue to trace.
 540 *
  541 * Note, make sure to allocate the snapshot with either
  542 * tracing_snapshot_alloc(), or by doing it manually
  543 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
  544 *
  545 * If the snapshot buffer is not allocated, this will stop tracing,
  546 * basically making a permanent snapshot.
 547 */
 548void tracing_snapshot(void)
 549{
 550	struct trace_array *tr = &global_trace;
 551	struct tracer *tracer = tr->current_trace;
 552	unsigned long flags;
 553
 554	if (in_nmi()) {
 555		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
 556		internal_trace_puts("*** snapshot is being ignored        ***\n");
 557		return;
 558	}
 559
 560	if (!tr->allocated_snapshot) {
 561		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
 562		internal_trace_puts("*** stopping trace here!   ***\n");
 563		tracing_off();
 564		return;
 565	}
 566
 567	/* Note, snapshot can not be used when the tracer uses it */
 568	if (tracer->use_max_tr) {
 569		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
 570		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
 571		return;
 572	}
 573
 574	local_irq_save(flags);
 575	update_max_tr(tr, current, smp_processor_id());
 576	local_irq_restore(flags);
 577}
 578EXPORT_SYMBOL_GPL(tracing_snapshot);
 579
 580static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
 581					struct trace_buffer *size_buf, int cpu_id);
 582static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
 583
 584static int alloc_snapshot(struct trace_array *tr)
 585{
 586	int ret;
 587
 588	if (!tr->allocated_snapshot) {
 589
 590		/* allocate spare buffer */
 591		ret = resize_buffer_duplicate_size(&tr->max_buffer,
 592				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
 593		if (ret < 0)
 594			return ret;
 595
 596		tr->allocated_snapshot = true;
 597	}
 598
 599	return 0;
 600}
 601
 602void free_snapshot(struct trace_array *tr)
 603{
 604	/*
  605	 * We don't free the ring buffer; instead, we resize it, because
  606	 * the max_tr ring buffer has some state (e.g. ring->clock) and
  607	 * we want to preserve it.
 608	 */
 609	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
 610	set_buffer_entries(&tr->max_buffer, 1);
 611	tracing_reset_online_cpus(&tr->max_buffer);
 612	tr->allocated_snapshot = false;
 613}
 614
 615/**
 616 * tracing_alloc_snapshot - allocate snapshot buffer.
 617 *
 618 * This only allocates the snapshot buffer if it isn't already
 619 * allocated - it doesn't also take a snapshot.
 620 *
 621 * This is meant to be used in cases where the snapshot buffer needs
 622 * to be set up for events that can't sleep but need to be able to
 623 * trigger a snapshot.
 624 */
 625int tracing_alloc_snapshot(void)
 626{
 627	struct trace_array *tr = &global_trace;
 628	int ret;
 629
 630	ret = alloc_snapshot(tr);
 631	WARN_ON(ret < 0);
 632
 633	return ret;
 634}
 635EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 636
 637/**
  638 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
  639 *
  640 * This is similar to tracing_snapshot(), but it will allocate the
 641 * snapshot buffer if it isn't already allocated. Use this only
 642 * where it is safe to sleep, as the allocation may sleep.
 643 *
 644 * This causes a swap between the snapshot buffer and the current live
 645 * tracing buffer. You can use this to take snapshots of the live
 646 * trace when some condition is triggered, but continue to trace.
 647 */
 648void tracing_snapshot_alloc(void)
 649{
 650	int ret;
 651
 652	ret = tracing_alloc_snapshot();
 653	if (ret < 0)
 654		return;
 655
 656	tracing_snapshot();
 657}
 658EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
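/*
 * Usage sketch (illustration only): from sleepable context a single
 * tracing_snapshot_alloc() allocates (if needed) and snapshots; from
 * atomic context, allocate ahead of time and snapshot at the event:
 *
 *	tracing_alloc_snapshot();	at init time, may sleep
 *	...
 *	tracing_snapshot();		at the interesting event
 */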
 659#else
 660void tracing_snapshot(void)
 661{
 662	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
 663}
 664EXPORT_SYMBOL_GPL(tracing_snapshot);
 665int tracing_alloc_snapshot(void)
 666{
 667	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
 668	return -ENODEV;
 669}
 670EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 671void tracing_snapshot_alloc(void)
 672{
 673	/* Give warning */
 674	tracing_snapshot();
 675}
 676EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 677#endif /* CONFIG_TRACER_SNAPSHOT */
 678
 679static void tracer_tracing_off(struct trace_array *tr)
 680{
 681	if (tr->trace_buffer.buffer)
 682		ring_buffer_record_off(tr->trace_buffer.buffer);
 683	/*
 684	 * This flag is looked at when buffers haven't been allocated
 685	 * yet, or by some tracers (like irqsoff), that just want to
 686	 * know if the ring buffer has been disabled, but it can handle
 687	 * races of where it gets disabled but we still do a record.
 688	 * As the check is in the fast path of the tracers, it is more
 689	 * important to be fast than accurate.
 690	 */
 691	tr->buffer_disabled = 1;
 692	/* Make the flag seen by readers */
 693	smp_wmb();
 694}
 695
 696/**
 697 * tracing_off - turn off tracing buffers
 698 *
 699 * This function stops the tracing buffers from recording data.
 700 * It does not disable any overhead the tracers themselves may
 701 * be causing. This function simply causes all recording to
 702 * the ring buffers to fail.
 703 */
 704void tracing_off(void)
 705{
 706	tracer_tracing_off(&global_trace);
 707}
 708EXPORT_SYMBOL_GPL(tracing_off);
 709
 710void disable_trace_on_warning(void)
 711{
 712	if (__disable_trace_on_warning)
 713		tracing_off();
 714}
 715
 716/**
 717 * tracer_tracing_is_on - show real state of ring buffer enabled
  718 * @tr: the trace array to check if its ring buffer is enabled
 719 *
 720 * Shows real state of the ring buffer if it is enabled or not.
 721 */
 722static int tracer_tracing_is_on(struct trace_array *tr)
 723{
 724	if (tr->trace_buffer.buffer)
 725		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
 726	return !tr->buffer_disabled;
 727}
 728
 729/**
 730 * tracing_is_on - show state of ring buffers enabled
 731 */
 732int tracing_is_on(void)
 733{
 734	return tracer_tracing_is_on(&global_trace);
 735}
 736EXPORT_SYMBOL_GPL(tracing_is_on);
 737
 738static int __init set_buf_size(char *str)
 739{
 740	unsigned long buf_size;
 741
 742	if (!str)
 743		return 0;
 744	buf_size = memparse(str, &str);
 745	/* nr_entries can not be zero */
 746	if (buf_size == 0)
 747		return 0;
 748	trace_buf_size = buf_size;
 749	return 1;
 750}
 751__setup("trace_buf_size=", set_buf_size);
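/*
 * Example (illustration only): memparse() accepts K/M/G suffixes, so
 * booting with trace_buf_size=1M requests 1048576 bytes per cpu,
 * rounded to page size when the ring buffer is allocated.
 */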
 752
 753static int __init set_tracing_thresh(char *str)
 754{
 755	unsigned long threshold;
 756	int ret;
 757
 758	if (!str)
 759		return 0;
 760	ret = kstrtoul(str, 0, &threshold);
 761	if (ret < 0)
 762		return 0;
 763	tracing_thresh = threshold * 1000;
 764	return 1;
 765}
 766__setup("tracing_thresh=", set_tracing_thresh);
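/*
 * Example (illustration only): tracing_thresh=100 on the command line
 * stores 100 * 1000 = 100000, i.e. the threshold is given in
 * microseconds but kept internally in nanoseconds.
 */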
 767
 768unsigned long nsecs_to_usecs(unsigned long nsecs)
 769{
 770	return nsecs / 1000;
 771}
  772
  773/* These must match the bit positions in trace_iterator_flags */
 774static const char *trace_options[] = {
 775	"print-parent",
 776	"sym-offset",
 777	"sym-addr",
 778	"verbose",
 779	"raw",
 780	"hex",
 781	"bin",
 782	"block",
 783	"stacktrace",
 784	"trace_printk",
 785	"ftrace_preempt",
 786	"branch",
 787	"annotate",
 788	"userstacktrace",
 789	"sym-userobj",
 790	"printk-msg-only",
 791	"context-info",
 792	"latency-format",
 793	"sleep-time",
 794	"graph-time",
 795	"record-cmd",
 796	"overwrite",
 797	"disable_on_free",
 798	"irq-info",
 799	"markers",
 800	"function-trace",
 801	NULL
 802};
 803
 804static struct {
 805	u64 (*func)(void);
 806	const char *name;
 807	int in_ns;		/* is this clock in nanoseconds? */
 808} trace_clocks[] = {
 809	{ trace_clock_local,	"local",	1 },
 810	{ trace_clock_global,	"global",	1 },
 811	{ trace_clock_counter,	"counter",	0 },
 812	{ trace_clock_jiffies,	"uptime",	1 },
  813	{ trace_clock,		"perf",		1 },
 814	ARCH_TRACE_CLOCKS
 815};
 816
 817/*
 818 * trace_parser_get_init - gets the buffer for trace parser
 819 */
 820int trace_parser_get_init(struct trace_parser *parser, int size)
 821{
 822	memset(parser, 0, sizeof(*parser));
 823
 824	parser->buffer = kmalloc(size, GFP_KERNEL);
 825	if (!parser->buffer)
 826		return 1;
 827
 828	parser->size = size;
 829	return 0;
 830}
 831
 832/*
 833 * trace_parser_put - frees the buffer for trace parser
 834 */
 835void trace_parser_put(struct trace_parser *parser)
 836{
 837	kfree(parser->buffer);
 838}
 839
 840/*
  841 * trace_get_user - reads the user input string separated by space
  842 * (matched by isspace(ch))
  843 *
  844 * For each string found, the 'struct trace_parser' is updated,
 845 * and the function returns.
 846 *
 847 * Returns number of bytes read.
 848 *
 849 * See kernel/trace/trace.h for 'struct trace_parser' details.
 850 */
 851int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 852	size_t cnt, loff_t *ppos)
 853{
 854	char ch;
 855	size_t read = 0;
 856	ssize_t ret;
 857
 858	if (!*ppos)
 859		trace_parser_clear(parser);
 860
 861	ret = get_user(ch, ubuf++);
 862	if (ret)
 863		goto out;
 864
 865	read++;
 866	cnt--;
 867
 868	/*
 869	 * The parser is not finished with the last write,
 870	 * continue reading the user input without skipping spaces.
 871	 */
 872	if (!parser->cont) {
 873		/* skip white space */
 874		while (cnt && isspace(ch)) {
 875			ret = get_user(ch, ubuf++);
 876			if (ret)
 877				goto out;
 878			read++;
 879			cnt--;
 880		}
 881
 882		/* only spaces were written */
 883		if (isspace(ch)) {
 884			*ppos += read;
 885			ret = read;
 886			goto out;
 887		}
 888
 889		parser->idx = 0;
 890	}
 891
 892	/* read the non-space input */
 893	while (cnt && !isspace(ch)) {
 894		if (parser->idx < parser->size - 1)
 895			parser->buffer[parser->idx++] = ch;
 896		else {
 897			ret = -EINVAL;
 898			goto out;
 899		}
 900		ret = get_user(ch, ubuf++);
 901		if (ret)
 902			goto out;
 903		read++;
 904		cnt--;
 905	}
 906
 907	/* We either got finished input or we have to wait for another call. */
 908	if (isspace(ch)) {
 909		parser->buffer[parser->idx] = 0;
 910		parser->cont = false;
 911	} else if (parser->idx < parser->size - 1) {
 912		parser->cont = true;
 913		parser->buffer[parser->idx++] = ch;
 914	} else {
 915		ret = -EINVAL;
 916		goto out;
 917	}
 918
 919	*ppos += read;
 920	ret = read;
 921
 922out:
 923	return ret;
 924}
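/*
 * Lifecycle sketch (illustration only, using the parser helpers declared
 * in trace.h): a caller's ->write() handler typically does
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		... act on the token in parser.buffer ...
 *	trace_parser_put(&parser);
 *
 * parser.cont carries a partially read token over to the next write.
 */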
 925
 926ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 927{
 928	int len;
 929	int ret;
 930
 931	if (!cnt)
 932		return 0;
 933
 934	if (s->len <= s->readpos)
 935		return -EBUSY;
 936
 937	len = s->len - s->readpos;
 938	if (cnt > len)
 939		cnt = len;
 940	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
 941	if (ret == cnt)
 942		return -EFAULT;
 943
 944	cnt -= ret;
 945
 946	s->readpos += cnt;
 947	return cnt;
 948}
 949
 950static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 951{
 952	int len;
 953
 954	if (s->len <= s->readpos)
 955		return -EBUSY;
 956
 957	len = s->len - s->readpos;
 958	if (cnt > len)
 959		cnt = len;
 960	memcpy(buf, s->buffer + s->readpos, cnt);
 961
 962	s->readpos += cnt;
 963	return cnt;
 964}
 965
 966/*
 967 * ftrace_max_lock is used to protect the swapping of buffers
 968 * when taking a max snapshot. The buffers themselves are
 969 * protected by per_cpu spinlocks. But the action of the swap
 970 * needs its own lock.
 971 *
  972 * This is defined as an arch_spinlock_t in order to help
  973 * with performance when lockdep debugging is enabled.
  974 *
  975 * It is also used in places other than update_max_tr(), so it
  976 * needs to be defined outside of the CONFIG_TRACER_MAX_TRACE
  977 * #ifdef.
 978 */
 979static arch_spinlock_t ftrace_max_lock =
 980	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 981
 982unsigned long __read_mostly	tracing_thresh;
 983
 984#ifdef CONFIG_TRACER_MAX_TRACE
 985unsigned long __read_mostly	tracing_max_latency;
 986
 987/*
 988 * Copy the new maximum trace into the separate maximum-trace
 989 * structure. (this way the maximum trace is permanently saved,
 990 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 991 */
 992static void
 993__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 994{
 995	struct trace_buffer *trace_buf = &tr->trace_buffer;
 996	struct trace_buffer *max_buf = &tr->max_buffer;
 997	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
 998	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
 999
1000	max_buf->cpu = cpu;
1001	max_buf->time_start = data->preempt_timestamp;
1002
1003	max_data->saved_latency = tracing_max_latency;
1004	max_data->critical_start = data->critical_start;
1005	max_data->critical_end = data->critical_end;
1006
1007	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1008	max_data->pid = tsk->pid;
1009	/*
1010	 * If tsk == current, then use current_uid(), as that does not use
1011	 * RCU. The irq tracer can be called out of RCU scope.
1012	 */
1013	if (tsk == current)
1014		max_data->uid = current_uid();
1015	else
1016		max_data->uid = task_uid(tsk);
1017
1018	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1019	max_data->policy = tsk->policy;
1020	max_data->rt_priority = tsk->rt_priority;
1021
1022	/* record this tasks comm */
1023	tracing_record_cmdline(tsk);
1024}
1025
1026/**
1027 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1028 * @tr: tracer
1029 * @tsk: the task with the latency
1030 * @cpu: The cpu that initiated the trace.
1031 *
1032 * Flip the buffers between the @tr and the max_tr and record information
1033 * about which task was the cause of this latency.
1034 */
1035void
1036update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1037{
1038	struct ring_buffer *buf;
1039
1040	if (tr->stop_count)
1041		return;
1042
1043	WARN_ON_ONCE(!irqs_disabled());
1044
1045	if (!tr->allocated_snapshot) {
1046		/* Only the nop tracer should hit this when disabling */
1047		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1048		return;
1049	}
1050
1051	arch_spin_lock(&ftrace_max_lock);
1052
1053	buf = tr->trace_buffer.buffer;
1054	tr->trace_buffer.buffer = tr->max_buffer.buffer;
1055	tr->max_buffer.buffer = buf;
1056
1057	__update_max_tr(tr, tsk, cpu);
1058	arch_spin_unlock(&ftrace_max_lock);
1059}
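/*
 * What the swap above does (illustration only): no events are copied;
 * the live and max ring buffers simply trade places, so the max buffer
 * now holds the latency trace that was just recorded:
 *
 *	before:  trace_buffer -> A,  max_buffer -> B
 *	after:   trace_buffer -> B,  max_buffer -> A
 */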
1060
1061/**
1062 * update_max_tr_single - only copy one trace over, and reset the rest
 1063 * @tr: tracer
 1064 * @tsk: task with the latency
 1065 * @cpu: the cpu of the buffer to copy.
1066 *
1067 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1068 */
1069void
1070update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1071{
1072	int ret;
1073
1074	if (tr->stop_count)
1075		return;
1076
1077	WARN_ON_ONCE(!irqs_disabled());
1078	if (!tr->allocated_snapshot) {
1079		/* Only the nop tracer should hit this when disabling */
1080		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1081		return;
1082	}
1083
1084	arch_spin_lock(&ftrace_max_lock);
1085
1086	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1087
1088	if (ret == -EBUSY) {
1089		/*
1090		 * We failed to swap the buffer due to a commit taking
1091		 * place on this CPU. We fail to record, but we reset
1092		 * the max trace buffer (no one writes directly to it)
1093		 * and flag that it failed.
1094		 */
1095		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1096			"Failed to swap buffers due to commit in progress\n");
1097	}
1098
1099	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1100
1101	__update_max_tr(tr, tsk, cpu);
1102	arch_spin_unlock(&ftrace_max_lock);
1103}
1104#endif /* CONFIG_TRACER_MAX_TRACE */
1105
1106static void default_wait_pipe(struct trace_iterator *iter)
1107{
1108	/* Iterators are static, they should be filled or empty */
1109	if (trace_buffer_iter(iter, iter->cpu_file))
1110		return;
1111
 1112	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
 1113}
1114
1115#ifdef CONFIG_FTRACE_STARTUP_TEST
1116static int run_tracer_selftest(struct tracer *type)
1117{
1118	struct trace_array *tr = &global_trace;
1119	struct tracer *saved_tracer = tr->current_trace;
1120	int ret;
1121
1122	if (!type->selftest || tracing_selftest_disabled)
1123		return 0;
1124
1125	/*
1126	 * Run a selftest on this tracer.
1127	 * Here we reset the trace buffer, and set the current
1128	 * tracer to be this tracer. The tracer can then run some
1129	 * internal tracing to verify that everything is in order.
1130	 * If we fail, we do not register this tracer.
1131	 */
1132	tracing_reset_online_cpus(&tr->trace_buffer);
1133
1134	tr->current_trace = type;
1135
1136#ifdef CONFIG_TRACER_MAX_TRACE
1137	if (type->use_max_tr) {
1138		/* If we expanded the buffers, make sure the max is expanded too */
1139		if (ring_buffer_expanded)
1140			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1141					   RING_BUFFER_ALL_CPUS);
1142		tr->allocated_snapshot = true;
1143	}
1144#endif
1145
1146	/* the test is responsible for initializing and enabling */
1147	pr_info("Testing tracer %s: ", type->name);
1148	ret = type->selftest(type, tr);
1149	/* the test is responsible for resetting too */
1150	tr->current_trace = saved_tracer;
1151	if (ret) {
1152		printk(KERN_CONT "FAILED!\n");
1153		/* Add the warning after printing 'FAILED' */
1154		WARN_ON(1);
1155		return -1;
1156	}
1157	/* Only reset on passing, to avoid touching corrupted buffers */
1158	tracing_reset_online_cpus(&tr->trace_buffer);
1159
1160#ifdef CONFIG_TRACER_MAX_TRACE
1161	if (type->use_max_tr) {
1162		tr->allocated_snapshot = false;
1163
1164		/* Shrink the max buffer again */
1165		if (ring_buffer_expanded)
1166			ring_buffer_resize(tr->max_buffer.buffer, 1,
1167					   RING_BUFFER_ALL_CPUS);
1168	}
1169#endif
1170
1171	printk(KERN_CONT "PASSED\n");
1172	return 0;
1173}
1174#else
1175static inline int run_tracer_selftest(struct tracer *type)
1176{
1177	return 0;
1178}
1179#endif /* CONFIG_FTRACE_STARTUP_TEST */
 1180
1181/**
1182 * register_tracer - register a tracer with the ftrace system.
 1183 * @type: the plugin for the tracer
1184 *
1185 * Register a new plugin tracer.
1186 */
1187int register_tracer(struct tracer *type)
1188{
1189	struct tracer *t;
1190	int ret = 0;
1191
1192	if (!type->name) {
1193		pr_info("Tracer must have a name\n");
1194		return -1;
1195	}
1196
1197	if (strlen(type->name) >= MAX_TRACER_SIZE) {
1198		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1199		return -1;
1200	}
1201
1202	mutex_lock(&trace_types_lock);
1203
1204	tracing_selftest_running = true;
1205
1206	for (t = trace_types; t; t = t->next) {
1207		if (strcmp(type->name, t->name) == 0) {
1208			/* already found */
1209			pr_info("Tracer %s already registered\n",
1210				type->name);
1211			ret = -1;
1212			goto out;
1213		}
1214	}
1215
1216	if (!type->set_flag)
1217		type->set_flag = &dummy_set_flag;
1218	if (!type->flags)
1219		type->flags = &dummy_tracer_flags;
 1220	else
1221		if (!type->flags->opts)
1222			type->flags->opts = dummy_tracer_opt;
1223	if (!type->wait_pipe)
 1224		type->wait_pipe = default_wait_pipe;
 1225
1226	ret = run_tracer_selftest(type);
1227	if (ret < 0)
1228		goto out;
1229
1230	type->next = trace_types;
 1231	trace_types = type;
 1232
1233 out:
1234	tracing_selftest_running = false;
1235	mutex_unlock(&trace_types_lock);
1236
1237	if (ret || !default_bootup_tracer)
1238		goto out_unlock;
1239
1240	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1241		goto out_unlock;
1242
1243	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1244	/* Do we want this tracer to start on bootup? */
1245	tracing_set_tracer(&global_trace, type->name);
 1246	default_bootup_tracer = NULL;
 1247	/* Disable other selftests, since this tracer will break them. */
1248	tracing_selftest_disabled = true;
1249#ifdef CONFIG_FTRACE_STARTUP_TEST
1250	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1251	       type->name);
1252#endif
1253
1254 out_unlock:
1255	return ret;
1256}
1257
1258void tracing_reset(struct trace_buffer *buf, int cpu)
1259{
1260	struct ring_buffer *buffer = buf->buffer;
1261
1262	if (!buffer)
1263		return;
1264
1265	ring_buffer_record_disable(buffer);
1266
1267	/* Make sure all commits have finished */
1268	synchronize_sched();
1269	ring_buffer_reset_cpu(buffer, cpu);
1270
1271	ring_buffer_record_enable(buffer);
1272}
1273
1274void tracing_reset_online_cpus(struct trace_buffer *buf)
1275{
1276	struct ring_buffer *buffer = buf->buffer;
1277	int cpu;
1278
1279	if (!buffer)
1280		return;
1281
1282	ring_buffer_record_disable(buffer);
1283
1284	/* Make sure all commits have finished */
1285	synchronize_sched();
1286
1287	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1288
1289	for_each_online_cpu(cpu)
1290		ring_buffer_reset_cpu(buffer, cpu);
1291
1292	ring_buffer_record_enable(buffer);
1293}
1294
1295/* Must have trace_types_lock held */
1296void tracing_reset_all_online_cpus(void)
1297{
1298	struct trace_array *tr;
1299
1300	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1301		tracing_reset_online_cpus(&tr->trace_buffer);
1302#ifdef CONFIG_TRACER_MAX_TRACE
1303		tracing_reset_online_cpus(&tr->max_buffer);
1304#endif
1305	}
1306}
1307
1308#define SAVED_CMDLINES 128
1309#define NO_CMDLINE_MAP UINT_MAX
1310static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1311static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
1312static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
1313static int cmdline_idx;
 1314static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1315
 1316/* temporarily disable recording */
1317static atomic_t trace_record_cmdline_disabled __read_mostly;
1318
 1319static void trace_init_cmdlines(void)
1320{
1321	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
1322	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
 1323	cmdline_idx = 0;
1324}
1325
1326int is_tracing_stopped(void)
1327{
1328	return global_trace.stop_count;
1329}
1330
1331/**
1332 * tracing_start - quick start of the tracer
1333 *
1334 * If tracing is enabled but was stopped by tracing_stop,
1335 * this will start the tracer back up.
1336 */
1337void tracing_start(void)
1338{
1339	struct ring_buffer *buffer;
1340	unsigned long flags;
1341
1342	if (tracing_disabled)
1343		return;
1344
1345	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1346	if (--global_trace.stop_count) {
1347		if (global_trace.stop_count < 0) {
1348			/* Someone screwed up their debugging */
1349			WARN_ON_ONCE(1);
1350			global_trace.stop_count = 0;
1351		}
1352		goto out;
1353	}
1354
1355	/* Prevent the buffers from switching */
1356	arch_spin_lock(&ftrace_max_lock);
1357
1358	buffer = global_trace.trace_buffer.buffer;
1359	if (buffer)
1360		ring_buffer_record_enable(buffer);
1361
1362#ifdef CONFIG_TRACER_MAX_TRACE
1363	buffer = global_trace.max_buffer.buffer;
1364	if (buffer)
1365		ring_buffer_record_enable(buffer);
1366#endif
1367
1368	arch_spin_unlock(&ftrace_max_lock);
1369
1370	ftrace_start();
1371 out:
1372	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1373}
1374
1375static void tracing_start_tr(struct trace_array *tr)
1376{
1377	struct ring_buffer *buffer;
1378	unsigned long flags;
1379
1380	if (tracing_disabled)
1381		return;
1382
1383	/* If global, we need to also start the max tracer */
1384	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1385		return tracing_start();
1386
1387	raw_spin_lock_irqsave(&tr->start_lock, flags);
1388
1389	if (--tr->stop_count) {
1390		if (tr->stop_count < 0) {
1391			/* Someone screwed up their debugging */
1392			WARN_ON_ONCE(1);
1393			tr->stop_count = 0;
1394		}
1395		goto out;
1396	}
1397
1398	buffer = tr->trace_buffer.buffer;
1399	if (buffer)
1400		ring_buffer_record_enable(buffer);
1401
1402 out:
1403	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1404}
1405
1406/**
1407 * tracing_stop - quick stop of the tracer
1408 *
 1409 * Lightweight way to stop tracing. Use in conjunction with
1410 * tracing_start.
1411 */
1412void tracing_stop(void)
1413{
1414	struct ring_buffer *buffer;
1415	unsigned long flags;
1416
1417	ftrace_stop();
1418	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1419	if (global_trace.stop_count++)
1420		goto out;
1421
1422	/* Prevent the buffers from switching */
1423	arch_spin_lock(&ftrace_max_lock);
1424
1425	buffer = global_trace.trace_buffer.buffer;
1426	if (buffer)
1427		ring_buffer_record_disable(buffer);
1428
1429#ifdef CONFIG_TRACER_MAX_TRACE
1430	buffer = global_trace.max_buffer.buffer;
1431	if (buffer)
1432		ring_buffer_record_disable(buffer);
1433#endif
1434
1435	arch_spin_unlock(&ftrace_max_lock);
1436
1437 out:
1438	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1439}
1440
1441static void tracing_stop_tr(struct trace_array *tr)
1442{
1443	struct ring_buffer *buffer;
1444	unsigned long flags;
1445
1446	/* If global, we need to also stop the max tracer */
1447	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1448		return tracing_stop();
1449
1450	raw_spin_lock_irqsave(&tr->start_lock, flags);
1451	if (tr->stop_count++)
1452		goto out;
1453
1454	buffer = tr->trace_buffer.buffer;
1455	if (buffer)
1456		ring_buffer_record_disable(buffer);
1457
1458 out:
1459	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1460}
1461
1462void trace_stop_cmdline_recording(void);
1463
1464static void trace_save_cmdline(struct task_struct *tsk)
1465{
1466	unsigned pid, idx;
1467
1468	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1469		return;
1470
1471	/*
1472	 * It's not the end of the world if we don't get
1473	 * the lock, but we also don't want to spin
1474	 * nor do we want to disable interrupts,
1475	 * so if we miss here, then better luck next time.
1476	 */
1477	if (!arch_spin_trylock(&trace_cmdline_lock))
1478		return;
1479
1480	idx = map_pid_to_cmdline[tsk->pid];
1481	if (idx == NO_CMDLINE_MAP) {
1482		idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1483
1484		/*
1485		 * Check whether the cmdline buffer at idx has a pid
1486		 * mapped. We are going to overwrite that entry so we
1487		 * need to clear the map_pid_to_cmdline. Otherwise we
1488		 * would read the new comm for the old pid.
1489		 */
1490		pid = map_cmdline_to_pid[idx];
1491		if (pid != NO_CMDLINE_MAP)
1492			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1493
1494		map_cmdline_to_pid[idx] = tsk->pid;
1495		map_pid_to_cmdline[tsk->pid] = idx;
1496
1497		cmdline_idx = idx;
1498	}
1499
1500	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1501
 1502	arch_spin_unlock(&trace_cmdline_lock);
1503}
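/*
 * Example (illustration only): with SAVED_CMDLINES == 128, suppose slot
 * idx currently maps to old pid 300 and we save pid 4097's comm:
 *
 *	map_pid_to_cmdline[300]  = NO_CMDLINE_MAP;	evict the old owner
 *	map_cmdline_to_pid[idx]  = 4097;
 *	map_pid_to_cmdline[4097] = idx;
 *	saved_cmdlines[idx]      = tsk->comm (TASK_COMM_LEN bytes)
 *
 * A later trace_find_cmdline(300) then reports "<...>".
 */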
1504
1505void trace_find_cmdline(int pid, char comm[])
1506{
1507	unsigned map;
1508
1509	if (!pid) {
1510		strcpy(comm, "<idle>");
1511		return;
1512	}
1513
1514	if (WARN_ON_ONCE(pid < 0)) {
1515		strcpy(comm, "<XXX>");
1516		return;
1517	}
1518
1519	if (pid > PID_MAX_DEFAULT) {
1520		strcpy(comm, "<...>");
1521		return;
1522	}
1523
1524	preempt_disable();
1525	arch_spin_lock(&trace_cmdline_lock);
1526	map = map_pid_to_cmdline[pid];
1527	if (map != NO_CMDLINE_MAP)
1528		strcpy(comm, saved_cmdlines[map]);
1529	else
 1530		strcpy(comm, "<...>");
1531
1532	arch_spin_unlock(&trace_cmdline_lock);
1533	preempt_enable();
1534}
1535
1536void tracing_record_cmdline(struct task_struct *tsk)
1537{
1538	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1539		return;
1540
1541	if (!__this_cpu_read(trace_cmdline_save))
1542		return;
1543
1544	__this_cpu_write(trace_cmdline_save, false);
1545
1546	trace_save_cmdline(tsk);
1547}
1548
1549void
1550tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1551			     int pc)
1552{
1553	struct task_struct *tsk = current;
1554
1555	entry->preempt_count		= pc & 0xff;
1556	entry->pid			= (tsk) ? tsk->pid : 0;
1557	entry->flags =
1558#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1559		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1560#else
1561		TRACE_FLAG_IRQS_NOSUPPORT |
 1562#endif
 1563		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1564		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1565		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1566		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1567}
1568EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1569
1570struct ring_buffer_event *
1571trace_buffer_lock_reserve(struct ring_buffer *buffer,
1572			  int type,
1573			  unsigned long len,
1574			  unsigned long flags, int pc)
1575{
1576	struct ring_buffer_event *event;
1577
1578	event = ring_buffer_lock_reserve(buffer, len);
1579	if (event != NULL) {
1580		struct trace_entry *ent = ring_buffer_event_data(event);
1581
1582		tracing_generic_entry_update(ent, flags, pc);
1583		ent->type = type;
1584	}
1585
1586	return event;
1587}
1588
1589void
1590__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1591{
1592	__this_cpu_write(trace_cmdline_save, true);
1593	ring_buffer_unlock_commit(buffer, event);
1594}
1595
1596static inline void
1597__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1598			     struct ring_buffer_event *event,
1599			     unsigned long flags, int pc)
1600{
1601	__buffer_unlock_commit(buffer, event);
1602
1603	ftrace_trace_stack(buffer, flags, 6, pc);
1604	ftrace_trace_userstack(buffer, flags, pc);
1605}
1606
1607void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1608				struct ring_buffer_event *event,
1609				unsigned long flags, int pc)
1610{
1611	__trace_buffer_unlock_commit(buffer, event, flags, pc);
1612}
1613EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1614
1615static struct ring_buffer *temp_buffer;
1616
1617struct ring_buffer_event *
1618trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1619			  struct ftrace_event_file *ftrace_file,
1620			  int type, unsigned long len,
1621			  unsigned long flags, int pc)
1622{
1623	struct ring_buffer_event *entry;
1624
1625	*current_rb = ftrace_file->tr->trace_buffer.buffer;
1626	entry = trace_buffer_lock_reserve(*current_rb,
1627					 type, len, flags, pc);
1628	/*
 1629	 * If tracing is off, but we have triggers enabled,
 1630	 * we still need to look at the event data. Use the temp_buffer
 1631	 * to store the trace event for the trigger to use. It's recursion
 1632	 * safe and will not be recorded anywhere.
1633	 */
1634	if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1635		*current_rb = temp_buffer;
1636		entry = trace_buffer_lock_reserve(*current_rb,
1637						  type, len, flags, pc);
1638	}
1639	return entry;
1640}
1641EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1642
1643struct ring_buffer_event *
1644trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1645				  int type, unsigned long len,
1646				  unsigned long flags, int pc)
1647{
1648	*current_rb = global_trace.trace_buffer.buffer;
1649	return trace_buffer_lock_reserve(*current_rb,
1650					 type, len, flags, pc);
1651}
1652EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1653
1654void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1655					struct ring_buffer_event *event,
1656					unsigned long flags, int pc)
1657{
1658	__trace_buffer_unlock_commit(buffer, event, flags, pc);
1659}
1660EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1661
1662void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1663				     struct ring_buffer_event *event,
1664				     unsigned long flags, int pc,
1665				     struct pt_regs *regs)
1666{
1667	__buffer_unlock_commit(buffer, event);
1668
1669	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1670	ftrace_trace_userstack(buffer, flags, pc);
1671}
1672EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1673
1674void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1675					 struct ring_buffer_event *event)
1676{
1677	ring_buffer_discard_commit(buffer, event);
1678}
1679EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1680
1681void
1682trace_function(struct trace_array *tr,
1683	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
1684	       int pc)
1685{
1686	struct ftrace_event_call *call = &event_function;
1687	struct ring_buffer *buffer = tr->trace_buffer.buffer;
1688	struct ring_buffer_event *event;
1689	struct ftrace_entry *entry;
1690
1691	/* If we are reading the ring buffer, don't trace */
1692	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1693		return;
1694
1695	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1696					  flags, pc);
1697	if (!event)
1698		return;
1699	entry	= ring_buffer_event_data(event);
1700	entry->ip			= ip;
1701	entry->parent_ip		= parent_ip;
1702
1703	if (!call_filter_check_discard(call, entry, buffer, event))
1704		__buffer_unlock_commit(buffer, event);
1705}
1706
1707#ifdef CONFIG_STACKTRACE
1708
1709#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1710struct ftrace_stack {
1711	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
1712};
1713
1714static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1715static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1716
1717static void __ftrace_trace_stack(struct ring_buffer *buffer,
1718				 unsigned long flags,
1719				 int skip, int pc, struct pt_regs *regs)
1720{
1721	struct ftrace_event_call *call = &event_kernel_stack;
1722	struct ring_buffer_event *event;
1723	struct stack_entry *entry;
1724	struct stack_trace trace;
1725	int use_stack;
1726	int size = FTRACE_STACK_ENTRIES;
1727
1728	trace.nr_entries	= 0;
1729	trace.skip		= skip;
1730
1731	/*
1732	 * Since events can happen in NMIs there's no safe way to
1733	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1734	 * or NMI comes in, it will just have to use the default
1735	 * FTRACE_STACK_SIZE.
1736	 */
1737	preempt_disable_notrace();
1738
1739	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1740	/*
1741	 * We don't need any atomic variables, just a barrier.
1742	 * If an interrupt comes in, we don't care, because it would
1743	 * have exited and put the counter back to what we want.
1744	 * We just need a barrier to keep gcc from moving things
1745	 * around.
1746	 */
1747	barrier();
1748	if (use_stack == 1) {
1749		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
1750		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;
1751
1752		if (regs)
1753			save_stack_trace_regs(regs, &trace);
1754		else
1755			save_stack_trace(&trace);
1756
1757		if (trace.nr_entries > size)
1758			size = trace.nr_entries;
1759	} else
1760		/* From now on, use_stack is a boolean */
1761		use_stack = 0;
1762
1763	size *= sizeof(unsigned long);
1764
1765	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1766					  sizeof(*entry) + size, flags, pc);
1767	if (!event)
1768		goto out;
1769	entry = ring_buffer_event_data(event);
1770
1771	memset(&entry->caller, 0, size);
1772
1773	if (use_stack)
1774		memcpy(&entry->caller, trace.entries,
1775		       trace.nr_entries * sizeof(unsigned long));
1776	else {
1777		trace.max_entries	= FTRACE_STACK_ENTRIES;
1778		trace.entries		= entry->caller;
1779		if (regs)
1780			save_stack_trace_regs(regs, &trace);
1781		else
1782			save_stack_trace(&trace);
1783	}
1784
1785	entry->size = trace.nr_entries;
1786
1787	if (!call_filter_check_discard(call, entry, buffer, event))
1788		__buffer_unlock_commit(buffer, event);
1789
1790 out:
1791	/* Again, don't let gcc optimize things here */
1792	barrier();
1793	__this_cpu_dec(ftrace_stack_reserve);
1794	preempt_enable_notrace();
1795
1796}
1797
1798void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
 1799			     int skip, int pc, struct pt_regs *regs)
1800{
1801	if (!(trace_flags & TRACE_ITER_STACKTRACE))
1802		return;
1803
1804	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
1805}
1806
1807void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1808			int skip, int pc)
1809{
1810	if (!(trace_flags & TRACE_ITER_STACKTRACE))
1811		return;
1812
1813	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1814}
1815
1816void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1817		   int pc)
1818{
1819	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1820}
1821
1822/**
1823 * trace_dump_stack - record a stack back trace in the trace buffer
1824 * @skip: Number of functions to skip (helper handlers)
1825 */
1826void trace_dump_stack(int skip)
1827{
1828	unsigned long flags;
1829
1830	if (tracing_disabled || tracing_selftest_running)
1831		return;
1832
1833	local_save_flags(flags);
1834
1835	/*
 1836	 * Skip 3 more; that seems to get us to the caller of
 1837	 * this function.
1838	 */
1839	skip += 3;
1840	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
1841			     flags, skip, preempt_count(), NULL);
1842}
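/*
 * Example call (illustration only): a driver chasing an unexpected code
 * path can record its own backtrace into the trace buffer, which is far
 * cheaper than dumping it to the console:
 *
 *	trace_dump_stack(0);
 */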
1843
1844static DEFINE_PER_CPU(int, user_stack_count);
1845
1846void
1847ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1848{
1849	struct ftrace_event_call *call = &event_user_stack;
1850	struct ring_buffer_event *event;
1851	struct userstack_entry *entry;
1852	struct stack_trace trace;
1853
1854	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1855		return;
1856
1857	/*
 1858	 * NMIs can not handle page faults, even with fixups.
 1859	 * Saving the user stack can (and often does) fault.
1860	 */
1861	if (unlikely(in_nmi()))
1862		return;
1863
1864	/*
1865	 * prevent recursion, since the user stack tracing may
1866	 * trigger other kernel events.
1867	 */
1868	preempt_disable();
1869	if (__this_cpu_read(user_stack_count))
1870		goto out;
1871
1872	__this_cpu_inc(user_stack_count);
1873
1874	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1875					  sizeof(*entry), flags, pc);
1876	if (!event)
1877		goto out_drop_count;
1878	entry	= ring_buffer_event_data(event);
1879
1880	entry->tgid		= current->tgid;
1881	memset(&entry->caller, 0, sizeof(entry->caller));
1882
1883	trace.nr_entries	= 0;
1884	trace.max_entries	= FTRACE_STACK_ENTRIES;
1885	trace.skip		= 0;
1886	trace.entries		= entry->caller;
1887
1888	save_stack_trace_user(&trace);
1889	if (!call_filter_check_discard(call, entry, buffer, event))
1890		__buffer_unlock_commit(buffer, event);
1891
1892 out_drop_count:
1893	__this_cpu_dec(user_stack_count);
1894 out:
1895	preempt_enable();
1896}
1897
1898#ifdef UNUSED
1899static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1900{
1901	ftrace_trace_userstack(tr, flags, preempt_count());
1902}
1903#endif /* UNUSED */
1904
1905#endif /* CONFIG_STACKTRACE */
1906
1907/* created for use with alloc_percpu */
1908struct trace_buffer_struct {
1909	char buffer[TRACE_BUF_SIZE];
1910};
1911
1912static struct trace_buffer_struct *trace_percpu_buffer;
1913static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1914static struct trace_buffer_struct *trace_percpu_irq_buffer;
1915static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1916
1917/*
 1918 * The buffer used depends on the context. There is a per cpu
 1919 * buffer for normal context, softirq context, hard irq context and
 1920 * NMI context. This allows for lockless recording.
 1921 *
 1922 * Note, if the buffers failed to be allocated, then this returns NULL.
1923 */
1924static char *get_trace_buf(void)
1925{
1926	struct trace_buffer_struct *percpu_buffer;
1927
1928	/*
1929	 * If we have allocated per cpu buffers, then we do not
1930	 * need to do any locking.
1931	 */
1932	if (in_nmi())
1933		percpu_buffer = trace_percpu_nmi_buffer;
1934	else if (in_irq())
1935		percpu_buffer = trace_percpu_irq_buffer;
1936	else if (in_softirq())
1937		percpu_buffer = trace_percpu_sirq_buffer;
1938	else
1939		percpu_buffer = trace_percpu_buffer;
1940
1941	if (!percpu_buffer)
1942		return NULL;
1943
1944	return this_cpu_ptr(&percpu_buffer->buffer[0]);
1945}
1946
1947static int alloc_percpu_trace_buffer(void)
1948{
1949	struct trace_buffer_struct *buffers;
1950	struct trace_buffer_struct *sirq_buffers;
1951	struct trace_buffer_struct *irq_buffers;
1952	struct trace_buffer_struct *nmi_buffers;
1953
1954	buffers = alloc_percpu(struct trace_buffer_struct);
1955	if (!buffers)
1956		goto err_warn;
1957
1958	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1959	if (!sirq_buffers)
1960		goto err_sirq;
1961
1962	irq_buffers = alloc_percpu(struct trace_buffer_struct);
1963	if (!irq_buffers)
1964		goto err_irq;
1965
1966	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1967	if (!nmi_buffers)
1968		goto err_nmi;
1969
1970	trace_percpu_buffer = buffers;
1971	trace_percpu_sirq_buffer = sirq_buffers;
1972	trace_percpu_irq_buffer = irq_buffers;
1973	trace_percpu_nmi_buffer = nmi_buffers;
1974
1975	return 0;
1976
1977 err_nmi:
1978	free_percpu(irq_buffers);
1979 err_irq:
1980	free_percpu(sirq_buffers);
1981 err_sirq:
1982	free_percpu(buffers);
1983 err_warn:
1984	WARN(1, "Could not allocate percpu trace_printk buffer");
1985	return -ENOMEM;
1986}
1987
1988static int buffers_allocated;
1989
1990void trace_printk_init_buffers(void)
1991{
1992	if (buffers_allocated)
1993		return;
1994
1995	if (alloc_percpu_trace_buffer())
1996		return;
1997
1998	pr_info("ftrace: Allocated trace_printk buffers\n");
1999
2000	/* Expand the buffers to set size */
2001	tracing_update_buffers();
2002
2003	buffers_allocated = 1;
2004
2005	/*
2006	 * trace_printk_init_buffers() can be called by modules.
2007	 * If that happens, then we need to start cmdline recording
2008	 * directly here. If the global_trace.buffer is already
2009	 * allocated, then we were called by module code.
2010	 */
2011	if (global_trace.trace_buffer.buffer)
2012		tracing_start_cmdline_record();
2013}
2014
2015void trace_printk_start_comm(void)
2016{
2017	/* Start tracing comms if trace printk is set */
2018	if (!buffers_allocated)
2019		return;
2020	tracing_start_cmdline_record();
2021}
2022
2023static void trace_printk_start_stop_comm(int enabled)
2024{
2025	if (!buffers_allocated)
2026		return;
2027
2028	if (enabled)
2029		tracing_start_cmdline_record();
2030	else
2031		tracing_stop_cmdline_record();
2032}
2033
2034/**
2035 * trace_vbprintk - write binary msg to tracing buffer
2036 * @ip: caller address; @fmt: format string; @args: format arguments
2037 */
2038int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2039{
2040	struct ftrace_event_call *call = &event_bprint;
2041	struct ring_buffer_event *event;
2042	struct ring_buffer *buffer;
2043	struct trace_array *tr = &global_trace;
2044	struct bprint_entry *entry;
2045	unsigned long flags;
2046	char *tbuffer;
2047	int len = 0, size, pc;
2048
2049	if (unlikely(tracing_selftest_running || tracing_disabled))
2050		return 0;
2051
2052	/* Don't pollute graph traces with trace_vprintk internals */
2053	pause_graph_tracing();
2054
2055	pc = preempt_count();
2056	preempt_disable_notrace();
2057
2058	tbuffer = get_trace_buf();
2059	if (!tbuffer) {
2060		len = 0;
2061		goto out;
2062	}
2063
2064	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2065
2066	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2067		goto out;
2068
2069	local_save_flags(flags);
2070	size = sizeof(*entry) + sizeof(u32) * len;
2071	buffer = tr->trace_buffer.buffer;
2072	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2073					  flags, pc);
2074	if (!event)
2075		goto out;
2076	entry = ring_buffer_event_data(event);
2077	entry->ip			= ip;
2078	entry->fmt			= fmt;
2079
2080	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2081	if (!call_filter_check_discard(call, entry, buffer, event)) {
2082		__buffer_unlock_commit(buffer, event);
2083		ftrace_trace_stack(buffer, flags, 6, pc);
2084	}
2085
2086out:
2087	preempt_enable_notrace();
2088	unpause_graph_tracing();
2089
2090	return len;
2091}
2092EXPORT_SYMBOL_GPL(trace_vbprintk);
2093
2094static int
2095__trace_array_vprintk(struct ring_buffer *buffer,
2096		      unsigned long ip, const char *fmt, va_list args)
2097{
2098	struct ftrace_event_call *call = &event_print;
2099	struct ring_buffer_event *event;
2100	int len = 0, size, pc;
2101	struct print_entry *entry;
2102	unsigned long flags;
2103	char *tbuffer;
2104
2105	if (tracing_disabled || tracing_selftest_running)
2106		return 0;
2107
2108	/* Don't pollute graph traces with trace_vprintk internals */
2109	pause_graph_tracing();
2110
2111	pc = preempt_count();
2112	preempt_disable_notrace();
2113
2114
2115	tbuffer = get_trace_buf();
2116	if (!tbuffer) {
2117		len = 0;
2118		goto out;
2119	}
2120
2121	len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2122	if (len > TRACE_BUF_SIZE)
2123		goto out;
2124
2125	local_save_flags(flags);
2126	size = sizeof(*entry) + len + 1;
2127	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2128					  flags, pc);
2129	if (!event)
2130		goto out;
2131	entry = ring_buffer_event_data(event);
2132	entry->ip = ip;
2133
2134	memcpy(&entry->buf, tbuffer, len);
2135	entry->buf[len] = '\0';
2136	if (!call_filter_check_discard(call, entry, buffer, event)) {
2137		__buffer_unlock_commit(buffer, event);
2138		ftrace_trace_stack(buffer, flags, 6, pc);
2139	}
2140 out:
2141	preempt_enable_notrace();
2142	unpause_graph_tracing();
2143
2144	return len;
2145}
2146
2147int trace_array_vprintk(struct trace_array *tr,
2148			unsigned long ip, const char *fmt, va_list args)
2149{
2150	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2151}
2152
2153int trace_array_printk(struct trace_array *tr,
2154		       unsigned long ip, const char *fmt, ...)
2155{
2156	int ret;
2157	va_list ap;
2158
2159	if (!(trace_flags & TRACE_ITER_PRINTK))
2160		return 0;
2161
2162	va_start(ap, fmt);
2163	ret = trace_array_vprintk(tr, ip, fmt, ap);
2164	va_end(ap);
2165	return ret;
2166}
2167
2168int trace_array_printk_buf(struct ring_buffer *buffer,
2169			   unsigned long ip, const char *fmt, ...)
2170{
2171	int ret;
2172	va_list ap;
2173
2174	if (!(trace_flags & TRACE_ITER_PRINTK))
2175		return 0;
2176
2177	va_start(ap, fmt);
2178	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2179	va_end(ap);
2180	return ret;
2181}
2182
2183int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2184{
2185	return trace_array_vprintk(&global_trace, ip, fmt, args);
2186}
2187EXPORT_SYMBOL_GPL(trace_vprintk);
2188
2189static void trace_iterator_increment(struct trace_iterator *iter)
2190{
2191	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2192
2193	iter->idx++;
2194	if (buf_iter)
2195		ring_buffer_read(buf_iter, NULL);
2196}
2197
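/*
 * Peek at the next entry for @cpu without consuming it. The ring
 * buffer iterator is used when one exists (a static read of "trace"),
 * otherwise the live buffer is peeked at directly. The entry's size
 * is stored in iter->ent_size as a side effect.
 */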
2198static struct trace_entry *
2199peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2200		unsigned long *lost_events)
2201{
2202	struct ring_buffer_event *event;
2203	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2204
2205	if (buf_iter)
2206		event = ring_buffer_iter_peek(buf_iter, ts);
2207	else
2208		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2209					 lost_events);
2210
2211	if (event) {
2212		iter->ent_size = ring_buffer_event_length(event);
2213		return ring_buffer_event_data(event);
2214	}
2215	iter->ent_size = 0;
2216	return NULL;
2217}
2218
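/*
 * Walk every per-CPU buffer (or only @cpu_file) and return the entry
 * with the oldest timestamp, reporting its cpu, timestamp and
 * lost-event count through the out parameters.
 */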
2219static struct trace_entry *
2220__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2221		  unsigned long *missing_events, u64 *ent_ts)
2222{
2223	struct ring_buffer *buffer = iter->trace_buffer->buffer;
2224	struct trace_entry *ent, *next = NULL;
2225	unsigned long lost_events = 0, next_lost = 0;
2226	int cpu_file = iter->cpu_file;
2227	u64 next_ts = 0, ts;
2228	int next_cpu = -1;
2229	int next_size = 0;
2230	int cpu;
2231
2232	/*
2233	 * If we are in a per_cpu trace file, don't bother iterating over
2234	 * all the CPUs; peek at that one CPU directly.
2235	 */
2236	if (cpu_file > RING_BUFFER_ALL_CPUS) {
2237		if (ring_buffer_empty_cpu(buffer, cpu_file))
2238			return NULL;
2239		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2240		if (ent_cpu)
2241			*ent_cpu = cpu_file;
2242
2243		return ent;
2244	}
2245
2246	for_each_tracing_cpu(cpu) {
2247
2248		if (ring_buffer_empty_cpu(buffer, cpu))
2249			continue;
2250
2251		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2252
2253		/*
2254		 * Pick the entry with the smallest timestamp:
2255		 */
2256		if (ent && (!next || ts < next_ts)) {
2257			next = ent;
2258			next_cpu = cpu;
2259			next_ts = ts;
2260			next_lost = lost_events;
2261			next_size = iter->ent_size;
2262		}
2263	}
2264
2265	iter->ent_size = next_size;
2266
2267	if (ent_cpu)
2268		*ent_cpu = next_cpu;
2269
2270	if (ent_ts)
2271		*ent_ts = next_ts;
2272
2273	if (missing_events)
2274		*missing_events = next_lost;
2275
2276	return next;
2277}
2278
2279/* Find the next real entry, without updating the iterator itself */
2280struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2281					  int *ent_cpu, u64 *ent_ts)
2282{
2283	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2284}
2285
2286/* Find the next real entry, and increment the iterator to the next entry */
2287void *trace_find_next_entry_inc(struct trace_iterator *iter)
2288{
2289	iter->ent = __find_next_entry(iter, &iter->cpu,
2290				      &iter->lost_events, &iter->ts);
2291
2292	if (iter->ent)
2293		trace_iterator_increment(iter);
2294
2295	return iter->ent ? iter : NULL;
2296}
2297
2298static void trace_consume(struct trace_iterator *iter)
2299{
2300	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2301			    &iter->lost_events);
2302}
2303
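/* seq_file .next callback: advance the trace iterator to entry *pos */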
2304static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2305{
2306	struct trace_iterator *iter = m->private;
2307	int i = (int)*pos;
2308	void *ent;
2309
2310	WARN_ON_ONCE(iter->leftover);
2311
2312	(*pos)++;
2313
2314	/* can't go backwards */
2315	if (iter->idx > i)
2316		return NULL;
2317
2318	if (iter->idx < 0)
2319		ent = trace_find_next_entry_inc(iter);
2320	else
2321		ent = iter;
2322
2323	while (ent && iter->idx < i)
2324		ent = trace_find_next_entry_inc(iter);
2325
2326	iter->pos = *pos;
2327
2328	return ent;
2329}
2330
2331void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2332{
2333	struct ring_buffer_event *event;
2334	struct ring_buffer_iter *buf_iter;
2335	unsigned long entries = 0;
2336	u64 ts;
2337
2338	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2339
2340	buf_iter = trace_buffer_iter(iter, cpu);
2341	if (!buf_iter)
2342		return;
2343
2344	ring_buffer_iter_reset(buf_iter);
2345
2346	/*
2347	 * With the max latency tracers, a reset may never have taken
2348	 * place on a cpu. This is evident when the timestamp is before
2349	 * the start of the buffer.
2350	 */
2351	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2352		if (ts >= iter->trace_buffer->time_start)
2353			break;
2354		entries++;
2355		ring_buffer_read(buf_iter, NULL);
2356	}
2357
2358	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2359}
2360
2361/*
2362 * The current tracer is copied to avoid taking a global
2363 * lock all around.
2364 */
2365static void *s_start(struct seq_file *m, loff_t *pos)
2366{
2367	struct trace_iterator *iter = m->private;
2368	struct trace_array *tr = iter->tr;
2369	int cpu_file = iter->cpu_file;
2370	void *p = NULL;
2371	loff_t l = 0;
2372	int cpu;
2373
2374	/*
2375	 * copy the tracer to avoid using a global lock all around.
2376	 * iter->trace is a copy of current_trace, the pointer to the
2377	 * name may be used instead of a strcmp(), as iter->trace->name
2378	 * will point to the same string as current_trace->name.
2379	 */
2380	mutex_lock(&trace_types_lock);
2381	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2382		*iter->trace = *tr->current_trace;
2383	mutex_unlock(&trace_types_lock);
2384
2385#ifdef CONFIG_TRACER_MAX_TRACE
2386	if (iter->snapshot && iter->trace->use_max_tr)
2387		return ERR_PTR(-EBUSY);
2388#endif
2389
2390	if (!iter->snapshot)
2391		atomic_inc(&trace_record_cmdline_disabled);
2392
2393	if (*pos != iter->pos) {
2394		iter->ent = NULL;
2395		iter->cpu = 0;
2396		iter->idx = -1;
2397
2398		if (cpu_file == RING_BUFFER_ALL_CPUS) {
2399			for_each_tracing_cpu(cpu)
2400				tracing_iter_reset(iter, cpu);
2401		} else
2402			tracing_iter_reset(iter, cpu_file);
2403
2404		iter->leftover = 0;
2405		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2406			;
2407
2408	} else {
2409		/*
2410		 * If we overflowed the seq_file before, then we want
2411		 * to just reuse the trace_seq buffer again.
2412		 */
2413		if (iter->leftover)
2414			p = iter;
2415		else {
2416			l = *pos - 1;
2417			p = s_next(m, p, &l);
2418		}
2419	}
2420
2421	trace_event_read_lock();
2422	trace_access_lock(cpu_file);
2423	return p;
2424}
2425
2426static void s_stop(struct seq_file *m, void *p)
2427{
2428	struct trace_iterator *iter = m->private;
2429
2430#ifdef CONFIG_TRACER_MAX_TRACE
2431	if (iter->snapshot && iter->trace->use_max_tr)
2432		return;
2433#endif
2434
2435	if (!iter->snapshot)
2436		atomic_dec(&trace_record_cmdline_disabled);
2437
2438	trace_access_unlock(iter->cpu_file);
2439	trace_event_read_unlock();
2440}
2441
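/*
 * Sum up, across all tracing CPUs, the events still in the buffer
 * (@entries) and the total written including overruns (@total).
 */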
2442static void
2443get_total_entries(struct trace_buffer *buf,
2444		  unsigned long *total, unsigned long *entries)
2445{
2446	unsigned long count;
2447	int cpu;
2448
2449	*total = 0;
2450	*entries = 0;
2451
2452	for_each_tracing_cpu(cpu) {
2453		count = ring_buffer_entries_cpu(buf->buffer, cpu);
2454		/*
2455		 * If this buffer has skipped entries, then we hold all
2456		 * entries for the trace and we need to ignore the
2457		 * ones recorded before the buffer's start time stamp.
2458		 */
2459		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2460			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2461			/* total is the same as the entries */
2462			*total += count;
2463		} else
2464			*total += count +
2465				ring_buffer_overrun_cpu(buf->buffer, cpu);
2466		*entries += count;
2467	}
2468}
2469
2470static void print_lat_help_header(struct seq_file *m)
2471{
2472	seq_puts(m, "#                  _------=> CPU#            \n");
2473	seq_puts(m, "#                 / _-----=> irqs-off        \n");
2474	seq_puts(m, "#                | / _----=> need-resched    \n");
2475	seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2476	seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
2477	seq_puts(m, "#                |||| /     delay             \n");
2478	seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2479	seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
2480}
2481
2482static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2483{
2484	unsigned long total;
2485	unsigned long entries;
2486
2487	get_total_entries(buf, &total, &entries);
2488	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2489		   entries, total, num_online_cpus());
2490	seq_puts(m, "#\n");
2491}
2492
2493static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2494{
2495	print_event_info(buf, m);
2496	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
2497	seq_puts(m, "#              | |       |          |         |\n");
2498}
2499
2500static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2501{
2502	print_event_info(buf, m);
2503	seq_puts(m, "#                              _-----=> irqs-off\n");
2504	seq_puts(m, "#                             / _----=> need-resched\n");
2505	seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2506	seq_puts(m, "#                            || / _--=> preempt-depth\n");
2507	seq_puts(m, "#                            ||| /     delay\n");
2508	seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2509	seq_puts(m, "#              | |       |   ||||       |         |\n");
2510}
2511
2512void
2513print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2514{
2515	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2516	struct trace_buffer *buf = iter->trace_buffer;
2517	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2518	struct tracer *type = iter->trace;
2519	unsigned long entries;
2520	unsigned long total;
2521	const char *name;
2522
2523	name = type->name;
2524
2525	get_total_entries(buf, &total, &entries);
2526
2527	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2528		   name, UTS_RELEASE);
2529	seq_puts(m, "# -----------------------------------"
2530		 "---------------------------------\n");
2531	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2532		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2533		   nsecs_to_usecs(data->saved_latency),
2534		   entries,
2535		   total,
2536		   buf->cpu,
2537#if defined(CONFIG_PREEMPT_NONE)
2538		   "server",
2539#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2540		   "desktop",
2541#elif defined(CONFIG_PREEMPT)
2542		   "preempt",
2543#else
2544		   "unknown",
2545#endif
2546		   /* These are reserved for later use */
2547		   0, 0, 0, 0);
2548#ifdef CONFIG_SMP
2549	seq_printf(m, " #P:%d)\n", num_online_cpus());
2550#else
2551	seq_puts(m, ")\n");
2552#endif
2553	seq_puts(m, "#    -----------------\n");
2554	seq_printf(m, "#    | task: %.16s-%d "
2555		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2556		   data->comm, data->pid,
2557		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2558		   data->policy, data->rt_priority);
2559	seq_puts(m, "#    -----------------\n");
2560
2561	if (data->critical_start) {
2562		seq_puts(m, "#  => started at: ");
2563		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2564		trace_print_seq(m, &iter->seq);
2565		seq_puts(m, "\n#  => ended at:   ");
2566		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2567		trace_print_seq(m, &iter->seq);
2568		seq_puts(m, "\n#\n");
2569	}
2570
2571	seq_puts(m, "#\n");
2572}
2573
2574static void test_cpu_buff_start(struct trace_iterator *iter)
2575{
2576	struct trace_seq *s = &iter->seq;
2577
2578	if (!(trace_flags & TRACE_ITER_ANNOTATE))
2579		return;
2580
2581	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2582		return;
2583
2584	if (cpumask_test_cpu(iter->cpu, iter->started))
2585		return;
2586
2587	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2588		return;
2589
2590	cpumask_set_cpu(iter->cpu, iter->started);
2591
2592	/* Don't print the "buffer started" annotation for the first entry of the trace */
2593	if (iter->idx > 1)
2594		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2595				iter->cpu);
2596}
2597
2598static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2599{
2600	struct trace_seq *s = &iter->seq;
2601	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2602	struct trace_entry *entry;
2603	struct trace_event *event;
2604
2605	entry = iter->ent;
2606
2607	test_cpu_buff_start(iter);
2608
2609	event = ftrace_find_event(entry->type);
2610
2611	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2612		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2613			if (!trace_print_lat_context(iter))
2614				goto partial;
2615		} else {
2616			if (!trace_print_context(iter))
2617				goto partial;
2618		}
2619	}
2620
2621	if (event)
2622		return event->funcs->trace(iter, sym_flags, event);
2623
2624	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2625		goto partial;
2626
2627	return TRACE_TYPE_HANDLED;
2628partial:
2629	return TRACE_TYPE_PARTIAL_LINE;
2630}
2631
2632static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2633{
2634	struct trace_seq *s = &iter->seq;
2635	struct trace_entry *entry;
2636	struct trace_event *event;
2637
2638	entry = iter->ent;
2639
2640	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2641		if (!trace_seq_printf(s, "%d %d %llu ",
2642				      entry->pid, iter->cpu, iter->ts))
2643			goto partial;
2644	}
2645
2646	event = ftrace_find_event(entry->type);
2647	if (event)
2648		return event->funcs->raw(iter, 0, event);
2649
2650	if (!trace_seq_printf(s, "%d ?\n", entry->type))
2651		goto partial;
2652
2653	return TRACE_TYPE_HANDLED;
2654partial:
2655	return TRACE_TYPE_PARTIAL_LINE;
2656}
2657
2658static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2659{
2660	struct trace_seq *s = &iter->seq;
2661	unsigned char newline = '\n';
2662	struct trace_entry *entry;
2663	struct trace_event *event;
2664
2665	entry = iter->ent;
2666
2667	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2668		SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2669		SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2670		SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2671	}
2672
2673	event = ftrace_find_event(entry->type);
2674	if (event) {
2675		enum print_line_t ret = event->funcs->hex(iter, 0, event);
2676		if (ret != TRACE_TYPE_HANDLED)
2677			return ret;
2678	}
2679
2680	SEQ_PUT_FIELD_RET(s, newline);
2681
2682	return TRACE_TYPE_HANDLED;
2683}
2684
2685static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2686{
2687	struct trace_seq *s = &iter->seq;
2688	struct trace_entry *entry;
2689	struct trace_event *event;
2690
2691	entry = iter->ent;
2692
2693	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2694		SEQ_PUT_FIELD_RET(s, entry->pid);
2695		SEQ_PUT_FIELD_RET(s, iter->cpu);
2696		SEQ_PUT_FIELD_RET(s, iter->ts);
2697	}
2698
2699	event = ftrace_find_event(entry->type);
2700	return event ? event->funcs->binary(iter, 0, event) :
2701		TRACE_TYPE_HANDLED;
2702}
2703
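/* Return 1 if there is nothing left to read in the buffer(s), else 0 */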
2704int trace_empty(struct trace_iterator *iter)
2705{
2706	struct ring_buffer_iter *buf_iter;
2707	int cpu;
2708
2709	/* If we are looking at one CPU buffer, only check that one */
2710	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2711		cpu = iter->cpu_file;
2712		buf_iter = trace_buffer_iter(iter, cpu);
2713		if (buf_iter) {
2714			if (!ring_buffer_iter_empty(buf_iter))
2715				return 0;
2716		} else {
2717			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2718				return 0;
2719		}
2720		return 1;
2721	}
2722
2723	for_each_tracing_cpu(cpu) {
2724		buf_iter = trace_buffer_iter(iter, cpu);
2725		if (buf_iter) {
2726			if (!ring_buffer_iter_empty(buf_iter))
2727				return 0;
2728		} else {
2729			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2730				return 0;
2731		}
2732	}
2733
2734	return 1;
2735}
2736
2737/*  Called with trace_event_read_lock() held. */
2738enum print_line_t print_trace_line(struct trace_iterator *iter)
2739{
2740	enum print_line_t ret;
2741
2742	if (iter->lost_events &&
2743	    !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2744				 iter->cpu, iter->lost_events))
2745		return TRACE_TYPE_PARTIAL_LINE;
2746
2747	if (iter->trace && iter->trace->print_line) {
2748		ret = iter->trace->print_line(iter);
2749		if (ret != TRACE_TYPE_UNHANDLED)
2750			return ret;
2751	}
2752
2753	if (iter->ent->type == TRACE_BPUTS &&
2754			trace_flags & TRACE_ITER_PRINTK &&
2755			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2756		return trace_print_bputs_msg_only(iter);
2757
2758	if (iter->ent->type == TRACE_BPRINT &&
2759			trace_flags & TRACE_ITER_PRINTK &&
2760			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2761		return trace_print_bprintk_msg_only(iter);
2762
2763	if (iter->ent->type == TRACE_PRINT &&
2764			trace_flags & TRACE_ITER_PRINTK &&
2765			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2766		return trace_print_printk_msg_only(iter);
2767
2768	if (trace_flags & TRACE_ITER_BIN)
2769		return print_bin_fmt(iter);
2770
2771	if (trace_flags & TRACE_ITER_HEX)
2772		return print_hex_fmt(iter);
2773
2774	if (trace_flags & TRACE_ITER_RAW)
2775		return print_raw_fmt(iter);
2776
2777	return print_trace_fmt(iter);
2778}
2779
2780void trace_latency_header(struct seq_file *m)
2781{
2782	struct trace_iterator *iter = m->private;
2783
2784	/* print nothing if the buffers are empty */
2785	if (trace_empty(iter))
2786		return;
2787
2788	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2789		print_trace_header(m, iter);
2790
2791	if (!(trace_flags & TRACE_ITER_VERBOSE))
2792		print_lat_help_header(m);
2793}
2794
2795void trace_default_header(struct seq_file *m)
2796{
2797	struct trace_iterator *iter = m->private;
2798
2799	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2800		return;
2801
2802	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2803		/* print nothing if the buffers are empty */
2804		if (trace_empty(iter))
2805			return;
2806		print_trace_header(m, iter);
2807		if (!(trace_flags & TRACE_ITER_VERBOSE))
2808			print_lat_help_header(m);
2809	} else {
2810		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2811			if (trace_flags & TRACE_ITER_IRQ_INFO)
2812				print_func_help_header_irq(iter->trace_buffer, m);
2813			else
2814				print_func_help_header(iter->trace_buffer, m);
2815		}
2816	}
2817}
2818
2819static void test_ftrace_alive(struct seq_file *m)
2820{
2821	if (!ftrace_is_dead())
2822		return;
2823	seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2824	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2825}
2826
2827#ifdef CONFIG_TRACER_MAX_TRACE
2828static void show_snapshot_main_help(struct seq_file *m)
2829{
2830	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2831	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2832	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
2833	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
2834	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2835	seq_printf(m, "#                       is not a '0' or '1')\n");
2836}
2837
2838static void show_snapshot_percpu_help(struct seq_file *m)
2839{
2840	seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2841#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2842	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2843	seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
2844#else
2845	seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2846	seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
2847#endif
2848	seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2849	seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2850	seq_printf(m, "#                       is not a '0' or '1')\n");
2851}
2852
2853static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2854{
2855	if (iter->tr->allocated_snapshot)
2856		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2857	else
2858		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2859
2860	seq_printf(m, "# Snapshot commands:\n");
2861	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2862		show_snapshot_main_help(m);
2863	else
2864		show_snapshot_percpu_help(m);
2865}
2866#else
2867/* Should never be called */
2868static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2869#endif
2870
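/*
 * seq_file .show callback: print the trace headers at the start of
 * output, flush a line left over from a previous seq_file overflow,
 * or print the next trace line.
 */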
2871static int s_show(struct seq_file *m, void *v)
2872{
2873	struct trace_iterator *iter = v;
2874	int ret;
2875
2876	if (iter->ent == NULL) {
2877		if (iter->tr) {
2878			seq_printf(m, "# tracer: %s\n", iter->trace->name);
2879			seq_puts(m, "#\n");
2880			test_ftrace_alive(m);
2881		}
2882		if (iter->snapshot && trace_empty(iter))
2883			print_snapshot_help(m, iter);
2884		else if (iter->trace && iter->trace->print_header)
2885			iter->trace->print_header(m);
2886		else
2887			trace_default_header(m);
2888
2889	} else if (iter->leftover) {
2890		/*
2891		 * If we filled the seq_file buffer earlier, we
2892		 * want to just show it now.
2893		 */
2894		ret = trace_print_seq(m, &iter->seq);
2895
2896		/* ret should this time be zero, but you never know */
2897		iter->leftover = ret;
2898
2899	} else {
2900		print_trace_line(iter);
2901		ret = trace_print_seq(m, &iter->seq);
2902		/*
2903		 * If we overflow the seq_file buffer, then it will
2904		 * ask us for this data again at start up.
2905		 * Use that instead.
2906		 *  ret is 0 if seq_file write succeeded.
2907		 *        -1 otherwise.
2908		 */
2909		iter->leftover = ret;
2910	}
2911
2912	return 0;
2913}
2914
2915/*
2916 * Should be used after trace_array_get(), trace_types_lock
2917 * ensures that i_cdev was already initialized.
2918 */
2919static inline int tracing_get_cpu(struct inode *inode)
2920{
2921	if (inode->i_cdev) /* See trace_create_cpu_file() */
2922		return (long)inode->i_cdev - 1;
2923	return RING_BUFFER_ALL_CPUS;
2924}
2925
2926static const struct seq_operations tracer_seq_ops = {
2927	.start		= s_start,
2928	.next		= s_next,
2929	.stop		= s_stop,
2930	.show		= s_show,
2931};
2932
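/*
 * Common open path for the "trace" file: copy the current tracer,
 * select the live or max (snapshot) buffer, stop tracing unless a
 * snapshot is being read, and prepare the per-CPU buffer iterators.
 */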
2933static struct trace_iterator *
2934__tracing_open(struct inode *inode, struct file *file, bool snapshot)
2935{
2936	struct trace_array *tr = inode->i_private;
2937	struct trace_iterator *iter;
2938	int cpu;
2939
2940	if (tracing_disabled)
2941		return ERR_PTR(-ENODEV);
2942
2943	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2944	if (!iter)
2945		return ERR_PTR(-ENOMEM);
2946
2947	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2948				    GFP_KERNEL);
2949	if (!iter->buffer_iter)
2950		goto release;
2951
2952	/*
2953	 * We make a copy of the current tracer to avoid concurrent
2954	 * changes to it while we are reading.
2955	 */
2956	mutex_lock(&trace_types_lock);
2957	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2958	if (!iter->trace)
2959		goto fail;
2960
2961	*iter->trace = *tr->current_trace;
2962
2963	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2964		goto fail;
2965
2966	iter->tr = tr;
2967
2968#ifdef CONFIG_TRACER_MAX_TRACE
2969	/* Currently only the top directory has a snapshot */
2970	if (tr->current_trace->print_max || snapshot)
2971		iter->trace_buffer = &tr->max_buffer;
2972	else
2973#endif
2974		iter->trace_buffer = &tr->trace_buffer;
2975	iter->snapshot = snapshot;
2976	iter->pos = -1;
2977	iter->cpu_file = tracing_get_cpu(inode);
2978	mutex_init(&iter->mutex);
2979
2980	/* Notify the tracer early; before we stop tracing. */
2981	if (iter->trace && iter->trace->open)
2982		iter->trace->open(iter);
2983
2984	/* Annotate start of buffers if we had overruns */
2985	if (ring_buffer_overruns(iter->trace_buffer->buffer))
2986		iter->iter_flags |= TRACE_FILE_ANNOTATE;
2987
2988	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
2989	if (trace_clocks[tr->clock_id].in_ns)
2990		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2991
2992	/* stop the trace while dumping if we are not opening "snapshot" */
2993	if (!iter->snapshot)
2994		tracing_stop_tr(tr);
2995
2996	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
2997		for_each_tracing_cpu(cpu) {
2998			iter->buffer_iter[cpu] =
2999				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3000		}
3001		ring_buffer_read_prepare_sync();
3002		for_each_tracing_cpu(cpu) {
3003			ring_buffer_read_start(iter->buffer_iter[cpu]);
3004			tracing_iter_reset(iter, cpu);
3005		}
3006	} else {
3007		cpu = iter->cpu_file;
3008		iter->buffer_iter[cpu] =
3009			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3010		ring_buffer_read_prepare_sync();
3011		ring_buffer_read_start(iter->buffer_iter[cpu]);
3012		tracing_iter_reset(iter, cpu);
3013	}
3014
3015	mutex_unlock(&trace_types_lock);
3016
3017	return iter;
3018
3019 fail:
3020	mutex_unlock(&trace_types_lock);
3021	kfree(iter->trace);
3022	kfree(iter->buffer_iter);
3023release:
3024	seq_release_private(inode, file);
3025	return ERR_PTR(-ENOMEM);
3026}
3027
3028int tracing_open_generic(struct inode *inode, struct file *filp)
3029{
3030	if (tracing_disabled)
3031		return -ENODEV;
3032
3033	filp->private_data = inode->i_private;
3034	return 0;
3035}
3036
3037bool tracing_is_disabled(void)
3038{
3039	return tracing_disabled ? true : false;
3040}
3041
3042/*
3043 * Open and update trace_array ref count.
3044 * Must have the current trace_array passed to it.
3045 */
3046static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3047{
3048	struct trace_array *tr = inode->i_private;
3049
3050	if (tracing_disabled)
3051		return -ENODEV;
3052
3053	if (trace_array_get(tr) < 0)
3054		return -ENODEV;
3055
3056	filp->private_data = inode->i_private;
3057
3058	return 0;
3059}
3060
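/*
 * Release path for the "trace" file: tear down the per-CPU buffer
 * iterators, let the tracer clean up after itself, and restart
 * tracing if this reader had stopped it.
 */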
3061static int tracing_release(struct inode *inode, struct file *file)
3062{
3063	struct trace_array *tr = inode->i_private;
3064	struct seq_file *m = file->private_data;
3065	struct trace_iterator *iter;
3066	int cpu;
3067
3068	if (!(file->f_mode & FMODE_READ)) {
3069		trace_array_put(tr);
3070		return 0;
3071	}
3072
3073	/* Writes do not use seq_file */
3074	iter = m->private;
3075	mutex_lock(&trace_types_lock);
3076
3077	for_each_tracing_cpu(cpu) {
3078		if (iter->buffer_iter[cpu])
3079			ring_buffer_read_finish(iter->buffer_iter[cpu]);
3080	}
3081
3082	if (iter->trace && iter->trace->close)
3083		iter->trace->close(iter);
3084
3085	if (!iter->snapshot)
3086		/* reenable tracing if it was previously enabled */
3087		tracing_start_tr(tr);
3088
3089	__trace_array_put(tr);
3090
3091	mutex_unlock(&trace_types_lock);
3092
3093	mutex_destroy(&iter->mutex);
3094	free_cpumask_var(iter->started);
3095	kfree(iter->trace);
3096	kfree(iter->buffer_iter);
3097	seq_release_private(inode, file);
3098
3099	return 0;
3100}
3101
3102static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3103{
3104	struct trace_array *tr = inode->i_private;
3105
3106	trace_array_put(tr);
3107	return 0;
3108}
3109
3110static int tracing_single_release_tr(struct inode *inode, struct file *file)
3111{
3112	struct trace_array *tr = inode->i_private;
3113
3114	trace_array_put(tr);
3115
3116	return single_release(inode, file);
3117}
3118
3119static int tracing_open(struct inode *inode, struct file *file)
3120{
3121	struct trace_array *tr = inode->i_private;
3122	struct trace_iterator *iter;
3123	int ret = 0;
3124
3125	if (trace_array_get(tr) < 0)
3126		return -ENODEV;
3127
3128	/* If this file was open for write, then erase contents */
3129	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3130		int cpu = tracing_get_cpu(inode);
3131
3132		if (cpu == RING_BUFFER_ALL_CPUS)
3133			tracing_reset_online_cpus(&tr->trace_buffer);
3134		else
3135			tracing_reset(&tr->trace_buffer, cpu);
3136	}
3137
3138	if (file->f_mode & FMODE_READ) {
3139		iter = __tracing_open(inode, file, false);
3140		if (IS_ERR(iter))
3141			ret = PTR_ERR(iter);
3142		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3143			iter->iter_flags |= TRACE_FILE_LAT_FMT;
3144	}
3145
3146	if (ret < 0)
3147		trace_array_put(tr);
3148
3149	return ret;
3150}
3151
3152/*
3153 * Some tracers are not suitable for instance buffers.
3154 * A tracer is always available for the global (top-level) array,
3155 * or for an instance if it explicitly states that it is.
3156 */
3157static bool
3158trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3159{
3160	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3161}
3162
3163/* Find the next tracer that this trace array may use */
3164static struct tracer *
3165get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3166{
3167	while (t && !trace_ok_for_array(t, tr))
3168		t = t->next;
3169
3170	return t;
3171}
3172
3173static void *
3174t_next(struct seq_file *m, void *v, loff_t *pos)
3175{
3176	struct trace_array *tr = m->private;
3177	struct tracer *t = v;
3178
3179	(*pos)++;
3180
3181	if (t)
3182		t = get_tracer_for_array(tr, t->next);
3183
3184	return t;
3185}
3186
3187static void *t_start(struct seq_file *m, loff_t *pos)
3188{
3189	struct trace_array *tr = m->private;
3190	struct tracer *t;
3191	loff_t l = 0;
3192
3193	mutex_lock(&trace_types_lock);
3194
3195	t = get_tracer_for_array(tr, trace_types);
3196	for (; t && l < *pos; t = t_next(m, t, &l))
3197			;
3198
3199	return t;
3200}
3201
3202static void t_stop(struct seq_file *m, void *p)
3203{
3204	mutex_unlock(&trace_types_lock);
3205}
3206
3207static int t_show(struct seq_file *m, void *v)
3208{
3209	struct tracer *t = v;
3210
3211	if (!t)
3212		return 0;
3213
3214	seq_printf(m, "%s", t->name);
3215	if (t->next)
3216		seq_putc(m, ' ');
3217	else
3218		seq_putc(m, '\n');
3219
3220	return 0;
3221}
3222
3223static const struct seq_operations show_traces_seq_ops = {
3224	.start		= t_start,
3225	.next		= t_next,
3226	.stop		= t_stop,
3227	.show		= t_show,
3228};
3229
3230static int show_traces_open(struct inode *inode, struct file *file)
3231{
3232	struct trace_array *tr = inode->i_private;
3233	struct seq_file *m;
3234	int ret;
3235
3236	if (tracing_disabled)
3237		return -ENODEV;
3238
3239	ret = seq_open(file, &show_traces_seq_ops);
3240	if (ret)
3241		return ret;
3242
3243	m = file->private_data;
3244	m->private = tr;
3245
3246	return 0;
3247}
3248
3249static ssize_t
3250tracing_write_stub(struct file *filp, const char __user *ubuf,
3251		   size_t count, loff_t *ppos)
3252{
3253	return count;
3254}
3255
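/*
 * Only readers go through seq_file and can seek; writers simply have
 * their file position reset to zero.
 */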
3256loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3257{
3258	int ret;
3259
3260	if (file->f_mode & FMODE_READ)
3261		ret = seq_lseek(file, offset, whence);
3262	else
3263		file->f_pos = ret = 0;
3264
3265	return ret;
3266}
3267
3268static const struct file_operations tracing_fops = {
3269	.open		= tracing_open,
3270	.read		= seq_read,
3271	.write		= tracing_write_stub,
3272	.llseek		= tracing_lseek,
3273	.release	= tracing_release,
3274};
3275
3276static const struct file_operations show_traces_fops = {
3277	.open		= show_traces_open,
3278	.read		= seq_read,
3279	.release	= seq_release,
3280	.llseek		= seq_lseek,
3281};
3282
3283/*
3284 * The tracer itself will not take this lock, but still we want
3285 * to provide a consistent cpumask to user-space:
3286 */
3287static DEFINE_MUTEX(tracing_cpumask_update_lock);
3288
3289/*
3290 * Temporary storage for the character representation of the
3291 * CPU bitmask (and one more byte for the newline):
3292 */
3293static char mask_str[NR_CPUS + 1];
3294
3295static ssize_t
3296tracing_cpumask_read(struct file *filp, char __user *ubuf,
3297		     size_t count, loff_t *ppos)
3298{
3299	struct trace_array *tr = file_inode(filp)->i_private;
3300	int len;
3301
3302	mutex_lock(&tracing_cpumask_update_lock);
3303
3304	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
3305	if (count - len < 2) {
3306		count = -EINVAL;
3307		goto out_err;
3308	}
3309	len += sprintf(mask_str + len, "\n");
3310	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3311
3312out_err:
3313	mutex_unlock(&tracing_cpumask_update_lock);
3314
3315	return count;
3316}
3317
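/*
 * Update the tracing cpumask from user space: stop recording on CPUs
 * being removed from the mask and resume recording on CPUs being
 * added, adjusting the per-CPU disabled counters accordingly.
 */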
3318static ssize_t
3319tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3320		      size_t count, loff_t *ppos)
3321{
3322	struct trace_array *tr = file_inode(filp)->i_private;
3323	cpumask_var_t tracing_cpumask_new;
3324	int err, cpu;
3325
3326	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3327		return -ENOMEM;
3328
3329	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3330	if (err)
3331		goto err_unlock;
3332
3333	mutex_lock(&tracing_cpumask_update_lock);
3334
3335	local_irq_disable();
3336	arch_spin_lock(&ftrace_max_lock);
3337	for_each_tracing_cpu(cpu) {
3338		/*
3339		 * Increase/decrease the disabled counter if we are
3340		 * about to flip a bit in the cpumask:
3341		 */
3342		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3343				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3344			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3345			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3346		}
3347		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3348				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3349			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3350			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3351		}
3352	}
3353	arch_spin_unlock(&ftrace_max_lock);
3354	local_irq_enable();
3355
3356	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3357
3358	mutex_unlock(&tracing_cpumask_update_lock);
3359	free_cpumask_var(tracing_cpumask_new);
3360
3361	return count;
3362
3363err_unlock:
3364	free_cpumask_var(tracing_cpumask_new);
3365
3366	return err;
3367}
3368
3369static const struct file_operations tracing_cpumask_fops = {
3370	.open		= tracing_open_generic_tr,
3371	.read		= tracing_cpumask_read,
3372	.write		= tracing_cpumask_write,
3373	.release	= tracing_release_generic_tr,
3374	.llseek		= generic_file_llseek,
3375};
3376
3377static int tracing_trace_options_show(struct seq_file *m, void *v)
3378{
3379	struct tracer_opt *trace_opts;
3380	struct trace_array *tr = m->private;
3381	u32 tracer_flags;
3382	int i;
3383
3384	mutex_lock(&trace_types_lock);
3385	tracer_flags = tr->current_trace->flags->val;
3386	trace_opts = tr->current_trace->flags->opts;
3387
3388	for (i = 0; trace_options[i]; i++) {
3389		if (trace_flags & (1 << i))
3390			seq_printf(m, "%s\n", trace_options[i]);
3391		else
3392			seq_printf(m, "no%s\n", trace_options[i]);
3393	}
3394
3395	for (i = 0; trace_opts[i].name; i++) {
3396		if (tracer_flags & trace_opts[i].bit)
3397			seq_printf(m, "%s\n", trace_opts[i].name);
3398		else
3399			seq_printf(m, "no%s\n", trace_opts[i].name);
3400	}
3401	mutex_unlock(&trace_types_lock);
3402
3403	return 0;
3404}
3405
3406static int __set_tracer_option(struct trace_array *tr,
3407			       struct tracer_flags *tracer_flags,
3408			       struct tracer_opt *opts, int neg)
3409{
3410	struct tracer *trace = tr->current_trace;
3411	int ret;
3412
3413	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3414	if (ret)
3415		return ret;
3416
3417	if (neg)
3418		tracer_flags->val &= ~opts->bit;
3419	else
3420		tracer_flags->val |= opts->bit;
3421	return 0;
3422}
3423
3424/* Try to assign a tracer specific option */
3425static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3426{
3427	struct tracer *trace = tr->current_trace;
3428	struct tracer_flags *tracer_flags = trace->flags;
3429	struct tracer_opt *opts = NULL;
3430	int i;
3431
3432	for (i = 0; tracer_flags->opts[i].name; i++) {
3433		opts = &tracer_flags->opts[i];
3434
3435		if (strcmp(cmp, opts->name) == 0)
3436			return __set_tracer_option(tr, trace->flags, opts, neg);
3437	}
3438
3439	return -EINVAL;
3440}
3441
3442/* Some tracers require overwrite to stay enabled */
3443int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3444{
3445	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3446		return -1;
3447
3448	return 0;
3449}
3450
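/*
 * Set or clear one bit of the global trace_flags, giving the current
 * tracer a chance to refuse the change and propagating it to the
 * facilities that depend on it (cmdline recording, buffer overwrite
 * mode, trace_printk comm recording).
 */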
3451int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3452{
3453	/* do nothing if flag is already set */
3454	if (!!(trace_flags & mask) == !!enabled)
3455		return 0;
3456
3457	/* Give the tracer a chance to approve the change */
3458	if (tr->current_trace->flag_changed)
3459		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3460			return -EINVAL;
3461
3462	if (enabled)
3463		trace_flags |= mask;
3464	else
3465		trace_flags &= ~mask;
3466
3467	if (mask == TRACE_ITER_RECORD_CMD)
3468		trace_event_enable_cmd_record(enabled);
3469
3470	if (mask == TRACE_ITER_OVERWRITE) {
3471		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3472#ifdef CONFIG_TRACER_MAX_TRACE
3473		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3474#endif
3475	}
3476
3477	if (mask == TRACE_ITER_PRINTK)
3478		trace_printk_start_stop_comm(enabled);
3479
3480	return 0;
3481}
3482
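/*
 * Parse one option token, where a "no" prefix means negation, trying
 * the core trace options first and then the options specific to the
 * current tracer.
 */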
3483static int trace_set_options(struct trace_array *tr, char *option)
3484{
3485	char *cmp;
3486	int neg = 0;
3487	int ret = -ENODEV;
3488	int i;
3489
3490	cmp = strstrip(option);
3491
3492	if (strncmp(cmp, "no", 2) == 0) {
3493		neg = 1;
3494		cmp += 2;
3495	}
3496
3497	mutex_lock(&trace_types_lock);
3498
3499	for (i = 0; trace_options[i]; i++) {
3500		if (strcmp(cmp, trace_options[i]) == 0) {
3501			ret = set_tracer_flag(tr, 1 << i, !neg);
3502			break;
3503		}
3504	}
3505
3506	/* If no option could be set, test the specific tracer options */
3507	if (!trace_options[i])
3508		ret = set_tracer_option(tr, cmp, neg);
3509
3510	mutex_unlock(&trace_types_lock);
3511
3512	return ret;
3513}
3514
3515static ssize_t
3516tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3517			size_t cnt, loff_t *ppos)
3518{
3519	struct seq_file *m = filp->private_data;
3520	struct trace_array *tr = m->private;
3521	char buf[64];
3522	int ret;
3523
3524	if (cnt >= sizeof(buf))
3525		return -EINVAL;
3526
3527	if (copy_from_user(&buf, ubuf, cnt))
3528		return -EFAULT;
3529
3530	buf[cnt] = 0;
3531
3532	ret = trace_set_options(tr, buf);
3533	if (ret < 0)
3534		return ret;
3535
3536	*ppos += cnt;
3537
3538	return cnt;
3539}
3540
3541static int tracing_trace_options_open(struct inode *inode, struct file *file)
3542{
3543	struct trace_array *tr = inode->i_private;
3544	int ret;
3545
3546	if (tracing_disabled)
3547		return -ENODEV;
3548
3549	if (trace_array_get(tr) < 0)
3550		return -ENODEV;
3551
3552	ret = single_open(file, tracing_trace_options_show, inode->i_private);
3553	if (ret < 0)
3554		trace_array_put(tr);
3555
3556	return ret;
3557}
3558
3559static const struct file_operations tracing_iter_fops = {
3560	.open		= tracing_trace_options_open,
3561	.read		= seq_read,
3562	.llseek		= seq_lseek,
3563	.release	= tracing_single_release_tr,
3564	.write		= tracing_trace_options_write,
3565};
3566
3567static const char readme_msg[] =
3568	"tracing mini-HOWTO:\n\n"
3569	"# echo 0 > tracing_on : quick way to disable tracing\n"
3570	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3571	" Important files:\n"
3572	"  trace\t\t\t- The static contents of the buffer\n"
3573	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
3574	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3575	"  current_tracer\t- function and latency tracers\n"
3576	"  available_tracers\t- list of configured tracers for current_tracer\n"
3577	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
3578	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
3579	"  trace_clock\t\t- change the clock used to order events\n"
3580	"       local:   Per cpu clock but may not be synced across CPUs\n"
3581	"      global:   Synced across CPUs but slows tracing down.\n"
3582	"     counter:   Not a clock, but just an increment\n"
3583	"      uptime:   Jiffy counter from time of boot\n"
3584	"        perf:   Same clock that perf events use\n"
3585#ifdef CONFIG_X86_64
3586	"     x86-tsc:   TSC cycle counter\n"
3587#endif
3588	"\n  trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
3589	"  tracing_cpumask\t- Limit which CPUs to trace\n"
3590	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3591	"\t\t\t  Remove sub-buffer with rmdir\n"
3592	"  trace_options\t\t- Set format or modify how tracing happens\n"
3593	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
3594	"\t\t\t  option name\n"
3595#ifdef CONFIG_DYNAMIC_FTRACE
3596	"\n  available_filter_functions - list of functions that can be filtered on\n"
3597	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
3598	"\t\t\t  functions\n"
3599	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3600	"\t     modules: Can select a group via module\n"
3601	"\t      Format: :mod:<module-name>\n"
3602	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
3603	"\t    triggers: a command to perform when function is hit\n"
3604	"\t      Format: <function>:<trigger>[:count]\n"
3605	"\t     trigger: traceon, traceoff\n"
3606	"\t\t      enable_event:<system>:<event>\n"
3607	"\t\t      disable_event:<system>:<event>\n"
3608#ifdef CONFIG_STACKTRACE
3609	"\t\t      stacktrace\n"
3610#endif
3611#ifdef CONFIG_TRACER_SNAPSHOT
3612	"\t\t      snapshot\n"
3613#endif
3614	"\t\t      dump\n"
3615	"\t\t      cpudump\n"
3616	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
3617	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
3618	"\t     The first one will disable tracing every time do_fault is hit\n"
3619	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
3620	"\t       The first time do_trap is hit and it disables tracing, the\n"
3621	"\t       counter will decrement to 2. If tracing is already disabled,\n"
3622	"\t       the counter will not decrement. It only decrements when the\n"
3623	"\t       trigger did work\n"
3624	"\t     To remove a trigger without a count:\n"
3625	"\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
3626	"\t     To remove a trigger with a count:\n"
3627	"\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3628	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
3629	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3630	"\t    modules: Can select a group via module command :mod:\n"
3631	"\t    Does not accept triggers\n"
3632#endif /* CONFIG_DYNAMIC_FTRACE */
3633#ifdef CONFIG_FUNCTION_TRACER
3634	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3635	"\t\t    (function)\n"
3636#endif
3637#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3638	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3639	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3640#endif
3641#ifdef CONFIG_TRACER_SNAPSHOT
3642	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
3643	"\t\t\t  snapshot buffer. Read the contents for more\n"
3644	"\t\t\t  information\n"
3645#endif
3646#ifdef CONFIG_STACK_TRACER
3647	"  stack_trace\t\t- Shows the max stack trace when active\n"
3648	"  stack_max_size\t- Shows current max stack size that was traced\n"
3649	"\t\t\t  Write into this file to reset the max size (trigger a\n"
3650	"\t\t\t  new trace)\n"
3651#ifdef CONFIG_DYNAMIC_FTRACE
3652	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3653	"\t\t\t  traces\n"
3654#endif
3655#endif /* CONFIG_STACK_TRACER */
3656	"  events/\t\t- Directory containing all trace event subsystems:\n"
3657	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3658	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
3659	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3660	"\t\t\t  events\n"
3661	"      filter\t\t- If set, only events passing filter are traced\n"
3662	"  events/<system>/<event>/\t- Directory containing control files for\n"
3663	"\t\t\t  <event>:\n"
3664	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3665	"      filter\t\t- If set, only events passing filter are traced\n"
3666	"      trigger\t\t- If set, a command to perform when event is hit\n"
3667	"\t    Format: <trigger>[:count][if <filter>]\n"
3668	"\t   trigger: traceon, traceoff\n"
3669	"\t            enable_event:<system>:<event>\n"
3670	"\t            disable_event:<system>:<event>\n"
3671#ifdef CONFIG_STACKTRACE
3672	"\t\t    stacktrace\n"
3673#endif
3674#ifdef CONFIG_TRACER_SNAPSHOT
3675	"\t\t    snapshot\n"
3676#endif
3677	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
3678	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
3679	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3680	"\t                  events/block/block_unplug/trigger\n"
3681	"\t   The first disables tracing every time block_unplug is hit.\n"
3682	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
3683	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
3684	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3685	"\t   Like function triggers, the counter is only decremented if it\n"
3686	"\t    enabled or disabled tracing.\n"
3687	"\t   To remove a trigger without a count:\n"
3688	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
3689	"\t   To remove a trigger with a count:\n"
3690	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
3691	"\t   Filters can be ignored when removing a trigger.\n"
3692;
3693
3694static ssize_t
3695tracing_readme_read(struct file *filp, char __user *ubuf,
3696		       size_t cnt, loff_t *ppos)
3697{
3698	return simple_read_from_buffer(ubuf, cnt, ppos,
3699					readme_msg, strlen(readme_msg));
3700}
3701
3702static const struct file_operations tracing_readme_fops = {
3703	.open		= tracing_open_generic,
3704	.read		= tracing_readme_read,
3705	.llseek		= generic_file_llseek,
3706};
3707
3708static ssize_t
3709tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3710				size_t cnt, loff_t *ppos)
3711{
3712	char *buf_comm;
3713	char *file_buf;
3714	char *buf;
3715	int len = 0;
3716	int pid;
3717	int i;
3718
3719	file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3720	if (!file_buf)
3721		return -ENOMEM;
3722
3723	buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3724	if (!buf_comm) {
3725		kfree(file_buf);
3726		return -ENOMEM;
3727	}
3728
3729	buf = file_buf;
3730
3731	for (i = 0; i < SAVED_CMDLINES; i++) {
3732		int r;
3733
3734		pid = map_cmdline_to_pid[i];
3735		if (pid == -1 || pid == NO_CMDLINE_MAP)
3736			continue;
3737
3738		trace_find_cmdline(pid, buf_comm);
3739		r = sprintf(buf, "%d %s\n", pid, buf_comm);
3740		buf += r;
3741		len += r;
3742	}
3743
3744	len = simple_read_from_buffer(ubuf, cnt, ppos,
3745				      file_buf, len);
3746
3747	kfree(file_buf);
3748	kfree(buf_comm);
3749
3750	return len;
3751}
3752
3753static const struct file_operations tracing_saved_cmdlines_fops = {
3754	.open		= tracing_open_generic,
3755	.read		= tracing_saved_cmdlines_read,
3756	.llseek		= generic_file_llseek,
3757};
3758
3759static ssize_t
3760tracing_set_trace_read(struct file *filp, char __user *ubuf,
3761		       size_t cnt, loff_t *ppos)
3762{
3763	struct trace_array *tr = filp->private_data;
3764	char buf[MAX_TRACER_SIZE+2];
3765	int r;
3766
3767	mutex_lock(&trace_types_lock);
3768	r = sprintf(buf, "%s\n", tr->current_trace->name);
3769	mutex_unlock(&trace_types_lock);
3770
3771	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3772}
3773
3774int tracer_init(struct tracer *t, struct trace_array *tr)
3775{
3776	tracing_reset_online_cpus(&tr->trace_buffer);
3777	return t->init(tr);
3778}
3779
3780static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
3781{
3782	int cpu;
3783
3784	for_each_tracing_cpu(cpu)
3785		per_cpu_ptr(buf->data, cpu)->entries = val;
3786}
3787
3788#ifdef CONFIG_TRACER_MAX_TRACE
3789/* resize @trace_buf's buffer to the size of @size_buf's entries */
3790static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3791					struct trace_buffer *size_buf, int cpu_id)
3792{
3793	int cpu, ret = 0;
3794
3795	if (cpu_id == RING_BUFFER_ALL_CPUS) {
3796		for_each_tracing_cpu(cpu) {
3797			ret = ring_buffer_resize(trace_buf->buffer,
3798				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
3799			if (ret < 0)
3800				break;
3801			per_cpu_ptr(trace_buf->data, cpu)->entries =
3802				per_cpu_ptr(size_buf->data, cpu)->entries;
3803		}
3804	} else {
3805		ret = ring_buffer_resize(trace_buf->buffer,
3806				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
3807		if (ret == 0)
3808			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3809				per_cpu_ptr(size_buf->data, cpu_id)->entries;
3810	}
3811
3812	return ret;
3813}
3814#endif /* CONFIG_TRACER_MAX_TRACE */
3815
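/*
 * Resize the trace buffer (and the max/snapshot buffer when the
 * current tracer uses one), either for a single CPU or, with
 * RING_BUFFER_ALL_CPUS, for all of them.
 */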
3816static int __tracing_resize_ring_buffer(struct trace_array *tr,
3817					unsigned long size, int cpu)
3818{
3819	int ret;
3820
3821	/*
3822	 * If kernel or user changes the size of the ring buffer
3823	 * we use the size that was given, and we can forget about
3824	 * expanding it later.
3825	 */
3826	ring_buffer_expanded = true;
3827
3828	/* May be called before buffers are initialized */
3829	if (!tr->trace_buffer.buffer)
3830		return 0;
3831
3832	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
3833	if (ret < 0)
3834		return ret;
3835
3836#ifdef CONFIG_TRACER_MAX_TRACE
3837	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3838	    !tr->current_trace->use_max_tr)
3839		goto out;
3840
3841	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
3842	if (ret < 0) {
3843		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3844						     &tr->trace_buffer, cpu);
3845		if (r < 0) {
3846			/*
3847			 * AARGH! We are left with different
3848			 * size max buffer!!!!
3849			 * The max buffer is our "snapshot" buffer.
3850			 * When a tracer needs a snapshot (one of the
3851			 * latency tracers), it swaps the max buffer
3852			 * with the saved snapshot. We succeeded in
3853			 * updating the size of the main buffer, but failed to
3854			 * update the size of the max buffer. But when we tried
3855			 * to reset the main buffer to the original size, we
3856			 * failed there too. This is very unlikely to
3857			 * happen, but if it does, warn and kill all
3858			 * tracing.
3859			 */
3860			WARN_ON(1);
3861			tracing_disabled = 1;
3862		}
3863		return ret;
3864	}
3865
3866	if (cpu == RING_BUFFER_ALL_CPUS)
3867		set_buffer_entries(&tr->max_buffer, size);
3868	else
3869		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
3870
3871 out:
3872#endif /* CONFIG_TRACER_MAX_TRACE */
3873
3874	if (cpu == RING_BUFFER_ALL_CPUS)
3875		set_buffer_entries(&tr->trace_buffer, size);
3876	else
3877		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
3878
3879	return ret;
3880}
3881
3882static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3883					  unsigned long size, int cpu_id)
3884{
3885	int ret = size;
3886
3887	mutex_lock(&trace_types_lock);
3888
3889	if (cpu_id != RING_BUFFER_ALL_CPUS) {
3890		/* make sure, this cpu is enabled in the mask */
3891		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3892			ret = -EINVAL;
3893			goto out;
3894		}
3895	}
3896
3897	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
3898	if (ret < 0)
3899		ret = -ENOMEM;
3900
3901out:
3902	mutex_unlock(&trace_types_lock);
3903
3904	return ret;
3905}
3906
3907
3908/**
3909 * tracing_update_buffers - used by tracing facility to expand ring buffers
3910 *
3911 * To save memory when tracing is configured in but never used, the
3912 * ring buffers are initially set to a minimum size. But once a user
3913 * starts to use the tracing facility, they need to grow to their
3914 * default size.
3915 *
3916 * This function is to be called when a tracer is about to be used.
3917 */
3918int tracing_update_buffers(void)
3919{
3920	int ret = 0;
3921
3922	mutex_lock(&trace_types_lock);
3923	if (!ring_buffer_expanded)
3924		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
3925						RING_BUFFER_ALL_CPUS);
3926	mutex_unlock(&trace_types_lock);
3927
3928	return ret;
3929}
3930
3931struct trace_option_dentry;
3932
3933static struct trace_option_dentry *
3934create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
3935
3936static void
3937destroy_trace_option_files(struct trace_option_dentry *topts);
3938
3939/*
3940 * Used to clear out the tracer before deletion of an instance.
3941 * Must have trace_types_lock held.
3942 */
3943static void tracing_set_nop(struct trace_array *tr)
3944{
3945	if (tr->current_trace == &nop_trace)
3946		return;
3947
3948	tr->current_trace->enabled--;
3949
3950	if (tr->current_trace->reset)
3951		tr->current_trace->reset(tr);
3952
3953	tr->current_trace = &nop_trace;
3954}
3955
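/*
 * Select the current tracer for @tr by name.  A usage sketch from
 * userspace (assuming tracefs/debugfs is mounted at
 * /sys/kernel/debug/tracing):
 *
 *   cat available_tracers
 *   echo function > current_tracer
 *   echo nop > current_tracer
 *
 * The name written must match a tracer registered in trace_types;
 * otherwise the lookup below fails with -EINVAL.
 */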
3956static int tracing_set_tracer(struct trace_array *tr, const char *buf)
3957{
3958	static struct trace_option_dentry *topts;
3959	struct tracer *t;
3960#ifdef CONFIG_TRACER_MAX_TRACE
3961	bool had_max_tr;
3962#endif
3963	int ret = 0;
3964
3965	mutex_lock(&trace_types_lock);
3966
3967	if (!ring_buffer_expanded) {
3968		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
3969						RING_BUFFER_ALL_CPUS);
3970		if (ret < 0)
3971			goto out;
3972		ret = 0;
3973	}
3974
3975	for (t = trace_types; t; t = t->next) {
3976		if (strcmp(t->name, buf) == 0)
3977			break;
3978	}
3979	if (!t) {
3980		ret = -EINVAL;
3981		goto out;
3982	}
3983	if (t == tr->current_trace)
3984		goto out;
3985
3986	/* Some tracers are only allowed for the top level buffer */
3987	if (!trace_ok_for_array(t, tr)) {
3988		ret = -EINVAL;
3989		goto out;
3990	}
3991
3992	trace_branch_disable();
3993
3994	tr->current_trace->enabled--;
3995
3996	if (tr->current_trace->reset)
3997		tr->current_trace->reset(tr);
3998
3999	/* Current trace needs to be nop_trace before synchronize_sched */
4000	tr->current_trace = &nop_trace;
4001
4002#ifdef CONFIG_TRACER_MAX_TRACE
4003	had_max_tr = tr->allocated_snapshot;
4004
4005	if (had_max_tr && !t->use_max_tr) {
4006		/*
4007		 * We need to make sure that the update_max_tr sees that
4008		 * current_trace changed to nop_trace to keep it from
4009		 * swapping the buffers after we resize it.
4010		 * update_max_tr() is called with interrupts disabled,
4011		 * so a synchronize_sched() is sufficient.
4012		 */
4013		synchronize_sched();
4014		free_snapshot(tr);
4015	}
4016#endif
4017	/* Currently, only the top instance has options */
4018	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4019		destroy_trace_option_files(topts);
4020		topts = create_trace_option_files(tr, t);
4021	}
4022
4023#ifdef CONFIG_TRACER_MAX_TRACE
4024	if (t->use_max_tr && !had_max_tr) {
4025		ret = alloc_snapshot(tr);
4026		if (ret < 0)
4027			goto out;
4028	}
4029#endif
4030
4031	if (t->init) {
4032		ret = tracer_init(t, tr);
4033		if (ret)
4034			goto out;
4035	}
4036
4037	tr->current_trace = t;
4038	tr->current_trace->enabled++;
4039	trace_branch_enable(tr);
4040 out:
4041	mutex_unlock(&trace_types_lock);
4042
4043	return ret;
4044}
4045
4046static ssize_t
4047tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4048			size_t cnt, loff_t *ppos)
4049{
4050	struct trace_array *tr = filp->private_data;
4051	char buf[MAX_TRACER_SIZE+1];
4052	int i;
4053	size_t ret;
4054	int err;
4055
4056	ret = cnt;
4057
4058	if (cnt > MAX_TRACER_SIZE)
4059		cnt = MAX_TRACER_SIZE;
4060
4061	if (copy_from_user(&buf, ubuf, cnt))
4062		return -EFAULT;
4063
4064	buf[cnt] = 0;
4065
4066	/* strip ending whitespace. */
4067	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4068		buf[i] = 0;
4069
4070	err = tracing_set_tracer(tr, buf);
4071	if (err)
4072		return err;
4073
4074	*ppos += ret;
4075
4076	return ret;
4077}
4078
4079static ssize_t
4080tracing_max_lat_read(struct file *filp, char __user *ubuf,
4081		     size_t cnt, loff_t *ppos)
4082{
4083	unsigned long *ptr = filp->private_data;
4084	char buf[64];
4085	int r;
4086
4087	r = snprintf(buf, sizeof(buf), "%ld\n",
4088		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4089	if (r > sizeof(buf))
4090		r = sizeof(buf);
4091	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4092}
4093
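/*
 * Note on units: the value written here is taken as microseconds and
 * stored in nanoseconds (hence the val * 1000 below), matching the
 * nsecs_to_usecs() conversion used by the read side above.
 */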
4094static ssize_t
4095tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4096		      size_t cnt, loff_t *ppos)
4097{
4098	unsigned long *ptr = filp->private_data;
4099	unsigned long val;
4100	int ret;
4101
4102	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4103	if (ret)
4104		return ret;
4105
4106	*ptr = val * 1000;
4107
4108	return cnt;
4109}
4110
4111static int tracing_open_pipe(struct inode *inode, struct file *filp)
4112{
4113	struct trace_array *tr = inode->i_private;
4114	struct trace_iterator *iter;
4115	int ret = 0;
4116
4117	if (tracing_disabled)
4118		return -ENODEV;
4119
4120	if (trace_array_get(tr) < 0)
4121		return -ENODEV;
4122
4123	mutex_lock(&trace_types_lock);
4124
4125	/* create a buffer to store the information to pass to userspace */
4126	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4127	if (!iter) {
4128		ret = -ENOMEM;
4129		__trace_array_put(tr);
4130		goto out;
4131	}
4132
4133	/*
4134	 * We make a copy of the current tracer to avoid concurrent
4135	 * changes on it while we are reading.
4136	 */
4137	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4138	if (!iter->trace) {
4139		ret = -ENOMEM;
4140		goto fail;
4141	}
4142	*iter->trace = *tr->current_trace;
4143
4144	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4145		ret = -ENOMEM;
4146		goto fail;
4147	}
4148
4149	/* trace pipe does not show start of buffer */
4150	cpumask_setall(iter->started);
4151
4152	if (trace_flags & TRACE_ITER_LATENCY_FMT)
4153		iter->iter_flags |= TRACE_FILE_LAT_FMT;
4154
4155	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4156	if (trace_clocks[tr->clock_id].in_ns)
4157		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4158
4159	iter->tr = tr;
4160	iter->trace_buffer = &tr->trace_buffer;
4161	iter->cpu_file = tracing_get_cpu(inode);
4162	mutex_init(&iter->mutex);
4163	filp->private_data = iter;
4164
4165	if (iter->trace->pipe_open)
4166		iter->trace->pipe_open(iter);
4167
4168	nonseekable_open(inode, filp);
4169out:
4170	mutex_unlock(&trace_types_lock);
4171	return ret;
4172
4173fail:
4174	kfree(iter->trace);
4175	kfree(iter);
4176	__trace_array_put(tr);
4177	mutex_unlock(&trace_types_lock);
4178	return ret;
4179}
4180
4181static int tracing_release_pipe(struct inode *inode, struct file *file)
4182{
4183	struct trace_iterator *iter = file->private_data;
4184	struct trace_array *tr = inode->i_private;
4185
4186	mutex_lock(&trace_types_lock);
4187
4188	if (iter->trace->pipe_close)
4189		iter->trace->pipe_close(iter);
4190
4191	mutex_unlock(&trace_types_lock);
4192
4193	free_cpumask_var(iter->started);
4194	mutex_destroy(&iter->mutex);
4195	kfree(iter->trace);
4196	kfree(iter);
4197
4198	trace_array_put(tr);
4199
4200	return 0;
4201}
4202
4203static unsigned int
4204trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4205{
4206	/* Iterators are static, they should be filled or empty */
4207	if (trace_buffer_iter(iter, iter->cpu_file))
4208		return POLLIN | POLLRDNORM;
4209
4210	if (trace_flags & TRACE_ITER_BLOCK)
4211		/*
4212		 * Always select as readable when in blocking mode
4213		 */
4214		return POLLIN | POLLRDNORM;
4215	else
4216		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4217					     filp, poll_table);
4218}
4219
4220static unsigned int
4221tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4222{
4223	struct trace_iterator *iter = filp->private_data;
4224
4225	return trace_poll(iter, filp, poll_table);
4226}
4227
4228/*
4229 * This is a makeshift waitqueue.
4230 * A tracer might use this callback in some rare cases:
4231 *
4232 *  1) the current tracer might hold the runqueue lock when it wakes up
4233 *     a reader, hence a deadlock (sched, function, and function graph tracers)
4234 *  2) the function tracers trace all functions, and we don't want
4235 *     the overhead of calling wake_up and friends
4236 *     (and tracing them too)
4237 *
4238 *     Anyway, this is a really primitive wakeup.
4239 */
4240void poll_wait_pipe(struct trace_iterator *iter)
4241{
4242	set_current_state(TASK_INTERRUPTIBLE);
4243	/* sleep for 100 msecs, and try again. */
4244	schedule_timeout(HZ / 10);
4245}
4246
4247/* Must be called with trace_types_lock mutex held. */
4248static int tracing_wait_pipe(struct file *filp)
4249{
4250	struct trace_iterator *iter = filp->private_data;
4251
4252	while (trace_empty(iter)) {
4253
4254		if ((filp->f_flags & O_NONBLOCK)) {
4255			return -EAGAIN;
4256		}
4257
4258		mutex_unlock(&iter->mutex);
4259
4260		iter->trace->wait_pipe(iter);
4261
4262		mutex_lock(&iter->mutex);
4263
4264		if (signal_pending(current))
4265			return -EINTR;
4266
4267		/*
4268		 * We block until we have read something and tracing is disabled.
4269		 * We keep blocking while tracing is disabled if we have not yet
4270		 * read anything. This allows a user to cat this file, and
4271		 * then enable tracing. But after we have read something,
4272		 * we give an EOF when tracing is disabled again.
4273		 *
4274		 * iter->pos will be 0 if we haven't read anything.
4275		 */
4276		if (!tracing_is_on() && iter->pos)
4277			break;
4278	}
4279
4280	return 1;
4281}
4282
4283/*
4284 * Consumer reader.
4285 */
4286static ssize_t
4287tracing_read_pipe(struct file *filp, char __user *ubuf,
4288		  size_t cnt, loff_t *ppos)
4289{
4290	struct trace_iterator *iter = filp->private_data;
4291	struct trace_array *tr = iter->tr;
4292	ssize_t sret;
4293
4294	/* return any leftover data */
4295	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4296	if (sret != -EBUSY)
4297		return sret;
4298
4299	trace_seq_init(&iter->seq);
4300
4301	/* copy the tracer to avoid using a global lock all around */
4302	mutex_lock(&trace_types_lock);
4303	if (unlikely(iter->trace->name != tr->current_trace->name))
4304		*iter->trace = *tr->current_trace;
4305	mutex_unlock(&trace_types_lock);
4306
4307	/*
4308	 * Avoid more than one consumer on a single file descriptor
4309	 * This is just a matter of traces coherency, the ring buffer itself
4310	 * is protected.
4311	 */
4312	mutex_lock(&iter->mutex);
4313	if (iter->trace->read) {
4314		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4315		if (sret)
4316			goto out;
4317	}
4318
4319waitagain:
4320	sret = tracing_wait_pipe(filp);
4321	if (sret <= 0)
4322		goto out;
4323
4324	/* stop when tracing is finished */
4325	if (trace_empty(iter)) {
4326		sret = 0;
4327		goto out;
4328	}
4329
4330	if (cnt >= PAGE_SIZE)
4331		cnt = PAGE_SIZE - 1;
4332
4333	/* reset all but tr, trace, and overruns */
4334	memset(&iter->seq, 0,
4335	       sizeof(struct trace_iterator) -
4336	       offsetof(struct trace_iterator, seq));
4337	cpumask_clear(iter->started);
4338	iter->pos = -1;
4339
4340	trace_event_read_lock();
4341	trace_access_lock(iter->cpu_file);
4342	while (trace_find_next_entry_inc(iter) != NULL) {
4343		enum print_line_t ret;
4344		int len = iter->seq.len;
4345
4346		ret = print_trace_line(iter);
4347		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4348			/* don't print partial lines */
4349			iter->seq.len = len;
4350			break;
4351		}
4352		if (ret != TRACE_TYPE_NO_CONSUME)
4353			trace_consume(iter);
4354
4355		if (iter->seq.len >= cnt)
4356			break;
4357
4358		/*
4359		 * Setting the full flag means we reached the trace_seq buffer
4360		 * size and should have left via the partial-output condition above.
4361		 * One of the trace_seq_* functions is not used properly.
4362		 */
4363		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4364			  iter->ent->type);
4365	}
4366	trace_access_unlock(iter->cpu_file);
4367	trace_event_read_unlock();
4368
4369	/* Now copy what we have to the user */
4370	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4371	if (iter->seq.readpos >= iter->seq.len)
4372		trace_seq_init(&iter->seq);
4373
4374	/*
4375	 * If there was nothing to send to user, in spite of consuming trace
4376	 * entries, go back to wait for more entries.
4377	 */
4378	if (sret == -EBUSY)
4379		goto waitagain;
4380
4381out:
4382	mutex_unlock(&iter->mutex);
4383
4384	return sret;
4385}
4386
4387static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4388				     unsigned int idx)
4389{
4390	__free_page(spd->pages[idx]);
4391}
4392
4393static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4394	.can_merge		= 0,
4395	.confirm		= generic_pipe_buf_confirm,
4396	.release		= generic_pipe_buf_release,
4397	.steal			= generic_pipe_buf_steal,
4398	.get			= generic_pipe_buf_get,
4399};
4400
4401static size_t
4402tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4403{
4404	size_t count;
4405	int ret;
4406
4407	/* Seq buffer is page-sized, exactly what we need. */
4408	for (;;) {
4409		count = iter->seq.len;
4410		ret = print_trace_line(iter);
4411		count = iter->seq.len - count;
4412		if (rem < count) {
4413			rem = 0;
4414			iter->seq.len -= count;
4415			break;
4416		}
4417		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4418			iter->seq.len -= count;
4419			break;
4420		}
4421
4422		if (ret != TRACE_TYPE_NO_CONSUME)
4423			trace_consume(iter);
4424		rem -= count;
4425		if (!trace_find_next_entry_inc(iter))	{
4426			rem = 0;
4427			iter->ent = NULL;
4428			break;
4429		}
4430	}
4431
4432	return rem;
4433}
4434
4435static ssize_t tracing_splice_read_pipe(struct file *filp,
4436					loff_t *ppos,
4437					struct pipe_inode_info *pipe,
4438					size_t len,
4439					unsigned int flags)
4440{
4441	struct page *pages_def[PIPE_DEF_BUFFERS];
4442	struct partial_page partial_def[PIPE_DEF_BUFFERS];
4443	struct trace_iterator *iter = filp->private_data;
4444	struct splice_pipe_desc spd = {
4445		.pages		= pages_def,
4446		.partial	= partial_def,
4447		.nr_pages	= 0, /* This gets updated below. */
4448		.nr_pages_max	= PIPE_DEF_BUFFERS,
4449		.flags		= flags,
4450		.ops		= &tracing_pipe_buf_ops,
4451		.spd_release	= tracing_spd_release_pipe,
4452	};
4453	struct trace_array *tr = iter->tr;
4454	ssize_t ret;
4455	size_t rem;
4456	unsigned int i;
4457
4458	if (splice_grow_spd(pipe, &spd))
4459		return -ENOMEM;
4460
4461	/* copy the tracer to avoid using a global lock all around */
4462	mutex_lock(&trace_types_lock);
4463	if (unlikely(iter->trace->name != tr->current_trace->name))
4464		*iter->trace = *tr->current_trace;
4465	mutex_unlock(&trace_types_lock);
4466
4467	mutex_lock(&iter->mutex);
4468
4469	if (iter->trace->splice_read) {
4470		ret = iter->trace->splice_read(iter, filp,
4471					       ppos, pipe, len, flags);
4472		if (ret)
4473			goto out_err;
4474	}
4475
4476	ret = tracing_wait_pipe(filp);
4477	if (ret <= 0)
4478		goto out_err;
4479
4480	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4481		ret = -EFAULT;
4482		goto out_err;
4483	}
4484
4485	trace_event_read_lock();
4486	trace_access_lock(iter->cpu_file);
4487
4488	/* Fill as many pages as possible. */
4489	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4490		spd.pages[i] = alloc_page(GFP_KERNEL);
4491		if (!spd.pages[i])
4492			break;
4493
4494		rem = tracing_fill_pipe_page(rem, iter);
4495
4496		/* Copy the data into the page, so we can start over. */
4497		ret = trace_seq_to_buffer(&iter->seq,
4498					  page_address(spd.pages[i]),
4499					  iter->seq.len);
4500		if (ret < 0) {
4501			__free_page(spd.pages[i]);
4502			break;
4503		}
4504		spd.partial[i].offset = 0;
4505		spd.partial[i].len = iter->seq.len;
4506
4507		trace_seq_init(&iter->seq);
4508	}
4509
4510	trace_access_unlock(iter->cpu_file);
4511	trace_event_read_unlock();
4512	mutex_unlock(&iter->mutex);
4513
4514	spd.nr_pages = i;
4515
4516	ret = splice_to_pipe(pipe, &spd);
4517out:
4518	splice_shrink_spd(&spd);
4519	return ret;
4520
4521out_err:
4522	mutex_unlock(&iter->mutex);
4523	goto out;
4524}
4525
4526static ssize_t
4527tracing_entries_read(struct file *filp, char __user *ubuf,
4528		     size_t cnt, loff_t *ppos)
4529{
4530	struct inode *inode = file_inode(filp);
4531	struct trace_array *tr = inode->i_private;
4532	int cpu = tracing_get_cpu(inode);
4533	char buf[64];
4534	int r = 0;
4535	ssize_t ret;
4536
4537	mutex_lock(&trace_types_lock);
4538
4539	if (cpu == RING_BUFFER_ALL_CPUS) {
4540		int cpu, buf_size_same;
4541		unsigned long size;
4542
4543		size = 0;
4544		buf_size_same = 1;
4545		/* check if all cpu sizes are same */
4546		for_each_tracing_cpu(cpu) {
4547			/* fill in the size from first enabled cpu */
4548			if (size == 0)
4549				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4550			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4551				buf_size_same = 0;
4552				break;
4553			}
4554		}
4555
4556		if (buf_size_same) {
4557			if (!ring_buffer_expanded)
4558				r = sprintf(buf, "%lu (expanded: %lu)\n",
4559					    size >> 10,
4560					    trace_buf_size >> 10);
4561			else
4562				r = sprintf(buf, "%lu\n", size >> 10);
4563		} else
4564			r = sprintf(buf, "X\n");
4565	} else
4566		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
4567
4568	mutex_unlock(&trace_types_lock);
4569
4570	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4571	return ret;
4572}
4573
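/*
 * Resize the ring buffer from userspace.  A usage sketch (assuming
 * tracefs is mounted at /sys/kernel/debug/tracing):
 *
 *   echo 4096 > buffer_size_kb     # resize every CPU buffer to 4 MB
 *
 * The value is in kilobytes (shifted left by 10 below) and must be at
 * least 1; the per_cpu/cpuN/buffer_size_kb files report the individual
 * per-CPU sizes.
 */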
4574static ssize_t
4575tracing_entries_write(struct file *filp, const char __user *ubuf,
4576		      size_t cnt, loff_t *ppos)
4577{
4578	struct inode *inode = file_inode(filp);
4579	struct trace_array *tr = inode->i_private;
4580	unsigned long val;
4581	int ret;
4582
4583	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4584	if (ret)
4585		return ret;
4586
4587	/* must have at least 1 entry */
4588	if (!val)
4589		return -EINVAL;
4590
4591	/* value is in KB */
4592	val <<= 10;
4593	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4594	if (ret < 0)
4595		return ret;
4596
4597	*ppos += cnt;
4598
4599	return cnt;
4600}
4601
4602static ssize_t
4603tracing_total_entries_read(struct file *filp, char __user *ubuf,
4604				size_t cnt, loff_t *ppos)
4605{
4606	struct trace_array *tr = filp->private_data;
4607	char buf[64];
4608	int r, cpu;
4609	unsigned long size = 0, expanded_size = 0;
4610
4611	mutex_lock(&trace_types_lock);
4612	for_each_tracing_cpu(cpu) {
4613		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
4614		if (!ring_buffer_expanded)
4615			expanded_size += trace_buf_size >> 10;
4616	}
4617	if (ring_buffer_expanded)
4618		r = sprintf(buf, "%lu\n", size);
4619	else
4620		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4621	mutex_unlock(&trace_types_lock);
4622
4623	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4624}
4625
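/*
 * These handlers back the "free_buffer" file (created elsewhere in this
 * file): any write is accepted, so plain "echo" works, and closing the
 * file shrinks the ring buffers to zero.  With the stop-on-free option
 * set, tracing is turned off first (see the release handler below).
 */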
4626static ssize_t
4627tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4628			  size_t cnt, loff_t *ppos)
4629{
4630	/*
4631	 * There is no need to read what the user has written; this function
4632	 * exists just to make sure that there is no error when "echo" is used.
4633	 */
4634
4635	*ppos += cnt;
4636
4637	return cnt;
4638}
4639
4640static int
4641tracing_free_buffer_release(struct inode *inode, struct file *filp)
4642{
4643	struct trace_array *tr = inode->i_private;
4644
4645	/* disable tracing ? */
4646	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
4647		tracer_tracing_off(tr);
4648	/* resize the ring buffer to 0 */
4649	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4650
4651	trace_array_put(tr);
4652
4653	return 0;
4654}
4655
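/*
 * Write handler for the "trace_marker" file: userspace can inject
 * arbitrary text into the trace.  A sketch (assuming tracefs is
 * mounted at /sys/kernel/debug/tracing):
 *
 *   echo "hit the interesting code path" > trace_marker
 *
 * The text shows up in the trace as a print entry, capped at
 * TRACE_BUF_SIZE bytes per write.
 */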
4656static ssize_t
4657tracing_mark_write(struct file *filp, const char __user *ubuf,
4658					size_t cnt, loff_t *fpos)
4659{
4660	unsigned long addr = (unsigned long)ubuf;
4661	struct trace_array *tr = filp->private_data;
4662	struct ring_buffer_event *event;
4663	struct ring_buffer *buffer;
4664	struct print_entry *entry;
4665	unsigned long irq_flags;
4666	struct page *pages[2];
4667	void *map_page[2];
4668	int nr_pages = 1;
4669	ssize_t written;
4670	int offset;
4671	int size;
4672	int len;
4673	int ret;
4674	int i;
4675
4676	if (tracing_disabled)
4677		return -EINVAL;
4678
4679	if (!(trace_flags & TRACE_ITER_MARKERS))
4680		return -EINVAL;
4681
4682	if (cnt > TRACE_BUF_SIZE)
4683		cnt = TRACE_BUF_SIZE;
4684
4685	/*
4686	 * Userspace is injecting traces into the kernel trace buffer.
4687	 * We want to be as non-intrusive as possible.
4688	 * To do so, we do not want to allocate any special buffers
4689	 * or take any locks, but instead write the userspace data
4690	 * straight into the ring buffer.
4691	 *
4692	 * First we need to pin the userspace buffer into memory,
4693	 * which it most likely already is, because userspace just referenced it.
4694	 * But there's no guarantee that it is. By using get_user_pages_fast()
4695	 * and kmap_atomic/kunmap_atomic() we can get access to the
4696	 * pages directly. We then write the data directly into the
4697	 * ring buffer.
4698	 */
4699	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
4700
4701	/* check if we cross pages */
4702	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4703		nr_pages = 2;
4704
4705	offset = addr & (PAGE_SIZE - 1);
4706	addr &= PAGE_MASK;
4707
4708	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4709	if (ret < nr_pages) {
4710		while (--ret >= 0)
4711			put_page(pages[ret]);
4712		written = -EFAULT;
4713		goto out;
4714	}
4715
4716	for (i = 0; i < nr_pages; i++)
4717		map_page[i] = kmap_atomic(pages[i]);
4718
4719	local_save_flags(irq_flags);
4720	size = sizeof(*entry) + cnt + 2; /* possible \n added */
4721	buffer = tr->trace_buffer.buffer;
4722	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4723					  irq_flags, preempt_count());
4724	if (!event) {
4725		/* Ring buffer disabled, return as if not open for write */
4726		written = -EBADF;
4727		goto out_unlock;
4728	}
4729
4730	entry = ring_buffer_event_data(event);
4731	entry->ip = _THIS_IP_;
4732
4733	if (nr_pages == 2) {
4734		len = PAGE_SIZE - offset;
4735		memcpy(&entry->buf, map_page[0] + offset, len);
4736		memcpy(&entry->buf[len], map_page[1], cnt - len);
4737	} else
4738		memcpy(&entry->buf, map_page[0] + offset, cnt);
4739
4740	if (entry->buf[cnt - 1] != '\n') {
4741		entry->buf[cnt] = '\n';
4742		entry->buf[cnt + 1] = '\0';
4743	} else
4744		entry->buf[cnt] = '\0';
4745
4746	__buffer_unlock_commit(buffer, event);
4747
4748	written = cnt;
4749
4750	*fpos += written;
4751
4752 out_unlock:
4753	for (i = 0; i < nr_pages; i++){
4754		kunmap_atomic(map_page[i]);
4755		put_page(pages[i]);
4756	}
4757 out:
4758	return written;
4759}
4760
4761static int tracing_clock_show(struct seq_file *m, void *v)
4762{
4763	struct trace_array *tr = m->private;
4764	int i;
4765
4766	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
4767		seq_printf(m,
4768			"%s%s%s%s", i ? " " : "",
4769			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4770			i == tr->clock_id ? "]" : "");
4771	seq_putc(m, '\n');
4772
4773	return 0;
4774}
4775
4776static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
4777{
4778	int i;
4779
4780	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4781		if (strcmp(trace_clocks[i].name, clockstr) == 0)
4782			break;
4783	}
4784	if (i == ARRAY_SIZE(trace_clocks))
4785		return -EINVAL;
4786
4787	mutex_lock(&trace_types_lock);
4788
4789	tr->clock_id = i;
4790
4791	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
4792
4793	/*
4794	 * New clock may not be consistent with the previous clock.
4795	 * Reset the buffer so that it doesn't have incomparable timestamps.
4796	 */
4797	tracing_reset_online_cpus(&tr->trace_buffer);
4798
4799#ifdef CONFIG_TRACER_MAX_TRACE
4800	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4801		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
4802	tracing_reset_online_cpus(&tr->max_buffer);
4803#endif
4804
4805	mutex_unlock(&trace_types_lock);
4806
4807	return 0;
4808}
4809
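/*
 * A usage sketch for the "trace_clock" file (assuming tracefs is
 * mounted at /sys/kernel/debug/tracing):
 *
 *   cat trace_clock           # the current clock is shown in [brackets]
 *   echo global > trace_clock
 *
 * Switching clocks resets the ring buffer (see tracing_set_clock()
 * above), since timestamps from different clocks are not comparable.
 */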
4810static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4811				   size_t cnt, loff_t *fpos)
4812{
4813	struct seq_file *m = filp->private_data;
4814	struct trace_array *tr = m->private;
4815	char buf[64];
4816	const char *clockstr;
4817	int ret;
4818
4819	if (cnt >= sizeof(buf))
4820		return -EINVAL;
4821
4822	if (copy_from_user(&buf, ubuf, cnt))
4823		return -EFAULT;
4824
4825	buf[cnt] = 0;
4826
4827	clockstr = strstrip(buf);
4828
4829	ret = tracing_set_clock(tr, clockstr);
4830	if (ret)
4831		return ret;
4832
4833	*fpos += cnt;
4834
4835	return cnt;
4836}
4837
4838static int tracing_clock_open(struct inode *inode, struct file *file)
4839{
4840	struct trace_array *tr = inode->i_private;
4841	int ret;
4842
4843	if (tracing_disabled)
4844		return -ENODEV;
4845
4846	if (trace_array_get(tr))
4847		return -ENODEV;
4848
4849	ret = single_open(file, tracing_clock_show, inode->i_private);
4850	if (ret < 0)
4851		trace_array_put(tr);
4852
4853	return ret;
4854}
4855
4856struct ftrace_buffer_info {
4857	struct trace_iterator	iter;
4858	void			*spare;
4859	unsigned int		read;
4860};
4861
4862#ifdef CONFIG_TRACER_SNAPSHOT
4863static int tracing_snapshot_open(struct inode *inode, struct file *file)
4864{
4865	struct trace_array *tr = inode->i_private;
4866	struct trace_iterator *iter;
4867	struct seq_file *m;
4868	int ret = 0;
4869
4870	if (trace_array_get(tr) < 0)
4871		return -ENODEV;
4872
4873	if (file->f_mode & FMODE_READ) {
4874		iter = __tracing_open(inode, file, true);
4875		if (IS_ERR(iter))
4876			ret = PTR_ERR(iter);
4877	} else {
4878		/* Writes still need the seq_file to hold the private data */
4879		ret = -ENOMEM;
4880		m = kzalloc(sizeof(*m), GFP_KERNEL);
4881		if (!m)
4882			goto out;
4883		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4884		if (!iter) {
4885			kfree(m);
4886			goto out;
4887		}
4888		ret = 0;
4889
4890		iter->tr = tr;
4891		iter->trace_buffer = &tr->max_buffer;
4892		iter->cpu_file = tracing_get_cpu(inode);
4893		m->private = iter;
4894		file->private_data = m;
4895	}
4896out:
4897	if (ret < 0)
4898		trace_array_put(tr);
4899
4900	return ret;
4901}
4902
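/*
 * Values accepted by the "snapshot" file (see the switch below):
 *
 *   echo 0 > snapshot    # free the snapshot buffer (if allocated)
 *   echo 1 > snapshot    # allocate it if needed and take a snapshot
 *   echo 2 > snapshot    # clear the snapshot contents (any value > 1)
 *
 * This is a sketch of the intended usage; the per-CPU snapshot files
 * accept the same values, subject to the restrictions checked below.
 */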
4903static ssize_t
4904tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
4905		       loff_t *ppos)
4906{
4907	struct seq_file *m = filp->private_data;
4908	struct trace_iterator *iter = m->private;
4909	struct trace_array *tr = iter->tr;
4910	unsigned long val;
4911	int ret;
4912
4913	ret = tracing_update_buffers();
4914	if (ret < 0)
4915		return ret;
4916
4917	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4918	if (ret)
4919		return ret;
4920
4921	mutex_lock(&trace_types_lock);
4922
4923	if (tr->current_trace->use_max_tr) {
4924		ret = -EBUSY;
4925		goto out;
4926	}
4927
4928	switch (val) {
4929	case 0:
4930		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4931			ret = -EINVAL;
4932			break;
4933		}
4934		if (tr->allocated_snapshot)
4935			free_snapshot(tr);
4936		break;
4937	case 1:
4938/* Only allow per-cpu swap if the ring buffer supports it */
4939#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
4940		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4941			ret = -EINVAL;
4942			break;
4943		}
4944#endif
4945		if (!tr->allocated_snapshot) {
4946			ret = alloc_snapshot(tr);
4947			if (ret < 0)
4948				break;
4949		}
4950		local_irq_disable();
4951		/* Now, we're going to swap */
4952		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4953			update_max_tr(tr, current, smp_processor_id());
4954		else
4955			update_max_tr_single(tr, current, iter->cpu_file);
4956		local_irq_enable();
4957		break;
4958	default:
4959		if (tr->allocated_snapshot) {
4960			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4961				tracing_reset_online_cpus(&tr->max_buffer);
4962			else
4963				tracing_reset(&tr->max_buffer, iter->cpu_file);
4964		}
4965		break;
4966	}
4967
4968	if (ret >= 0) {
4969		*ppos += cnt;
4970		ret = cnt;
4971	}
4972out:
4973	mutex_unlock(&trace_types_lock);
4974	return ret;
4975}
4976
4977static int tracing_snapshot_release(struct inode *inode, struct file *file)
4978{
4979	struct seq_file *m = file->private_data;
4980	int ret;
4981
4982	ret = tracing_release(inode, file);
4983
4984	if (file->f_mode & FMODE_READ)
4985		return ret;
4986
4987	/* If write only, the seq_file is just a stub */
4988	if (m)
4989		kfree(m->private);
4990	kfree(m);
4991
4992	return 0;
4993}
4994
4995static int tracing_buffers_open(struct inode *inode, struct file *filp);
4996static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
4997				    size_t count, loff_t *ppos);
4998static int tracing_buffers_release(struct inode *inode, struct file *file);
4999static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5000		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5001
5002static int snapshot_raw_open(struct inode *inode, struct file *filp)
5003{
5004	struct ftrace_buffer_info *info;
5005	int ret;
5006
5007	ret = tracing_buffers_open(inode, filp);
5008	if (ret < 0)
5009		return ret;
5010
5011	info = filp->private_data;
5012
5013	if (info->iter.trace->use_max_tr) {
5014		tracing_buffers_release(inode, filp);
5015		return -EBUSY;
5016	}
5017
5018	info->iter.snapshot = true;
5019	info->iter.trace_buffer = &info->iter.tr->max_buffer;
5020
5021	return ret;
5022}
5023
5024#endif /* CONFIG_TRACER_SNAPSHOT */
5025
5026
5027static const struct file_operations tracing_max_lat_fops = {
5028	.open		= tracing_open_generic,
5029	.read		= tracing_max_lat_read,
5030	.write		= tracing_max_lat_write,
5031	.llseek		= generic_file_llseek,
5032};
5033
5034static const struct file_operations set_tracer_fops = {
5035	.open		= tracing_open_generic,
5036	.read		= tracing_set_trace_read,
5037	.write		= tracing_set_trace_write,
5038	.llseek		= generic_file_llseek,
5039};
5040
5041static const struct file_operations tracing_pipe_fops = {
5042	.open		= tracing_open_pipe,
5043	.poll		= tracing_poll_pipe,
5044	.read		= tracing_read_pipe,
5045	.splice_read	= tracing_splice_read_pipe,
5046	.release	= tracing_release_pipe,
5047	.llseek		= no_llseek,
5048};
5049
5050static const struct file_operations tracing_entries_fops = {
5051	.open		= tracing_open_generic_tr,
5052	.read		= tracing_entries_read,
5053	.write		= tracing_entries_write,
5054	.llseek		= generic_file_llseek,
5055	.release	= tracing_release_generic_tr,
5056};
5057
5058static const struct file_operations tracing_total_entries_fops = {
5059	.open		= tracing_open_generic_tr,
5060	.read		= tracing_total_entries_read,
5061	.llseek		= generic_file_llseek,
5062	.release	= tracing_release_generic_tr,
5063};
5064
5065static const struct file_operations tracing_free_buffer_fops = {
5066	.open		= tracing_open_generic_tr,
5067	.write		= tracing_free_buffer_write,
5068	.release	= tracing_free_buffer_release,
5069};
5070
5071static const struct file_operations tracing_mark_fops = {
5072	.open		= tracing_open_generic_tr,
5073	.write		= tracing_mark_write,
5074	.llseek		= generic_file_llseek,
5075	.release	= tracing_release_generic_tr,
5076};
5077
5078static const struct file_operations trace_clock_fops = {
5079	.open		= tracing_clock_open,
5080	.read		= seq_read,
5081	.llseek		= seq_lseek,
5082	.release	= tracing_single_release_tr,
5083	.write		= tracing_clock_write,
5084};
5085
5086#ifdef CONFIG_TRACER_SNAPSHOT
5087static const struct file_operations snapshot_fops = {
5088	.open		= tracing_snapshot_open,
5089	.read		= seq_read,
5090	.write		= tracing_snapshot_write,
5091	.llseek		= tracing_lseek,
5092	.release	= tracing_snapshot_release,
5093};
5094
5095static const struct file_operations snapshot_raw_fops = {
5096	.open		= snapshot_raw_open,
5097	.read		= tracing_buffers_read,
5098	.release	= tracing_buffers_release,
5099	.splice_read	= tracing_buffers_splice_read,
5100	.llseek		= no_llseek,
5101};
5102
5103#endif /* CONFIG_TRACER_SNAPSHOT */
5104
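/*
 * The tracing_buffers_* handlers back the per-CPU "trace_pipe_raw"
 * files (see tracing_init_debugfs_percpu() below).  They hand out raw,
 * page-sized chunks of the ring buffer, which tools can read() or
 * splice() without the text formatting done by trace_pipe.
 */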
5105static int tracing_buffers_open(struct inode *inode, struct file *filp)
5106{
5107	struct trace_array *tr = inode->i_private;
5108	struct ftrace_buffer_info *info;
5109	int ret;
5110
5111	if (tracing_disabled)
5112		return -ENODEV;
5113
5114	if (trace_array_get(tr) < 0)
5115		return -ENODEV;
5116
5117	info = kzalloc(sizeof(*info), GFP_KERNEL);
5118	if (!info) {
5119		trace_array_put(tr);
5120		return -ENOMEM;
5121	}
5122
5123	mutex_lock(&trace_types_lock);
5124
5125	info->iter.tr		= tr;
5126	info->iter.cpu_file	= tracing_get_cpu(inode);
5127	info->iter.trace	= tr->current_trace;
5128	info->iter.trace_buffer = &tr->trace_buffer;
5129	info->spare		= NULL;
5130	/* Force reading ring buffer for first read */
5131	info->read		= (unsigned int)-1;
5132
5133	filp->private_data = info;
5134
5135	mutex_unlock(&trace_types_lock);
5136
5137	ret = nonseekable_open(inode, filp);
5138	if (ret < 0)
5139		trace_array_put(tr);
5140
5141	return ret;
5142}
5143
5144static unsigned int
5145tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5146{
5147	struct ftrace_buffer_info *info = filp->private_data;
5148	struct trace_iterator *iter = &info->iter;
5149
5150	return trace_poll(iter, filp, poll_table);
5151}
5152
5153static ssize_t
5154tracing_buffers_read(struct file *filp, char __user *ubuf,
5155		     size_t count, loff_t *ppos)
5156{
5157	struct ftrace_buffer_info *info = filp->private_data;
5158	struct trace_iterator *iter = &info->iter;
5159	ssize_t ret;
5160	ssize_t size;
5161
5162	if (!count)
5163		return 0;
5164
5165	mutex_lock(&trace_types_lock);
5166
5167#ifdef CONFIG_TRACER_MAX_TRACE
5168	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5169		size = -EBUSY;
5170		goto out_unlock;
5171	}
5172#endif
5173
5174	if (!info->spare)
5175		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5176							  iter->cpu_file);
5177	size = -ENOMEM;
5178	if (!info->spare)
5179		goto out_unlock;
5180
5181	/* Do we have previous read data to read? */
5182	if (info->read < PAGE_SIZE)
5183		goto read;
5184
5185 again:
5186	trace_access_lock(iter->cpu_file);
5187	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5188				    &info->spare,
5189				    count,
5190				    iter->cpu_file, 0);
5191	trace_access_unlock(iter->cpu_file);
5192
5193	if (ret < 0) {
5194		if (trace_empty(iter)) {
5195			if ((filp->f_flags & O_NONBLOCK)) {
5196				size = -EAGAIN;
5197				goto out_unlock;
5198			}
5199			mutex_unlock(&trace_types_lock);
5200			iter->trace->wait_pipe(iter);
5201			mutex_lock(&trace_types_lock);
5202			if (signal_pending(current)) {
5203				size = -EINTR;
5204				goto out_unlock;
5205			}
5206			goto again;
5207		}
5208		size = 0;
5209		goto out_unlock;
5210	}
5211
5212	info->read = 0;
5213 read:
5214	size = PAGE_SIZE - info->read;
5215	if (size > count)
5216		size = count;
5217
5218	ret = copy_to_user(ubuf, info->spare + info->read, size);
5219	if (ret == size) {
5220		size = -EFAULT;
5221		goto out_unlock;
5222	}
5223	size -= ret;
5224
5225	*ppos += size;
5226	info->read += size;
5227
5228 out_unlock:
5229	mutex_unlock(&trace_types_lock);
5230
5231	return size;
5232}
5233
5234static int tracing_buffers_release(struct inode *inode, struct file *file)
5235{
5236	struct ftrace_buffer_info *info = file->private_data;
5237	struct trace_iterator *iter = &info->iter;
5238
5239	mutex_lock(&trace_types_lock);
5240
5241	__trace_array_put(iter->tr);
5242
5243	if (info->spare)
5244		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5245	kfree(info);
5246
5247	mutex_unlock(&trace_types_lock);
5248
5249	return 0;
5250}
5251
5252struct buffer_ref {
5253	struct ring_buffer	*buffer;
5254	void			*page;
5255	int			ref;
5256};
5257
5258static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5259				    struct pipe_buffer *buf)
5260{
5261	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5262
5263	if (--ref->ref)
5264		return;
5265
5266	ring_buffer_free_read_page(ref->buffer, ref->page);
5267	kfree(ref);
5268	buf->private = 0;
5269}
5270
5271static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5272				struct pipe_buffer *buf)
5273{
5274	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5275
5276	ref->ref++;
5277}
5278
5279/* Pipe buffer operations for a buffer. */
5280static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5281	.can_merge		= 0,
5282	.confirm		= generic_pipe_buf_confirm,
5283	.release		= buffer_pipe_buf_release,
5284	.steal			= generic_pipe_buf_steal,
5285	.get			= buffer_pipe_buf_get,
5286};
5287
5288/*
5289 * Callback from splice_to_pipe(), if we need to release some pages
5290 * at the end of the spd in case we errored out while filling the pipe.
5291 */
5292static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5293{
5294	struct buffer_ref *ref =
5295		(struct buffer_ref *)spd->partial[i].private;
5296
5297	if (--ref->ref)
5298		return;
5299
5300	ring_buffer_free_read_page(ref->buffer, ref->page);
5301	kfree(ref);
5302	spd->partial[i].private = 0;
5303}
5304
5305static ssize_t
5306tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5307			    struct pipe_inode_info *pipe, size_t len,
5308			    unsigned int flags)
5309{
5310	struct ftrace_buffer_info *info = file->private_data;
5311	struct trace_iterator *iter = &info->iter;
5312	struct partial_page partial_def[PIPE_DEF_BUFFERS];
5313	struct page *pages_def[PIPE_DEF_BUFFERS];
5314	struct splice_pipe_desc spd = {
5315		.pages		= pages_def,
5316		.partial	= partial_def,
5317		.nr_pages_max	= PIPE_DEF_BUFFERS,
5318		.flags		= flags,
5319		.ops		= &buffer_pipe_buf_ops,
5320		.spd_release	= buffer_spd_release,
5321	};
5322	struct buffer_ref *ref;
5323	int entries, size, i;
5324	ssize_t ret;
5325
5326	mutex_lock(&trace_types_lock);
5327
5328#ifdef CONFIG_TRACER_MAX_TRACE
5329	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5330		ret = -EBUSY;
5331		goto out;
5332	}
5333#endif
5334
5335	if (splice_grow_spd(pipe, &spd)) {
5336		ret = -ENOMEM;
5337		goto out;
5338	}
5339
5340	if (*ppos & (PAGE_SIZE - 1)) {
5341		ret = -EINVAL;
5342		goto out;
5343	}
5344
5345	if (len & (PAGE_SIZE - 1)) {
5346		if (len < PAGE_SIZE) {
5347			ret = -EINVAL;
5348			goto out;
5349		}
5350		len &= PAGE_MASK;
5351	}
5352
5353 again:
5354	trace_access_lock(iter->cpu_file);
5355	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5356
5357	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5358		struct page *page;
5359		int r;
5360
5361		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5362		if (!ref)
5363			break;
5364
5365		ref->ref = 1;
5366		ref->buffer = iter->trace_buffer->buffer;
5367		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5368		if (!ref->page) {
5369			kfree(ref);
5370			break;
5371		}
5372
5373		r = ring_buffer_read_page(ref->buffer, &ref->page,
5374					  len, iter->cpu_file, 1);
5375		if (r < 0) {
5376			ring_buffer_free_read_page(ref->buffer, ref->page);
5377			kfree(ref);
5378			break;
5379		}
5380
5381		/*
5382		 * Zero out any leftover data; this is going to
5383		 * user land.
5384		 */
5385		size = ring_buffer_page_len(ref->page);
5386		if (size < PAGE_SIZE)
5387			memset(ref->page + size, 0, PAGE_SIZE - size);
5388
5389		page = virt_to_page(ref->page);
5390
5391		spd.pages[i] = page;
5392		spd.partial[i].len = PAGE_SIZE;
5393		spd.partial[i].offset = 0;
5394		spd.partial[i].private = (unsigned long)ref;
5395		spd.nr_pages++;
5396		*ppos += PAGE_SIZE;
5397
5398		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5399	}
5400
5401	trace_access_unlock(iter->cpu_file);
5402	spd.nr_pages = i;
5403
5404	/* did we read anything? */
5405	if (!spd.nr_pages) {
5406		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
5407			ret = -EAGAIN;
5408			goto out;
5409		}
5410		mutex_unlock(&trace_types_lock);
5411		iter->trace->wait_pipe(iter);
5412		mutex_lock(&trace_types_lock);
5413		if (signal_pending(current)) {
5414			ret = -EINTR;
5415			goto out;
5416		}
5417		goto again;
5418	}
5419
5420	ret = splice_to_pipe(pipe, &spd);
5421	splice_shrink_spd(&spd);
5422out:
5423	mutex_unlock(&trace_types_lock);
5424
5425	return ret;
5426}
5427
5428static const struct file_operations tracing_buffers_fops = {
5429	.open		= tracing_buffers_open,
5430	.read		= tracing_buffers_read,
5431	.poll		= tracing_buffers_poll,
5432	.release	= tracing_buffers_release,
5433	.splice_read	= tracing_buffers_splice_read,
5434	.llseek		= no_llseek,
5435};
5436
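/*
 * Backs the per_cpu/cpuN/stats files (see tracing_init_debugfs_percpu()
 * below): prints per-CPU counters such as entries, overrun and dropped
 * events, one "name: value" pair per line.
 */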
5437static ssize_t
5438tracing_stats_read(struct file *filp, char __user *ubuf,
5439		   size_t count, loff_t *ppos)
5440{
5441	struct inode *inode = file_inode(filp);
5442	struct trace_array *tr = inode->i_private;
5443	struct trace_buffer *trace_buf = &tr->trace_buffer;
5444	int cpu = tracing_get_cpu(inode);
5445	struct trace_seq *s;
5446	unsigned long cnt;
5447	unsigned long long t;
5448	unsigned long usec_rem;
5449
5450	s = kmalloc(sizeof(*s), GFP_KERNEL);
5451	if (!s)
5452		return -ENOMEM;
5453
5454	trace_seq_init(s);
5455
5456	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5457	trace_seq_printf(s, "entries: %ld\n", cnt);
5458
5459	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5460	trace_seq_printf(s, "overrun: %ld\n", cnt);
5461
5462	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5463	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5464
5465	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5466	trace_seq_printf(s, "bytes: %ld\n", cnt);
5467
5468	if (trace_clocks[tr->clock_id].in_ns) {
5469		/* local or global for trace_clock */
5470		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5471		usec_rem = do_div(t, USEC_PER_SEC);
5472		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5473								t, usec_rem);
5474
5475		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5476		usec_rem = do_div(t, USEC_PER_SEC);
5477		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5478	} else {
5479		/* counter or tsc mode for trace_clock */
5480		trace_seq_printf(s, "oldest event ts: %llu\n",
5481				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5482
5483		trace_seq_printf(s, "now ts: %llu\n",
5484				ring_buffer_time_stamp(trace_buf->buffer, cpu));
5485	}
5486
5487	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5488	trace_seq_printf(s, "dropped events: %ld\n", cnt);
5489
5490	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5491	trace_seq_printf(s, "read events: %ld\n", cnt);
5492
5493	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5494
5495	kfree(s);
5496
5497	return count;
5498}
5499
5500static const struct file_operations tracing_stats_fops = {
5501	.open		= tracing_open_generic_tr,
5502	.read		= tracing_stats_read,
5503	.llseek		= generic_file_llseek,
5504	.release	= tracing_release_generic_tr,
5505};
5506
5507#ifdef CONFIG_DYNAMIC_FTRACE
5508
5509int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5510{
5511	return 0;
5512}
5513
5514static ssize_t
5515tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5516		  size_t cnt, loff_t *ppos)
5517{
5518	static char ftrace_dyn_info_buffer[1024];
5519	static DEFINE_MUTEX(dyn_info_mutex);
5520	unsigned long *p = filp->private_data;
5521	char *buf = ftrace_dyn_info_buffer;
5522	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5523	int r;
5524
5525	mutex_lock(&dyn_info_mutex);
5526	r = sprintf(buf, "%ld ", *p);
5527
5528	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5529	buf[r++] = '\n';
5530
5531	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5532
5533	mutex_unlock(&dyn_info_mutex);
5534
5535	return r;
5536}
5537
5538static const struct file_operations tracing_dyn_info_fops = {
5539	.open		= tracing_open_generic,
5540	.read		= tracing_read_dyn_info,
5541	.llseek		= generic_file_llseek,
5542};
5543#endif /* CONFIG_DYNAMIC_FTRACE */
5544
5545#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5546static void
5547ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5548{
5549	tracing_snapshot();
5550}
5551
5552static void
5553ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5554{
5555	unsigned long *count = (long *)data;
5556
5557	if (!*count)
5558		return;
5559
5560	if (*count != -1)
5561		(*count)--;
5562
5563	tracing_snapshot();
5564}
5565
5566static int
5567ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5568		      struct ftrace_probe_ops *ops, void *data)
5569{
5570	long count = (long)data;
5571
5572	seq_printf(m, "%ps:", (void *)ip);
5573
5574	seq_printf(m, "snapshot");
5575
5576	if (count == -1)
5577		seq_printf(m, ":unlimited\n");
5578	else
5579		seq_printf(m, ":count=%ld\n", count);
5580
5581	return 0;
5582}
5583
5584static struct ftrace_probe_ops snapshot_probe_ops = {
5585	.func			= ftrace_snapshot,
5586	.print			= ftrace_snapshot_print,
5587};
5588
5589static struct ftrace_probe_ops snapshot_count_probe_ops = {
5590	.func			= ftrace_count_snapshot,
5591	.print			= ftrace_snapshot_print,
5592};
5593
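/*
 * Handler for the "snapshot" ftrace function command.  A usage sketch
 * via set_ftrace_filter (the function name is just an example):
 *
 *   echo 'kfree:snapshot'   > set_ftrace_filter   # snapshot on every kfree()
 *   echo 'kfree:snapshot:5' > set_ftrace_filter   # only the first 5 hits
 *   echo '!kfree:snapshot'  > set_ftrace_filter   # remove the probe
 *
 * The optional :count parameter is parsed below; a leading '!'
 * unregisters the probe.
 */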
5594static int
5595ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5596			       char *glob, char *cmd, char *param, int enable)
5597{
5598	struct ftrace_probe_ops *ops;
5599	void *count = (void *)-1;
5600	char *number;
5601	int ret;
5602
5603	/* hash funcs only work with set_ftrace_filter */
5604	if (!enable)
5605		return -EINVAL;
5606
5607	ops = param ? &snapshot_count_probe_ops :  &snapshot_probe_ops;
5608
5609	if (glob[0] == '!') {
5610		unregister_ftrace_function_probe_func(glob+1, ops);
5611		return 0;
5612	}
5613
5614	if (!param)
5615		goto out_reg;
5616
5617	number = strsep(&param, ":");
5618
5619	if (!strlen(number))
5620		goto out_reg;
5621
5622	/*
5623	 * We use the callback data field (which is a pointer)
5624	 * as our counter.
5625	 */
5626	ret = kstrtoul(number, 0, (unsigned long *)&count);
5627	if (ret)
5628		return ret;
5629
5630 out_reg:
5631	ret = register_ftrace_function_probe(glob, ops, count);
5632
5633	if (ret >= 0)
5634		alloc_snapshot(&global_trace);
5635
5636	return ret < 0 ? ret : 0;
5637}
5638
5639static struct ftrace_func_command ftrace_snapshot_cmd = {
5640	.name			= "snapshot",
5641	.func			= ftrace_trace_snapshot_callback,
5642};
5643
5644static __init int register_snapshot_cmd(void)
5645{
5646	return register_ftrace_command(&ftrace_snapshot_cmd);
5647}
5648#else
5649static inline __init int register_snapshot_cmd(void) { return 0; }
5650#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
5651
5652struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
5653{
5654	if (tr->dir)
5655		return tr->dir;
5656
5657	if (!debugfs_initialized())
5658		return NULL;
5659
5660	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5661		tr->dir = debugfs_create_dir("tracing", NULL);
5662
5663	if (!tr->dir)
5664		pr_warn_once("Could not create debugfs directory 'tracing'\n");
5665
5666	return tr->dir;
5667}
5668
5669struct dentry *tracing_init_dentry(void)
5670{
5671	return tracing_init_dentry_tr(&global_trace);
5672}
5673
5674static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5675{
5676	struct dentry *d_tracer;
5677
5678	if (tr->percpu_dir)
5679		return tr->percpu_dir;
5680
5681	d_tracer = tracing_init_dentry_tr(tr);
5682	if (!d_tracer)
5683		return NULL;
5684
5685	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
5686
5687	WARN_ONCE(!tr->percpu_dir,
5688		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
5689
5690	return tr->percpu_dir;
5691}
5692
5693static struct dentry *
5694trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5695		      void *data, long cpu, const struct file_operations *fops)
5696{
5697	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5698
5699	if (ret) /* See tracing_get_cpu() */
5700		ret->d_inode->i_cdev = (void *)(cpu + 1);
5701	return ret;
5702}
5703
5704static void
5705tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
5706{
5707	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5708	struct dentry *d_cpu;
5709	char cpu_dir[30]; /* 30 characters should be more than enough */
5710
5711	if (!d_percpu)
5712		return;
5713
5714	snprintf(cpu_dir, 30, "cpu%ld", cpu);
5715	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5716	if (!d_cpu) {
5717		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5718		return;
5719	}
5720
5721	/* per cpu trace_pipe */
5722	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
5723				tr, cpu, &tracing_pipe_fops);
5724
5725	/* per cpu trace */
5726	trace_create_cpu_file("trace", 0644, d_cpu,
5727				tr, cpu, &tracing_fops);
5728
5729	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
5730				tr, cpu, &tracing_buffers_fops);
5731
5732	trace_create_cpu_file("stats", 0444, d_cpu,
5733				tr, cpu, &tracing_stats_fops);
5734
5735	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
5736				tr, cpu, &tracing_entries_fops);
5737
5738#ifdef CONFIG_TRACER_SNAPSHOT
5739	trace_create_cpu_file("snapshot", 0644, d_cpu,
5740				tr, cpu, &snapshot_fops);
5741
5742	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
5743				tr, cpu, &snapshot_raw_fops);
5744#endif
5745}
5746
5747#ifdef CONFIG_FTRACE_SELFTEST
5748/* Let selftest have access to static functions in this file */
5749#include "trace_selftest.c"
5750#endif
5751
5752struct trace_option_dentry {
5753	struct tracer_opt		*opt;
5754	struct tracer_flags		*flags;
5755	struct trace_array		*tr;
5756	struct dentry			*entry;
5757};
5758
5759static ssize_t
5760trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5761			loff_t *ppos)
5762{
5763	struct trace_option_dentry *topt = filp->private_data;
5764	char *buf;
5765
5766	if (topt->flags->val & topt->opt->bit)
5767		buf = "1\n";
5768	else
5769		buf = "0\n";
5770
5771	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5772}
5773
5774static ssize_t
5775trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5776			 loff_t *ppos)
5777{
5778	struct trace_option_dentry *topt = filp->private_data;
5779	unsigned long val;
5780	int ret;
5781
5782	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5783	if (ret)
5784		return ret;
5785
5786	if (val != 0 && val != 1)
5787		return -EINVAL;
5788
5789	if (!!(topt->flags->val & topt->opt->bit) != val) {
5790		mutex_lock(&trace_types_lock);
5791		ret = __set_tracer_option(topt->tr, topt->flags,
5792					  topt->opt, !val);
5793		mutex_unlock(&trace_types_lock);
5794		if (ret)
5795			return ret;
5796	}
5797
5798	*ppos += cnt;
5799
5800	return cnt;
5801}
5802
5803
5804static const struct file_operations trace_options_fops = {
5805	.open = tracing_open_generic,
5806	.read = trace_options_read,
5807	.write = trace_options_write,
5808	.llseek	= generic_file_llseek,
5809};
5810
5811static ssize_t
5812trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5813			loff_t *ppos)
5814{
5815	long index = (long)filp->private_data;
5816	char *buf;
5817
5818	if (trace_flags & (1 << index))
5819		buf = "1\n";
5820	else
5821		buf = "0\n";
5822
5823	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5824}
5825
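/*
 * Each file under the "options" directory maps to one bit of
 * trace_flags; writing 0 or 1 toggles it.  A sketch (option names are
 * examples from the trace_options table):
 *
 *   echo 1 > options/sym-offset
 *   echo 0 > options/overwrite
 *
 * Only the values 0 and 1 are accepted (checked below).
 */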
5826static ssize_t
5827trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5828			 loff_t *ppos)
5829{
5830	struct trace_array *tr = &global_trace;
5831	long index = (long)filp->private_data;
5832	unsigned long val;
5833	int ret;
5834
5835	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5836	if (ret)
5837		return ret;
5838
5839	if (val != 0 && val != 1)
5840		return -EINVAL;
5841
5842	mutex_lock(&trace_types_lock);
5843	ret = set_tracer_flag(tr, 1 << index, val);
5844	mutex_unlock(&trace_types_lock);
5845
5846	if (ret < 0)
5847		return ret;
5848
5849	*ppos += cnt;
5850
5851	return cnt;
5852}
5853
5854static const struct file_operations trace_options_core_fops = {
5855	.open = tracing_open_generic,
5856	.read = trace_options_core_read,
5857	.write = trace_options_core_write,
5858	.llseek = generic_file_llseek,
5859};
5860
5861struct dentry *trace_create_file(const char *name,
5862				 umode_t mode,
5863				 struct dentry *parent,
5864				 void *data,
5865				 const struct file_operations *fops)
5866{
5867	struct dentry *ret;
5868
5869	ret = debugfs_create_file(name, mode, parent, data, fops);
5870	if (!ret)
5871		pr_warning("Could not create debugfs '%s' entry\n", name);
5872
5873	return ret;
5874}
5875
5876
5877static struct dentry *trace_options_init_dentry(struct trace_array *tr)
5878{
5879	struct dentry *d_tracer;
5880
5881	if (tr->options)
5882		return tr->options;
5883
5884	d_tracer = tracing_init_dentry_tr(tr);
5885	if (!d_tracer)
5886		return NULL;
5887
5888	tr->options = debugfs_create_dir("options", d_tracer);
5889	if (!tr->options) {
5890		pr_warning("Could not create debugfs directory 'options'\n");
5891		return NULL;
5892	}
5893
5894	return tr->options;
5895}
5896
5897static void
5898create_trace_option_file(struct trace_array *tr,
5899			 struct trace_option_dentry *topt,
5900			 struct tracer_flags *flags,
5901			 struct tracer_opt *opt)
5902{
5903	struct dentry *t_options;
5904
5905	t_options = trace_options_init_dentry(tr);
5906	if (!t_options)
5907		return;
5908
5909	topt->flags = flags;
5910	topt->opt = opt;
5911	topt->tr = tr;
5912
5913	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
5914				    &trace_options_fops);
5915
5916}
5917
5918static struct trace_option_dentry *
5919create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
5920{
5921	struct trace_option_dentry *topts;
5922	struct tracer_flags *flags;
5923	struct tracer_opt *opts;
5924	int cnt;
5925
5926	if (!tracer)
5927		return NULL;
5928
5929	flags = tracer->flags;
5930
5931	if (!flags || !flags->opts)
5932		return NULL;
5933
5934	opts = flags->opts;
5935
5936	for (cnt = 0; opts[cnt].name; cnt++)
5937		;
5938
5939	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
5940	if (!topts)
5941		return NULL;
5942
5943	for (cnt = 0; opts[cnt].name; cnt++)
5944		create_trace_option_file(tr, &topts[cnt], flags,
5945					 &opts[cnt]);
5946
5947	return topts;
5948}
5949
5950static void
5951destroy_trace_option_files(struct trace_option_dentry *topts)
5952{
5953	int cnt;
5954
5955	if (!topts)
5956		return;
5957
5958	for (cnt = 0; topts[cnt].opt; cnt++) {
5959		if (topts[cnt].entry)
5960			debugfs_remove(topts[cnt].entry);
5961	}
5962
5963	kfree(topts);
5964}
5965
5966static struct dentry *
5967create_trace_option_core_file(struct trace_array *tr,
5968			      const char *option, long index)
5969{
5970	struct dentry *t_options;
5971
5972	t_options = trace_options_init_dentry(tr);
5973	if (!t_options)
5974		return NULL;
5975
5976	return trace_create_file(option, 0644, t_options, (void *)index,
5977				    &trace_options_core_fops);
5978}
5979
5980static __init void create_trace_options_dir(struct trace_array *tr)
5981{
5982	struct dentry *t_options;
5983	int i;
5984
5985	t_options = trace_options_init_dentry(tr);
5986	if (!t_options)
5987		return;
5988
5989	for (i = 0; trace_options[i]; i++)
5990		create_trace_option_core_file(tr, trace_options[i], i);
5991}
5992
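/*
 * Illustrative usage (editor's sketch, not part of the kernel source):
 * create_trace_options_dir() above turns each entry of trace_options[]
 * into a 0/1 file under the "options" directory, backed by
 * trace_options_core_write(). From a shell, with debugfs mounted in
 * the usual place:
 *
 *	# echo 1 > /sys/kernel/debug/tracing/options/stacktrace
 *	# echo 0 > /sys/kernel/debug/tracing/options/stacktrace
 *
 * Any value other than 0 or 1 is rejected with -EINVAL, per the check
 * in trace_options_core_write().
 */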
5993static ssize_t
5994rb_simple_read(struct file *filp, char __user *ubuf,
5995	       size_t cnt, loff_t *ppos)
5996{
5997	struct trace_array *tr = filp->private_data;
5998	char buf[64];
5999	int r;
6000
6001	r = tracer_tracing_is_on(tr);
6002	r = sprintf(buf, "%d\n", r);
6003
6004	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6005}
6006
6007static ssize_t
6008rb_simple_write(struct file *filp, const char __user *ubuf,
6009		size_t cnt, loff_t *ppos)
6010{
6011	struct trace_array *tr = filp->private_data;
6012	struct ring_buffer *buffer = tr->trace_buffer.buffer;
6013	unsigned long val;
6014	int ret;
6015
6016	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6017	if (ret)
6018		return ret;
6019
6020	if (buffer) {
6021		mutex_lock(&trace_types_lock);
6022		if (val) {
6023			tracer_tracing_on(tr);
6024			if (tr->current_trace->start)
6025				tr->current_trace->start(tr);
6026		} else {
6027			tracer_tracing_off(tr);
6028			if (tr->current_trace->stop)
6029				tr->current_trace->stop(tr);
6030		}
6031		mutex_unlock(&trace_types_lock);
6032	}
6033
6034	(*ppos)++;
6035
6036	return cnt;
6037}
6038
6039static const struct file_operations rb_simple_fops = {
6040	.open		= tracing_open_generic_tr,
6041	.read		= rb_simple_read,
6042	.write		= rb_simple_write,
6043	.release	= tracing_release_generic_tr,
6044	.llseek		= default_llseek,
6045};
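/*
 * Illustrative usage (editor's sketch, not part of the kernel source):
 * rb_simple_fops backs the "tracing_on" file created in
 * init_tracer_debugfs() below. Writing 0 makes the ring buffer stop
 * recording (without removing any tracer overhead); writing 1 resumes:
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_on
 *	# echo 1 > /sys/kernel/debug/tracing/tracing_on
 */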
6046
6047struct dentry *trace_instance_dir;
6048
6049static void
6050init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6051
6052static int
6053allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6054{
6055	enum ring_buffer_flags rb_flags;
6056
6057	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6058
6059	buf->tr = tr;
6060
6061	buf->buffer = ring_buffer_alloc(size, rb_flags);
6062	if (!buf->buffer)
6063		return -ENOMEM;
6064
6065	buf->data = alloc_percpu(struct trace_array_cpu);
6066	if (!buf->data) {
6067		ring_buffer_free(buf->buffer);
6068		return -ENOMEM;
6069	}
6070
6071	/* Allocate the first page for all buffers */
6072	set_buffer_entries(&tr->trace_buffer,
6073			   ring_buffer_size(tr->trace_buffer.buffer, 0));
6074
6075	return 0;
6076}
6077
6078static int allocate_trace_buffers(struct trace_array *tr, int size)
6079{
6080	int ret;
6081
6082	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6083	if (ret)
6084		return ret;
6085
6086#ifdef CONFIG_TRACER_MAX_TRACE
6087	ret = allocate_trace_buffer(tr, &tr->max_buffer,
6088				    allocate_snapshot ? size : 1);
6089	if (WARN_ON(ret)) {
6090		ring_buffer_free(tr->trace_buffer.buffer);
6091		free_percpu(tr->trace_buffer.data);
6092		return -ENOMEM;
6093	}
6094	tr->allocated_snapshot = allocate_snapshot;
6095
6096	/*
6097	 * Only the top level trace array gets its snapshot allocated
6098	 * from the kernel command line.
6099	 */
6100	allocate_snapshot = false;
6101#endif
6102	return 0;
6103}
6104
6105static int new_instance_create(const char *name)
6106{
6107	struct trace_array *tr;
6108	int ret;
6109
6110	mutex_lock(&trace_types_lock);
6111
6112	ret = -EEXIST;
6113	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6114		if (tr->name && strcmp(tr->name, name) == 0)
6115			goto out_unlock;
6116	}
6117
6118	ret = -ENOMEM;
6119	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6120	if (!tr)
6121		goto out_unlock;
6122
6123	tr->name = kstrdup(name, GFP_KERNEL);
6124	if (!tr->name)
6125		goto out_free_tr;
6126
6127	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6128		goto out_free_tr;
6129
6130	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6131
6132	raw_spin_lock_init(&tr->start_lock);
6133
6134	tr->current_trace = &nop_trace;
6135
6136	INIT_LIST_HEAD(&tr->systems);
6137	INIT_LIST_HEAD(&tr->events);
6138
6139	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6140		goto out_free_tr;
6141
6142	tr->dir = debugfs_create_dir(name, trace_instance_dir);
6143	if (!tr->dir)
6144		goto out_free_tr;
6145
6146	ret = event_trace_add_tracer(tr->dir, tr);
6147	if (ret) {
6148		debugfs_remove_recursive(tr->dir);
6149		goto out_free_tr;
6150	}
6151
6152	init_tracer_debugfs(tr, tr->dir);
6153
6154	list_add(&tr->list, &ftrace_trace_arrays);
6155
6156	mutex_unlock(&trace_types_lock);
6157
6158	return 0;
6159
6160 out_free_tr:
6161	if (tr->trace_buffer.buffer)
6162		ring_buffer_free(tr->trace_buffer.buffer);
6163	free_cpumask_var(tr->tracing_cpumask);
6164	kfree(tr->name);
6165	kfree(tr);
6166
6167 out_unlock:
6168	mutex_unlock(&trace_types_lock);
6169
6170	return ret;
6171
6172}
6173
6174static int instance_delete(const char *name)
6175{
6176	struct trace_array *tr;
6177	int found = 0;
6178	int ret;
6179
6180	mutex_lock(&trace_types_lock);
6181
6182	ret = -ENODEV;
6183	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6184		if (tr->name && strcmp(tr->name, name) == 0) {
6185			found = 1;
6186			break;
6187		}
6188	}
6189	if (!found)
6190		goto out_unlock;
6191
6192	ret = -EBUSY;
6193	if (tr->ref)
6194		goto out_unlock;
6195
6196	list_del(&tr->list);
6197
6198	tracing_set_nop(tr);
6199	event_trace_del_tracer(tr);
6200	ftrace_destroy_function_files(tr);
6201	debugfs_remove_recursive(tr->dir);
6202	free_percpu(tr->trace_buffer.data);
6203	ring_buffer_free(tr->trace_buffer.buffer);
6204
6205	kfree(tr->name);
6206	kfree(tr);
6207
6208	ret = 0;
6209
6210 out_unlock:
6211	mutex_unlock(&trace_types_lock);
6212
6213	return ret;
6214}
6215
6216static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
6217{
6218	struct dentry *parent;
6219	int ret;
6220
6221	/* Paranoid: Make sure the parent is the "instances" directory */
6222	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6223	if (WARN_ON_ONCE(parent != trace_instance_dir))
6224		return -ENOENT;
6225
6226	/*
6227	 * The inode mutex is locked, but debugfs_create_dir() will also
6228	 * take the mutex. As the instances directory can not be destroyed
6229	 * or changed in any other way, it is safe to unlock it, and
6230	 * let the dentry try. If two users try to make the same dir at
6231	 * the same time, then the new_instance_create() will determine the
6232	 * winner.
6233	 */
6234	mutex_unlock(&inode->i_mutex);
6235
6236	ret = new_instance_create(dentry->d_iname);
6237
6238	mutex_lock(&inode->i_mutex);
6239
6240	return ret;
6241}
6242
6243static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6244{
6245	struct dentry *parent;
6246	int ret;
6247
6248	/* Paranoid: Make sure the parent is the "instances" directory */
6249	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6250	if (WARN_ON_ONCE(parent != trace_instance_dir))
6251		return -ENOENT;
6252
6253	/* The caller did a dget() on dentry */
6254	mutex_unlock(&dentry->d_inode->i_mutex);
6255
6256	/*
6257	 * The inode mutex is locked, but debugfs_create_dir() will also
6258	 * take the mutex. As the instances directory can not be destroyed
6259	 * or changed in any other way, it is safe to unlock it, and
6260	 * let the dentry try. If two users try to make the same dir at
6261	 * the same time, then the instance_delete() will determine the
6262	 * winner.
6263	 */
6264	mutex_unlock(&inode->i_mutex);
6265
6266	ret = instance_delete(dentry->d_iname);
6267
6268	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6269	mutex_lock(&dentry->d_inode->i_mutex);
6270
6271	return ret;
6272}
6273
6274static const struct inode_operations instance_dir_inode_operations = {
6275	.lookup		= simple_lookup,
6276	.mkdir		= instance_mkdir,
6277	.rmdir		= instance_rmdir,
6278};
6279
6280static __init void create_trace_instances(struct dentry *d_tracer)
6281{
6282	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6283	if (WARN_ON(!trace_instance_dir))
6284		return;
6285
6286	/* Hijack the dir inode operations, to allow mkdir */
6287	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6288}
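/*
 * Illustrative usage (editor's sketch, not part of the kernel source):
 * because the "instances" directory's inode operations are replaced
 * above, a plain mkdir/rmdir from userspace creates and destroys trace
 * instances:
 *
 *	# mkdir /sys/kernel/debug/tracing/instances/foo
 *	# rmdir /sys/kernel/debug/tracing/instances/foo
 *
 * mkdir ends up in new_instance_create(), rmdir in instance_delete();
 * the latter fails with -EBUSY while the instance still has open
 * references (tr->ref).
 */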
6289
6290static void
6291init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6292{
6293	int cpu;
6294
6295	trace_create_file("available_tracers", 0444, d_tracer,
6296			tr, &show_traces_fops);
6297
6298	trace_create_file("current_tracer", 0644, d_tracer,
6299			tr, &set_tracer_fops);
6300
6301	trace_create_file("tracing_cpumask", 0644, d_tracer,
6302			  tr, &tracing_cpumask_fops);
6303
6304	trace_create_file("trace_options", 0644, d_tracer,
6305			  tr, &tracing_iter_fops);
6306
6307	trace_create_file("trace", 0644, d_tracer,
6308			  tr, &tracing_fops);
6309
6310	trace_create_file("trace_pipe", 0444, d_tracer,
6311			  tr, &tracing_pipe_fops);
6312
6313	trace_create_file("buffer_size_kb", 0644, d_tracer,
6314			  tr, &tracing_entries_fops);
6315
6316	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6317			  tr, &tracing_total_entries_fops);
6318
6319	trace_create_file("free_buffer", 0200, d_tracer,
6320			  tr, &tracing_free_buffer_fops);
6321
6322	trace_create_file("trace_marker", 0220, d_tracer,
6323			  tr, &tracing_mark_fops);
6324
6325	trace_create_file("trace_clock", 0644, d_tracer, tr,
6326			  &trace_clock_fops);
6327
6328	trace_create_file("tracing_on", 0644, d_tracer,
6329			  tr, &rb_simple_fops);
6330
6331	if (ftrace_create_function_files(tr, d_tracer))
6332		WARN(1, "Could not allocate function filter files");
6333
6334#ifdef CONFIG_TRACER_SNAPSHOT
6335	trace_create_file("snapshot", 0644, d_tracer,
6336			  tr, &snapshot_fops);
6337#endif
6338
6339	for_each_tracing_cpu(cpu)
6340		tracing_init_debugfs_percpu(tr, cpu);
6341
6342}
6343
6344static __init int tracer_init_debugfs(void)
6345{
6346	struct dentry *d_tracer;
6347
6348	trace_access_lock_init();
6349
6350	d_tracer = tracing_init_dentry();
6351	if (!d_tracer)
6352		return 0;
6353
6354	init_tracer_debugfs(&global_trace, d_tracer);
6355
6356#ifdef CONFIG_TRACER_MAX_TRACE
6357	trace_create_file("tracing_max_latency", 0644, d_tracer,
6358			&tracing_max_latency, &tracing_max_lat_fops);
6359#endif
6360
6361	trace_create_file("tracing_thresh", 0644, d_tracer,
6362			&tracing_thresh, &tracing_max_lat_fops);
6363
6364	trace_create_file("README", 0444, d_tracer,
6365			NULL, &tracing_readme_fops);
6366
6367	trace_create_file("saved_cmdlines", 0444, d_tracer,
6368			NULL, &tracing_saved_cmdlines_fops);
6369
6369
6370#ifdef CONFIG_DYNAMIC_FTRACE
6371	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6372			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6373#endif
6374
6375	create_trace_instances(d_tracer);
6376
6377	create_trace_options_dir(&global_trace);
6378
6379	return 0;
6380}
6381
6382static int trace_panic_handler(struct notifier_block *this,
6383			       unsigned long event, void *unused)
6384{
6385	if (ftrace_dump_on_oops)
6386		ftrace_dump(ftrace_dump_on_oops);
6387	return NOTIFY_OK;
6388}
6389
6390static struct notifier_block trace_panic_notifier = {
6391	.notifier_call  = trace_panic_handler,
6392	.next           = NULL,
6393	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
6394};
6395
6396static int trace_die_handler(struct notifier_block *self,
6397			     unsigned long val,
6398			     void *data)
6399{
6400	switch (val) {
6401	case DIE_OOPS:
6402		if (ftrace_dump_on_oops)
6403			ftrace_dump(ftrace_dump_on_oops);
6404		break;
6405	default:
6406		break;
6407	}
6408	return NOTIFY_OK;
6409}
6410
6411static struct notifier_block trace_die_notifier = {
6412	.notifier_call = trace_die_handler,
6413	.priority = 200
6414};
6415
6416/*
6417 * printk is set to a max of 1024; we really don't need it that big.
6418 * Nothing should be printing 1000 characters anyway.
6419 */
6420#define TRACE_MAX_PRINT		1000
6421
6422/*
6423 * Define here KERN_TRACE so that we have one place to modify
6424 * it if we decide to change what log level the ftrace dump
6425 * should be at.
6426 */
6427#define KERN_TRACE		KERN_EMERG
6428
6429void
6430trace_printk_seq(struct trace_seq *s)
6431{
6432	/* Probably should print a warning here. */
6433	if (s->len >= TRACE_MAX_PRINT)
6434		s->len = TRACE_MAX_PRINT;
6435
6436	/* should be zero ended, but we are paranoid. */
6437	s->buffer[s->len] = 0;
6438
6439	printk(KERN_TRACE "%s", s->buffer);
6440
6441	trace_seq_init(s);
6442}
6443
6444void trace_init_global_iter(struct trace_iterator *iter)
6445{
6446	iter->tr = &global_trace;
6447	iter->trace = iter->tr->current_trace;
6448	iter->cpu_file = RING_BUFFER_ALL_CPUS;
6449	iter->trace_buffer = &global_trace.trace_buffer;
6450
6451	if (iter->trace && iter->trace->open)
6452		iter->trace->open(iter);
6453
6454	/* Annotate start of buffers if we had overruns */
6455	if (ring_buffer_overruns(iter->trace_buffer->buffer))
6456		iter->iter_flags |= TRACE_FILE_ANNOTATE;
6457
6458	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
6459	if (trace_clocks[iter->tr->clock_id].in_ns)
6460		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6461}
6462
6463void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
6464{
6465	/* use static because iter can be a bit big for the stack */
6466	static struct trace_iterator iter;
6467	static atomic_t dump_running;
6468	unsigned int old_userobj;
6469	unsigned long flags;
6470	int cnt = 0, cpu;
6471
6472	/* Only allow one dump user at a time. */
6473	if (atomic_inc_return(&dump_running) != 1) {
6474		atomic_dec(&dump_running);
6475		return;
6476	}
6477
6478	/*
6479	 * Always turn off tracing when we dump.
6480	 * We don't need to show trace output of what happens
6481	 * between multiple crashes.
6482	 *
6483	 * If the user does a sysrq-z, then they can re-enable
6484	 * tracing with echo 1 > tracing_on.
6485	 */
6486	tracing_off();
6487
6488	local_irq_save(flags);
6489
6490	/* Simulate the iterator */
6491	trace_init_global_iter(&iter);
6492
6493	for_each_tracing_cpu(cpu) {
6494		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
6495	}
6496
6497	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6498
6499	/* don't look at user memory in panic mode */
6500	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6501
6502	switch (oops_dump_mode) {
6503	case DUMP_ALL:
6504		iter.cpu_file = RING_BUFFER_ALL_CPUS;
6505		break;
6506	case DUMP_ORIG:
6507		iter.cpu_file = raw_smp_processor_id();
6508		break;
6509	case DUMP_NONE:
6510		goto out_enable;
6511	default:
6512		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
6513		iter.cpu_file = RING_BUFFER_ALL_CPUS;
6514	}
6515
6516	printk(KERN_TRACE "Dumping ftrace buffer:\n");
6517
6518	/* Did function tracer already get disabled? */
6519	if (ftrace_is_dead()) {
6520		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6521		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
6522	}
6523
6524	/*
6525	 * We need to stop all tracing on all CPUS to read
6526	 * the next buffer. This is a bit expensive, but is
6527	 * not done often. We fill all that we can read,
6528	 * and then release the locks again.
6529	 */
6530
6531	while (!trace_empty(&iter)) {
6532
6533		if (!cnt)
6534			printk(KERN_TRACE "---------------------------------\n");
6535
6536		cnt++;
6537
6538		/* reset all but tr, trace, and overruns */
6539		memset(&iter.seq, 0,
6540		       sizeof(struct trace_iterator) -
6541		       offsetof(struct trace_iterator, seq));
6542		iter.iter_flags |= TRACE_FILE_LAT_FMT;
6543		iter.pos = -1;
6544
6545		if (trace_find_next_entry_inc(&iter) != NULL) {
6546			int ret;
6547
6548			ret = print_trace_line(&iter);
6549			if (ret != TRACE_TYPE_NO_CONSUME)
6550				trace_consume(&iter);
6551		}
6552		touch_nmi_watchdog();
6553
6554		trace_printk_seq(&iter.seq);
6555	}
6556
6557	if (!cnt)
6558		printk(KERN_TRACE "   (ftrace buffer empty)\n");
6559	else
6560		printk(KERN_TRACE "---------------------------------\n");
6561
6562 out_enable:
6563	trace_flags |= old_userobj;
6564
6565	for_each_tracing_cpu(cpu) {
6566		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
6567	}
6568	atomic_dec(&dump_running);
6569	local_irq_restore(flags);
6570}
6571EXPORT_SYMBOL_GPL(ftrace_dump);
6572
6573__init static int tracer_alloc_buffers(void)
6574{
6575	int ring_buf_size;
6576	int ret = -ENOMEM;
6577
6578
6579	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6580		goto out;
6581
6582	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
6583		goto out_free_buffer_mask;
6584
6585	/* Only allocate trace_printk buffers if a trace_printk exists */
6586	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
6587		/* Must be called before global_trace.buffer is allocated */
6588		trace_printk_init_buffers();
6589
6590	/* To save memory, keep the ring buffer size at its minimum */
6591	if (ring_buffer_expanded)
6592		ring_buf_size = trace_buf_size;
6593	else
6594		ring_buf_size = 1;
6595
6596	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
6597	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
6598
6599	raw_spin_lock_init(&global_trace.start_lock);
6600
6601	/* Used for event triggers */
6602	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6603	if (!temp_buffer)
6604		goto out_free_cpumask;
6605
6606	/* TODO: make the number of buffers hot pluggable with CPUS */
6607	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6608		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6609		WARN_ON(1);
6610		goto out_free_temp_buffer;
6611	}
6612
6613	if (global_trace.buffer_disabled)
6614		tracing_off();
6615
6616	trace_init_cmdlines();
6617
6618	if (trace_boot_clock) {
6619		ret = tracing_set_clock(&global_trace, trace_boot_clock);
6620		if (ret < 0)
6621			pr_warning("Trace clock %s not defined, going back to default\n",
6622				   trace_boot_clock);
6623	}
6624
6625	/*
6626	 * register_tracer() might reference current_trace, so it
6627	 * needs to be set before we register anything. This is
6628	 * just a bootstrap of current_trace anyway.
6629	 */
6630	global_trace.current_trace = &nop_trace;
6631
6632	register_tracer(&nop_trace);
6633
6634	/* All seems OK, enable tracing */
6635	tracing_disabled = 0;
6636
6637	atomic_notifier_chain_register(&panic_notifier_list,
6638				       &trace_panic_notifier);
6639
6640	register_die_notifier(&trace_die_notifier);
6641
6642	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6643
6644	INIT_LIST_HEAD(&global_trace.systems);
6645	INIT_LIST_HEAD(&global_trace.events);
6646	list_add(&global_trace.list, &ftrace_trace_arrays);
6647
6648	while (trace_boot_options) {
6649		char *option;
6650
6651		option = strsep(&trace_boot_options, ",");
6652		trace_set_options(&global_trace, option);
6653	}
6654
6655	register_snapshot_cmd();
6656
6657	return 0;
6658
6659out_free_temp_buffer:
6660	ring_buffer_free(temp_buffer);
6661out_free_cpumask:
6662	free_percpu(global_trace.trace_buffer.data);
6663#ifdef CONFIG_TRACER_MAX_TRACE
6664	free_percpu(global_trace.max_buffer.data);
6665#endif
6666	free_cpumask_var(global_trace.tracing_cpumask);
6667out_free_buffer_mask:
6668	free_cpumask_var(tracing_buffer_mask);
6669out:
6670	return ret;
6671}
6672
6673__init static int clear_boot_tracer(void)
6674{
6675	/*
6676	 * The buffer holding the default bootup tracer's name lives in
6677	 * an init section. This function is called at late init. If we did not
6678	 * find the boot tracer, then clear it out, to prevent
6679	 * later registration from accessing the buffer that is
6680	 * about to be freed.
6681	 */
6682	if (!default_bootup_tracer)
6683		return 0;
6684
6685	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6686	       default_bootup_tracer);
6687	default_bootup_tracer = NULL;
6688
6689	return 0;
6690}
6691
6692early_initcall(tracer_alloc_buffers);
6693fs_initcall(tracer_init_debugfs);
6694late_initcall(clear_boot_tracer);
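/*
 * Editor's note on the ordering above (a reading aid, not part of the
 * kernel source): early_initcall runs first, so the ring buffers and
 * the nop tracer exist before the fs_initcall creates the debugfs
 * files that expose them; late_initcall runs after all built-in
 * tracers have registered, so clear_boot_tracer() only reports a
 * missing "ftrace=" bootup tracer once every tracer has had its
 * chance to register.
 */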
v4.6
   1/*
   2 * ring buffer based function tracer
   3 *
   4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
   5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
   6 *
   7 * Originally taken from the RT patch by:
   8 *    Arnaldo Carvalho de Melo <acme@redhat.com>
   9 *
  10 * Based on code from the latency_tracer, that is:
  11 *  Copyright (C) 2004-2006 Ingo Molnar
  12 *  Copyright (C) 2004 Nadia Yvette Chambers
  13 */
  14#include <linux/ring_buffer.h>
  15#include <generated/utsrelease.h>
  16#include <linux/stacktrace.h>
  17#include <linux/writeback.h>
  18#include <linux/kallsyms.h>
  19#include <linux/seq_file.h>
  20#include <linux/notifier.h>
  21#include <linux/irqflags.h>
  22#include <linux/debugfs.h>
  23#include <linux/tracefs.h>
  24#include <linux/pagemap.h>
  25#include <linux/hardirq.h>
  26#include <linux/linkage.h>
  27#include <linux/uaccess.h>
  28#include <linux/kprobes.h>
  29#include <linux/ftrace.h>
  30#include <linux/module.h>
  31#include <linux/percpu.h>
  32#include <linux/splice.h>
  33#include <linux/kdebug.h>
  34#include <linux/string.h>
  35#include <linux/mount.h>
  36#include <linux/rwsem.h>
  37#include <linux/slab.h>
  38#include <linux/ctype.h>
  39#include <linux/init.h>
  40#include <linux/poll.h>
  41#include <linux/nmi.h>
  42#include <linux/fs.h>
  43#include <linux/sched/rt.h>
  44
  45#include "trace.h"
  46#include "trace_output.h"
  47
  48/*
  49 * On boot up, the ring buffer is set to the minimum size, so that
  50 * we do not waste memory on systems that are not using tracing.
  51 */
  52bool ring_buffer_expanded;
  53
  54/*
  55 * We need to change this state when a selftest is running.
  56 * A selftest will lurk into the ring-buffer to count the
  57 * entries inserted during the selftest although some concurrent
   58 * insertions into the ring-buffer such as trace_printk could have occurred
  59 * at the same time, giving false positive or negative results.
  60 */
  61static bool __read_mostly tracing_selftest_running;
  62
  63/*
  64 * If a tracer is running, we do not want to run SELFTEST.
  65 */
  66bool __read_mostly tracing_selftest_disabled;
  67
  68/* Pipe tracepoints to printk */
  69struct trace_iterator *tracepoint_print_iter;
  70int tracepoint_printk;
  71
  72/* For tracers that don't implement custom flags */
  73static struct tracer_opt dummy_tracer_opt[] = {
  74	{ }
  75};
  76
  77static int
  78dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
  79{
  80	return 0;
  81}
  82
  83/*
  84 * To prevent the comm cache from being overwritten when no
  85 * tracing is active, only save the comm when a trace event
  86 * occurred.
  87 */
  88static DEFINE_PER_CPU(bool, trace_cmdline_save);
  89
  90/*
  91 * Kill all tracing for good (never come back).
  92 * It is initialized to 1 but will turn to zero if the initialization
  93 * of the tracer is successful. But that is the only place that sets
  94 * this back to zero.
  95 */
  96static int tracing_disabled = 1;
  97
  98cpumask_var_t __read_mostly	tracing_buffer_mask;
  99
 100/*
 101 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 102 *
 103 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 104 * is set, then ftrace_dump is called. This will output the contents
 105 * of the ftrace buffers to the console.  This is very useful for
  106 * capturing traces that lead to crashes and outputting them to a
 107 * serial console.
 108 *
 109 * It is default off, but you can enable it with either specifying
 110 * "ftrace_dump_on_oops" in the kernel command line, or setting
 111 * /proc/sys/kernel/ftrace_dump_on_oops
 112 * Set 1 if you want to dump buffers of all CPUs
 113 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 114 */
 115
 116enum ftrace_dump_mode ftrace_dump_on_oops;
 117
 118/* When set, tracing will stop when a WARN*() is hit */
 119int __disable_trace_on_warning;
 120
 121#ifdef CONFIG_TRACE_ENUM_MAP_FILE
 122/* Map of enums to their values, for "enum_map" file */
 123struct trace_enum_map_head {
 124	struct module			*mod;
 125	unsigned long			length;
 126};
 127
 128union trace_enum_map_item;
 129
 130struct trace_enum_map_tail {
 131	/*
 132	 * "end" is first and points to NULL as it must be different
 133	 * than "mod" or "enum_string"
 134	 */
 135	union trace_enum_map_item	*next;
 136	const char			*end;	/* points to NULL */
 137};
 138
 139static DEFINE_MUTEX(trace_enum_mutex);
 140
 141/*
 142 * The trace_enum_maps are saved in an array with two extra elements,
 143 * one at the beginning, and one at the end. The beginning item contains
 144 * the count of the saved maps (head.length), and the module they
 145 * belong to if not built in (head.mod). The ending item contains a
 146 * pointer to the next array of saved enum_map items.
 147 */
 148union trace_enum_map_item {
 149	struct trace_enum_map		map;
 150	struct trace_enum_map_head	head;
 151	struct trace_enum_map_tail	tail;
 152};
 153
 154static union trace_enum_map_item *trace_enum_maps;
 155#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
 156
 157static int tracing_set_tracer(struct trace_array *tr, const char *buf);
 158
 159#define MAX_TRACER_SIZE		100
 160static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 161static char *default_bootup_tracer;
 162
 163static bool allocate_snapshot;
 164
 165static int __init set_cmdline_ftrace(char *str)
 166{
 167	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 168	default_bootup_tracer = bootup_tracer_buf;
 169	/* We are using ftrace early, expand it */
 170	ring_buffer_expanded = true;
 171	return 1;
 172}
 173__setup("ftrace=", set_cmdline_ftrace);
 174
 175static int __init set_ftrace_dump_on_oops(char *str)
 176{
 177	if (*str++ != '=' || !*str) {
 178		ftrace_dump_on_oops = DUMP_ALL;
 179		return 1;
 180	}
 181
 182	if (!strcmp("orig_cpu", str)) {
 183		ftrace_dump_on_oops = DUMP_ORIG;
  184		return 1;
  185	}
  186
  187	return 0;
 188}
 189__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 190
 191static int __init stop_trace_on_warning(char *str)
 192{
 193	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
 194		__disable_trace_on_warning = 1;
 195	return 1;
 196}
 197__setup("traceoff_on_warning", stop_trace_on_warning);
 198
 199static int __init boot_alloc_snapshot(char *str)
 200{
 201	allocate_snapshot = true;
 202	/* We also need the main ring buffer expanded */
 203	ring_buffer_expanded = true;
 204	return 1;
 205}
 206__setup("alloc_snapshot", boot_alloc_snapshot);
 207
 208
 209static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
 210
 211static int __init set_trace_boot_options(char *str)
 212{
 213	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
 214	return 0;
 215}
 216__setup("trace_options=", set_trace_boot_options);
 217
 218static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
 219static char *trace_boot_clock __initdata;
 220
 221static int __init set_trace_boot_clock(char *str)
 222{
 223	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
 224	trace_boot_clock = trace_boot_clock_buf;
 225	return 0;
 226}
 227__setup("trace_clock=", set_trace_boot_clock);
 228
 229static int __init set_tracepoint_printk(char *str)
 230{
 231	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
 232		tracepoint_printk = 1;
 233	return 1;
 234}
 235__setup("tp_printk", set_tracepoint_printk);
 236
 237unsigned long long ns2usecs(cycle_t nsec)
 238{
 239	nsec += 500;
 240	do_div(nsec, 1000);
 241	return nsec;
 242}
 243
 244/* trace_flags holds trace_options default values */
 245#define TRACE_DEFAULT_FLAGS						\
 246	(FUNCTION_DEFAULT_FLAGS |					\
 247	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
 248	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
 249	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
 250	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
 251
 252/* trace_options that are only supported by global_trace */
 253#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
 254	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
 255
 256
 257/*
 258 * The global_trace is the descriptor that holds the tracing
 259 * buffers for the live tracing. For each CPU, it contains
 260 * a link list of pages that will store trace entries. The
 261 * page descriptor of the pages in the memory is used to hold
 262 * the link list by linking the lru item in the page descriptor
 263 * to each of the pages in the buffer per CPU.
 264 *
 265 * For each active CPU there is a data field that holds the
 266 * pages for the buffer for that CPU. Each CPU has the same number
 267 * of pages allocated for its buffer.
 268 */
 269static struct trace_array global_trace = {
 270	.trace_flags = TRACE_DEFAULT_FLAGS,
 271};
 272
 273LIST_HEAD(ftrace_trace_arrays);
 274
 275int trace_array_get(struct trace_array *this_tr)
 276{
 277	struct trace_array *tr;
 278	int ret = -ENODEV;
 279
 280	mutex_lock(&trace_types_lock);
 281	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
 282		if (tr == this_tr) {
 283			tr->ref++;
 284			ret = 0;
 285			break;
 286		}
 287	}
 288	mutex_unlock(&trace_types_lock);
 289
 290	return ret;
 291}
 292
 293static void __trace_array_put(struct trace_array *this_tr)
 294{
 295	WARN_ON(!this_tr->ref);
 296	this_tr->ref--;
 297}
 298
 299void trace_array_put(struct trace_array *this_tr)
 300{
 301	mutex_lock(&trace_types_lock);
 302	__trace_array_put(this_tr);
 303	mutex_unlock(&trace_types_lock);
 304}
 305
 306int filter_check_discard(struct trace_event_file *file, void *rec,
 307			 struct ring_buffer *buffer,
 308			 struct ring_buffer_event *event)
 309{
 310	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
 311	    !filter_match_preds(file->filter, rec)) {
 312		ring_buffer_discard_commit(buffer, event);
 313		return 1;
 314	}
 315
 316	return 0;
 317}
 318EXPORT_SYMBOL_GPL(filter_check_discard);
 319
 320int call_filter_check_discard(struct trace_event_call *call, void *rec,
 321			      struct ring_buffer *buffer,
 322			      struct ring_buffer_event *event)
 323{
 324	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
 325	    !filter_match_preds(call->filter, rec)) {
 326		ring_buffer_discard_commit(buffer, event);
 327		return 1;
 328	}
 329
 330	return 0;
 331}
 332EXPORT_SYMBOL_GPL(call_filter_check_discard);
 333
 334static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
 335{
 336	u64 ts;
 337
 338	/* Early boot up does not have a buffer yet */
 339	if (!buf->buffer)
 340		return trace_clock_local();
 341
 342	ts = ring_buffer_time_stamp(buf->buffer, cpu);
 343	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
 344
 345	return ts;
 346}
 347
 348cycle_t ftrace_now(int cpu)
 349{
 350	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
 351}
 352
 353/**
 354 * tracing_is_enabled - Show if global_trace has been disabled
 355 *
 356 * Shows if the global trace has been enabled or not. It uses the
 357 * mirror flag "buffer_disabled" to be used in fast paths such as for
 358 * the irqsoff tracer. But it may be inaccurate due to races. If you
 359 * need to know the accurate state, use tracing_is_on() which is a little
 360 * slower, but accurate.
 361 */
 362int tracing_is_enabled(void)
 363{
 364	/*
 365	 * For quick access (irqsoff uses this in fast path), just
 366	 * return the mirror variable of the state of the ring buffer.
 367	 * It's a little racy, but we don't really care.
 368	 */
 369	smp_rmb();
 370	return !global_trace.buffer_disabled;
 371}
 372
 373/*
 374 * trace_buf_size is the size in bytes that is allocated
 375 * for a buffer. Note, the number of bytes is always rounded
 376 * to page size.
 377 *
 378 * This number is purposely set to a low number of 16384.
 379 * If the dump on oops happens, it will be much appreciated
 380 * to not have to wait for all that output. Anyway this can be
 381 * boot time and run time configurable.
 382 */
 383#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
 384
 385static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
 386
 387/* trace_types holds a link list of available tracers. */
 388static struct tracer		*trace_types __read_mostly;
 389
 390/*
 391 * trace_types_lock is used to protect the trace_types list.
 392 */
 393DEFINE_MUTEX(trace_types_lock);
 394
 395/*
 396 * serialize the access of the ring buffer
 397 *
 398 * ring buffer serializes readers, but it is low level protection.
 399 * The validity of the events (which returns by ring_buffer_peek() ..etc)
 400 * are not protected by ring buffer.
 401 *
  402 * The content of events may become garbage if we allow another process to
  403 * consume these events concurrently:
  404 *   A) the page of the consumed events may become a normal page
  405 *      (not reader page) in ring buffer, and this page will be rewritten
 406 *      by events producer.
 407 *   B) The page of the consumed events may become a page for splice_read,
 408 *      and this page will be returned to system.
 409 *
 410 * These primitives allow multi process access to different cpu ring buffer
 411 * concurrently.
 412 *
 413 * These primitives don't distinguish read-only and read-consume access.
  414 * Multiple read-only accesses are also serialized.
 415 */
 416
 417#ifdef CONFIG_SMP
 418static DECLARE_RWSEM(all_cpu_access_lock);
 419static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
 420
 421static inline void trace_access_lock(int cpu)
 422{
 423	if (cpu == RING_BUFFER_ALL_CPUS) {
 424		/* gain it for accessing the whole ring buffer. */
 425		down_write(&all_cpu_access_lock);
 426	} else {
 427		/* gain it for accessing a cpu ring buffer. */
 428
 429		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
 430		down_read(&all_cpu_access_lock);
 431
 432		/* Secondly block other access to this @cpu ring buffer. */
 433		mutex_lock(&per_cpu(cpu_access_lock, cpu));
 434	}
 435}
 436
 437static inline void trace_access_unlock(int cpu)
 438{
 439	if (cpu == RING_BUFFER_ALL_CPUS) {
 440		up_write(&all_cpu_access_lock);
 441	} else {
 442		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
 443		up_read(&all_cpu_access_lock);
 444	}
 445}
 446
 447static inline void trace_access_lock_init(void)
 448{
 449	int cpu;
 450
 451	for_each_possible_cpu(cpu)
 452		mutex_init(&per_cpu(cpu_access_lock, cpu));
 453}
 454
 455#else
 456
 457static DEFINE_MUTEX(access_lock);
 458
 459static inline void trace_access_lock(int cpu)
 460{
 461	(void)cpu;
 462	mutex_lock(&access_lock);
 463}
 464
 465static inline void trace_access_unlock(int cpu)
 466{
 467	(void)cpu;
 468	mutex_unlock(&access_lock);
 469}
 470
 471static inline void trace_access_lock_init(void)
 472{
 473}
 474
 475#endif
 476
 477#ifdef CONFIG_STACKTRACE
 478static void __ftrace_trace_stack(struct ring_buffer *buffer,
 479				 unsigned long flags,
 480				 int skip, int pc, struct pt_regs *regs);
 481static inline void ftrace_trace_stack(struct trace_array *tr,
 482				      struct ring_buffer *buffer,
 483				      unsigned long flags,
 484				      int skip, int pc, struct pt_regs *regs);
 485
 486#else
 487static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
 488					unsigned long flags,
 489					int skip, int pc, struct pt_regs *regs)
 490{
 491}
 492static inline void ftrace_trace_stack(struct trace_array *tr,
 493				      struct ring_buffer *buffer,
 494				      unsigned long flags,
 495				      int skip, int pc, struct pt_regs *regs)
 496{
 497}
 498
 499#endif
 500
 501static void tracer_tracing_on(struct trace_array *tr)
 502{
 503	if (tr->trace_buffer.buffer)
 504		ring_buffer_record_on(tr->trace_buffer.buffer);
 505	/*
 506	 * This flag is looked at when buffers haven't been allocated
 507	 * yet, or by some tracers (like irqsoff), that just want to
 508	 * know if the ring buffer has been disabled, but it can handle
 509	 * races of where it gets disabled but we still do a record.
 510	 * As the check is in the fast path of the tracers, it is more
 511	 * important to be fast than accurate.
 512	 */
 513	tr->buffer_disabled = 0;
 514	/* Make the flag seen by readers */
 515	smp_wmb();
 516}
 517
 518/**
 519 * tracing_on - enable tracing buffers
 520 *
 521 * This function enables tracing buffers that may have been
 522 * disabled with tracing_off.
 523 */
 524void tracing_on(void)
 525{
 526	tracer_tracing_on(&global_trace);
 527}
 528EXPORT_SYMBOL_GPL(tracing_on);
 529
 530/**
 531 * __trace_puts - write a constant string into the trace buffer.
 532 * @ip:	   The address of the caller
 533 * @str:   The constant string to write
 534 * @size:  The size of the string.
 535 */
 536int __trace_puts(unsigned long ip, const char *str, int size)
 537{
 538	struct ring_buffer_event *event;
 539	struct ring_buffer *buffer;
 540	struct print_entry *entry;
 541	unsigned long irq_flags;
 542	int alloc;
 543	int pc;
 544
 545	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
 546		return 0;
 547
 548	pc = preempt_count();
 549
 550	if (unlikely(tracing_selftest_running || tracing_disabled))
 551		return 0;
 552
 553	alloc = sizeof(*entry) + size + 2; /* possible \n added */
 554
 555	local_save_flags(irq_flags);
 556	buffer = global_trace.trace_buffer.buffer;
  557	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
 558					  irq_flags, pc);
 559	if (!event)
 560		return 0;
 561
 562	entry = ring_buffer_event_data(event);
 563	entry->ip = ip;
 564
 565	memcpy(&entry->buf, str, size);
 566
 567	/* Add a newline if necessary */
 568	if (entry->buf[size - 1] != '\n') {
 569		entry->buf[size] = '\n';
 570		entry->buf[size + 1] = '\0';
 571	} else
 572		entry->buf[size] = '\0';
 573
 574	__buffer_unlock_commit(buffer, event);
 575	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 576
 577	return size;
 578}
 579EXPORT_SYMBOL_GPL(__trace_puts);
 580
 581/**
 582 * __trace_bputs - write the pointer to a constant string into trace buffer
 583 * @ip:	   The address of the caller
 584 * @str:   The constant string to write to the buffer to
 585 */
 586int __trace_bputs(unsigned long ip, const char *str)
 587{
 588	struct ring_buffer_event *event;
 589	struct ring_buffer *buffer;
 590	struct bputs_entry *entry;
 591	unsigned long irq_flags;
 592	int size = sizeof(struct bputs_entry);
 593	int pc;
 594
 595	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
 596		return 0;
 597
 598	pc = preempt_count();
 599
 600	if (unlikely(tracing_selftest_running || tracing_disabled))
 601		return 0;
 602
 603	local_save_flags(irq_flags);
 604	buffer = global_trace.trace_buffer.buffer;
 605	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
 606					  irq_flags, pc);
 607	if (!event)
 608		return 0;
 609
 610	entry = ring_buffer_event_data(event);
 611	entry->ip			= ip;
 612	entry->str			= str;
 613
 614	__buffer_unlock_commit(buffer, event);
 615	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
 616
 617	return 1;
 618}
 619EXPORT_SYMBOL_GPL(__trace_bputs);
 620
 621#ifdef CONFIG_TRACER_SNAPSHOT
 622/**
  623 * tracing_snapshot - take a snapshot of the current buffer.
 624 *
 625 * This causes a swap between the snapshot buffer and the current live
 626 * tracing buffer. You can use this to take snapshots of the live
 627 * trace when some condition is triggered, but continue to trace.
 628 *
 629 * Note, make sure to allocate the snapshot with either
 630 * a tracing_snapshot_alloc(), or by doing it manually
 631 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 632 *
 633 * If the snapshot buffer is not allocated, it will stop tracing.
 634 * Basically making a permanent snapshot.
 635 */
 636void tracing_snapshot(void)
 637{
 638	struct trace_array *tr = &global_trace;
 639	struct tracer *tracer = tr->current_trace;
 640	unsigned long flags;
 641
 642	if (in_nmi()) {
 643		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
 644		internal_trace_puts("*** snapshot is being ignored        ***\n");
 645		return;
 646	}
 647
 648	if (!tr->allocated_snapshot) {
 649		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
 650		internal_trace_puts("*** stopping trace here!   ***\n");
 651		tracing_off();
 652		return;
 653	}
 654
 655	/* Note, snapshot can not be used when the tracer uses it */
 656	if (tracer->use_max_tr) {
 657		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
 658		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
 659		return;
 660	}
 661
 662	local_irq_save(flags);
 663	update_max_tr(tr, current, smp_processor_id());
 664	local_irq_restore(flags);
 665}
 666EXPORT_SYMBOL_GPL(tracing_snapshot);
 667
 668static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
 669					struct trace_buffer *size_buf, int cpu_id);
 670static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
 671
 672static int alloc_snapshot(struct trace_array *tr)
 673{
 674	int ret;
 675
 676	if (!tr->allocated_snapshot) {
 677
 678		/* allocate spare buffer */
 679		ret = resize_buffer_duplicate_size(&tr->max_buffer,
 680				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
 681		if (ret < 0)
 682			return ret;
 683
 684		tr->allocated_snapshot = true;
 685	}
 686
 687	return 0;
 688}
 689
 690static void free_snapshot(struct trace_array *tr)
 691{
 692	/*
  693	 * We don't free the ring buffer; instead, we resize it because
  694	 * the max_tr ring buffer has some state (e.g. ring->clock) and
  695	 * we want to preserve it.
 696	 */
 697	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
 698	set_buffer_entries(&tr->max_buffer, 1);
 699	tracing_reset_online_cpus(&tr->max_buffer);
 700	tr->allocated_snapshot = false;
 701}
 702
 703/**
 704 * tracing_alloc_snapshot - allocate snapshot buffer.
 705 *
 706 * This only allocates the snapshot buffer if it isn't already
 707 * allocated - it doesn't also take a snapshot.
 708 *
 709 * This is meant to be used in cases where the snapshot buffer needs
 710 * to be set up for events that can't sleep but need to be able to
 711 * trigger a snapshot.
 712 */
 713int tracing_alloc_snapshot(void)
 714{
 715	struct trace_array *tr = &global_trace;
 716	int ret;
 717
 718	ret = alloc_snapshot(tr);
 719	WARN_ON(ret < 0);
 720
 721	return ret;
 722}
 723EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 724
 725/**
  726 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
  727 *
  728 * This is similar to tracing_snapshot(), but it will allocate the
 729 * snapshot buffer if it isn't already allocated. Use this only
 730 * where it is safe to sleep, as the allocation may sleep.
 731 *
 732 * This causes a swap between the snapshot buffer and the current live
 733 * tracing buffer. You can use this to take snapshots of the live
 734 * trace when some condition is triggered, but continue to trace.
 735 */
 736void tracing_snapshot_alloc(void)
 737{
 738	int ret;
 739
 740	ret = tracing_alloc_snapshot();
 741	if (ret < 0)
 742		return;
 743
 744	tracing_snapshot();
 745}
 746EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 747#else
 748void tracing_snapshot(void)
 749{
 750	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
 751}
 752EXPORT_SYMBOL_GPL(tracing_snapshot);
 753int tracing_alloc_snapshot(void)
 754{
 755	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
 756	return -ENODEV;
 757}
 758EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
 759void tracing_snapshot_alloc(void)
 760{
 761	/* Give warning */
 762	tracing_snapshot();
 763}
 764EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 765#endif /* CONFIG_TRACER_SNAPSHOT */
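/*
 * Illustrative pairing of the two snapshot helpers above (editor's
 * sketch, not part of the kernel source; "my_probe_init" and
 * "my_probe_hit" are hypothetical names): allocate the spare buffer
 * once from a context that may sleep, then snapshot from a fast path:
 *
 *	static int __init my_probe_init(void)
 *	{
 *		return tracing_alloc_snapshot();
 *	}
 *
 *	static void my_probe_hit(void)
 *	{
 *		tracing_snapshot();	(safe in atomic context, not NMI)
 *	}
 */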
 766
 767static void tracer_tracing_off(struct trace_array *tr)
 768{
 769	if (tr->trace_buffer.buffer)
 770		ring_buffer_record_off(tr->trace_buffer.buffer);
 771	/*
 772	 * This flag is looked at when buffers haven't been allocated
 773	 * yet, or by some tracers (like irqsoff), that just want to
 774	 * know if the ring buffer has been disabled, but it can handle
 775	 * races of where it gets disabled but we still do a record.
 776	 * As the check is in the fast path of the tracers, it is more
 777	 * important to be fast than accurate.
 778	 */
 779	tr->buffer_disabled = 1;
 780	/* Make the flag seen by readers */
 781	smp_wmb();
 782}
 783
 784/**
 785 * tracing_off - turn off tracing buffers
 786 *
 787 * This function stops the tracing buffers from recording data.
 788 * It does not disable any overhead the tracers themselves may
 789 * be causing. This function simply causes all recording to
 790 * the ring buffers to fail.
 791 */
 792void tracing_off(void)
 793{
 794	tracer_tracing_off(&global_trace);
 795}
 796EXPORT_SYMBOL_GPL(tracing_off);
 797
 798void disable_trace_on_warning(void)
 799{
 800	if (__disable_trace_on_warning)
 801		tracing_off();
 802}
 803
 804/**
 805 * tracer_tracing_is_on - show real state of ring buffer enabled
 806 * @tr : the trace array to know if ring buffer is enabled
 807 *
 808 * Shows real state of the ring buffer if it is enabled or not.
 809 */
 810static int tracer_tracing_is_on(struct trace_array *tr)
 811{
 812	if (tr->trace_buffer.buffer)
 813		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
 814	return !tr->buffer_disabled;
 815}
 816
 817/**
 818 * tracing_is_on - show state of ring buffers enabled
 819 */
 820int tracing_is_on(void)
 821{
 822	return tracer_tracing_is_on(&global_trace);
 823}
 824EXPORT_SYMBOL_GPL(tracing_is_on);
 825
 826static int __init set_buf_size(char *str)
 827{
 828	unsigned long buf_size;
 829
 830	if (!str)
 831		return 0;
 832	buf_size = memparse(str, &str);
 833	/* nr_entries can not be zero */
 834	if (buf_size == 0)
 835		return 0;
 836	trace_buf_size = buf_size;
 837	return 1;
 838}
 839__setup("trace_buf_size=", set_buf_size);
 840
 841static int __init set_tracing_thresh(char *str)
 842{
 843	unsigned long threshold;
 844	int ret;
 845
 846	if (!str)
 847		return 0;
 848	ret = kstrtoul(str, 0, &threshold);
 849	if (ret < 0)
 850		return 0;
 851	tracing_thresh = threshold * 1000;
 852	return 1;
 853}
 854__setup("tracing_thresh=", set_tracing_thresh);
 855
 856unsigned long nsecs_to_usecs(unsigned long nsecs)
 857{
 858	return nsecs / 1000;
 859}
 860
 861/*
 862 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 863 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 864 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 865 * of strings in the order that the enums were defined.
 866 */
 867#undef C
 868#define C(a, b) b
 869
  870/* These must match the bit positions in trace_iterator_flags */
 871static const char *trace_options[] = {
 872	TRACE_FLAGS
 873	NULL
 874};
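/*
 * Editor's sketch of the C() expansion above (illustrative only; the
 * real TRACE_FLAGS list lives in trace.h). Given a definition along
 * the lines of:
 *
 *	#define TRACE_FLAGS			\
 *		C(PRINT_PARENT, "print-parent"),\
 *		C(SYM_OFFSET, "sym-offset"),
 *
 * "#define C(a, b) b" makes the array above expand to:
 *
 *	static const char *trace_options[] = {
 *		"print-parent",
 *		"sym-offset",
 *		NULL
 *	};
 *
 * and trace.h can reuse the very same list with a different C() body
 * to build the matching enum of bit positions.
 */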
 875
 876static struct {
 877	u64 (*func)(void);
 878	const char *name;
 879	int in_ns;		/* is this clock in nanoseconds? */
 880} trace_clocks[] = {
 881	{ trace_clock_local,		"local",	1 },
 882	{ trace_clock_global,		"global",	1 },
 883	{ trace_clock_counter,		"counter",	0 },
 884	{ trace_clock_jiffies,		"uptime",	0 },
 885	{ trace_clock,			"perf",		1 },
 886	{ ktime_get_mono_fast_ns,	"mono",		1 },
 887	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
 888	ARCH_TRACE_CLOCKS
 889};
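/*
 * Illustrative usage (editor's sketch, not part of the kernel source):
 * the "name" column above is what the trace_clock file accepts, e.g.
 * to time-stamp the ring buffer with the fast monotonic clock
 * (ktime_get_mono_fast_ns):
 *
 *	# echo mono > /sys/kernel/debug/tracing/trace_clock
 */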
 890
 891/*
 892 * trace_parser_get_init - gets the buffer for trace parser
 893 */
 894int trace_parser_get_init(struct trace_parser *parser, int size)
 895{
 896	memset(parser, 0, sizeof(*parser));
 897
 898	parser->buffer = kmalloc(size, GFP_KERNEL);
 899	if (!parser->buffer)
 900		return 1;
 901
 902	parser->size = size;
 903	return 0;
 904}
 905
 906/*
 907 * trace_parser_put - frees the buffer for trace parser
 908 */
 909void trace_parser_put(struct trace_parser *parser)
 910{
 911	kfree(parser->buffer);
 912}
 913
 914/*
 915 * trace_get_user - reads the user input string separated by  space
 916 * (matched by isspace(ch))
 917 *
 918 * For each string found the 'struct trace_parser' is updated,
 919 * and the function returns.
 920 *
 921 * Returns number of bytes read.
 922 *
 923 * See kernel/trace/trace.h for 'struct trace_parser' details.
 924 */
 925int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 926	size_t cnt, loff_t *ppos)
 927{
 928	char ch;
 929	size_t read = 0;
 930	ssize_t ret;
 931
 932	if (!*ppos)
 933		trace_parser_clear(parser);
 934
 935	ret = get_user(ch, ubuf++);
 936	if (ret)
 937		goto out;
 938
 939	read++;
 940	cnt--;
 941
 942	/*
 943	 * The parser is not finished with the last write,
 944	 * continue reading the user input without skipping spaces.
 945	 */
 946	if (!parser->cont) {
 947		/* skip white space */
 948		while (cnt && isspace(ch)) {
 949			ret = get_user(ch, ubuf++);
 950			if (ret)
 951				goto out;
 952			read++;
 953			cnt--;
 954		}
 955
 956		/* only spaces were written */
 957		if (isspace(ch)) {
 958			*ppos += read;
 959			ret = read;
 960			goto out;
 961		}
 962
 963		parser->idx = 0;
 964	}
 965
 966	/* read the non-space input */
 967	while (cnt && !isspace(ch)) {
 968		if (parser->idx < parser->size - 1)
 969			parser->buffer[parser->idx++] = ch;
 970		else {
 971			ret = -EINVAL;
 972			goto out;
 973		}
 974		ret = get_user(ch, ubuf++);
 975		if (ret)
 976			goto out;
 977		read++;
 978		cnt--;
 979	}
 980
 981	/* We either got finished input or we have to wait for another call. */
 982	if (isspace(ch)) {
 983		parser->buffer[parser->idx] = 0;
 984		parser->cont = false;
 985	} else if (parser->idx < parser->size - 1) {
 986		parser->cont = true;
 987		parser->buffer[parser->idx++] = ch;
 988	} else {
 989		ret = -EINVAL;
 990		goto out;
 991	}
 992
 993	*ppos += read;
 994	ret = read;
 995
 996out:
 997	return ret;
 998}
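/*
 * Illustrative behaviour (editor's sketch, not part of the kernel
 * source): trace_get_user() hands back one whitespace-separated token
 * per call. If userspace writes "foo bar\n" to a file using it, the
 * first call fills parser->buffer with "foo" and leaves parser->cont
 * false; the next call, with *ppos advanced past the space, yields
 * "bar".
 */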
 999
1000/* TODO add a seq_buf_to_buffer() */
1001static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1002{
1003	int len;
1004
1005	if (trace_seq_used(s) <= s->seq.readpos)
1006		return -EBUSY;
1007
1008	len = trace_seq_used(s) - s->seq.readpos;
1009	if (cnt > len)
1010		cnt = len;
1011	memcpy(buf, s->buffer + s->seq.readpos, cnt);
1012
1013	s->seq.readpos += cnt;
1014	return cnt;
1015}
1016
1017unsigned long __read_mostly	tracing_thresh;
1018
1019#ifdef CONFIG_TRACER_MAX_TRACE
1020/*
1021 * Copy the new maximum trace into the separate maximum-trace
1022 * structure. (this way the maximum trace is permanently saved,
1023 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1024 */
1025static void
1026__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1027{
1028	struct trace_buffer *trace_buf = &tr->trace_buffer;
1029	struct trace_buffer *max_buf = &tr->max_buffer;
1030	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1031	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1032
1033	max_buf->cpu = cpu;
1034	max_buf->time_start = data->preempt_timestamp;
1035
1036	max_data->saved_latency = tr->max_latency;
1037	max_data->critical_start = data->critical_start;
1038	max_data->critical_end = data->critical_end;
1039
1040	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1041	max_data->pid = tsk->pid;
1042	/*
1043	 * If tsk == current, then use current_uid(), as that does not use
1044	 * RCU. The irq tracer can be called out of RCU scope.
1045	 */
1046	if (tsk == current)
1047		max_data->uid = current_uid();
1048	else
1049		max_data->uid = task_uid(tsk);
1050
1051	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1052	max_data->policy = tsk->policy;
1053	max_data->rt_priority = tsk->rt_priority;
1054
1055	/* record this tasks comm */
1056	tracing_record_cmdline(tsk);
1057}
1058
1059/**
1060 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1061 * @tr: tracer
1062 * @tsk: the task with the latency
1063 * @cpu: The cpu that initiated the trace.
1064 *
1065 * Flip the buffers between the @tr and the max_tr and record information
1066 * about which task was the cause of this latency.
1067 */
1068void
1069update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1070{
1071	struct ring_buffer *buf;
1072
1073	if (tr->stop_count)
1074		return;
1075
1076	WARN_ON_ONCE(!irqs_disabled());
1077
1078	if (!tr->allocated_snapshot) {
1079		/* Only the nop tracer should hit this when disabling */
1080		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1081		return;
1082	}
1083
1084	arch_spin_lock(&tr->max_lock);
1085
1086	buf = tr->trace_buffer.buffer;
1087	tr->trace_buffer.buffer = tr->max_buffer.buffer;
1088	tr->max_buffer.buffer = buf;
1089
1090	__update_max_tr(tr, tsk, cpu);
1091	arch_spin_unlock(&tr->max_lock);
1092}
1093
1094/**
1095 * update_max_tr_single - only copy one trace over, and reset the rest
 1096 * @tr: tracer
 1097 * @tsk: task with the latency
 1098 * @cpu: the cpu of the buffer to copy.
1099 *
1100 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1101 */
1102void
1103update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1104{
1105	int ret;
1106
1107	if (tr->stop_count)
1108		return;
1109
1110	WARN_ON_ONCE(!irqs_disabled());
1111	if (!tr->allocated_snapshot) {
1112		/* Only the nop tracer should hit this when disabling */
1113		WARN_ON_ONCE(tr->current_trace != &nop_trace);
1114		return;
1115	}
1116
1117	arch_spin_lock(&tr->max_lock);
1118
1119	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1120
1121	if (ret == -EBUSY) {
1122		/*
1123		 * We failed to swap the buffer due to a commit taking
1124		 * place on this CPU. We fail to record, but we reset
1125		 * the max trace buffer (no one writes directly to it)
1126		 * and flag that it failed.
1127		 */
1128		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1129			"Failed to swap buffers due to commit in progress\n");
1130	}
1131
1132	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1133
1134	__update_max_tr(tr, tsk, cpu);
1135	arch_spin_unlock(&tr->max_lock);
1136}
1137#endif /* CONFIG_TRACER_MAX_TRACE */
1138
1139static int wait_on_pipe(struct trace_iterator *iter, bool full)
1140{
1141	/* Iterators are static, they should be filled or empty */
1142	if (trace_buffer_iter(iter, iter->cpu_file))
1143		return 0;
1144
1145	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1146				full);
1147}
1148
1149#ifdef CONFIG_FTRACE_STARTUP_TEST
1150static int run_tracer_selftest(struct tracer *type)
1151{
1152	struct trace_array *tr = &global_trace;
1153	struct tracer *saved_tracer = tr->current_trace;
1154	int ret;
1155
1156	if (!type->selftest || tracing_selftest_disabled)
1157		return 0;
1158
1159	/*
1160	 * Run a selftest on this tracer.
1161	 * Here we reset the trace buffer, and set the current
1162	 * tracer to be this tracer. The tracer can then run some
1163	 * internal tracing to verify that everything is in order.
1164	 * If we fail, we do not register this tracer.
1165	 */
1166	tracing_reset_online_cpus(&tr->trace_buffer);
1167
1168	tr->current_trace = type;
1169
1170#ifdef CONFIG_TRACER_MAX_TRACE
1171	if (type->use_max_tr) {
1172		/* If we expanded the buffers, make sure the max is expanded too */
1173		if (ring_buffer_expanded)
1174			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1175					   RING_BUFFER_ALL_CPUS);
1176		tr->allocated_snapshot = true;
1177	}
1178#endif
1179
1180	/* the test is responsible for initializing and enabling */
1181	pr_info("Testing tracer %s: ", type->name);
1182	ret = type->selftest(type, tr);
1183	/* the test is responsible for resetting too */
1184	tr->current_trace = saved_tracer;
1185	if (ret) {
1186		printk(KERN_CONT "FAILED!\n");
1187		/* Add the warning after printing 'FAILED' */
1188		WARN_ON(1);
1189		return -1;
1190	}
1191	/* Only reset on passing, to avoid touching corrupted buffers */
1192	tracing_reset_online_cpus(&tr->trace_buffer);
1193
1194#ifdef CONFIG_TRACER_MAX_TRACE
1195	if (type->use_max_tr) {
1196		tr->allocated_snapshot = false;
1197
1198		/* Shrink the max buffer again */
1199		if (ring_buffer_expanded)
1200			ring_buffer_resize(tr->max_buffer.buffer, 1,
1201					   RING_BUFFER_ALL_CPUS);
1202	}
1203#endif
1204
1205	printk(KERN_CONT "PASSED\n");
1206	return 0;
1207}
1208#else
1209static inline int run_tracer_selftest(struct tracer *type)
1210{
1211	return 0;
1212}
1213#endif /* CONFIG_FTRACE_STARTUP_TEST */
1214
1215static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1216
1217static void __init apply_trace_boot_options(void);
1218
1219/**
1220 * register_tracer - register a tracer with the ftrace system.
1221 * @type - the plugin for the tracer
1222 *
1223 * Register a new plugin tracer.
1224 */
1225int __init register_tracer(struct tracer *type)
1226{
1227	struct tracer *t;
1228	int ret = 0;
1229
1230	if (!type->name) {
1231		pr_info("Tracer must have a name\n");
1232		return -1;
1233	}
1234
1235	if (strlen(type->name) >= MAX_TRACER_SIZE) {
1236		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1237		return -1;
1238	}
1239
1240	mutex_lock(&trace_types_lock);
1241
1242	tracing_selftest_running = true;
1243
1244	for (t = trace_types; t; t = t->next) {
1245		if (strcmp(type->name, t->name) == 0) {
1246			/* already found */
1247			pr_info("Tracer %s already registered\n",
1248				type->name);
1249			ret = -1;
1250			goto out;
1251		}
1252	}
1253
1254	if (!type->set_flag)
1255		type->set_flag = &dummy_set_flag;
1256	if (!type->flags) {
1257		/* allocate a dummy tracer_flags */
1258		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1259		if (!type->flags) {
1260			ret = -ENOMEM;
1261			goto out;
1262		}
1263		type->flags->val = 0;
1264		type->flags->opts = dummy_tracer_opt;
1265	} else if (!type->flags->opts)
1266		type->flags->opts = dummy_tracer_opt;
1268
1269	/* store the tracer for __set_tracer_option */
1270	type->flags->trace = type;
1271
1272	ret = run_tracer_selftest(type);
1273	if (ret < 0)
1274		goto out;
1275
1276	type->next = trace_types;
1277	trace_types = type;
1278	add_tracer_options(&global_trace, type);
1279
1280 out:
1281	tracing_selftest_running = false;
1282	mutex_unlock(&trace_types_lock);
1283
1284	if (ret || !default_bootup_tracer)
1285		goto out_unlock;
1286
1287	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1288		goto out_unlock;
1289
1290	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1291	/* Do we want this tracer to start on bootup? */
1292	tracing_set_tracer(&global_trace, type->name);
1293	default_bootup_tracer = NULL;
1294
1295	apply_trace_boot_options();
1296
1297	/* disable other selftests, since this will break it. */
1298	tracing_selftest_disabled = true;
1299#ifdef CONFIG_FTRACE_STARTUP_TEST
1300	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1301	       type->name);
1302#endif
1303
1304 out_unlock:
1305	return ret;
1306}
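/*
 * Illustrative sketch only (not part of this file): the minimal use of
 * register_tracer(). A tracer needs at least a name; the optional
 * ->set_flag and ->flags members are filled in with the dummy versions
 * above when left NULL. The "example" tracer below is hypothetical.
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *	};
 *
 *	static int __init example_tracer_init(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 */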
1307
1308void tracing_reset(struct trace_buffer *buf, int cpu)
1309{
1310	struct ring_buffer *buffer = buf->buffer;
1311
1312	if (!buffer)
1313		return;
1314
1315	ring_buffer_record_disable(buffer);
1316
1317	/* Make sure all commits have finished */
1318	synchronize_sched();
1319	ring_buffer_reset_cpu(buffer, cpu);
1320
1321	ring_buffer_record_enable(buffer);
1322}
1323
1324void tracing_reset_online_cpus(struct trace_buffer *buf)
1325{
1326	struct ring_buffer *buffer = buf->buffer;
1327	int cpu;
1328
1329	if (!buffer)
1330		return;
1331
1332	ring_buffer_record_disable(buffer);
1333
1334	/* Make sure all commits have finished */
1335	synchronize_sched();
1336
1337	buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1338
1339	for_each_online_cpu(cpu)
1340		ring_buffer_reset_cpu(buffer, cpu);
1341
1342	ring_buffer_record_enable(buffer);
1343}
1344
1345/* Must have trace_types_lock held */
1346void tracing_reset_all_online_cpus(void)
1347{
1348	struct trace_array *tr;
1349
1350	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1351		tracing_reset_online_cpus(&tr->trace_buffer);
1352#ifdef CONFIG_TRACER_MAX_TRACE
1353		tracing_reset_online_cpus(&tr->max_buffer);
1354#endif
1355	}
1356}
1357
1358#define SAVED_CMDLINES_DEFAULT 128
1359#define NO_CMDLINE_MAP UINT_MAX
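/*
 * The structures below implement a small two-way cache of pid <-> comm
 * mappings: map_pid_to_cmdline[] maps a pid to a slot index (or
 * NO_CMDLINE_MAP), map_cmdline_to_pid[] maps a slot back to the pid that
 * owns it, and saved_cmdlines holds cmdline_num strings of TASK_COMM_LEN
 * bytes each. Slots are handed out round-robin via cmdline_idx in
 * trace_save_cmdline() below.
 */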
1360static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1361struct saved_cmdlines_buffer {
1362	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1363	unsigned *map_cmdline_to_pid;
1364	unsigned cmdline_num;
1365	int cmdline_idx;
1366	char *saved_cmdlines;
1367};
1368static struct saved_cmdlines_buffer *savedcmd;
1369
1370/* temporarily disable recording */
1371static atomic_t trace_record_cmdline_disabled __read_mostly;
1372
1373static inline char *get_saved_cmdlines(int idx)
1374{
1375	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1376}
1377
1378static inline void set_cmdline(int idx, const char *cmdline)
1379{
1380	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1381}
1382
1383static int allocate_cmdlines_buffer(unsigned int val,
1384				    struct saved_cmdlines_buffer *s)
1385{
1386	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1387					GFP_KERNEL);
1388	if (!s->map_cmdline_to_pid)
1389		return -ENOMEM;
1390
1391	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1392	if (!s->saved_cmdlines) {
1393		kfree(s->map_cmdline_to_pid);
1394		return -ENOMEM;
1395	}
1396
1397	s->cmdline_idx = 0;
1398	s->cmdline_num = val;
1399	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1400	       sizeof(s->map_pid_to_cmdline));
1401	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1402	       val * sizeof(*s->map_cmdline_to_pid));
1403
1404	return 0;
1405}
1406
1407static int trace_create_savedcmd(void)
1408{
1409	int ret;
1410
1411	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1412	if (!savedcmd)
1413		return -ENOMEM;
1414
1415	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1416	if (ret < 0) {
1417		kfree(savedcmd);
1418		savedcmd = NULL;
1419		return -ENOMEM;
1420	}
1421
1422	return 0;
1423}
1424
1425int is_tracing_stopped(void)
1426{
1427	return global_trace.stop_count;
1428}
1429
1430/**
1431 * tracing_start - quick start of the tracer
1432 *
1433 * If tracing is enabled but was stopped by tracing_stop,
1434 * this will start the tracer back up.
1435 */
1436void tracing_start(void)
1437{
1438	struct ring_buffer *buffer;
1439	unsigned long flags;
1440
1441	if (tracing_disabled)
1442		return;
1443
1444	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1445	if (--global_trace.stop_count) {
1446		if (global_trace.stop_count < 0) {
1447			/* Someone screwed up their debugging */
1448			WARN_ON_ONCE(1);
1449			global_trace.stop_count = 0;
1450		}
1451		goto out;
1452	}
1453
1454	/* Prevent the buffers from switching */
1455	arch_spin_lock(&global_trace.max_lock);
1456
1457	buffer = global_trace.trace_buffer.buffer;
1458	if (buffer)
1459		ring_buffer_record_enable(buffer);
1460
1461#ifdef CONFIG_TRACER_MAX_TRACE
1462	buffer = global_trace.max_buffer.buffer;
1463	if (buffer)
1464		ring_buffer_record_enable(buffer);
1465#endif
1466
1467	arch_spin_unlock(&global_trace.max_lock);
1468
1469 out:
1470	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1471}
1472
1473static void tracing_start_tr(struct trace_array *tr)
1474{
1475	struct ring_buffer *buffer;
1476	unsigned long flags;
1477
1478	if (tracing_disabled)
1479		return;
1480
1481	/* If global, we need to also start the max tracer */
1482	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1483		return tracing_start();
1484
1485	raw_spin_lock_irqsave(&tr->start_lock, flags);
1486
1487	if (--tr->stop_count) {
1488		if (tr->stop_count < 0) {
1489			/* Someone screwed up their debugging */
1490			WARN_ON_ONCE(1);
1491			tr->stop_count = 0;
1492		}
1493		goto out;
1494	}
1495
1496	buffer = tr->trace_buffer.buffer;
1497	if (buffer)
1498		ring_buffer_record_enable(buffer);
1499
1500 out:
1501	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1502}
1503
1504/**
1505 * tracing_stop - quick stop of the tracer
1506 *
1507 * Light weight way to stop tracing. Use in conjunction with
1508 * tracing_start.
1509 */
1510void tracing_stop(void)
1511{
1512	struct ring_buffer *buffer;
1513	unsigned long flags;
1514
1515	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1516	if (global_trace.stop_count++)
1517		goto out;
1518
1519	/* Prevent the buffers from switching */
1520	arch_spin_lock(&global_trace.max_lock);
1521
1522	buffer = global_trace.trace_buffer.buffer;
1523	if (buffer)
1524		ring_buffer_record_disable(buffer);
1525
1526#ifdef CONFIG_TRACER_MAX_TRACE
1527	buffer = global_trace.max_buffer.buffer;
1528	if (buffer)
1529		ring_buffer_record_disable(buffer);
1530#endif
1531
1532	arch_spin_unlock(&global_trace.max_lock);
1533
1534 out:
1535	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1536}
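/*
 * Illustrative pairing of the two helpers above: tracing_stop() and
 * tracing_start() nest through stop_count, so a caller that wants a
 * quiescent buffer can bracket its work with them, e.g.:
 *
 *	tracing_stop();
 *	... read or dump the ring buffers, nothing new is recorded ...
 *	tracing_start();
 */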
1537
1538static void tracing_stop_tr(struct trace_array *tr)
1539{
1540	struct ring_buffer *buffer;
1541	unsigned long flags;
1542
1543	/* If global, we need to also stop the max tracer */
1544	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1545		return tracing_stop();
1546
1547	raw_spin_lock_irqsave(&tr->start_lock, flags);
1548	if (tr->stop_count++)
1549		goto out;
1550
1551	buffer = tr->trace_buffer.buffer;
1552	if (buffer)
1553		ring_buffer_record_disable(buffer);
1554
1555 out:
1556	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1557}
1558
1559void trace_stop_cmdline_recording(void);
1560
1561static int trace_save_cmdline(struct task_struct *tsk)
1562{
1563	unsigned pid, idx;
1564
1565	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1566		return 0;
1567
1568	/*
1569	 * It's not the end of the world if we don't get
1570	 * the lock, but we also don't want to spin
1571	 * nor do we want to disable interrupts,
1572	 * so if we miss here, then better luck next time.
1573	 */
1574	if (!arch_spin_trylock(&trace_cmdline_lock))
1575		return 0;
1576
1577	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1578	if (idx == NO_CMDLINE_MAP) {
1579		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1580
1581		/*
1582		 * Check whether the cmdline buffer at idx has a pid
1583		 * mapped. We are going to overwrite that entry so we
1584		 * need to clear the map_pid_to_cmdline. Otherwise we
1585		 * would read the new comm for the old pid.
1586		 */
1587		pid = savedcmd->map_cmdline_to_pid[idx];
1588		if (pid != NO_CMDLINE_MAP)
1589			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1590
1591		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1592		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1593
1594		savedcmd->cmdline_idx = idx;
1595	}
1596
1597	set_cmdline(idx, tsk->comm);
1598
1599	arch_spin_unlock(&trace_cmdline_lock);
1600
1601	return 1;
1602}
1603
1604static void __trace_find_cmdline(int pid, char comm[])
1605{
1606	unsigned map;
1607
1608	if (!pid) {
1609		strcpy(comm, "<idle>");
1610		return;
1611	}
1612
1613	if (WARN_ON_ONCE(pid < 0)) {
1614		strcpy(comm, "<XXX>");
1615		return;
1616	}
1617
1618	if (pid > PID_MAX_DEFAULT) {
1619		strcpy(comm, "<...>");
1620		return;
1621	}
1622
1623	map = savedcmd->map_pid_to_cmdline[pid];
1624	if (map != NO_CMDLINE_MAP)
1625		strcpy(comm, get_saved_cmdlines(map));
1626	else
1627		strcpy(comm, "<...>");
1628}
1629
1630void trace_find_cmdline(int pid, char comm[])
1631{
1632	preempt_disable();
1633	arch_spin_lock(&trace_cmdline_lock);
1634
1635	__trace_find_cmdline(pid, comm);
1636
1637	arch_spin_unlock(&trace_cmdline_lock);
1638	preempt_enable();
1639}
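/*
 * Illustrative use of the lookup above (the variable names are made up):
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(pid, comm);
 *
 * comm is filled with the saved task name, "<idle>" for pid 0, or "<...>"
 * when no mapping was recorded.
 */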
1640
1641void tracing_record_cmdline(struct task_struct *tsk)
1642{
1643	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1644		return;
1645
1646	if (!__this_cpu_read(trace_cmdline_save))
1647		return;
1648
1649	if (trace_save_cmdline(tsk))
1650		__this_cpu_write(trace_cmdline_save, false);
1651}
1652
1653void
1654tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1655			     int pc)
1656{
1657	struct task_struct *tsk = current;
1658
1659	entry->preempt_count		= pc & 0xff;
1660	entry->pid			= (tsk) ? tsk->pid : 0;
1661	entry->flags =
1662#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1663		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1664#else
1665		TRACE_FLAG_IRQS_NOSUPPORT |
1666#endif
1667		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
1668		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1669		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1670		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1671		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1672}
1673EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1674
1675struct ring_buffer_event *
1676trace_buffer_lock_reserve(struct ring_buffer *buffer,
1677			  int type,
1678			  unsigned long len,
1679			  unsigned long flags, int pc)
1680{
1681	struct ring_buffer_event *event;
1682
1683	event = ring_buffer_lock_reserve(buffer, len);
1684	if (event != NULL) {
1685		struct trace_entry *ent = ring_buffer_event_data(event);
1686
1687		tracing_generic_entry_update(ent, flags, pc);
1688		ent->type = type;
1689	}
1690
1691	return event;
1692}
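/*
 * Sketch of the reserve/commit pattern used throughout this file (see
 * trace_function() below for a real instance):
 *
 *	event = trace_buffer_lock_reserve(buffer, type, sizeof(*entry),
 *					  flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	... fill in the type specific fields of entry ...
 *	__buffer_unlock_commit(buffer, event);
 */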
1693
1694void
1695__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1696{
1697	__this_cpu_write(trace_cmdline_save, true);
1698	ring_buffer_unlock_commit(buffer, event);
1699}
1700
1701void trace_buffer_unlock_commit(struct trace_array *tr,
1702				struct ring_buffer *buffer,
1703				struct ring_buffer_event *event,
1704				unsigned long flags, int pc)
1705{
1706	__buffer_unlock_commit(buffer, event);
1707
1708	ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
1709	ftrace_trace_userstack(buffer, flags, pc);
1710}
1711EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1712
1713static struct ring_buffer *temp_buffer;
1714
1715struct ring_buffer_event *
1716trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1717			  struct trace_event_file *trace_file,
1718			  int type, unsigned long len,
1719			  unsigned long flags, int pc)
1720{
1721	struct ring_buffer_event *entry;
1722
1723	*current_rb = trace_file->tr->trace_buffer.buffer;
1724	entry = trace_buffer_lock_reserve(*current_rb,
1725					 type, len, flags, pc);
1726	/*
1727	 * If tracing is off, but we have triggers enabled,
1728	 * we still need to look at the event data. Use the temp_buffer
1729	 * to store the trace event for the trigger to use. It's recursion
1730	 * safe and will not be recorded anywhere.
1731	 */
1732	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
1733		*current_rb = temp_buffer;
1734		entry = trace_buffer_lock_reserve(*current_rb,
1735						  type, len, flags, pc);
1736	}
1737	return entry;
1738}
1739EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1740
1741struct ring_buffer_event *
1742trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1743				  int type, unsigned long len,
1744				  unsigned long flags, int pc)
1745{
1746	*current_rb = global_trace.trace_buffer.buffer;
1747	return trace_buffer_lock_reserve(*current_rb,
1748					 type, len, flags, pc);
1749}
1750EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1751
1752void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1753				     struct ring_buffer *buffer,
1754				     struct ring_buffer_event *event,
1755				     unsigned long flags, int pc,
1756				     struct pt_regs *regs)
1757{
1758	__buffer_unlock_commit(buffer, event);
1759
1760	ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
1761	ftrace_trace_userstack(buffer, flags, pc);
1762}
1763EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1764
1765void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1766					 struct ring_buffer_event *event)
1767{
1768	ring_buffer_discard_commit(buffer, event);
1769}
1770EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1771
1772void
1773trace_function(struct trace_array *tr,
1774	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
1775	       int pc)
1776{
1777	struct trace_event_call *call = &event_function;
1778	struct ring_buffer *buffer = tr->trace_buffer.buffer;
1779	struct ring_buffer_event *event;
1780	struct ftrace_entry *entry;
1781
1782	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1783					  flags, pc);
1784	if (!event)
1785		return;
1786	entry	= ring_buffer_event_data(event);
1787	entry->ip			= ip;
1788	entry->parent_ip		= parent_ip;
1789
1790	if (!call_filter_check_discard(call, entry, buffer, event))
1791		__buffer_unlock_commit(buffer, event);
1792}
1793
1794#ifdef CONFIG_STACKTRACE
1795
1796#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1797struct ftrace_stack {
1798	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
1799};
1800
1801static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1802static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1803
1804static void __ftrace_trace_stack(struct ring_buffer *buffer,
1805				 unsigned long flags,
1806				 int skip, int pc, struct pt_regs *regs)
1807{
1808	struct trace_event_call *call = &event_kernel_stack;
1809	struct ring_buffer_event *event;
1810	struct stack_entry *entry;
1811	struct stack_trace trace;
1812	int use_stack;
1813	int size = FTRACE_STACK_ENTRIES;
1814
1815	trace.nr_entries	= 0;
1816	trace.skip		= skip;
1817
1818	/*
1819	 * Since events can happen in NMIs, there's no safe way to
1820	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1821	 * or NMI comes in, it will just have to use the default
1822	 * FTRACE_STACK_SIZE.
1823	 */
1824	preempt_disable_notrace();
1825
1826	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1827	/*
1828	 * We don't need any atomic variables, just a barrier.
1829	 * If an interrupt comes in, we don't care, because it would
1830	 * have exited and put the counter back to what we want.
1831	 * We just need a barrier to keep gcc from moving things
1832	 * around.
1833	 */
1834	barrier();
1835	if (use_stack == 1) {
1836		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
1837		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;
1838
1839		if (regs)
1840			save_stack_trace_regs(regs, &trace);
1841		else
1842			save_stack_trace(&trace);
1843
1844		if (trace.nr_entries > size)
1845			size = trace.nr_entries;
1846	} else
1847		/* From now on, use_stack is a boolean */
1848		use_stack = 0;
1849
1850	size *= sizeof(unsigned long);
1851
1852	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1853					  sizeof(*entry) + size, flags, pc);
1854	if (!event)
1855		goto out;
1856	entry = ring_buffer_event_data(event);
1857
1858	memset(&entry->caller, 0, size);
1859
1860	if (use_stack)
1861		memcpy(&entry->caller, trace.entries,
1862		       trace.nr_entries * sizeof(unsigned long));
1863	else {
1864		trace.max_entries	= FTRACE_STACK_ENTRIES;
1865		trace.entries		= entry->caller;
1866		if (regs)
1867			save_stack_trace_regs(regs, &trace);
1868		else
1869			save_stack_trace(&trace);
1870	}
1871
1872	entry->size = trace.nr_entries;
1873
1874	if (!call_filter_check_discard(call, entry, buffer, event))
1875		__buffer_unlock_commit(buffer, event);
1876
1877 out:
1878	/* Again, don't let gcc optimize things here */
1879	barrier();
1880	__this_cpu_dec(ftrace_stack_reserve);
1881	preempt_enable_notrace();
1882
1883}
1884
1885static inline void ftrace_trace_stack(struct trace_array *tr,
1886				      struct ring_buffer *buffer,
1887				      unsigned long flags,
1888				      int skip, int pc, struct pt_regs *regs)
1889{
1890	if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
1891		return;
1892
1893	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
1894}
1895
1896void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1897		   int pc)
1898{
1899	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1900}
1901
1902/**
1903 * trace_dump_stack - record a stack back trace in the trace buffer
1904 * @skip: Number of functions to skip (helper handlers)
1905 */
1906void trace_dump_stack(int skip)
1907{
1908	unsigned long flags;
1909
1910	if (tracing_disabled || tracing_selftest_running)
1911		return;
1912
1913	local_save_flags(flags);
1914
1915	/*
1916	 * Skip 3 more, seems to get us at the caller of
1917	 * this function.
1918	 */
1919	skip += 3;
1920	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
1921			     flags, skip, preempt_count(), NULL);
1922}
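/*
 * Typical use (illustrative): a debugging site that wants its own call
 * chain recorded in the trace buffer simply does
 *
 *	trace_dump_stack(0);
 *
 * passing a non-zero skip count only when wrapper helpers should be left
 * out of the recorded backtrace.
 */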
1923
1924static DEFINE_PER_CPU(int, user_stack_count);
1925
1926void
1927ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1928{
1929	struct trace_event_call *call = &event_user_stack;
1930	struct ring_buffer_event *event;
1931	struct userstack_entry *entry;
1932	struct stack_trace trace;
1933
1934	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
1935		return;
1936
1937	/*
1938	 * NMIs cannot handle page faults, even with fixups.
1939	 * Saving the user stack can (and often does) fault.
1940	 */
1941	if (unlikely(in_nmi()))
1942		return;
1943
1944	/*
1945	 * prevent recursion, since the user stack tracing may
1946	 * trigger other kernel events.
1947	 */
1948	preempt_disable();
1949	if (__this_cpu_read(user_stack_count))
1950		goto out;
1951
1952	__this_cpu_inc(user_stack_count);
1953
1954	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1955					  sizeof(*entry), flags, pc);
1956	if (!event)
1957		goto out_drop_count;
1958	entry	= ring_buffer_event_data(event);
1959
1960	entry->tgid		= current->tgid;
1961	memset(&entry->caller, 0, sizeof(entry->caller));
1962
1963	trace.nr_entries	= 0;
1964	trace.max_entries	= FTRACE_STACK_ENTRIES;
1965	trace.skip		= 0;
1966	trace.entries		= entry->caller;
1967
1968	save_stack_trace_user(&trace);
1969	if (!call_filter_check_discard(call, entry, buffer, event))
1970		__buffer_unlock_commit(buffer, event);
1971
1972 out_drop_count:
1973	__this_cpu_dec(user_stack_count);
1974 out:
1975	preempt_enable();
1976}
1977
1978#ifdef UNUSED
1979static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1980{
1981	ftrace_trace_userstack(tr, flags, preempt_count());
1982}
1983#endif /* UNUSED */
1984
1985#endif /* CONFIG_STACKTRACE */
1986
1987/* created for use with alloc_percpu */
1988struct trace_buffer_struct {
1989	char buffer[TRACE_BUF_SIZE];
1990};
1991
1992static struct trace_buffer_struct *trace_percpu_buffer;
1993static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1994static struct trace_buffer_struct *trace_percpu_irq_buffer;
1995static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1996
1997/*
1998 * The buffer used is dependent on the context. There is a per cpu
1999 * buffer for normal context, softirq context, hard irq context and
2000 * for NMI context. This allows for lockless recording.
2001 *
2002 * Note, if the buffers failed to be allocated, then this returns NULL.
2003 */
2004static char *get_trace_buf(void)
2005{
2006	struct trace_buffer_struct *percpu_buffer;
2007
2008	/*
2009	 * If we have allocated per cpu buffers, then we do not
2010	 * need to do any locking.
2011	 */
2012	if (in_nmi())
2013		percpu_buffer = trace_percpu_nmi_buffer;
2014	else if (in_irq())
2015		percpu_buffer = trace_percpu_irq_buffer;
2016	else if (in_softirq())
2017		percpu_buffer = trace_percpu_sirq_buffer;
2018	else
2019		percpu_buffer = trace_percpu_buffer;
2020
2021	if (!percpu_buffer)
2022		return NULL;
2023
2024	return this_cpu_ptr(&percpu_buffer->buffer[0]);
2025}
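/*
 * Sketch of how the per-context buffer is consumed (see trace_vbprintk()
 * below for the real thing). The caller disables preemption first, since
 * the buffer is a per-cpu allocation:
 *
 *	tbuffer = get_trace_buf();
 *	if (!tbuffer)
 *		goto out;
 *	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int),
 *			  fmt, args);
 */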
2026
2027static int alloc_percpu_trace_buffer(void)
2028{
2029	struct trace_buffer_struct *buffers;
2030	struct trace_buffer_struct *sirq_buffers;
2031	struct trace_buffer_struct *irq_buffers;
2032	struct trace_buffer_struct *nmi_buffers;
2033
2034	buffers = alloc_percpu(struct trace_buffer_struct);
2035	if (!buffers)
2036		goto err_warn;
2037
2038	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2039	if (!sirq_buffers)
2040		goto err_sirq;
2041
2042	irq_buffers = alloc_percpu(struct trace_buffer_struct);
2043	if (!irq_buffers)
2044		goto err_irq;
2045
2046	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2047	if (!nmi_buffers)
2048		goto err_nmi;
2049
2050	trace_percpu_buffer = buffers;
2051	trace_percpu_sirq_buffer = sirq_buffers;
2052	trace_percpu_irq_buffer = irq_buffers;
2053	trace_percpu_nmi_buffer = nmi_buffers;
2054
2055	return 0;
2056
2057 err_nmi:
2058	free_percpu(irq_buffers);
2059 err_irq:
2060	free_percpu(sirq_buffers);
2061 err_sirq:
2062	free_percpu(buffers);
2063 err_warn:
2064	WARN(1, "Could not allocate percpu trace_printk buffer");
2065	return -ENOMEM;
2066}
2067
2068static int buffers_allocated;
2069
2070void trace_printk_init_buffers(void)
2071{
2072	if (buffers_allocated)
2073		return;
2074
2075	if (alloc_percpu_trace_buffer())
2076		return;
2077
2078	/* trace_printk() is for debug use only. Don't use it in production. */
2079
2080	pr_warn("\n");
2081	pr_warn("**********************************************************\n");
2082	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2083	pr_warn("**                                                      **\n");
2084	pr_warn("** trace_printk() being used. Allocating extra memory.  **\n");
2085	pr_warn("**                                                      **\n");
2086	pr_warn("** This means that this is a DEBUG kernel and it is     **\n");
2087	pr_warn("** unsafe for production use.                           **\n");
2088	pr_warn("**                                                      **\n");
2089	pr_warn("** If you see this message and you are not debugging    **\n");
2090	pr_warn("** the kernel, report this immediately to your vendor!  **\n");
2091	pr_warn("**                                                      **\n");
2092	pr_warn("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
2093	pr_warn("**********************************************************\n");
2094
2095	/* Expand the buffers to the set size */
2096	tracing_update_buffers();
2097
2098	buffers_allocated = 1;
2099
2100	/*
2101	 * trace_printk_init_buffers() can be called by modules.
2102	 * If that happens, then we need to start cmdline recording
2103	 * directly here. If the global_trace.buffer is already
2104	 * allocated here, then this was called by module code.
2105	 */
2106	if (global_trace.trace_buffer.buffer)
2107		tracing_start_cmdline_record();
2108}
2109
2110void trace_printk_start_comm(void)
2111{
2112	/* Start tracing comms if trace printk is set */
2113	if (!buffers_allocated)
2114		return;
2115	tracing_start_cmdline_record();
2116}
2117
2118static void trace_printk_start_stop_comm(int enabled)
2119{
2120	if (!buffers_allocated)
2121		return;
2122
2123	if (enabled)
2124		tracing_start_cmdline_record();
2125	else
2126		tracing_stop_cmdline_record();
2127}
2128
2129/**
2130 * trace_vbprintk - write binary msg to tracing buffer
2131 * @ip: The instruction pointer that is recorded with the message
 * @fmt: The format string of the message
 * @args: Arguments for @fmt
2132 */
2133int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2134{
2135	struct trace_event_call *call = &event_bprint;
2136	struct ring_buffer_event *event;
2137	struct ring_buffer *buffer;
2138	struct trace_array *tr = &global_trace;
2139	struct bprint_entry *entry;
2140	unsigned long flags;
2141	char *tbuffer;
2142	int len = 0, size, pc;
2143
2144	if (unlikely(tracing_selftest_running || tracing_disabled))
2145		return 0;
2146
2147	/* Don't pollute graph traces with trace_vprintk internals */
2148	pause_graph_tracing();
2149
2150	pc = preempt_count();
2151	preempt_disable_notrace();
2152
2153	tbuffer = get_trace_buf();
2154	if (!tbuffer) {
2155		len = 0;
2156		goto out;
2157	}
2158
2159	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2160
2161	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2162		goto out;
2163
2164	local_save_flags(flags);
2165	size = sizeof(*entry) + sizeof(u32) * len;
2166	buffer = tr->trace_buffer.buffer;
2167	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2168					  flags, pc);
2169	if (!event)
2170		goto out;
2171	entry = ring_buffer_event_data(event);
2172	entry->ip			= ip;
2173	entry->fmt			= fmt;
2174
2175	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2176	if (!call_filter_check_discard(call, entry, buffer, event)) {
2177		__buffer_unlock_commit(buffer, event);
2178		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2179	}
2180
2181out:
2182	preempt_enable_notrace();
2183	unpause_graph_tracing();
2184
2185	return len;
2186}
2187EXPORT_SYMBOL_GPL(trace_vbprintk);
2188
2189static int
2190__trace_array_vprintk(struct ring_buffer *buffer,
2191		      unsigned long ip, const char *fmt, va_list args)
2192{
2193	struct trace_event_call *call = &event_print;
2194	struct ring_buffer_event *event;
2195	int len = 0, size, pc;
2196	struct print_entry *entry;
2197	unsigned long flags;
2198	char *tbuffer;
2199
2200	if (tracing_disabled || tracing_selftest_running)
2201		return 0;
2202
2203	/* Don't pollute graph traces with trace_vprintk internals */
2204	pause_graph_tracing();
2205
2206	pc = preempt_count();
2207	preempt_disable_notrace();
2208
2209
2210	tbuffer = get_trace_buf();
2211	if (!tbuffer) {
2212		len = 0;
2213		goto out;
2214	}
2215
2216	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2217
2218	local_save_flags(flags);
2219	size = sizeof(*entry) + len + 1;
2220	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2221					  flags, pc);
2222	if (!event)
2223		goto out;
2224	entry = ring_buffer_event_data(event);
2225	entry->ip = ip;
2226
2227	memcpy(&entry->buf, tbuffer, len + 1);
2228	if (!call_filter_check_discard(call, entry, buffer, event)) {
2229		__buffer_unlock_commit(buffer, event);
2230		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2231	}
2232 out:
2233	preempt_enable_notrace();
2234	unpause_graph_tracing();
2235
2236	return len;
2237}
2238
2239int trace_array_vprintk(struct trace_array *tr,
2240			unsigned long ip, const char *fmt, va_list args)
2241{
2242	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2243}
2244
2245int trace_array_printk(struct trace_array *tr,
2246		       unsigned long ip, const char *fmt, ...)
2247{
2248	int ret;
2249	va_list ap;
2250
2251	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2252		return 0;
2253
2254	va_start(ap, fmt);
2255	ret = trace_array_vprintk(tr, ip, fmt, ap);
2256	va_end(ap);
2257	return ret;
2258}
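/*
 * Illustrative call (the format and arguments are made up): instances use
 * this the same way trace_printk() is used on the global buffer, e.g.
 *
 *	trace_array_printk(tr, _THIS_IP_, "resetting %d buffers\n", cnt);
 *
 * The write is a no-op unless the TRACE_ITER_PRINTK trace flag is set.
 */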
2259
2260int trace_array_printk_buf(struct ring_buffer *buffer,
2261			   unsigned long ip, const char *fmt, ...)
2262{
2263	int ret;
2264	va_list ap;
2265
2266	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2267		return 0;
2268
2269	va_start(ap, fmt);
2270	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2271	va_end(ap);
2272	return ret;
2273}
2274
2275int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2276{
2277	return trace_array_vprintk(&global_trace, ip, fmt, args);
2278}
2279EXPORT_SYMBOL_GPL(trace_vprintk);
2280
2281static void trace_iterator_increment(struct trace_iterator *iter)
2282{
2283	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2284
2285	iter->idx++;
2286	if (buf_iter)
2287		ring_buffer_read(buf_iter, NULL);
2288}
2289
2290static struct trace_entry *
2291peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2292		unsigned long *lost_events)
2293{
2294	struct ring_buffer_event *event;
2295	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2296
2297	if (buf_iter)
2298		event = ring_buffer_iter_peek(buf_iter, ts);
2299	else
2300		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2301					 lost_events);
2302
2303	if (event) {
2304		iter->ent_size = ring_buffer_event_length(event);
2305		return ring_buffer_event_data(event);
2306	}
2307	iter->ent_size = 0;
2308	return NULL;
2309}
2310
2311static struct trace_entry *
2312__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2313		  unsigned long *missing_events, u64 *ent_ts)
2314{
2315	struct ring_buffer *buffer = iter->trace_buffer->buffer;
2316	struct trace_entry *ent, *next = NULL;
2317	unsigned long lost_events = 0, next_lost = 0;
2318	int cpu_file = iter->cpu_file;
2319	u64 next_ts = 0, ts;
2320	int next_cpu = -1;
2321	int next_size = 0;
2322	int cpu;
2323
2324	/*
2325	 * If we are in a per_cpu trace file, don't bother iterating over
2326	 * all CPUs; just peek at that CPU directly.
2327	 */
2328	if (cpu_file > RING_BUFFER_ALL_CPUS) {
2329		if (ring_buffer_empty_cpu(buffer, cpu_file))
2330			return NULL;
2331		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2332		if (ent_cpu)
2333			*ent_cpu = cpu_file;
2334
2335		return ent;
2336	}
2337
2338	for_each_tracing_cpu(cpu) {
2339
2340		if (ring_buffer_empty_cpu(buffer, cpu))
2341			continue;
2342
2343		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2344
2345		/*
2346		 * Pick the entry with the smallest timestamp:
2347		 */
2348		if (ent && (!next || ts < next_ts)) {
2349			next = ent;
2350			next_cpu = cpu;
2351			next_ts = ts;
2352			next_lost = lost_events;
2353			next_size = iter->ent_size;
2354		}
2355	}
2356
2357	iter->ent_size = next_size;
2358
2359	if (ent_cpu)
2360		*ent_cpu = next_cpu;
2361
2362	if (ent_ts)
2363		*ent_ts = next_ts;
2364
2365	if (missing_events)
2366		*missing_events = next_lost;
2367
2368	return next;
2369}
2370
2371/* Find the next real entry, without updating the iterator itself */
2372struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2373					  int *ent_cpu, u64 *ent_ts)
2374{
2375	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2376}
2377
2378/* Find the next real entry, and increment the iterator to the next entry */
2379void *trace_find_next_entry_inc(struct trace_iterator *iter)
2380{
2381	iter->ent = __find_next_entry(iter, &iter->cpu,
2382				      &iter->lost_events, &iter->ts);
2383
2384	if (iter->ent)
2385		trace_iterator_increment(iter);
2386
2387	return iter->ent ? iter : NULL;
2388}
2389
2390static void trace_consume(struct trace_iterator *iter)
2391{
2392	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2393			    &iter->lost_events);
2394}
2395
2396static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2397{
2398	struct trace_iterator *iter = m->private;
2399	int i = (int)*pos;
2400	void *ent;
2401
2402	WARN_ON_ONCE(iter->leftover);
2403
2404	(*pos)++;
2405
2406	/* can't go backwards */
2407	if (iter->idx > i)
2408		return NULL;
2409
2410	if (iter->idx < 0)
2411		ent = trace_find_next_entry_inc(iter);
2412	else
2413		ent = iter;
2414
2415	while (ent && iter->idx < i)
2416		ent = trace_find_next_entry_inc(iter);
2417
2418	iter->pos = *pos;
2419
2420	return ent;
2421}
2422
2423void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2424{
2425	struct ring_buffer_event *event;
2426	struct ring_buffer_iter *buf_iter;
2427	unsigned long entries = 0;
2428	u64 ts;
2429
2430	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2431
2432	buf_iter = trace_buffer_iter(iter, cpu);
2433	if (!buf_iter)
2434		return;
2435
2436	ring_buffer_iter_reset(buf_iter);
2437
2438	/*
2439	 * We could have the case with the max latency tracers
2440	 * that a reset never took place on a cpu. This is evident
2441	 * by the timestamp being before the start of the buffer.
2442	 */
2443	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2444		if (ts >= iter->trace_buffer->time_start)
2445			break;
2446		entries++;
2447		ring_buffer_read(buf_iter, NULL);
2448	}
2449
2450	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2451}
2452
2453/*
2454 * The current tracer is copied to avoid using a global lock
2455 * all around.
2456 */
2457static void *s_start(struct seq_file *m, loff_t *pos)
2458{
2459	struct trace_iterator *iter = m->private;
2460	struct trace_array *tr = iter->tr;
2461	int cpu_file = iter->cpu_file;
2462	void *p = NULL;
2463	loff_t l = 0;
2464	int cpu;
2465
2466	/*
2467	 * copy the tracer to avoid using a global lock all around.
2468	 * iter->trace is a copy of current_trace, the pointer to the
2469	 * name may be used instead of a strcmp(), as iter->trace->name
2470	 * will point to the same string as current_trace->name.
2471	 */
2472	mutex_lock(&trace_types_lock);
2473	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2474		*iter->trace = *tr->current_trace;
2475	mutex_unlock(&trace_types_lock);
2476
2477#ifdef CONFIG_TRACER_MAX_TRACE
2478	if (iter->snapshot && iter->trace->use_max_tr)
2479		return ERR_PTR(-EBUSY);
2480#endif
2481
2482	if (!iter->snapshot)
2483		atomic_inc(&trace_record_cmdline_disabled);
2484
2485	if (*pos != iter->pos) {
2486		iter->ent = NULL;
2487		iter->cpu = 0;
2488		iter->idx = -1;
2489
2490		if (cpu_file == RING_BUFFER_ALL_CPUS) {
2491			for_each_tracing_cpu(cpu)
2492				tracing_iter_reset(iter, cpu);
2493		} else
2494			tracing_iter_reset(iter, cpu_file);
2495
2496		iter->leftover = 0;
2497		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2498			;
2499
2500	} else {
2501		/*
2502		 * If we overflowed the seq_file before, then we want
2503		 * to just reuse the trace_seq buffer again.
2504		 */
2505		if (iter->leftover)
2506			p = iter;
2507		else {
2508			l = *pos - 1;
2509			p = s_next(m, p, &l);
2510		}
2511	}
2512
2513	trace_event_read_lock();
2514	trace_access_lock(cpu_file);
2515	return p;
2516}
2517
2518static void s_stop(struct seq_file *m, void *p)
2519{
2520	struct trace_iterator *iter = m->private;
2521
2522#ifdef CONFIG_TRACER_MAX_TRACE
2523	if (iter->snapshot && iter->trace->use_max_tr)
2524		return;
2525#endif
2526
2527	if (!iter->snapshot)
2528		atomic_dec(&trace_record_cmdline_disabled);
2529
2530	trace_access_unlock(iter->cpu_file);
2531	trace_event_read_unlock();
2532}
2533
2534static void
2535get_total_entries(struct trace_buffer *buf,
2536		  unsigned long *total, unsigned long *entries)
2537{
2538	unsigned long count;
2539	int cpu;
2540
2541	*total = 0;
2542	*entries = 0;
2543
2544	for_each_tracing_cpu(cpu) {
2545		count = ring_buffer_entries_cpu(buf->buffer, cpu);
2546		/*
2547		 * If this buffer has skipped entries, then we hold all
2548		 * entries for the trace and we need to ignore the
2549		 * ones before the time stamp.
2550		 */
2551		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2552			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2553			/* total is the same as the entries */
2554			*total += count;
2555		} else
2556			*total += count +
2557				ring_buffer_overrun_cpu(buf->buffer, cpu);
2558		*entries += count;
2559	}
2560}
2561
2562static void print_lat_help_header(struct seq_file *m)
2563{
2564	seq_puts(m, "#                  _------=> CPU#            \n"
2565		    "#                 / _-----=> irqs-off        \n"
2566		    "#                | / _----=> need-resched    \n"
2567		    "#                || / _---=> hardirq/softirq \n"
2568		    "#                ||| / _--=> preempt-depth   \n"
2569		    "#                |||| /     delay            \n"
2570		    "#  cmd     pid   ||||| time  |   caller      \n"
2571		    "#     \\   /      |||||  \\    |   /         \n");
2572}
2573
2574static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2575{
2576	unsigned long total;
2577	unsigned long entries;
2578
2579	get_total_entries(buf, &total, &entries);
2580	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2581		   entries, total, num_online_cpus());
2582	seq_puts(m, "#\n");
2583}
2584
2585static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2586{
2587	print_event_info(buf, m);
2588	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
2589		    "#              | |       |          |         |\n");
2590}
2591
2592static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2593{
2594	print_event_info(buf, m);
2595	seq_puts(m, "#                              _-----=> irqs-off\n"
2596		    "#                             / _----=> need-resched\n"
2597		    "#                            | / _---=> hardirq/softirq\n"
2598		    "#                            || / _--=> preempt-depth\n"
2599		    "#                            ||| /     delay\n"
2600		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
2601		    "#              | |       |   ||||       |         |\n");
2602}
2603
2604void
2605print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2606{
2607	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
2608	struct trace_buffer *buf = iter->trace_buffer;
2609	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2610	struct tracer *type = iter->trace;
2611	unsigned long entries;
2612	unsigned long total;
2613	const char *name = type->name;
2616
2617	get_total_entries(buf, &total, &entries);
2618
2619	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2620		   name, UTS_RELEASE);
2621	seq_puts(m, "# -----------------------------------"
2622		 "---------------------------------\n");
2623	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2624		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2625		   nsecs_to_usecs(data->saved_latency),
2626		   entries,
2627		   total,
2628		   buf->cpu,
2629#if defined(CONFIG_PREEMPT_NONE)
2630		   "server",
2631#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2632		   "desktop",
2633#elif defined(CONFIG_PREEMPT)
2634		   "preempt",
2635#else
2636		   "unknown",
2637#endif
2638		   /* These are reserved for later use */
2639		   0, 0, 0, 0);
2640#ifdef CONFIG_SMP
2641	seq_printf(m, " #P:%d)\n", num_online_cpus());
2642#else
2643	seq_puts(m, ")\n");
2644#endif
2645	seq_puts(m, "#    -----------------\n");
2646	seq_printf(m, "#    | task: %.16s-%d "
2647		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2648		   data->comm, data->pid,
2649		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2650		   data->policy, data->rt_priority);
2651	seq_puts(m, "#    -----------------\n");
2652
2653	if (data->critical_start) {
2654		seq_puts(m, "#  => started at: ");
2655		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2656		trace_print_seq(m, &iter->seq);
2657		seq_puts(m, "\n#  => ended at:   ");
2658		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2659		trace_print_seq(m, &iter->seq);
2660		seq_puts(m, "\n#\n");
2661	}
2662
2663	seq_puts(m, "#\n");
2664}
2665
2666static void test_cpu_buff_start(struct trace_iterator *iter)
2667{
2668	struct trace_seq *s = &iter->seq;
2669	struct trace_array *tr = iter->tr;
2670
2671	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
2672		return;
2673
2674	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2675		return;
2676
2677	if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
2678		return;
2679
2680	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2681		return;
2682
2683	if (iter->started)
2684		cpumask_set_cpu(iter->cpu, iter->started);
2685
2686	/* Don't print started cpu buffer for the first entry of the trace */
2687	if (iter->idx > 1)
2688		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2689				iter->cpu);
2690}
2691
2692static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2693{
2694	struct trace_array *tr = iter->tr;
2695	struct trace_seq *s = &iter->seq;
2696	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
2697	struct trace_entry *entry;
2698	struct trace_event *event;
2699
2700	entry = iter->ent;
2701
2702	test_cpu_buff_start(iter);
2703
2704	event = ftrace_find_event(entry->type);
2705
2706	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2707		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2708			trace_print_lat_context(iter);
2709		else
2710			trace_print_context(iter);
2711	}
2712
2713	if (trace_seq_has_overflowed(s))
2714		return TRACE_TYPE_PARTIAL_LINE;
2715
2716	if (event)
2717		return event->funcs->trace(iter, sym_flags, event);
2718
2719	trace_seq_printf(s, "Unknown type %d\n", entry->type);
2720
2721	return trace_handle_return(s);
2722}
2723
2724static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2725{
2726	struct trace_array *tr = iter->tr;
2727	struct trace_seq *s = &iter->seq;
2728	struct trace_entry *entry;
2729	struct trace_event *event;
2730
2731	entry = iter->ent;
2732
2733	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
2734		trace_seq_printf(s, "%d %d %llu ",
2735				 entry->pid, iter->cpu, iter->ts);
2736
2737	if (trace_seq_has_overflowed(s))
2738		return TRACE_TYPE_PARTIAL_LINE;
2739
2740	event = ftrace_find_event(entry->type);
2741	if (event)
2742		return event->funcs->raw(iter, 0, event);
2743
2744	trace_seq_printf(s, "%d ?\n", entry->type);
2745
2746	return trace_handle_return(s);
2747}
2748
2749static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2750{
2751	struct trace_array *tr = iter->tr;
2752	struct trace_seq *s = &iter->seq;
2753	unsigned char newline = '\n';
2754	struct trace_entry *entry;
2755	struct trace_event *event;
2756
2757	entry = iter->ent;
2758
2759	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2760		SEQ_PUT_HEX_FIELD(s, entry->pid);
2761		SEQ_PUT_HEX_FIELD(s, iter->cpu);
2762		SEQ_PUT_HEX_FIELD(s, iter->ts);
2763		if (trace_seq_has_overflowed(s))
2764			return TRACE_TYPE_PARTIAL_LINE;
2765	}
2766
2767	event = ftrace_find_event(entry->type);
2768	if (event) {
2769		enum print_line_t ret = event->funcs->hex(iter, 0, event);
2770		if (ret != TRACE_TYPE_HANDLED)
2771			return ret;
2772	}
2773
2774	SEQ_PUT_FIELD(s, newline);
2775
2776	return trace_handle_return(s);
2777}
2778
2779static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2780{
2781	struct trace_array *tr = iter->tr;
2782	struct trace_seq *s = &iter->seq;
2783	struct trace_entry *entry;
2784	struct trace_event *event;
2785
2786	entry = iter->ent;
2787
2788	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2789		SEQ_PUT_FIELD(s, entry->pid);
2790		SEQ_PUT_FIELD(s, iter->cpu);
2791		SEQ_PUT_FIELD(s, iter->ts);
2792		if (trace_seq_has_overflowed(s))
2793			return TRACE_TYPE_PARTIAL_LINE;
2794	}
2795
2796	event = ftrace_find_event(entry->type);
2797	return event ? event->funcs->binary(iter, 0, event) :
2798		TRACE_TYPE_HANDLED;
2799}
2800
2801int trace_empty(struct trace_iterator *iter)
2802{
2803	struct ring_buffer_iter *buf_iter;
2804	int cpu;
2805
2806	/* If we are looking at one CPU buffer, only check that one */
2807	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2808		cpu = iter->cpu_file;
2809		buf_iter = trace_buffer_iter(iter, cpu);
2810		if (buf_iter) {
2811			if (!ring_buffer_iter_empty(buf_iter))
2812				return 0;
2813		} else {
2814			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2815				return 0;
2816		}
2817		return 1;
2818	}
2819
2820	for_each_tracing_cpu(cpu) {
2821		buf_iter = trace_buffer_iter(iter, cpu);
2822		if (buf_iter) {
2823			if (!ring_buffer_iter_empty(buf_iter))
2824				return 0;
2825		} else {
2826			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2827				return 0;
2828		}
2829	}
2830
2831	return 1;
2832}
2833
2834/*  Called with trace_event_read_lock() held. */
2835enum print_line_t print_trace_line(struct trace_iterator *iter)
2836{
2837	struct trace_array *tr = iter->tr;
2838	unsigned long trace_flags = tr->trace_flags;
2839	enum print_line_t ret;
2840
2841	if (iter->lost_events) {
2842		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2843				 iter->cpu, iter->lost_events);
2844		if (trace_seq_has_overflowed(&iter->seq))
2845			return TRACE_TYPE_PARTIAL_LINE;
2846	}
2847
2848	if (iter->trace && iter->trace->print_line) {
2849		ret = iter->trace->print_line(iter);
2850		if (ret != TRACE_TYPE_UNHANDLED)
2851			return ret;
2852	}
2853
2854	if (iter->ent->type == TRACE_BPUTS &&
2855			trace_flags & TRACE_ITER_PRINTK &&
2856			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2857		return trace_print_bputs_msg_only(iter);
2858
2859	if (iter->ent->type == TRACE_BPRINT &&
2860			trace_flags & TRACE_ITER_PRINTK &&
2861			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2862		return trace_print_bprintk_msg_only(iter);
2863
2864	if (iter->ent->type == TRACE_PRINT &&
2865			trace_flags & TRACE_ITER_PRINTK &&
2866			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2867		return trace_print_printk_msg_only(iter);
2868
2869	if (trace_flags & TRACE_ITER_BIN)
2870		return print_bin_fmt(iter);
2871
2872	if (trace_flags & TRACE_ITER_HEX)
2873		return print_hex_fmt(iter);
2874
2875	if (trace_flags & TRACE_ITER_RAW)
2876		return print_raw_fmt(iter);
2877
2878	return print_trace_fmt(iter);
2879}
2880
2881void trace_latency_header(struct seq_file *m)
2882{
2883	struct trace_iterator *iter = m->private;
2884	struct trace_array *tr = iter->tr;
2885
2886	/* print nothing if the buffers are empty */
2887	if (trace_empty(iter))
2888		return;
2889
2890	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2891		print_trace_header(m, iter);
2892
2893	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
2894		print_lat_help_header(m);
2895}
2896
2897void trace_default_header(struct seq_file *m)
2898{
2899	struct trace_iterator *iter = m->private;
2900	struct trace_array *tr = iter->tr;
2901	unsigned long trace_flags = tr->trace_flags;
2902
2903	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2904		return;
2905
2906	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2907		/* print nothing if the buffers are empty */
2908		if (trace_empty(iter))
2909			return;
2910		print_trace_header(m, iter);
2911		if (!(trace_flags & TRACE_ITER_VERBOSE))
2912			print_lat_help_header(m);
2913	} else {
2914		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2915			if (trace_flags & TRACE_ITER_IRQ_INFO)
2916				print_func_help_header_irq(iter->trace_buffer, m);
2917			else
2918				print_func_help_header(iter->trace_buffer, m);
2919		}
2920	}
2921}
2922
2923static void test_ftrace_alive(struct seq_file *m)
2924{
2925	if (!ftrace_is_dead())
2926		return;
2927	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2928		    "#          MAY BE MISSING FUNCTION EVENTS\n");
2929}
2930
2931#ifdef CONFIG_TRACER_MAX_TRACE
2932static void show_snapshot_main_help(struct seq_file *m)
2933{
2934	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2935		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2936		    "#                      Takes a snapshot of the main buffer.\n"
2937		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2938		    "#                      (Doesn't have to be '2'; works with any number that\n"
2939		    "#                       is not a '0' or '1')\n");
2940}
2941
2942static void show_snapshot_percpu_help(struct seq_file *m)
2943{
2944	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2945#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2946	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2947		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
2948#else
2949	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2950		    "#                     Must use main snapshot file to allocate.\n");
2951#endif
2952	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2953		    "#                      (Doesn't have to be '2'; works with any number that\n"
2954		    "#                       is not a '0' or '1')\n");
2955}
2956
2957static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2958{
2959	if (iter->tr->allocated_snapshot)
2960		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2961	else
2962		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2963
2964	seq_puts(m, "# Snapshot commands:\n");
2965	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2966		show_snapshot_main_help(m);
2967	else
2968		show_snapshot_percpu_help(m);
2969}
2970#else
2971/* Should never be called */
2972static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2973#endif
2974
2975static int s_show(struct seq_file *m, void *v)
2976{
2977	struct trace_iterator *iter = v;
2978	int ret;
2979
2980	if (iter->ent == NULL) {
2981		if (iter->tr) {
2982			seq_printf(m, "# tracer: %s\n", iter->trace->name);
2983			seq_puts(m, "#\n");
2984			test_ftrace_alive(m);
2985		}
2986		if (iter->snapshot && trace_empty(iter))
2987			print_snapshot_help(m, iter);
2988		else if (iter->trace && iter->trace->print_header)
2989			iter->trace->print_header(m);
2990		else
2991			trace_default_header(m);
2992
2993	} else if (iter->leftover) {
2994		/*
2995		 * If we filled the seq_file buffer earlier, we
2996		 * want to just show it now.
2997		 */
2998		ret = trace_print_seq(m, &iter->seq);
2999
3000		/* ret should this time be zero, but you never know */
3001		iter->leftover = ret;
3002
3003	} else {
3004		print_trace_line(iter);
3005		ret = trace_print_seq(m, &iter->seq);
3006		/*
3007		 * If we overflow the seq_file buffer, then it will
3008		 * ask us for this data again at start up.
3009		 * Use that instead.
3010		 *  ret is 0 if seq_file write succeeded.
3011		 *        -1 otherwise.
3012		 */
3013		iter->leftover = ret;
3014	}
3015
3016	return 0;
3017}
3018
3019/*
3020 * Should be used after trace_array_get(), trace_types_lock
3021 * ensures that i_cdev was already initialized.
3022 */
3023static inline int tracing_get_cpu(struct inode *inode)
3024{
3025	if (inode->i_cdev) /* See trace_create_cpu_file() */
3026		return (long)inode->i_cdev - 1;
3027	return RING_BUFFER_ALL_CPUS;
3028}
3029
3030static const struct seq_operations tracer_seq_ops = {
3031	.start		= s_start,
3032	.next		= s_next,
3033	.stop		= s_stop,
3034	.show		= s_show,
3035};
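/*
 * These seq_file callbacks drive reads of the files opened through
 * __tracing_open() below: s_start() takes the locks and positions the
 * iterator, s_show() prints one entry (or the headers while iter->ent is
 * NULL), s_next() advances to the next entry across the CPU buffers, and
 * s_stop() drops the locks again.
 */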
3036
3037static struct trace_iterator *
3038__tracing_open(struct inode *inode, struct file *file, bool snapshot)
3039{
3040	struct trace_array *tr = inode->i_private;
3041	struct trace_iterator *iter;
3042	int cpu;
3043
3044	if (tracing_disabled)
3045		return ERR_PTR(-ENODEV);
3046
3047	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3048	if (!iter)
3049		return ERR_PTR(-ENOMEM);
3050
3051	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3052				    GFP_KERNEL);
3053	if (!iter->buffer_iter)
3054		goto release;
3055
3056	/*
3057	 * We make a copy of the current tracer to avoid concurrent
3058	 * changes on it while we are reading.
3059	 */
3060	mutex_lock(&trace_types_lock);
3061	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3062	if (!iter->trace)
3063		goto fail;
3064
3065	*iter->trace = *tr->current_trace;
3066
3067	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3068		goto fail;
3069
3070	iter->tr = tr;
3071
3072#ifdef CONFIG_TRACER_MAX_TRACE
3073	/* Currently only the top directory has a snapshot */
3074	if (tr->current_trace->print_max || snapshot)
3075		iter->trace_buffer = &tr->max_buffer;
3076	else
3077#endif
3078		iter->trace_buffer = &tr->trace_buffer;
3079	iter->snapshot = snapshot;
3080	iter->pos = -1;
3081	iter->cpu_file = tracing_get_cpu(inode);
3082	mutex_init(&iter->mutex);
3083
3084	/* Notify the tracer early; before we stop tracing. */
3085	if (iter->trace && iter->trace->open)
3086		iter->trace->open(iter);
3087
3088	/* Annotate start of buffers if we had overruns */
3089	if (ring_buffer_overruns(iter->trace_buffer->buffer))
3090		iter->iter_flags |= TRACE_FILE_ANNOTATE;
3091
3092	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
3093	if (trace_clocks[tr->clock_id].in_ns)
3094		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3095
3096	/* stop the trace while dumping if we are not opening "snapshot" */
3097	if (!iter->snapshot)
3098		tracing_stop_tr(tr);
3099
3100	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3101		for_each_tracing_cpu(cpu) {
3102			iter->buffer_iter[cpu] =
3103				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3104		}
3105		ring_buffer_read_prepare_sync();
3106		for_each_tracing_cpu(cpu) {
3107			ring_buffer_read_start(iter->buffer_iter[cpu]);
3108			tracing_iter_reset(iter, cpu);
3109		}
3110	} else {
3111		cpu = iter->cpu_file;
3112		iter->buffer_iter[cpu] =
3113			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3114		ring_buffer_read_prepare_sync();
3115		ring_buffer_read_start(iter->buffer_iter[cpu]);
3116		tracing_iter_reset(iter, cpu);
3117	}
3118
3119	mutex_unlock(&trace_types_lock);
3120
3121	return iter;
3122
3123 fail:
3124	mutex_unlock(&trace_types_lock);
3125	kfree(iter->trace);
3126	kfree(iter->buffer_iter);
3127release:
3128	seq_release_private(inode, file);
3129	return ERR_PTR(-ENOMEM);
3130}
3131
3132int tracing_open_generic(struct inode *inode, struct file *filp)
3133{
3134	if (tracing_disabled)
3135		return -ENODEV;
3136
3137	filp->private_data = inode->i_private;
3138	return 0;
3139}
3140
3141bool tracing_is_disabled(void)
3142{
3143	return (tracing_disabled) ? true : false;
3144}
3145
3146/*
3147 * Open and update trace_array ref count.
3148 * Must have the current trace_array passed to it.
3149 */
3150static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3151{
3152	struct trace_array *tr = inode->i_private;
3153
3154	if (tracing_disabled)
3155		return -ENODEV;
3156
3157	if (trace_array_get(tr) < 0)
3158		return -ENODEV;
3159
3160	filp->private_data = inode->i_private;
3161
3162	return 0;
3163}
3164
3165static int tracing_release(struct inode *inode, struct file *file)
3166{
3167	struct trace_array *tr = inode->i_private;
3168	struct seq_file *m = file->private_data;
3169	struct trace_iterator *iter;
3170	int cpu;
3171
3172	if (!(file->f_mode & FMODE_READ)) {
3173		trace_array_put(tr);
3174		return 0;
3175	}
3176
3177	/* Writes do not use seq_file */
3178	iter = m->private;
3179	mutex_lock(&trace_types_lock);
3180
3181	for_each_tracing_cpu(cpu) {
3182		if (iter->buffer_iter[cpu])
3183			ring_buffer_read_finish(iter->buffer_iter[cpu]);
3184	}
3185
3186	if (iter->trace && iter->trace->close)
3187		iter->trace->close(iter);
3188
3189	if (!iter->snapshot)
3190		/* reenable tracing if it was previously enabled */
3191		tracing_start_tr(tr);
3192
3193	__trace_array_put(tr);
3194
3195	mutex_unlock(&trace_types_lock);
3196
3197	mutex_destroy(&iter->mutex);
3198	free_cpumask_var(iter->started);
3199	kfree(iter->trace);
3200	kfree(iter->buffer_iter);
3201	seq_release_private(inode, file);
3202
3203	return 0;
3204}
3205
3206static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3207{
3208	struct trace_array *tr = inode->i_private;
3209
3210	trace_array_put(tr);
3211	return 0;
3212}
3213
3214static int tracing_single_release_tr(struct inode *inode, struct file *file)
3215{
3216	struct trace_array *tr = inode->i_private;
3217
3218	trace_array_put(tr);
3219
3220	return single_release(inode, file);
3221}
3222
3223static int tracing_open(struct inode *inode, struct file *file)
3224{
3225	struct trace_array *tr = inode->i_private;
3226	struct trace_iterator *iter;
3227	int ret = 0;
3228
3229	if (trace_array_get(tr) < 0)
3230		return -ENODEV;
3231
3232	/* If this file was open for write, then erase contents */
3233	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3234		int cpu = tracing_get_cpu(inode);
3235
3236		if (cpu == RING_BUFFER_ALL_CPUS)
3237			tracing_reset_online_cpus(&tr->trace_buffer);
3238		else
3239			tracing_reset(&tr->trace_buffer, cpu);
3240	}
3241
3242	if (file->f_mode & FMODE_READ) {
3243		iter = __tracing_open(inode, file, false);
3244		if (IS_ERR(iter))
3245			ret = PTR_ERR(iter);
3246		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
3247			iter->iter_flags |= TRACE_FILE_LAT_FMT;
3248	}
3249
3250	if (ret < 0)
3251		trace_array_put(tr);
3252
3253	return ret;
3254}
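/*
 * Illustrative userspace sketch (not part of this file): exercising the
 * O_TRUNC branch above.  The tracefs mount point is an assumption and
 * may differ on a given system.
 */
#if 0	/* example only, not kernel code */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Opening "trace" write-only with O_TRUNC clears the buffer. */
	int fd = open("/sys/kernel/debug/tracing/trace",
		      O_WRONLY | O_TRUNC);

	if (fd < 0)
		return 1;
	close(fd);
	return 0;
}
#endif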
3255
3256/*
3257 * Some tracers are not suitable for instance buffers.
3258 * A tracer is always available for the global array (toplevel)
3259 * or if it explicitly states that it is.
3260 */
3261static bool
3262trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3263{
3264	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3265}
3266
3267/* Find the next tracer that this trace array may use */
3268static struct tracer *
3269get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3270{
3271	while (t && !trace_ok_for_array(t, tr))
3272		t = t->next;
3273
3274	return t;
3275}
3276
3277static void *
3278t_next(struct seq_file *m, void *v, loff_t *pos)
3279{
3280	struct trace_array *tr = m->private;
3281	struct tracer *t = v;
3282
3283	(*pos)++;
3284
3285	if (t)
3286		t = get_tracer_for_array(tr, t->next);
3287
3288	return t;
3289}
3290
3291static void *t_start(struct seq_file *m, loff_t *pos)
3292{
3293	struct trace_array *tr = m->private;
3294	struct tracer *t;
3295	loff_t l = 0;
3296
3297	mutex_lock(&trace_types_lock);
3298
3299	t = get_tracer_for_array(tr, trace_types);
3300	for (; t && l < *pos; t = t_next(m, t, &l))
3301		;
3302
3303	return t;
3304}
3305
3306static void t_stop(struct seq_file *m, void *p)
3307{
3308	mutex_unlock(&trace_types_lock);
3309}
3310
3311static int t_show(struct seq_file *m, void *v)
3312{
3313	struct tracer *t = v;
3314
3315	if (!t)
3316		return 0;
3317
3318	seq_puts(m, t->name);
3319	if (t->next)
3320		seq_putc(m, ' ');
3321	else
3322		seq_putc(m, '\n');
3323
3324	return 0;
3325}
3326
3327static const struct seq_operations show_traces_seq_ops = {
3328	.start		= t_start,
3329	.next		= t_next,
3330	.stop		= t_stop,
3331	.show		= t_show,
3332};
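/*
 * A simplified sketch (details elided by assumption) of the contract
 * the seq_file core follows when driving the ops above; the real loop
 * in fs/seq_file.c additionally handles buffer sizing and partial
 * reads.
 */
#if 0	/* example only */
static void seq_ops_contract_sketch(struct seq_file *m,
				    const struct seq_operations *ops)
{
	loff_t pos = 0;
	void *v;

	v = ops->start(m, &pos);	   /* t_start(): takes trace_types_lock */
	while (v) {
		if (ops->show(m, v))	   /* t_show(): emit one tracer name */
			break;
		v = ops->next(m, v, &pos); /* t_next(): advance to next tracer */
	}
	ops->stop(m, v);		   /* t_stop(): drops trace_types_lock */
}
#endif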
3333
3334static int show_traces_open(struct inode *inode, struct file *file)
3335{
3336	struct trace_array *tr = inode->i_private;
3337	struct seq_file *m;
3338	int ret;
3339
3340	if (tracing_disabled)
3341		return -ENODEV;
3342
3343	ret = seq_open(file, &show_traces_seq_ops);
3344	if (ret)
3345		return ret;
3346
3347	m = file->private_data;
3348	m->private = tr;
3349
3350	return 0;
3351}
3352
3353static ssize_t
3354tracing_write_stub(struct file *filp, const char __user *ubuf,
3355		   size_t count, loff_t *ppos)
3356{
3357	return count;
3358}
3359
3360loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3361{
3362	int ret;
3363
3364	if (file->f_mode & FMODE_READ)
3365		ret = seq_lseek(file, offset, whence);
3366	else
3367		file->f_pos = ret = 0;
3368
3369	return ret;
3370}
3371
3372static const struct file_operations tracing_fops = {
3373	.open		= tracing_open,
3374	.read		= seq_read,
3375	.write		= tracing_write_stub,
3376	.llseek		= tracing_lseek,
3377	.release	= tracing_release,
3378};
3379
3380static const struct file_operations show_traces_fops = {
3381	.open		= show_traces_open,
3382	.read		= seq_read,
3383	.release	= seq_release,
3384	.llseek		= seq_lseek,
3385};
3386
3387/*
3388 * The tracer itself will not take this lock, but still we want
3389 * to provide a consistent cpumask to user-space:
3390 */
3391static DEFINE_MUTEX(tracing_cpumask_update_lock);
3392
3393/*
3394 * Temporary storage for the character representation of the
3395 * CPU bitmask (and one more byte for the newline):
3396 */
3397static char mask_str[NR_CPUS + 1];
3398
3399static ssize_t
3400tracing_cpumask_read(struct file *filp, char __user *ubuf,
3401		     size_t count, loff_t *ppos)
3402{
3403	struct trace_array *tr = file_inode(filp)->i_private;
3404	int len;
3405
3406	mutex_lock(&tracing_cpumask_update_lock);
3407
3408	len = snprintf(mask_str, count, "%*pb\n",
3409		       cpumask_pr_args(tr->tracing_cpumask));
3410	if (len >= count) {
3411		count = -EINVAL;
3412		goto out_err;
3413	}
3414	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3415
3416out_err:
3417	mutex_unlock(&tracing_cpumask_update_lock);
3418
3419	return count;
3420}
3421
3422static ssize_t
3423tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3424		      size_t count, loff_t *ppos)
3425{
3426	struct trace_array *tr = file_inode(filp)->i_private;
3427	cpumask_var_t tracing_cpumask_new;
3428	int err, cpu;
3429
3430	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3431		return -ENOMEM;
3432
3433	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3434	if (err)
3435		goto err_unlock;
3436
3437	mutex_lock(&tracing_cpumask_update_lock);
3438
3439	local_irq_disable();
3440	arch_spin_lock(&tr->max_lock);
3441	for_each_tracing_cpu(cpu) {
3442		/*
3443		 * Increase/decrease the disabled counter if we are
3444		 * about to flip a bit in the cpumask:
3445		 */
3446		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3447				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3448			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3449			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3450		}
3451		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3452				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3453			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3454			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3455		}
3456	}
3457	arch_spin_unlock(&tr->max_lock);
3458	local_irq_enable();
3459
3460	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3461
3462	mutex_unlock(&tracing_cpumask_update_lock);
3463	free_cpumask_var(tracing_cpumask_new);
3464
3465	return count;
3466
3467err_unlock:
3468	free_cpumask_var(tracing_cpumask_new);
3469
3470	return err;
3471}
3472
3473static const struct file_operations tracing_cpumask_fops = {
3474	.open		= tracing_open_generic_tr,
3475	.read		= tracing_cpumask_read,
3476	.write		= tracing_cpumask_write,
3477	.release	= tracing_release_generic_tr,
3478	.llseek		= generic_file_llseek,
3479};
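/*
 * Illustrative userspace sketch (assumed tracefs path): restrict
 * tracing to CPUs 0 and 1 by writing a hex mask, which is parsed by
 * cpumask_parse_user() in tracing_cpumask_write() above.
 */
#if 0	/* example only, not kernel code */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/tracing_cpumask",
		      O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "3", 1);	/* 0x3 == binary 11 -> CPU0 and CPU1 */
	close(fd);
	return 0;
}
#endif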
3480
3481static int tracing_trace_options_show(struct seq_file *m, void *v)
3482{
3483	struct tracer_opt *trace_opts;
3484	struct trace_array *tr = m->private;
3485	u32 tracer_flags;
3486	int i;
3487
3488	mutex_lock(&trace_types_lock);
3489	tracer_flags = tr->current_trace->flags->val;
3490	trace_opts = tr->current_trace->flags->opts;
3491
3492	for (i = 0; trace_options[i]; i++) {
3493		if (tr->trace_flags & (1 << i))
3494			seq_printf(m, "%s\n", trace_options[i]);
3495		else
3496			seq_printf(m, "no%s\n", trace_options[i]);
3497	}
3498
3499	for (i = 0; trace_opts[i].name; i++) {
3500		if (tracer_flags & trace_opts[i].bit)
3501			seq_printf(m, "%s\n", trace_opts[i].name);
3502		else
3503			seq_printf(m, "no%s\n", trace_opts[i].name);
3504	}
3505	mutex_unlock(&trace_types_lock);
3506
3507	return 0;
3508}
3509
3510static int __set_tracer_option(struct trace_array *tr,
3511			       struct tracer_flags *tracer_flags,
3512			       struct tracer_opt *opts, int neg)
3513{
3514	struct tracer *trace = tracer_flags->trace;
3515	int ret;
3516
3517	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3518	if (ret)
3519		return ret;
3520
3521	if (neg)
3522		tracer_flags->val &= ~opts->bit;
3523	else
3524		tracer_flags->val |= opts->bit;
3525	return 0;
3526}
3527
3528/* Try to assign a tracer specific option */
3529static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3530{
3531	struct tracer *trace = tr->current_trace;
3532	struct tracer_flags *tracer_flags = trace->flags;
3533	struct tracer_opt *opts = NULL;
3534	int i;
3535
3536	for (i = 0; tracer_flags->opts[i].name; i++) {
3537		opts = &tracer_flags->opts[i];
3538
3539		if (strcmp(cmp, opts->name) == 0)
3540			return __set_tracer_option(tr, trace->flags, opts, neg);
3541	}
3542
3543	return -EINVAL;
3544}
3545
3546/* Some tracers require overwrite to stay enabled */
3547int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3548{
3549	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3550		return -1;
3551
3552	return 0;
3553}
3554
3555int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3556{
3557	/* do nothing if flag is already set */
3558	if (!!(tr->trace_flags & mask) == !!enabled)
3559		return 0;
3560
3561	/* Give the tracer a chance to approve the change */
3562	if (tr->current_trace->flag_changed)
3563		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3564			return -EINVAL;
3565
3566	if (enabled)
3567		tr->trace_flags |= mask;
3568	else
3569		tr->trace_flags &= ~mask;
3570
3571	if (mask == TRACE_ITER_RECORD_CMD)
3572		trace_event_enable_cmd_record(enabled);
3573
3574	if (mask == TRACE_ITER_OVERWRITE) {
3575		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3576#ifdef CONFIG_TRACER_MAX_TRACE
3577		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3578#endif
3579	}
3580
3581	if (mask == TRACE_ITER_PRINTK) {
3582		trace_printk_start_stop_comm(enabled);
3583		trace_printk_control(enabled);
3584	}
3585
3586	return 0;
3587}
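/*
 * The "!!" idiom in the early return above normalizes both sides to
 * 0 or 1, so any set mask bit compares equal to any non-zero 'enabled'.
 * A stand-alone sketch of the same check:
 */
#if 0	/* example only */
static int flag_already_matches(unsigned int flags, unsigned int mask,
				int enabled)
{
	/* e.g. flags = 0x04, mask = 0x04, enabled = 8 -> 1 == 1 -> true */
	return !!(flags & mask) == !!enabled;
}
#endif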
3588
3589static int trace_set_options(struct trace_array *tr, char *option)
3590{
3591	char *cmp;
3592	int neg = 0;
3593	int ret = -ENODEV;
3594	int i;
3595	size_t orig_len = strlen(option);
3596
3597	cmp = strstrip(option);
3598
3599	if (strncmp(cmp, "no", 2) == 0) {
3600		neg = 1;
3601		cmp += 2;
3602	}
3603
3604	mutex_lock(&trace_types_lock);
3605
3606	for (i = 0; trace_options[i]; i++) {
3607		if (strcmp(cmp, trace_options[i]) == 0) {
3608			ret = set_tracer_flag(tr, 1 << i, !neg);
3609			break;
3610		}
3611	}
3612
3613	/* If no option could be set, test the specific tracer options */
3614	if (!trace_options[i])
3615		ret = set_tracer_option(tr, cmp, neg);
3616
3617	mutex_unlock(&trace_types_lock);
3618
3619	/*
3620	 * If the first trailing whitespace is replaced with '\0' by strstrip,
3621	 * turn it back into a space.
3622	 */
3623	if (orig_len > strlen(option))
3624		option[strlen(option)] = ' ';
3625
3626	return ret;
3627}
3628
3629static void __init apply_trace_boot_options(void)
3630{
3631	char *buf = trace_boot_options_buf;
3632	char *option;
3633
3634	while (true) {
3635		option = strsep(&buf, ",");
3636
3637		if (!option)
3638			break;
3639
3640		if (*option)
3641			trace_set_options(&global_trace, option);
3642
3643		/* Put back the comma to allow this to be called again */
3644		if (buf)
3645			*(buf - 1) = ',';
3646	}
3647}
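/*
 * strsep() consumes the string it walks by overwriting each separator
 * with '\0', which is why the loop above restores the ',' so that
 * trace_boot_options_buf stays intact for any later re-parse.  A
 * self-contained userspace sketch of the same pattern:
 */
#if 0	/* example only, not kernel code */
#include <stdio.h>
#include <string.h>

static void parse_options(char *buf)
{
	char *option;

	while ((option = strsep(&buf, ",")) != NULL) {
		if (*option)
			printf("option: %s\n", option);
		if (buf)		/* put the ',' back */
			*(buf - 1) = ',';
	}
}
#endif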
3648
3649static ssize_t
3650tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3651			size_t cnt, loff_t *ppos)
3652{
3653	struct seq_file *m = filp->private_data;
3654	struct trace_array *tr = m->private;
3655	char buf[64];
3656	int ret;
3657
3658	if (cnt >= sizeof(buf))
3659		return -EINVAL;
3660
3661	if (copy_from_user(&buf, ubuf, cnt))
3662		return -EFAULT;
3663
3664	buf[cnt] = 0;
3665
3666	ret = trace_set_options(tr, buf);
3667	if (ret < 0)
3668		return ret;
3669
3670	*ppos += cnt;
3671
3672	return cnt;
3673}
3674
3675static int tracing_trace_options_open(struct inode *inode, struct file *file)
3676{
3677	struct trace_array *tr = inode->i_private;
3678	int ret;
3679
3680	if (tracing_disabled)
3681		return -ENODEV;
3682
3683	if (trace_array_get(tr) < 0)
3684		return -ENODEV;
3685
3686	ret = single_open(file, tracing_trace_options_show, inode->i_private);
3687	if (ret < 0)
3688		trace_array_put(tr);
3689
3690	return ret;
3691}
3692
3693static const struct file_operations tracing_iter_fops = {
3694	.open		= tracing_trace_options_open,
3695	.read		= seq_read,
3696	.llseek		= seq_lseek,
3697	.release	= tracing_single_release_tr,
3698	.write		= tracing_trace_options_write,
3699};
3700
3701static const char readme_msg[] =
3702	"tracing mini-HOWTO:\n\n"
3703	"# echo 0 > tracing_on : quick way to disable tracing\n"
3704	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3705	" Important files:\n"
3706	"  trace\t\t\t- The static contents of the buffer\n"
3707	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
3708	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3709	"  current_tracer\t- function and latency tracers\n"
3710	"  available_tracers\t- list of configured tracers for current_tracer\n"
3711	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
3712	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
3713	"  trace_clock\t\t- change the clock used to order events\n"
3714	"       local:   Per cpu clock but may not be synced across CPUs\n"
3715	"      global:   Synced across CPUs but slows tracing down.\n"
3716	"     counter:   Not a clock, but just an increment\n"
3717	"      uptime:   Jiffy counter from time of boot\n"
3718	"        perf:   Same clock that perf events use\n"
3719#ifdef CONFIG_X86_64
3720	"     x86-tsc:   TSC cycle counter\n"
3721#endif
3722	"\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
3723	"  tracing_cpumask\t- Limit which CPUs to trace\n"
3724	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3725	"\t\t\t  Remove sub-buffer with rmdir\n"
3726	"  trace_options\t\t- Set format or modify how tracing happens\n"
3727	"\t\t\t  Disable an option by prefixing 'no' to the\n"
3728	"\t\t\t  option name\n"
3729	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3730#ifdef CONFIG_DYNAMIC_FTRACE
3731	"\n  available_filter_functions - list of functions that can be filtered on\n"
3732	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
3733	"\t\t\t  functions\n"
3734	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3735	"\t     modules: Can select a group via module\n"
3736	"\t      Format: :mod:<module-name>\n"
3737	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
3738	"\t    triggers: a command to perform when function is hit\n"
3739	"\t      Format: <function>:<trigger>[:count]\n"
3740	"\t     trigger: traceon, traceoff\n"
3741	"\t\t      enable_event:<system>:<event>\n"
3742	"\t\t      disable_event:<system>:<event>\n"
3743#ifdef CONFIG_STACKTRACE
3744	"\t\t      stacktrace\n"
3745#endif
3746#ifdef CONFIG_TRACER_SNAPSHOT
3747	"\t\t      snapshot\n"
3748#endif
3749	"\t\t      dump\n"
3750	"\t\t      cpudump\n"
3751	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
3752	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
3753	"\t     The first one will disable tracing every time do_fault is hit\n"
3754	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
3755	"\t       The first time do_trap is hit and it disables tracing, the\n"
3756	"\t       counter will decrement to 2. If tracing is already disabled,\n"
3757	"\t       the counter will not decrement. It only decrements when the\n"
3758	"\t       trigger did work\n"
3759	"\t     To remove a trigger without a count:\n"
3760	"\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
3761	"\t     To remove a trigger with a count:\n"
3762	"\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3763	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
3764	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3765	"\t    modules: Can select a group via module command :mod:\n"
3766	"\t    Does not accept triggers\n"
3767#endif /* CONFIG_DYNAMIC_FTRACE */
3768#ifdef CONFIG_FUNCTION_TRACER
3769	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3770	"\t\t    (function)\n"
3771#endif
3772#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3773	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3774	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3775	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3776#endif
3777#ifdef CONFIG_TRACER_SNAPSHOT
3778	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
3779	"\t\t\t  snapshot buffer. Read the contents for more\n"
3780	"\t\t\t  information\n"
3781#endif
3782#ifdef CONFIG_STACK_TRACER
3783	"  stack_trace\t\t- Shows the max stack trace when active\n"
3784	"  stack_max_size\t- Shows current max stack size that was traced\n"
3785	"\t\t\t  Write into this file to reset the max size (trigger a\n"
3786	"\t\t\t  new trace)\n"
3787#ifdef CONFIG_DYNAMIC_FTRACE
3788	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3789	"\t\t\t  traces\n"
3790#endif
3791#endif /* CONFIG_STACK_TRACER */
3792	"  events/\t\t- Directory containing all trace event subsystems:\n"
3793	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3794	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
3795	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3796	"\t\t\t  events\n"
3797	"      filter\t\t- If set, only events passing filter are traced\n"
3798	"  events/<system>/<event>/\t- Directory containing control files for\n"
3799	"\t\t\t  <event>:\n"
3800	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3801	"      filter\t\t- If set, only events passing filter are traced\n"
3802	"      trigger\t\t- If set, a command to perform when event is hit\n"
3803	"\t    Format: <trigger>[:count][if <filter>]\n"
3804	"\t   trigger: traceon, traceoff\n"
3805	"\t            enable_event:<system>:<event>\n"
3806	"\t            disable_event:<system>:<event>\n"
3807#ifdef CONFIG_STACKTRACE
3808	"\t\t    stacktrace\n"
3809#endif
3810#ifdef CONFIG_TRACER_SNAPSHOT
3811	"\t\t    snapshot\n"
3812#endif
3813	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
3814	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
3815	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3816	"\t                  events/block/block_unplug/trigger\n"
3817	"\t   The first disables tracing every time block_unplug is hit.\n"
3818	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
3819	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
3820	"\t     is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
3821	"\t   Like function triggers, the counter is only decremented if it\n"
3822	"\t    enabled or disabled tracing.\n"
3823	"\t   To remove a trigger without a count:\n"
3824	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
3825	"\t   To remove a trigger with a count:\n"
3826	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
3827	"\t   Filters can be ignored when removing a trigger.\n"
3828;
3829
3830static ssize_t
3831tracing_readme_read(struct file *filp, char __user *ubuf,
3832		       size_t cnt, loff_t *ppos)
3833{
3834	return simple_read_from_buffer(ubuf, cnt, ppos,
3835					readme_msg, strlen(readme_msg));
3836}
3837
3838static const struct file_operations tracing_readme_fops = {
3839	.open		= tracing_open_generic,
3840	.read		= tracing_readme_read,
3841	.llseek		= generic_file_llseek,
3842};
3843
3844static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3845{
3846	unsigned int *ptr = v;
3847
3848	if (*pos || m->count)
3849		ptr++;
3850
3851	(*pos)++;
3852
3853	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3854	     ptr++) {
3855		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3856			continue;
3857
3858		return ptr;
3859	}
3860
3861	return NULL;
3862}
3863
3864static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3865{
3866	void *v;
3867	loff_t l = 0;
3868
3869	preempt_disable();
3870	arch_spin_lock(&trace_cmdline_lock);
3871
3872	v = &savedcmd->map_cmdline_to_pid[0];
3873	while (l <= *pos) {
3874		v = saved_cmdlines_next(m, v, &l);
3875		if (!v)
3876			return NULL;
3877	}
3878
3879	return v;
3880}
3881
3882static void saved_cmdlines_stop(struct seq_file *m, void *v)
3883{
3884	arch_spin_unlock(&trace_cmdline_lock);
3885	preempt_enable();
3886}
3887
3888static int saved_cmdlines_show(struct seq_file *m, void *v)
3889{
3890	char buf[TASK_COMM_LEN];
3891	unsigned int *pid = v;
3892
3893	__trace_find_cmdline(*pid, buf);
3894	seq_printf(m, "%d %s\n", *pid, buf);
3895	return 0;
3896}
3897
3898static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3899	.start		= saved_cmdlines_start,
3900	.next		= saved_cmdlines_next,
3901	.stop		= saved_cmdlines_stop,
3902	.show		= saved_cmdlines_show,
3903};
3904
3905static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3906{
3907	if (tracing_disabled)
3908		return -ENODEV;
3909
3910	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3911}
3912
3913static const struct file_operations tracing_saved_cmdlines_fops = {
3914	.open		= tracing_saved_cmdlines_open,
3915	.read		= seq_read,
3916	.llseek		= seq_lseek,
3917	.release	= seq_release,
3918};
3919
3920static ssize_t
3921tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3922				 size_t cnt, loff_t *ppos)
3923{
3924	char buf[64];
3925	int r;
3926
3927	arch_spin_lock(&trace_cmdline_lock);
3928	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3929	arch_spin_unlock(&trace_cmdline_lock);
3930
3931	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3932}
3933
3934static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3935{
3936	kfree(s->saved_cmdlines);
3937	kfree(s->map_cmdline_to_pid);
3938	kfree(s);
3939}
3940
3941static int tracing_resize_saved_cmdlines(unsigned int val)
3942{
3943	struct saved_cmdlines_buffer *s, *savedcmd_temp;
3944
3945	s = kmalloc(sizeof(*s), GFP_KERNEL);
3946	if (!s)
3947		return -ENOMEM;
3948
3949	if (allocate_cmdlines_buffer(val, s) < 0) {
3950		kfree(s);
3951		return -ENOMEM;
3952	}
3953
3954	arch_spin_lock(&trace_cmdline_lock);
3955	savedcmd_temp = savedcmd;
3956	savedcmd = s;
3957	arch_spin_unlock(&trace_cmdline_lock);
3958	free_saved_cmdlines_buffer(savedcmd_temp);
3959
3960	return 0;
3961}
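/*
 * The function above follows a common replace pattern: allocate the new
 * buffer outside the lock (allocation may sleep), swap the shared
 * pointer under the lock, and free the old buffer only after the lock
 * is dropped.  A condensed sketch of the same sequence:
 */
#if 0	/* example only, mirrors the code above */
	new_buf = kmalloc(sizeof(*new_buf), GFP_KERNEL);  /* unlocked */
	arch_spin_lock(&trace_cmdline_lock);
	old_buf = savedcmd;			/* swap under the lock */
	savedcmd = new_buf;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(old_buf);	/* free outside the lock */
#endif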
3962
3963static ssize_t
3964tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3965				  size_t cnt, loff_t *ppos)
3966{
3967	unsigned long val;
3968	int ret;
3969
3970	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3971	if (ret)
3972		return ret;
3973
3974	/* must have at least 1 entry and at most PID_MAX_DEFAULT */
3975	if (!val || val > PID_MAX_DEFAULT)
3976		return -EINVAL;
3977
3978	ret = tracing_resize_saved_cmdlines((unsigned int)val);
3979	if (ret < 0)
3980		return ret;
3981
3982	*ppos += cnt;
3983
3984	return cnt;
3985}
3986
3987static const struct file_operations tracing_saved_cmdlines_size_fops = {
3988	.open		= tracing_open_generic,
3989	.read		= tracing_saved_cmdlines_size_read,
3990	.write		= tracing_saved_cmdlines_size_write,
3991};
3992
3993#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3994static union trace_enum_map_item *
3995update_enum_map(union trace_enum_map_item *ptr)
3996{
3997	if (!ptr->map.enum_string) {
3998		if (ptr->tail.next) {
3999			ptr = ptr->tail.next;
4000			/* Set ptr to the next real item (skip head) */
4001			ptr++;
4002		} else
4003			return NULL;
4004	}
4005	return ptr;
4006}
4007
4008static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4009{
4010	union trace_enum_map_item *ptr = v;
4011
4012	/*
4013	 * Paranoid! If ptr points to end, we don't want to increment past it.
4014	 * This really should never happen.
4015	 */
4016	ptr = update_enum_map(ptr);
4017	if (WARN_ON_ONCE(!ptr))
4018		return NULL;
4019
4020	ptr++;
4021
4022	(*pos)++;
4023
4024	ptr = update_enum_map(ptr);
4025
4026	return ptr;
4027}
4028
4029static void *enum_map_start(struct seq_file *m, loff_t *pos)
4030{
4031	union trace_enum_map_item *v;
4032	loff_t l = 0;
4033
4034	mutex_lock(&trace_enum_mutex);
4035
4036	v = trace_enum_maps;
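	/* trace_enum_maps points at a head item; step past it to the first map */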
4037	if (v)
4038		v++;
4039
4040	while (v && l < *pos) {
4041		v = enum_map_next(m, v, &l);
4042	}
4043
4044	return v;
4045}
4046
4047static void enum_map_stop(struct seq_file *m, void *v)
4048{
4049	mutex_unlock(&trace_enum_mutex);
4050}
4051
4052static int enum_map_show(struct seq_file *m, void *v)
4053{
4054	union trace_enum_map_item *ptr = v;
4055
4056	seq_printf(m, "%s %ld (%s)\n",
4057		   ptr->map.enum_string, ptr->map.enum_value,
4058		   ptr->map.system);
4059
4060	return 0;
4061}
4062
4063static const struct seq_operations tracing_enum_map_seq_ops = {
4064	.start		= enum_map_start,
4065	.next		= enum_map_next,
4066	.stop		= enum_map_stop,
4067	.show		= enum_map_show,
4068};
4069
4070static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4071{
4072	if (tracing_disabled)
4073		return -ENODEV;
4074
4075	return seq_open(filp, &tracing_enum_map_seq_ops);
4076}
4077
4078static const struct file_operations tracing_enum_map_fops = {
4079	.open		= tracing_enum_map_open,
4080	.read		= seq_read,
4081	.llseek		= seq_lseek,
4082	.release	= seq_release,
4083};
4084
4085static inline union trace_enum_map_item *
4086trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4087{
4088	/* Return tail of array given the head */
4089	return ptr + ptr->head.length + 1;
4090}
4091
4092static void
4093trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4094			   int len)
4095{
4096	struct trace_enum_map **stop;
4097	struct trace_enum_map **map;
4098	union trace_enum_map_item *map_array;
4099	union trace_enum_map_item *ptr;
4100
4101	stop = start + len;
4102
4103	/*
4104	 * The trace_enum_maps list contains the maps plus a head and tail item,
4105	 * where the head holds the module and the length of the array, and the
4106	 * tail holds a pointer to the next array.
4107	 */
4108	map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4109	if (!map_array) {
4110		pr_warn("Unable to allocate trace enum mapping\n");
4111		return;
4112	}
4113
4114	mutex_lock(&trace_enum_mutex);
4115
4116	if (!trace_enum_maps)
4117		trace_enum_maps = map_array;
4118	else {
4119		ptr = trace_enum_maps;
4120		for (;;) {
4121			ptr = trace_enum_jmp_to_tail(ptr);
4122			if (!ptr->tail.next)
4123				break;
4124			ptr = ptr->tail.next;
4125
4126		}
4127		ptr->tail.next = map_array;
4128	}
4129	map_array->head.mod = mod;
4130	map_array->head.length = len;
4131	map_array++;
4132
4133	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4134		map_array->map = **map;
4135		map_array++;
4136	}
4137	memset(map_array, 0, sizeof(*map_array));
4138
4139	mutex_unlock(&trace_enum_mutex);
4140}
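/*
 * Layout of one map_array allocation built above, for len == N: one
 * head item, N map items, and one zeroed tail item:
 *
 *	[0]     head:  { .mod, .length = N }
 *	[1..N]  map:   one trace_enum_map per enum
 *	[N+1]   tail:  { .next } (zeroed here; links to the next array)
 *
 * which is why trace_enum_jmp_to_tail() returns ptr + length + 1.
 */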
4141
4142static void trace_create_enum_file(struct dentry *d_tracer)
4143{
4144	trace_create_file("enum_map", 0444, d_tracer,
4145			  NULL, &tracing_enum_map_fops);
4146}
4147
4148#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4149static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4150static inline void trace_insert_enum_map_file(struct module *mod,
4151			      struct trace_enum_map **start, int len) { }
4152#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4153
4154static void trace_insert_enum_map(struct module *mod,
4155				  struct trace_enum_map **start, int len)
4156{
4157	struct trace_enum_map **map;
4158
4159	if (len <= 0)
4160		return;
4161
4162	map = start;
4163
4164	trace_event_enum_update(map, len);
4165
4166	trace_insert_enum_map_file(mod, start, len);
4167}
4168
4169static ssize_t
4170tracing_set_trace_read(struct file *filp, char __user *ubuf,
4171		       size_t cnt, loff_t *ppos)
4172{
4173	struct trace_array *tr = filp->private_data;
4174	char buf[MAX_TRACER_SIZE+2];
4175	int r;
4176
4177	mutex_lock(&trace_types_lock);
4178	r = sprintf(buf, "%s\n", tr->current_trace->name);
4179	mutex_unlock(&trace_types_lock);
4180
4181	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4182}
4183
4184int tracer_init(struct tracer *t, struct trace_array *tr)
4185{
4186	tracing_reset_online_cpus(&tr->trace_buffer);
4187	return t->init(tr);
4188}
4189
4190static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
4191{
4192	int cpu;
4193
4194	for_each_tracing_cpu(cpu)
4195		per_cpu_ptr(buf->data, cpu)->entries = val;
4196}
4197
4198#ifdef CONFIG_TRACER_MAX_TRACE
4199/* resize @trace_buf's buffer to the size of @size_buf's entries */
4200static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4201					struct trace_buffer *size_buf, int cpu_id)
4202{
4203	int cpu, ret = 0;
4204
4205	if (cpu_id == RING_BUFFER_ALL_CPUS) {
4206		for_each_tracing_cpu(cpu) {
4207			ret = ring_buffer_resize(trace_buf->buffer,
4208				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
4209			if (ret < 0)
4210				break;
4211			per_cpu_ptr(trace_buf->data, cpu)->entries =
4212				per_cpu_ptr(size_buf->data, cpu)->entries;
4213		}
4214	} else {
4215		ret = ring_buffer_resize(trace_buf->buffer,
4216				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
4217		if (ret == 0)
4218			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4219				per_cpu_ptr(size_buf->data, cpu_id)->entries;
4220	}
4221
4222	return ret;
4223}
4224#endif /* CONFIG_TRACER_MAX_TRACE */
4225
4226static int __tracing_resize_ring_buffer(struct trace_array *tr,
4227					unsigned long size, int cpu)
4228{
4229	int ret;
4230
4231	/*
4232	 * If kernel or user changes the size of the ring buffer
4233	 * we use the size that was given, and we can forget about
4234	 * expanding it later.
4235	 */
4236	ring_buffer_expanded = true;
4237
4238	/* May be called before buffers are initialized */
4239	if (!tr->trace_buffer.buffer)
4240		return 0;
4241
4242	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
4243	if (ret < 0)
4244		return ret;
4245
4246#ifdef CONFIG_TRACER_MAX_TRACE
4247	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4248	    !tr->current_trace->use_max_tr)
4249		goto out;
4250
4251	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4252	if (ret < 0) {
4253		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4254						     &tr->trace_buffer, cpu);
4255		if (r < 0) {
4256			/*
4257			 * AARGH! We are left with a different
4258			 * sized max buffer!!!!
4259			 * The max buffer is our "snapshot" buffer.
4260			 * When a tracer needs a snapshot (one of the
4261			 * latency tracers), it swaps the max buffer
4262			 * with the saved snapshot. We succeeded in
4263			 * updating the size of the main buffer, but failed
4264			 * to update the size of the max buffer. And when we
4265			 * tried to reset the main buffer to the original size,
4266			 * we failed there too. This is very unlikely to
4267			 * happen, but if it does, warn and kill all
4268			 * tracing.
4269			 */
4270			WARN_ON(1);
4271			tracing_disabled = 1;
4272		}
4273		return ret;
4274	}
4275
4276	if (cpu == RING_BUFFER_ALL_CPUS)
4277		set_buffer_entries(&tr->max_buffer, size);
4278	else
4279		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4280
4281 out:
4282#endif /* CONFIG_TRACER_MAX_TRACE */
4283
4284	if (cpu == RING_BUFFER_ALL_CPUS)
4285		set_buffer_entries(&tr->trace_buffer, size);
4286	else
4287		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4288
4289	return ret;
4290}
4291
4292static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4293					  unsigned long size, int cpu_id)
4294{
4295	int ret = size;
4296
4297	mutex_lock(&trace_types_lock);
4298
4299	if (cpu_id != RING_BUFFER_ALL_CPUS) {
4300		/* make sure this cpu is enabled in the mask */
4301		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4302			ret = -EINVAL;
4303			goto out;
4304		}
4305	}
4306
4307	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4308	if (ret < 0)
4309		ret = -ENOMEM;
4310
4311out:
4312	mutex_unlock(&trace_types_lock);
4313
4314	return ret;
4315}
4316
4317
4318/**
4319 * tracing_update_buffers - used by tracing facility to expand ring buffers
4320 *
4321 * To save memory when tracing is never used on a system with it
4322 * configured in, the ring buffers are set to a minimum size. But once
4323 * a user starts to use the tracing facility, they need to grow
4324 * to their default size.
4325 *
4326 * This function is to be called when a tracer is about to be used.
4327 */
4328int tracing_update_buffers(void)
4329{
4330	int ret = 0;
4331
4332	mutex_lock(&trace_types_lock);
4333	if (!ring_buffer_expanded)
4334		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4335						RING_BUFFER_ALL_CPUS);
4336	mutex_unlock(&trace_types_lock);
4337
4338	return ret;
4339}
4340
4341struct trace_option_dentry;
4342
4343static void
4344create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4345
4346/*
4347 * Used to clear out the tracer before deletion of an instance.
4348 * Must have trace_types_lock held.
4349 */
4350static void tracing_set_nop(struct trace_array *tr)
4351{
4352	if (tr->current_trace == &nop_trace)
4353		return;
4354
4355	tr->current_trace->enabled--;
4356
4357	if (tr->current_trace->reset)
4358		tr->current_trace->reset(tr);
4359
4360	tr->current_trace = &nop_trace;
4361}
4362
4363static void add_tracer_options(struct trace_array *tr, struct tracer *t)
4364{
4365	/* Only enable if the directory has been created already. */
4366	if (!tr->dir)
4367		return;
4368
4369	create_trace_option_files(tr, t);
4370}
4371
4372static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4373{
4374	struct tracer *t;
4375#ifdef CONFIG_TRACER_MAX_TRACE
4376	bool had_max_tr;
4377#endif
4378	int ret = 0;
4379
4380	mutex_lock(&trace_types_lock);
4381
4382	if (!ring_buffer_expanded) {
4383		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4384						RING_BUFFER_ALL_CPUS);
4385		if (ret < 0)
4386			goto out;
4387		ret = 0;
4388	}
4389
4390	for (t = trace_types; t; t = t->next) {
4391		if (strcmp(t->name, buf) == 0)
4392			break;
4393	}
4394	if (!t) {
4395		ret = -EINVAL;
4396		goto out;
4397	}
4398	if (t == tr->current_trace)
4399		goto out;
4400
4401	/* Some tracers are only allowed for the top level buffer */
4402	if (!trace_ok_for_array(t, tr)) {
4403		ret = -EINVAL;
4404		goto out;
4405	}
4406
4407	/* If trace pipe files are being read, we can't change the tracer */
4408	if (tr->current_trace->ref) {
4409		ret = -EBUSY;
4410		goto out;
4411	}
4412
4413	trace_branch_disable();
4414
4415	tr->current_trace->enabled--;
4416
4417	if (tr->current_trace->reset)
4418		tr->current_trace->reset(tr);
4419
4420	/* Current trace needs to be nop_trace before synchronize_sched */
4421	tr->current_trace = &nop_trace;
4422
4423#ifdef CONFIG_TRACER_MAX_TRACE
4424	had_max_tr = tr->allocated_snapshot;
4425
4426	if (had_max_tr && !t->use_max_tr) {
4427		/*
4428		 * We need to make sure that the update_max_tr sees that
4429		 * current_trace changed to nop_trace to keep it from
4430		 * swapping the buffers after we resize it.
4431		 * The update_max_tr is called with interrupts disabled
4432		 * so a synchronize_sched() is sufficient.
4433		 */
4434		synchronize_sched();
4435		free_snapshot(tr);
4436	}
4437#endif
4438
4439#ifdef CONFIG_TRACER_MAX_TRACE
4440	if (t->use_max_tr && !had_max_tr) {
4441		ret = alloc_snapshot(tr);
4442		if (ret < 0)
4443			goto out;
4444	}
4445#endif
4446
4447	if (t->init) {
4448		ret = tracer_init(t, tr);
4449		if (ret)
4450			goto out;
4451	}
4452
4453	tr->current_trace = t;
4454	tr->current_trace->enabled++;
4455	trace_branch_enable(tr);
4456 out:
4457	mutex_unlock(&trace_types_lock);
4458
4459	return ret;
4460}
4461
4462static ssize_t
4463tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4464			size_t cnt, loff_t *ppos)
4465{
4466	struct trace_array *tr = filp->private_data;
4467	char buf[MAX_TRACER_SIZE+1];
4468	int i;
4469	size_t ret;
4470	int err;
4471
4472	ret = cnt;
4473
4474	if (cnt > MAX_TRACER_SIZE)
4475		cnt = MAX_TRACER_SIZE;
4476
4477	if (copy_from_user(&buf, ubuf, cnt))
4478		return -EFAULT;
4479
4480	buf[cnt] = 0;
4481
4482	/* strip trailing whitespace. */
4483	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4484		buf[i] = 0;
4485
4486	err = tracing_set_tracer(tr, buf);
4487	if (err)
4488		return err;
4489
4490	*ppos += ret;
4491
4492	return ret;
4493}
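/*
 * Illustrative userspace sketch (assumed tracefs path): select the
 * "nop" tracer, which every build provides, by writing its name to
 * current_tracer.  The trailing newline is stripped by the code above.
 */
#if 0	/* example only, not kernel code */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *name = "nop\n";
	int fd = open("/sys/kernel/debug/tracing/current_tracer",
		      O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, name, strlen(name));
	close(fd);
	return 0;
}
#endif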
4494
4495static ssize_t
4496tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4497		   size_t cnt, loff_t *ppos)
4498{
4499	char buf[64];
4500	int r;
4501
4502	r = snprintf(buf, sizeof(buf), "%ld\n",
4503		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4504	if (r > sizeof(buf))
4505		r = sizeof(buf);
4506	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4507}
4508
4509static ssize_t
4510tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4511		    size_t cnt, loff_t *ppos)
4512{
4513	unsigned long val;
4514	int ret;
4515
4516	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4517	if (ret)
4518		return ret;
4519
4520	*ptr = val * 1000;
4521
4522	return cnt;
4523}
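/*
 * The write path above takes microseconds and stores nanoseconds:
 * writing "100" stores 100 * 1000 == 100000 ns.  The read path
 * reverses this with nsecs_to_usecs(), except for the -1 sentinel.
 */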
4524
4525static ssize_t
4526tracing_thresh_read(struct file *filp, char __user *ubuf,
4527		    size_t cnt, loff_t *ppos)
4528{
4529	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4530}
4531
4532static ssize_t
4533tracing_thresh_write(struct file *filp, const char __user *ubuf,
4534		     size_t cnt, loff_t *ppos)
4535{
4536	struct trace_array *tr = filp->private_data;
4537	int ret;
4538
4539	mutex_lock(&trace_types_lock);
4540	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4541	if (ret < 0)
4542		goto out;
4543
4544	if (tr->current_trace->update_thresh) {
4545		ret = tr->current_trace->update_thresh(tr);
4546		if (ret < 0)
4547			goto out;
4548	}
4549
4550	ret = cnt;
4551out:
4552	mutex_unlock(&trace_types_lock);
4553
4554	return ret;
4555}
4556
4557#ifdef CONFIG_TRACER_MAX_TRACE
4558
4559static ssize_t
4560tracing_max_lat_read(struct file *filp, char __user *ubuf,
4561		     size_t cnt, loff_t *ppos)
4562{
4563	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4564}
4565
4566static ssize_t
4567tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4568		      size_t cnt, loff_t *ppos)
4569{
4570	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4571}
4572
4573#endif
4574
4575static int tracing_open_pipe(struct inode *inode, struct file *filp)
4576{
4577	struct trace_array *tr = inode->i_private;
4578	struct trace_iterator *iter;
4579	int ret = 0;
4580
4581	if (tracing_disabled)
4582		return -ENODEV;
4583
4584	if (trace_array_get(tr) < 0)
4585		return -ENODEV;
4586
4587	mutex_lock(&trace_types_lock);
4588
4589	/* create a buffer to store the information to pass to userspace */
4590	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4591	if (!iter) {
4592		ret = -ENOMEM;
4593		__trace_array_put(tr);
4594		goto out;
4595	}
4596
4597	trace_seq_init(&iter->seq);
4598	iter->trace = tr->current_trace;
4599
4600	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4601		ret = -ENOMEM;
4602		goto fail;
4603	}
4604
4605	/* trace pipe does not show start of buffer */
4606	cpumask_setall(iter->started);
4607
4608	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4609		iter->iter_flags |= TRACE_FILE_LAT_FMT;
4610
4611	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
4612	if (trace_clocks[tr->clock_id].in_ns)
4613		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4614
4615	iter->tr = tr;
4616	iter->trace_buffer = &tr->trace_buffer;
4617	iter->cpu_file = tracing_get_cpu(inode);
4618	mutex_init(&iter->mutex);
4619	filp->private_data = iter;
4620
4621	if (iter->trace->pipe_open)
4622		iter->trace->pipe_open(iter);
4623
4624	nonseekable_open(inode, filp);
4625
4626	tr->current_trace->ref++;
4627out:
4628	mutex_unlock(&trace_types_lock);
4629	return ret;
4630
4631fail:
4632	/* iter->trace points at the live tracer; it must not be freed */
4633	kfree(iter);
4634	__trace_array_put(tr);
4635	mutex_unlock(&trace_types_lock);
4636	return ret;
4637}
4638
4639static int tracing_release_pipe(struct inode *inode, struct file *file)
4640{
4641	struct trace_iterator *iter = file->private_data;
4642	struct trace_array *tr = inode->i_private;
4643
4644	mutex_lock(&trace_types_lock);
4645
4646	tr->current_trace->ref--;
4647
4648	if (iter->trace->pipe_close)
4649		iter->trace->pipe_close(iter);
4650
4651	mutex_unlock(&trace_types_lock);
4652
4653	free_cpumask_var(iter->started);
4654	mutex_destroy(&iter->mutex);
4655	kfree(iter);
4656
4657	trace_array_put(tr);
4658
4659	return 0;
4660}
4661
4662static unsigned int
4663trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4664{
4665	struct trace_array *tr = iter->tr;
4666
4667	/* Iterators are static; they should be either filled or empty */
4668	if (trace_buffer_iter(iter, iter->cpu_file))
4669		return POLLIN | POLLRDNORM;
4670
4671	if (tr->trace_flags & TRACE_ITER_BLOCK)
4672		/*
4673		 * Always select as readable when in blocking mode
4674		 */
4675		return POLLIN | POLLRDNORM;
4676	else
4677		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4678					     filp, poll_table);
4679}
4680
4681static unsigned int
4682tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4683{
4684	struct trace_iterator *iter = filp->private_data;
4685
4686	return trace_poll(iter, filp, poll_table);
4687}
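/*
 * Illustrative userspace sketch (assumed tracefs path): wait for data
 * on trace_pipe with poll(), which lands in trace_poll() above.
 */
#if 0	/* example only, not kernel code */
#include <fcntl.h>
#include <poll.h>

int main(void)
{
	struct pollfd pfd;

	pfd.fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;
	poll(&pfd, 1, 5000);	/* block until data or 5s timeout */
	return 0;
}
#endif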
4688
4689/* Must be called with iter->mutex held. */
4690static int tracing_wait_pipe(struct file *filp)
4691{
4692	struct trace_iterator *iter = filp->private_data;
4693	int ret;
4694
4695	while (trace_empty(iter)) {
4696
4697		if ((filp->f_flags & O_NONBLOCK)) {
4698			return -EAGAIN;
4699		}
4700
4701		/*
4702		 * We block until we read something and tracing is disabled.
4703		 * We still block if tracing is disabled, but we have never
4704		 * read anything. This allows a user to cat this file, and
4705		 * then enable tracing. But after we have read something,
4706		 * we give an EOF when tracing is again disabled.
4707		 *
4708		 * iter->pos will be 0 if we haven't read anything.
4709		 */
4710		if (!tracing_is_on() && iter->pos)
4711			break;
4712
4713		mutex_unlock(&iter->mutex);
4714
4715		ret = wait_on_pipe(iter, false);
4716
4717		mutex_lock(&iter->mutex);
4718
4719		if (ret)
4720			return ret;
4721	}
4722
4723	return 1;
4724}
4725
4726/*
4727 * Consumer reader.
4728 */
4729static ssize_t
4730tracing_read_pipe(struct file *filp, char __user *ubuf,
4731		  size_t cnt, loff_t *ppos)
4732{
4733	struct trace_iterator *iter = filp->private_data;
4734	ssize_t sret;
4735
4736	/* return any leftover data */
4737	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4738	if (sret != -EBUSY)
4739		return sret;
4740
4741	trace_seq_init(&iter->seq);
4742
4743	/*
4744	 * Avoid more than one consumer on a single file descriptor.
4745	 * This is just a matter of trace coherency; the ring buffer itself
4746	 * is protected.
4747	 */
4748	mutex_lock(&iter->mutex);
4749	if (iter->trace->read) {
4750		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4751		if (sret)
4752			goto out;
4753	}
4754
4755waitagain:
4756	sret = tracing_wait_pipe(filp);
4757	if (sret <= 0)
4758		goto out;
4759
4760	/* stop when tracing is finished */
4761	if (trace_empty(iter)) {
4762		sret = 0;
4763		goto out;
4764	}
4765
4766	if (cnt >= PAGE_SIZE)
4767		cnt = PAGE_SIZE - 1;
4768
4769	/* reset all but tr, trace, and overruns */
4770	memset(&iter->seq, 0,
4771	       sizeof(struct trace_iterator) -
4772	       offsetof(struct trace_iterator, seq));
4773	cpumask_clear(iter->started);
4774	iter->pos = -1;
4775
4776	trace_event_read_lock();
4777	trace_access_lock(iter->cpu_file);
4778	while (trace_find_next_entry_inc(iter) != NULL) {
4779		enum print_line_t ret;
4780		int save_len = iter->seq.seq.len;
4781
4782		ret = print_trace_line(iter);
4783		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4784			/* don't print partial lines */
4785			iter->seq.seq.len = save_len;
4786			break;
4787		}
4788		if (ret != TRACE_TYPE_NO_CONSUME)
4789			trace_consume(iter);
4790
4791		if (trace_seq_used(&iter->seq) >= cnt)
4792			break;
4793
4794		/*
4795		 * Setting the full flag means we reached the trace_seq buffer
4796		 * size and should have left via the partial output condition
4797		 * above: one of the trace_seq_* functions was not used properly.
4798		 */
4799		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4800			  iter->ent->type);
4801	}
4802	trace_access_unlock(iter->cpu_file);
4803	trace_event_read_unlock();
4804
4805	/* Now copy what we have to the user */
4806	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4807	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4808		trace_seq_init(&iter->seq);
4809
4810	/*
4811	 * If there was nothing to send to user, in spite of consuming trace
4812	 * entries, go back to wait for more entries.
4813	 */
4814	if (sret == -EBUSY)
4815		goto waitagain;
4816
4817out:
4818	mutex_unlock(&iter->mutex);
4819
4820	return sret;
4821}
4822
4823static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4824				     unsigned int idx)
4825{
4826	__free_page(spd->pages[idx]);
4827}
4828
4829static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4830	.can_merge		= 0,
4831	.confirm		= generic_pipe_buf_confirm,
4832	.release		= generic_pipe_buf_release,
4833	.steal			= generic_pipe_buf_steal,
4834	.get			= generic_pipe_buf_get,
4835};
4836
4837static size_t
4838tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4839{
4840	size_t count;
4841	int save_len;
4842	int ret;
4843
4844	/* Seq buffer is page-sized, exactly what we need. */
4845	for (;;) {
4846		save_len = iter->seq.seq.len;
4847		ret = print_trace_line(iter);
4848
4849		if (trace_seq_has_overflowed(&iter->seq)) {
4850			iter->seq.seq.len = save_len;
4851			break;
4852		}
4853
4854		/*
4855		 * This should not be hit, because it should only
4856		 * be set if the iter->seq overflowed. But check it
4857		 * anyway to be safe.
4858		 */
4859		if (ret == TRACE_TYPE_PARTIAL_LINE) {
4860			iter->seq.seq.len = save_len;
4861			break;
4862		}
4863
4864		count = trace_seq_used(&iter->seq) - save_len;
4865		if (rem < count) {
4866			rem = 0;
4867			iter->seq.seq.len = save_len;
4868			break;
4869		}
4870
4871		if (ret != TRACE_TYPE_NO_CONSUME)
4872			trace_consume(iter);
4873		rem -= count;
4874		if (!trace_find_next_entry_inc(iter))	{
4875			rem = 0;
4876			iter->ent = NULL;
4877			break;
4878		}
4879	}
4880
4881	return rem;
4882}
4883
4884static ssize_t tracing_splice_read_pipe(struct file *filp,
4885					loff_t *ppos,
4886					struct pipe_inode_info *pipe,
4887					size_t len,
4888					unsigned int flags)
4889{
4890	struct page *pages_def[PIPE_DEF_BUFFERS];
4891	struct partial_page partial_def[PIPE_DEF_BUFFERS];
4892	struct trace_iterator *iter = filp->private_data;
4893	struct splice_pipe_desc spd = {
4894		.pages		= pages_def,
4895		.partial	= partial_def,
4896		.nr_pages	= 0, /* This gets updated below. */
4897		.nr_pages_max	= PIPE_DEF_BUFFERS,
4898		.flags		= flags,
4899		.ops		= &tracing_pipe_buf_ops,
4900		.spd_release	= tracing_spd_release_pipe,
4901	};
4902	ssize_t ret;
4903	size_t rem;
4904	unsigned int i;
4905
4906	if (splice_grow_spd(pipe, &spd))
4907		return -ENOMEM;
4908
4909	mutex_lock(&iter->mutex);
4910
4911	if (iter->trace->splice_read) {
4912		ret = iter->trace->splice_read(iter, filp,
4913					       ppos, pipe, len, flags);
4914		if (ret)
4915			goto out_err;
4916	}
4917
4918	ret = tracing_wait_pipe(filp);
4919	if (ret <= 0)
4920		goto out_err;
4921
4922	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4923		ret = -EFAULT;
4924		goto out_err;
4925	}
4926
4927	trace_event_read_lock();
4928	trace_access_lock(iter->cpu_file);
4929
4930	/* Fill as many pages as possible. */
4931	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4932		spd.pages[i] = alloc_page(GFP_KERNEL);
4933		if (!spd.pages[i])
4934			break;
4935
4936		rem = tracing_fill_pipe_page(rem, iter);
4937
4938		/* Copy the data into the page, so we can start over. */
4939		ret = trace_seq_to_buffer(&iter->seq,
4940					  page_address(spd.pages[i]),
4941					  trace_seq_used(&iter->seq));
4942		if (ret < 0) {
4943			__free_page(spd.pages[i]);
4944			break;
4945		}
4946		spd.partial[i].offset = 0;
4947		spd.partial[i].len = trace_seq_used(&iter->seq);
4948
4949		trace_seq_init(&iter->seq);
4950	}
4951
4952	trace_access_unlock(iter->cpu_file);
4953	trace_event_read_unlock();
4954	mutex_unlock(&iter->mutex);
4955
4956	spd.nr_pages = i;
4957
4958	if (i)
4959		ret = splice_to_pipe(pipe, &spd);
4960	else
4961		ret = 0;
4962out:
4963	splice_shrink_spd(&spd);
4964	return ret;
4965
4966out_err:
4967	mutex_unlock(&iter->mutex);
4968	goto out;
4969}
4970
4971static ssize_t
4972tracing_entries_read(struct file *filp, char __user *ubuf,
4973		     size_t cnt, loff_t *ppos)
4974{
4975	struct inode *inode = file_inode(filp);
4976	struct trace_array *tr = inode->i_private;
4977	int cpu = tracing_get_cpu(inode);
4978	char buf[64];
4979	int r = 0;
4980	ssize_t ret;
4981
4982	mutex_lock(&trace_types_lock);
4983
4984	if (cpu == RING_BUFFER_ALL_CPUS) {
4985		int cpu, buf_size_same;
4986		unsigned long size;
4987
4988		size = 0;
4989		buf_size_same = 1;
4990		/* check if all cpu sizes are the same */
4991		for_each_tracing_cpu(cpu) {
4992			/* fill in the size from first enabled cpu */
4993			if (size == 0)
4994				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4995			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4996				buf_size_same = 0;
4997				break;
4998			}
4999		}
5000
5001		if (buf_size_same) {
5002			if (!ring_buffer_expanded)
5003				r = sprintf(buf, "%lu (expanded: %lu)\n",
5004					    size >> 10,
5005					    trace_buf_size >> 10);
5006			else
5007				r = sprintf(buf, "%lu\n", size >> 10);
5008		} else
5009			r = sprintf(buf, "X\n");
5010	} else
5011		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5012
5013	mutex_unlock(&trace_types_lock);
5014
5015	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5016	return ret;
5017}
5018
5019static ssize_t
5020tracing_entries_write(struct file *filp, const char __user *ubuf,
5021		      size_t cnt, loff_t *ppos)
5022{
5023	struct inode *inode = file_inode(filp);
5024	struct trace_array *tr = inode->i_private;
5025	unsigned long val;
5026	int ret;
5027
5028	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5029	if (ret)
5030		return ret;
5031
5032	/* must have at least 1 entry */
5033	if (!val)
5034		return -EINVAL;
5035
5036	/* value is in KB */
5037	val <<= 10;
5038	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
5039	if (ret < 0)
5040		return ret;
5041
5042	*ppos += cnt;
5043
5044	return cnt;
5045}
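/*
 * Values written to buffer_size_kb are interpreted as KiB per CPU;
 * "val <<= 10" above converts to bytes (1 KiB == 2^10 bytes), so
 * writing "1408" requests 1408 << 10 == 1441792 bytes per CPU.
 */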
5046
5047static ssize_t
5048tracing_total_entries_read(struct file *filp, char __user *ubuf,
5049				size_t cnt, loff_t *ppos)
5050{
5051	struct trace_array *tr = filp->private_data;
5052	char buf[64];
5053	int r, cpu;
5054	unsigned long size = 0, expanded_size = 0;
5055
5056	mutex_lock(&trace_types_lock);
5057	for_each_tracing_cpu(cpu) {
5058		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5059		if (!ring_buffer_expanded)
5060			expanded_size += trace_buf_size >> 10;
5061	}
5062	if (ring_buffer_expanded)
5063		r = sprintf(buf, "%lu\n", size);
5064	else
5065		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5066	mutex_unlock(&trace_types_lock);
5067
5068	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5069}
5070
5071static ssize_t
5072tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5073			  size_t cnt, loff_t *ppos)
5074{
5075	/*
5076	 * There is no need to read what the user has written; this function
5077	 * exists just to make sure that there is no error when "echo" is used.
5078	 */
5079
5080	*ppos += cnt;
5081
5082	return cnt;
5083}
5084
5085static int
5086tracing_free_buffer_release(struct inode *inode, struct file *filp)
5087{
5088	struct trace_array *tr = inode->i_private;
5089
5090	/* disable tracing ? */
5091	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5092		tracer_tracing_off(tr);
5093	/* resize the ring buffer to 0 */
5094	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5095
5096	trace_array_put(tr);
5097
5098	return 0;
5099}
5100
5101static ssize_t
5102tracing_mark_write(struct file *filp, const char __user *ubuf,
5103					size_t cnt, loff_t *fpos)
5104{
5105	unsigned long addr = (unsigned long)ubuf;
5106	struct trace_array *tr = filp->private_data;
5107	struct ring_buffer_event *event;
5108	struct ring_buffer *buffer;
5109	struct print_entry *entry;
5110	unsigned long irq_flags;
5111	struct page *pages[2];
5112	void *map_page[2];
5113	int nr_pages = 1;
5114	ssize_t written;
5115	int offset;
5116	int size;
5117	int len;
5118	int ret;
5119	int i;
5120
5121	if (tracing_disabled)
5122		return -EINVAL;
5123
5124	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5125		return -EINVAL;
5126
5127	if (cnt > TRACE_BUF_SIZE)
5128		cnt = TRACE_BUF_SIZE;
5129
5130	/*
5131	 * Userspace is injecting traces into the kernel trace buffer.
5132	 * We want to be as non-intrusive as possible.
5133	 * To do so, we do not want to allocate any special buffers
5134	 * or take any locks, but instead write the userspace data
5135	 * straight into the ring buffer.
5136	 *
5137	 * First we need to pin the userspace buffer into memory,
5138	 * which it most likely already is, because the task just referenced
5139	 * it. But there's no guarantee that it is. By using get_user_pages_fast()
5140	 * and kmap_atomic/kunmap_atomic() we can get access to the
5141	 * pages directly. We then write the data directly into the
5142	 * ring buffer.
5143	 */
5144	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5145
5146	/* check if we cross pages */
5147	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5148		nr_pages = 2;
5149
5150	offset = addr & (PAGE_SIZE - 1);
5151	addr &= PAGE_MASK;
5152
5153	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5154	if (ret < nr_pages) {
5155		while (--ret >= 0)
5156			put_page(pages[ret]);
5157		written = -EFAULT;
5158		goto out;
5159	}
5160
5161	for (i = 0; i < nr_pages; i++)
5162		map_page[i] = kmap_atomic(pages[i]);
5163
5164	local_save_flags(irq_flags);
5165	size = sizeof(*entry) + cnt + 2; /* possible \n added */
5166	buffer = tr->trace_buffer.buffer;
5167	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5168					  irq_flags, preempt_count());
5169	if (!event) {
5170		/* Ring buffer disabled, return as if not open for write */
5171		written = -EBADF;
5172		goto out_unlock;
5173	}
5174
5175	entry = ring_buffer_event_data(event);
5176	entry->ip = _THIS_IP_;
5177
5178	if (nr_pages == 2) {
5179		len = PAGE_SIZE - offset;
5180		memcpy(&entry->buf, map_page[0] + offset, len);
5181		memcpy(&entry->buf[len], map_page[1], cnt - len);
5182	} else
5183		memcpy(&entry->buf, map_page[0] + offset, cnt);
5184
5185	if (entry->buf[cnt - 1] != '\n') {
5186		entry->buf[cnt] = '\n';
5187		entry->buf[cnt + 1] = '\0';
5188	} else
5189		entry->buf[cnt] = '\0';
5190
5191	__buffer_unlock_commit(buffer, event);
5192
5193	written = cnt;
5194
5195	*fpos += written;
5196
5197 out_unlock:
5198	for (i = nr_pages - 1; i >= 0; i--) {
5199		kunmap_atomic(map_page[i]);
5200		put_page(pages[i]);
5201	}
5202 out:
5203	return written;
5204}
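/*
 * Illustrative userspace sketch (assumed tracefs path): inject a
 * message into the ring buffer through trace_marker.  The kernel side
 * above appends a '\n' if the write does not end with one.
 */
#if 0	/* example only, not kernel code */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *msg = "hello from userspace";
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, msg, strlen(msg));
	close(fd);
	return 0;
}
#endif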
5205
5206static int tracing_clock_show(struct seq_file *m, void *v)
5207{
5208	struct trace_array *tr = m->private;
5209	int i;
5210
5211	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5212		seq_printf(m,
5213			"%s%s%s%s", i ? " " : "",
5214			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5215			i == tr->clock_id ? "]" : "");
5216	seq_putc(m, '\n');
5217
5218	return 0;
5219}
5220
5221static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5222{
5223	int i;
5224
5225	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5226		if (strcmp(trace_clocks[i].name, clockstr) == 0)
5227			break;
5228	}
5229	if (i == ARRAY_SIZE(trace_clocks))
5230		return -EINVAL;
5231
5232	mutex_lock(&trace_types_lock);
5233
5234	tr->clock_id = i;
5235
5236	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5237
5238	/*
5239	 * New clock may not be consistent with the previous clock.
5240	 * Reset the buffer so that it doesn't have incomparable timestamps.
5241	 */
5242	tracing_reset_online_cpus(&tr->trace_buffer);
5243
5244#ifdef CONFIG_TRACER_MAX_TRACE
5245	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5246		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5247	tracing_reset_online_cpus(&tr->max_buffer);
5248#endif
5249
5250	mutex_unlock(&trace_types_lock);
5251
5252	return 0;
5253}
5254
5255static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5256				   size_t cnt, loff_t *fpos)
5257{
5258	struct seq_file *m = filp->private_data;
5259	struct trace_array *tr = m->private;
5260	char buf[64];
5261	const char *clockstr;
5262	int ret;
5263
5264	if (cnt >= sizeof(buf))
5265		return -EINVAL;
5266
5267	if (copy_from_user(&buf, ubuf, cnt))
5268		return -EFAULT;
5269
5270	buf[cnt] = 0;
5271
5272	clockstr = strstrip(buf);
5273
5274	ret = tracing_set_clock(tr, clockstr);
5275	if (ret)
5276		return ret;
5277
5278	*fpos += cnt;
5279
5280	return cnt;
5281}
5282
5283static int tracing_clock_open(struct inode *inode, struct file *file)
5284{
5285	struct trace_array *tr = inode->i_private;
5286	int ret;
5287
5288	if (tracing_disabled)
5289		return -ENODEV;
5290
5291	if (trace_array_get(tr))
5292		return -ENODEV;
5293
5294	ret = single_open(file, tracing_clock_show, inode->i_private);
5295	if (ret < 0)
5296		trace_array_put(tr);
5297
5298	return ret;
5299}
5300
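/*
 * Illustrative sketch of the trace_clock interface implemented above:
 * reading lists the clocks with the current one in brackets, writing a
 * name switches clocks (and resets the buffers). The path and the
 * availability of the "mono" clock are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/trace_clock", O_RDWR);

	if (fd < 0) {
		perror("open trace_clock");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);	/* e.g. "[local] global ..." */
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	if (write(fd, "mono\n", 5) < 0)		/* tracing_set_clock("mono") */
		perror("write trace_clock");
	close(fd);
	return 0;
}
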
5301struct ftrace_buffer_info {
5302	struct trace_iterator	iter;
5303	void			*spare;
5304	unsigned int		read;
5305};
5306
5307#ifdef CONFIG_TRACER_SNAPSHOT
5308static int tracing_snapshot_open(struct inode *inode, struct file *file)
5309{
5310	struct trace_array *tr = inode->i_private;
5311	struct trace_iterator *iter;
5312	struct seq_file *m;
5313	int ret = 0;
5314
5315	if (trace_array_get(tr) < 0)
5316		return -ENODEV;
5317
5318	if (file->f_mode & FMODE_READ) {
5319		iter = __tracing_open(inode, file, true);
5320		if (IS_ERR(iter))
5321			ret = PTR_ERR(iter);
5322	} else {
5323		/* Writes still need the seq_file to hold the private data */
5324		ret = -ENOMEM;
5325		m = kzalloc(sizeof(*m), GFP_KERNEL);
5326		if (!m)
5327			goto out;
5328		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5329		if (!iter) {
5330			kfree(m);
5331			goto out;
5332		}
5333		ret = 0;
5334
5335		iter->tr = tr;
5336		iter->trace_buffer = &tr->max_buffer;
5337		iter->cpu_file = tracing_get_cpu(inode);
5338		m->private = iter;
5339		file->private_data = m;
5340	}
5341out:
5342	if (ret < 0)
5343		trace_array_put(tr);
5344
5345	return ret;
5346}
5347
5348static ssize_t
5349tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5350		       loff_t *ppos)
5351{
5352	struct seq_file *m = filp->private_data;
5353	struct trace_iterator *iter = m->private;
5354	struct trace_array *tr = iter->tr;
5355	unsigned long val;
5356	int ret;
5357
5358	ret = tracing_update_buffers();
5359	if (ret < 0)
5360		return ret;
5361
5362	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5363	if (ret)
5364		return ret;
5365
5366	mutex_lock(&trace_types_lock);
5367
5368	if (tr->current_trace->use_max_tr) {
5369		ret = -EBUSY;
5370		goto out;
5371	}
5372
5373	switch (val) {
5374	case 0:
5375		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5376			ret = -EINVAL;
5377			break;
5378		}
5379		if (tr->allocated_snapshot)
5380			free_snapshot(tr);
5381		break;
5382	case 1:
5383/* Only allow per-cpu swap if the ring buffer supports it */
5384#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5385		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5386			ret = -EINVAL;
5387			break;
5388		}
5389#endif
5390		if (!tr->allocated_snapshot) {
5391			ret = alloc_snapshot(tr);
5392			if (ret < 0)
5393				break;
5394		}
5395		local_irq_disable();
5396		/* Now, we're going to swap */
5397		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5398			update_max_tr(tr, current, smp_processor_id());
5399		else
5400			update_max_tr_single(tr, current, iter->cpu_file);
5401		local_irq_enable();
5402		break;
5403	default:
5404		if (tr->allocated_snapshot) {
5405			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5406				tracing_reset_online_cpus(&tr->max_buffer);
5407			else
5408				tracing_reset(&tr->max_buffer, iter->cpu_file);
5409		}
5410		break;
5411	}
5412
5413	if (ret >= 0) {
5414		*ppos += cnt;
5415		ret = cnt;
5416	}
5417out:
5418	mutex_unlock(&trace_types_lock);
5419	return ret;
5420}
5421
5422static int tracing_snapshot_release(struct inode *inode, struct file *file)
5423{
5424	struct seq_file *m = file->private_data;
5425	int ret;
5426
5427	ret = tracing_release(inode, file);
5428
5429	if (file->f_mode & FMODE_READ)
5430		return ret;
5431
5432	/* If write only, the seq_file is just a stub */
5433	if (m)
5434		kfree(m->private);
5435	kfree(m);
5436
5437	return 0;
5438}
5439
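/*
 * Illustrative sketch of the snapshot semantics handled in
 * tracing_snapshot_write() above: "0" frees the snapshot buffer, "1"
 * allocates it (if needed) and swaps it with the live buffer, and any
 * other number just clears it. The path is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int snapshot_ctl(const char *val)
{
	int fd = open("/sys/kernel/debug/tracing/snapshot", O_WRONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = write(fd, val, 1) == 1 ? 0 : -1;
	close(fd);
	return ret;
}

int main(void)
{
	if (snapshot_ctl("1"))		/* take a snapshot */
		perror("snapshot");
	/* ... read the "snapshot" file here to examine it ... */
	if (snapshot_ctl("0"))		/* free the snapshot buffer */
		perror("snapshot");
	return 0;
}
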
5440static int tracing_buffers_open(struct inode *inode, struct file *filp);
5441static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5442				    size_t count, loff_t *ppos);
5443static int tracing_buffers_release(struct inode *inode, struct file *file);
5444static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5445		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5446
5447static int snapshot_raw_open(struct inode *inode, struct file *filp)
5448{
5449	struct ftrace_buffer_info *info;
5450	int ret;
5451
5452	ret = tracing_buffers_open(inode, filp);
5453	if (ret < 0)
5454		return ret;
5455
5456	info = filp->private_data;
5457
5458	if (info->iter.trace->use_max_tr) {
5459		tracing_buffers_release(inode, filp);
5460		return -EBUSY;
5461	}
5462
5463	info->iter.snapshot = true;
5464	info->iter.trace_buffer = &info->iter.tr->max_buffer;
5465
5466	return ret;
5467}
5468
5469#endif /* CONFIG_TRACER_SNAPSHOT */
5470
5471
5472static const struct file_operations tracing_thresh_fops = {
5473	.open		= tracing_open_generic,
5474	.read		= tracing_thresh_read,
5475	.write		= tracing_thresh_write,
5476	.llseek		= generic_file_llseek,
5477};
5478
5479#ifdef CONFIG_TRACER_MAX_TRACE
5480static const struct file_operations tracing_max_lat_fops = {
5481	.open		= tracing_open_generic,
5482	.read		= tracing_max_lat_read,
5483	.write		= tracing_max_lat_write,
5484	.llseek		= generic_file_llseek,
5485};
5486#endif
5487
5488static const struct file_operations set_tracer_fops = {
5489	.open		= tracing_open_generic,
5490	.read		= tracing_set_trace_read,
5491	.write		= tracing_set_trace_write,
5492	.llseek		= generic_file_llseek,
5493};
5494
5495static const struct file_operations tracing_pipe_fops = {
5496	.open		= tracing_open_pipe,
5497	.poll		= tracing_poll_pipe,
5498	.read		= tracing_read_pipe,
5499	.splice_read	= tracing_splice_read_pipe,
5500	.release	= tracing_release_pipe,
5501	.llseek		= no_llseek,
5502};
5503
5504static const struct file_operations tracing_entries_fops = {
5505	.open		= tracing_open_generic_tr,
5506	.read		= tracing_entries_read,
5507	.write		= tracing_entries_write,
5508	.llseek		= generic_file_llseek,
5509	.release	= tracing_release_generic_tr,
5510};
5511
5512static const struct file_operations tracing_total_entries_fops = {
5513	.open		= tracing_open_generic_tr,
5514	.read		= tracing_total_entries_read,
5515	.llseek		= generic_file_llseek,
5516	.release	= tracing_release_generic_tr,
5517};
5518
5519static const struct file_operations tracing_free_buffer_fops = {
5520	.open		= tracing_open_generic_tr,
5521	.write		= tracing_free_buffer_write,
5522	.release	= tracing_free_buffer_release,
5523};
5524
5525static const struct file_operations tracing_mark_fops = {
5526	.open		= tracing_open_generic_tr,
5527	.write		= tracing_mark_write,
5528	.llseek		= generic_file_llseek,
5529	.release	= tracing_release_generic_tr,
5530};
5531
5532static const struct file_operations trace_clock_fops = {
5533	.open		= tracing_clock_open,
5534	.read		= seq_read,
5535	.llseek		= seq_lseek,
5536	.release	= tracing_single_release_tr,
5537	.write		= tracing_clock_write,
5538};
5539
5540#ifdef CONFIG_TRACER_SNAPSHOT
5541static const struct file_operations snapshot_fops = {
5542	.open		= tracing_snapshot_open,
5543	.read		= seq_read,
5544	.write		= tracing_snapshot_write,
5545	.llseek		= tracing_lseek,
5546	.release	= tracing_snapshot_release,
5547};
5548
5549static const struct file_operations snapshot_raw_fops = {
5550	.open		= snapshot_raw_open,
5551	.read		= tracing_buffers_read,
5552	.release	= tracing_buffers_release,
5553	.splice_read	= tracing_buffers_splice_read,
5554	.llseek		= no_llseek,
5555};
5556
5557#endif /* CONFIG_TRACER_SNAPSHOT */
5558
5559static int tracing_buffers_open(struct inode *inode, struct file *filp)
5560{
5561	struct trace_array *tr = inode->i_private;
5562	struct ftrace_buffer_info *info;
5563	int ret;
5564
5565	if (tracing_disabled)
5566		return -ENODEV;
5567
5568	if (trace_array_get(tr) < 0)
5569		return -ENODEV;
5570
5571	info = kzalloc(sizeof(*info), GFP_KERNEL);
5572	if (!info) {
5573		trace_array_put(tr);
5574		return -ENOMEM;
5575	}
5576
5577	mutex_lock(&trace_types_lock);
5578
5579	info->iter.tr		= tr;
5580	info->iter.cpu_file	= tracing_get_cpu(inode);
5581	info->iter.trace	= tr->current_trace;
5582	info->iter.trace_buffer = &tr->trace_buffer;
5583	info->spare		= NULL;
5584	/* Force reading ring buffer for first read */
5585	info->read		= (unsigned int)-1;
5586
5587	filp->private_data = info;
5588
5589	tr->current_trace->ref++;
5590
5591	mutex_unlock(&trace_types_lock);
5592
5593	ret = nonseekable_open(inode, filp);
5594	if (ret < 0)
5595		trace_array_put(tr);
5596
5597	return ret;
5598}
5599
5600static unsigned int
5601tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5602{
5603	struct ftrace_buffer_info *info = filp->private_data;
5604	struct trace_iterator *iter = &info->iter;
5605
5606	return trace_poll(iter, filp, poll_table);
5607}
5608
5609static ssize_t
5610tracing_buffers_read(struct file *filp, char __user *ubuf,
5611		     size_t count, loff_t *ppos)
5612{
5613	struct ftrace_buffer_info *info = filp->private_data;
5614	struct trace_iterator *iter = &info->iter;
5615	ssize_t ret;
5616	ssize_t size;
5617
5618	if (!count)
5619		return 0;
5620
5621#ifdef CONFIG_TRACER_MAX_TRACE
5622	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5623		return -EBUSY;
5624#endif
5625
5626	if (!info->spare)
5627		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5628							  iter->cpu_file);
5629	if (!info->spare)
5630		return -ENOMEM;
5631
5632	/* Do we have previous read data to read? */
5633	if (info->read < PAGE_SIZE)
5634		goto read;
5635
5636 again:
5637	trace_access_lock(iter->cpu_file);
5638	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5639				    &info->spare,
5640				    count,
5641				    iter->cpu_file, 0);
5642	trace_access_unlock(iter->cpu_file);
5643
5644	if (ret < 0) {
5645		if (trace_empty(iter)) {
5646			if ((filp->f_flags & O_NONBLOCK))
5647				return -EAGAIN;
5648
5649			ret = wait_on_pipe(iter, false);
5650			if (ret)
5651				return ret;
5652
5653			goto again;
5654		}
5655		return 0;
5656	}
5657
5658	info->read = 0;
5659 read:
5660	size = PAGE_SIZE - info->read;
5661	if (size > count)
5662		size = count;
5663
5664	ret = copy_to_user(ubuf, info->spare + info->read, size);
5665	if (ret == size)
5666		return -EFAULT;
5667
5668	size -= ret;
5669
5670	*ppos += size;
5671	info->read += size;
5672
5673	return size;
5674}
5675
5676static int tracing_buffers_release(struct inode *inode, struct file *file)
5677{
5678	struct ftrace_buffer_info *info = file->private_data;
5679	struct trace_iterator *iter = &info->iter;
5680
5681	mutex_lock(&trace_types_lock);
5682
5683	iter->tr->current_trace->ref--;
5684
5685	__trace_array_put(iter->tr);
5686
5687	if (info->spare)
5688		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5689	kfree(info);
5690
5691	mutex_unlock(&trace_types_lock);
5692
5693	return 0;
5694}
5695
5696struct buffer_ref {
5697	struct ring_buffer	*buffer;
5698	void			*page;
5699	int			ref;
5700};
5701
5702static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5703				    struct pipe_buffer *buf)
5704{
5705	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5706
5707	if (--ref->ref)
5708		return;
5709
5710	ring_buffer_free_read_page(ref->buffer, ref->page);
5711	kfree(ref);
5712	buf->private = 0;
5713}
5714
5715static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5716				struct pipe_buffer *buf)
5717{
5718	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5719
5720	ref->ref++;
5721}
5722
5723/* Pipe buffer operations for a buffer. */
5724static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5725	.can_merge		= 0,
5726	.confirm		= generic_pipe_buf_confirm,
5727	.release		= buffer_pipe_buf_release,
5728	.steal			= generic_pipe_buf_steal,
5729	.get			= buffer_pipe_buf_get,
5730};
5731
5732/*
5733 * Callback from splice_to_pipe(); used to release any pages left
5734 * in the spd in case we errored out while filling the pipe.
5735 */
5736static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5737{
5738	struct buffer_ref *ref =
5739		(struct buffer_ref *)spd->partial[i].private;
5740
5741	if (--ref->ref)
5742		return;
5743
5744	ring_buffer_free_read_page(ref->buffer, ref->page);
5745	kfree(ref);
5746	spd->partial[i].private = 0;
5747}
5748
5749static ssize_t
5750tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5751			    struct pipe_inode_info *pipe, size_t len,
5752			    unsigned int flags)
5753{
5754	struct ftrace_buffer_info *info = file->private_data;
5755	struct trace_iterator *iter = &info->iter;
5756	struct partial_page partial_def[PIPE_DEF_BUFFERS];
5757	struct page *pages_def[PIPE_DEF_BUFFERS];
5758	struct splice_pipe_desc spd = {
5759		.pages		= pages_def,
5760		.partial	= partial_def,
5761		.nr_pages_max	= PIPE_DEF_BUFFERS,
5762		.flags		= flags,
5763		.ops		= &buffer_pipe_buf_ops,
5764		.spd_release	= buffer_spd_release,
5765	};
5766	struct buffer_ref *ref;
5767	int entries, size, i;
5768	ssize_t ret = 0;
5769
5770#ifdef CONFIG_TRACER_MAX_TRACE
5771	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5772		return -EBUSY;
5773#endif
5774
5775	if (splice_grow_spd(pipe, &spd))
5776		return -ENOMEM;
5777
5778	if (*ppos & (PAGE_SIZE - 1)) {
		/* Returning directly here would leak the grown spd */
		ret = -EINVAL;
		goto out;
	}
5780
5781	if (len & (PAGE_SIZE - 1)) {
5782		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
5784		len &= PAGE_MASK;
5785	}
5786
5787 again:
5788	trace_access_lock(iter->cpu_file);
5789	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5790
5791	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5792		struct page *page;
5793		int r;
5794
5795		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5796		if (!ref) {
5797			ret = -ENOMEM;
5798			break;
5799		}
5800
5801		ref->ref = 1;
5802		ref->buffer = iter->trace_buffer->buffer;
5803		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5804		if (!ref->page) {
5805			ret = -ENOMEM;
5806			kfree(ref);
5807			break;
5808		}
5809
5810		r = ring_buffer_read_page(ref->buffer, &ref->page,
5811					  len, iter->cpu_file, 1);
5812		if (r < 0) {
5813			ring_buffer_free_read_page(ref->buffer, ref->page);
5814			kfree(ref);
5815			break;
5816		}
5817
5818		/*
5819		 * Zero out any leftover data; this page is going
5820		 * to user land.
5821		 */
5822		size = ring_buffer_page_len(ref->page);
5823		if (size < PAGE_SIZE)
5824			memset(ref->page + size, 0, PAGE_SIZE - size);
5825
5826		page = virt_to_page(ref->page);
5827
5828		spd.pages[i] = page;
5829		spd.partial[i].len = PAGE_SIZE;
5830		spd.partial[i].offset = 0;
5831		spd.partial[i].private = (unsigned long)ref;
5832		spd.nr_pages++;
5833		*ppos += PAGE_SIZE;
5834
5835		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5836	}
5837
5838	trace_access_unlock(iter->cpu_file);
5839	spd.nr_pages = i;
5840
5841	/* did we read anything? */
5842	if (!spd.nr_pages) {
5843		if (ret)
			goto out;
5845
5846		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
5848
5849		ret = wait_on_pipe(iter, true);
5850		if (ret)
			goto out;
5852
5853		goto again;
5854	}
5855
5856	ret = splice_to_pipe(pipe, &spd);
 out:
5857	splice_shrink_spd(&spd);
5858
5859	return ret;
5860}
5861
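/*
 * Illustrative sketch of consuming raw ring-buffer pages through
 * tracing_buffers_splice_read(). The per-cpu path follows the
 * per_cpu/cpu%ld layout created below; the mount point and 4096-byte
 * page size are assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char page[4096];
	int pfd[2];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY);

	if (fd < 0 || pipe(pfd) < 0) {
		perror("setup");
		return 1;
	}
	/* len must be page aligned, per the checks above */
	n = splice(fd, NULL, pfd[1], NULL, sizeof(page), SPLICE_F_NONBLOCK);
	if (n > 0) {
		n = read(pfd[0], page, n);	/* drain the binary pages */
		printf("got %zd raw bytes\n", n);
	}
	close(fd);
	return 0;
}
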
5862static const struct file_operations tracing_buffers_fops = {
5863	.open		= tracing_buffers_open,
5864	.read		= tracing_buffers_read,
5865	.poll		= tracing_buffers_poll,
5866	.release	= tracing_buffers_release,
5867	.splice_read	= tracing_buffers_splice_read,
5868	.llseek		= no_llseek,
5869};
5870
5871static ssize_t
5872tracing_stats_read(struct file *filp, char __user *ubuf,
5873		   size_t count, loff_t *ppos)
5874{
5875	struct inode *inode = file_inode(filp);
5876	struct trace_array *tr = inode->i_private;
5877	struct trace_buffer *trace_buf = &tr->trace_buffer;
5878	int cpu = tracing_get_cpu(inode);
5879	struct trace_seq *s;
5880	unsigned long cnt;
5881	unsigned long long t;
5882	unsigned long usec_rem;
5883
5884	s = kmalloc(sizeof(*s), GFP_KERNEL);
5885	if (!s)
5886		return -ENOMEM;
5887
5888	trace_seq_init(s);
5889
5890	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5891	trace_seq_printf(s, "entries: %ld\n", cnt);
5892
5893	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5894	trace_seq_printf(s, "overrun: %ld\n", cnt);
5895
5896	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5897	trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5898
5899	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5900	trace_seq_printf(s, "bytes: %ld\n", cnt);
5901
5902	if (trace_clocks[tr->clock_id].in_ns) {
5903		/* local or global for trace_clock */
5904		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5905		usec_rem = do_div(t, USEC_PER_SEC);
5906		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5907								t, usec_rem);
5908
5909		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5910		usec_rem = do_div(t, USEC_PER_SEC);
5911		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5912	} else {
5913		/* counter or tsc mode for trace_clock */
5914		trace_seq_printf(s, "oldest event ts: %llu\n",
5915				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5916
5917		trace_seq_printf(s, "now ts: %llu\n",
5918				ring_buffer_time_stamp(trace_buf->buffer, cpu));
5919	}
5920
5921	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5922	trace_seq_printf(s, "dropped events: %ld\n", cnt);
5923
5924	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5925	trace_seq_printf(s, "read events: %ld\n", cnt);
5926
5927	count = simple_read_from_buffer(ubuf, count, ppos,
5928					s->buffer, trace_seq_used(s));
5929
5930	kfree(s);
5931
5932	return count;
5933}
5934
5935static const struct file_operations tracing_stats_fops = {
5936	.open		= tracing_open_generic_tr,
5937	.read		= tracing_stats_read,
5938	.llseek		= generic_file_llseek,
5939	.release	= tracing_release_generic_tr,
5940};
5941
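/*
 * Illustrative sketch reading the per-cpu statistics produced by
 * tracing_stats_read() above. The path is an assumption; one stats
 * file exists per CPU under per_cpu/.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[1024];
	ssize_t n;
	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/stats",
		      O_RDONLY);

	if (fd < 0) {
		perror("open stats");
		return 1;
	}
	/* entries, overrun, commit overrun, bytes, timestamps, ... */
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}
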
5942#ifdef CONFIG_DYNAMIC_FTRACE
5943
5944int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5945{
5946	return 0;
5947}
5948
5949static ssize_t
5950tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5951		  size_t cnt, loff_t *ppos)
5952{
5953	static char ftrace_dyn_info_buffer[1024];
5954	static DEFINE_MUTEX(dyn_info_mutex);
5955	unsigned long *p = filp->private_data;
5956	char *buf = ftrace_dyn_info_buffer;
5957	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5958	int r;
5959
5960	mutex_lock(&dyn_info_mutex);
5961	r = sprintf(buf, "%ld ", *p);
5962
5963	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5964	buf[r++] = '\n';
5965
5966	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5967
5968	mutex_unlock(&dyn_info_mutex);
5969
5970	return r;
5971}
5972
5973static const struct file_operations tracing_dyn_info_fops = {
5974	.open		= tracing_open_generic,
5975	.read		= tracing_read_dyn_info,
5976	.llseek		= generic_file_llseek,
5977};
5978#endif /* CONFIG_DYNAMIC_FTRACE */
5979
5980#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5981static void
5982ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5983{
5984	tracing_snapshot();
5985}
5986
5987static void
5988ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5989{
5990	unsigned long *count = (unsigned long *)data;
5991
5992	if (!*count)
5993		return;
5994
5995	if (*count != -1)
5996		(*count)--;
5997
5998	tracing_snapshot();
5999}
6000
6001static int
6002ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6003		      struct ftrace_probe_ops *ops, void *data)
6004{
6005	long count = (long)data;
6006
6007	seq_printf(m, "%ps:", (void *)ip);
6008
6009	seq_puts(m, "snapshot");
6010
6011	if (count == -1)
6012		seq_puts(m, ":unlimited\n");
6013	else
6014		seq_printf(m, ":count=%ld\n", count);
6015
6016	return 0;
6017}
6018
6019static struct ftrace_probe_ops snapshot_probe_ops = {
6020	.func			= ftrace_snapshot,
6021	.print			= ftrace_snapshot_print,
6022};
6023
6024static struct ftrace_probe_ops snapshot_count_probe_ops = {
6025	.func			= ftrace_count_snapshot,
6026	.print			= ftrace_snapshot_print,
6027};
6028
6029static int
6030ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6031			       char *glob, char *cmd, char *param, int enable)
6032{
6033	struct ftrace_probe_ops *ops;
6034	void *count = (void *)-1;
6035	char *number;
6036	int ret;
6037
6038	/* hash funcs only work with set_ftrace_filter */
6039	if (!enable)
6040		return -EINVAL;
6041
6042	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6043
6044	if (glob[0] == '!') {
6045		unregister_ftrace_function_probe_func(glob+1, ops);
6046		return 0;
6047	}
6048
6049	if (!param)
6050		goto out_reg;
6051
6052	number = strsep(&param, ":");
6053
6054	if (!strlen(number))
6055		goto out_reg;
6056
6057	/*
6058	 * We use the callback data field (which is a pointer)
6059	 * as our counter.
6060	 */
6061	ret = kstrtoul(number, 0, (unsigned long *)&count);
6062	if (ret)
6063		return ret;
6064
6065 out_reg:
6066	ret = register_ftrace_function_probe(glob, ops, count);
6067
6068	if (ret >= 0)
6069		alloc_snapshot(&global_trace);
6070
6071	return ret < 0 ? ret : 0;
6072}
6073
6074static struct ftrace_func_command ftrace_snapshot_cmd = {
6075	.name			= "snapshot",
6076	.func			= ftrace_trace_snapshot_callback,
6077};
6078
6079static __init int register_snapshot_cmd(void)
6080{
6081	return register_ftrace_command(&ftrace_snapshot_cmd);
6082}
6083#else
6084static inline __init int register_snapshot_cmd(void) { return 0; }
6085#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
6086
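/*
 * Illustrative sketch of arming the "snapshot" command parsed by
 * ftrace_trace_snapshot_callback(): "<func>:snapshot[:count]" written
 * into set_ftrace_filter. The path and the use of schedule() as the
 * trigger function are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *cmd = "schedule:snapshot:1";	/* one-shot snapshot */
	int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter",
		      O_WRONLY);

	if (fd < 0) {
		perror("open set_ftrace_filter");
		return 1;
	}
	/* Prefixing with '!' ("!schedule:snapshot") removes the probe */
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write set_ftrace_filter");
	close(fd);
	return 0;
}
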
6087static struct dentry *tracing_get_dentry(struct trace_array *tr)
6088{
6089	if (WARN_ON(!tr->dir))
6090		return ERR_PTR(-ENODEV);
6091
6092	/* Top directory uses NULL as the parent */
6093	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6094		return NULL;
6095
6096	/* All sub buffers have a descriptor */
6097	return tr->dir;
6098}
6099
6100static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6101{
6102	struct dentry *d_tracer;
6103
6104	if (tr->percpu_dir)
6105		return tr->percpu_dir;
6106
6107	d_tracer = tracing_get_dentry(tr);
6108	if (IS_ERR(d_tracer))
6109		return NULL;
6110
6111	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6112
6113	WARN_ONCE(!tr->percpu_dir,
6114		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6115
6116	return tr->percpu_dir;
6117}
6118
6119static struct dentry *
6120trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6121		      void *data, long cpu, const struct file_operations *fops)
6122{
6123	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6124
6125	if (ret) /* See tracing_get_cpu() */
6126		d_inode(ret)->i_cdev = (void *)(cpu + 1);
6127	return ret;
6128}
6129
6130static void
6131tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6132{
6133	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6134	struct dentry *d_cpu;
6135	char cpu_dir[30]; /* 30 characters should be more than enough */
6136
6137	if (!d_percpu)
6138		return;
6139
6140	snprintf(cpu_dir, 30, "cpu%ld", cpu);
6141	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6142	if (!d_cpu) {
6143		pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
6144		return;
6145	}
6146
6147	/* per cpu trace_pipe */
6148	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6149				tr, cpu, &tracing_pipe_fops);
6150
6151	/* per cpu trace */
6152	trace_create_cpu_file("trace", 0644, d_cpu,
6153				tr, cpu, &tracing_fops);
6154
6155	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6156				tr, cpu, &tracing_buffers_fops);
6157
6158	trace_create_cpu_file("stats", 0444, d_cpu,
6159				tr, cpu, &tracing_stats_fops);
6160
6161	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6162				tr, cpu, &tracing_entries_fops);
6163
6164#ifdef CONFIG_TRACER_SNAPSHOT
6165	trace_create_cpu_file("snapshot", 0644, d_cpu,
6166				tr, cpu, &snapshot_fops);
6167
6168	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6169				tr, cpu, &snapshot_raw_fops);
6170#endif
6171}
6172
6173#ifdef CONFIG_FTRACE_SELFTEST
6174/* Let selftest have access to static functions in this file */
6175#include "trace_selftest.c"
6176#endif
6177
6178static ssize_t
6179trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6180			loff_t *ppos)
6181{
6182	struct trace_option_dentry *topt = filp->private_data;
6183	char *buf;
6184
6185	if (topt->flags->val & topt->opt->bit)
6186		buf = "1\n";
6187	else
6188		buf = "0\n";
6189
6190	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6191}
6192
6193static ssize_t
6194trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6195			 loff_t *ppos)
6196{
6197	struct trace_option_dentry *topt = filp->private_data;
6198	unsigned long val;
6199	int ret;
6200
6201	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6202	if (ret)
6203		return ret;
6204
6205	if (val != 0 && val != 1)
6206		return -EINVAL;
6207
6208	if (!!(topt->flags->val & topt->opt->bit) != val) {
6209		mutex_lock(&trace_types_lock);
6210		ret = __set_tracer_option(topt->tr, topt->flags,
6211					  topt->opt, !val);
6212		mutex_unlock(&trace_types_lock);
6213		if (ret)
6214			return ret;
6215	}
6216
6217	*ppos += cnt;
6218
6219	return cnt;
6220}
6221
6222
6223static const struct file_operations trace_options_fops = {
6224	.open = tracing_open_generic,
6225	.read = trace_options_read,
6226	.write = trace_options_write,
6227	.llseek	= generic_file_llseek,
6228};
6229
6230/*
6231 * In order to pass in both the trace_array descriptor as well as the index
6232 * to the flag that the trace option file represents, the trace_array
6233 * has a character array of trace_flags_index[], which holds the index
6234 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6235 * The address of this character array is passed to the flag option file
6236 * read/write callbacks.
6237 *
6238 * In order to extract both the index and the trace_array descriptor,
6239 * get_tr_index() uses the following algorithm.
6240 *
6241 *   idx = *ptr;
6242 *
6243 * This works because the byte that ptr points to holds its own index
6244 * (remember, index[1] == 1).
6245 *
6246 * Then, to get the trace_array descriptor, subtract that index
6247 * from ptr to arrive at the start of the array:
6248 *
6249 *   ptr - idx == &index[0]
6250 *
6251 * Then a simple container_of() from that pointer gets us to the
6252 * trace_array descriptor.
6253 */
6254static void get_tr_index(void *data, struct trace_array **ptr,
6255			 unsigned int *pindex)
6256{
6257	*pindex = *(unsigned char *)data;
6258
6259	*ptr = container_of(data - *pindex, struct trace_array,
6260			    trace_flags_index);
6261}
6262
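/*
 * A standalone sketch of the trick described above, with illustrative
 * names (demo_array is not a kernel structure): because each byte of
 * the array holds its own index, one pointer recovers both the flag
 * index and the enclosing structure.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_array {
	const char *name;
	unsigned char flags_index[8];		/* index[i] == i */
};

static void get_index(void *data, struct demo_array **ptr,
		      unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;	/* idx = *ptr */
	/* ptr - idx == &flags_index[0]; container_of() finds the owner */
	*ptr = container_of((unsigned char *)data - *pindex,
			    struct demo_array, flags_index);
}

int main(void)
{
	struct demo_array d = { .name = "demo" };
	struct demo_array *owner;
	unsigned int idx;
	int i;

	for (i = 0; i < 8; i++)
		d.flags_index[i] = i;

	get_index(&d.flags_index[5], &owner, &idx);
	printf("%s: bit %u\n", owner->name, idx);	/* "demo: bit 5" */
	return 0;
}
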
6263static ssize_t
6264trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6265			loff_t *ppos)
6266{
6267	void *tr_index = filp->private_data;
6268	struct trace_array *tr;
6269	unsigned int index;
6270	char *buf;
6271
6272	get_tr_index(tr_index, &tr, &index);
6273
6274	if (tr->trace_flags & (1 << index))
6275		buf = "1\n";
6276	else
6277		buf = "0\n";
6278
6279	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6280}
6281
6282static ssize_t
6283trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6284			 loff_t *ppos)
6285{
6286	void *tr_index = filp->private_data;
6287	struct trace_array *tr;
6288	unsigned int index;
6289	unsigned long val;
6290	int ret;
6291
6292	get_tr_index(tr_index, &tr, &index);
6293
6294	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6295	if (ret)
6296		return ret;
6297
6298	if (val != 0 && val != 1)
6299		return -EINVAL;
6300
6301	mutex_lock(&trace_types_lock);
6302	ret = set_tracer_flag(tr, 1 << index, val);
6303	mutex_unlock(&trace_types_lock);
6304
6305	if (ret < 0)
6306		return ret;
6307
6308	*ppos += cnt;
6309
6310	return cnt;
6311}
6312
6313static const struct file_operations trace_options_core_fops = {
6314	.open = tracing_open_generic,
6315	.read = trace_options_core_read,
6316	.write = trace_options_core_write,
6317	.llseek = generic_file_llseek,
6318};
6319
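/*
 * Illustrative sketch of the option-file protocol served by the
 * read/write handlers above: reads yield "0\n" or "1\n", writes accept
 * 0 or 1. The path and the sym-offset option name are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char cur;
	int fd = open("/sys/kernel/debug/tracing/options/sym-offset",
		      O_RDWR);

	if (fd < 0) {
		perror("open option");
		return 1;
	}
	if (read(fd, &cur, 1) == 1) {
		/* Flip whatever is currently set */
		if (write(fd, cur == '0' ? "1" : "0", 1) < 0)
			perror("write option");
	}
	close(fd);
	return 0;
}
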
6320struct dentry *trace_create_file(const char *name,
6321				 umode_t mode,
6322				 struct dentry *parent,
6323				 void *data,
6324				 const struct file_operations *fops)
6325{
6326	struct dentry *ret;
6327
6328	ret = tracefs_create_file(name, mode, parent, data, fops);
6329	if (!ret)
6330		pr_warn("Could not create tracefs '%s' entry\n", name);
6331
6332	return ret;
6333}
6334
6335
6336static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6337{
6338	struct dentry *d_tracer;
6339
6340	if (tr->options)
6341		return tr->options;
6342
6343	d_tracer = tracing_get_dentry(tr);
6344	if (IS_ERR(d_tracer))
6345		return NULL;
6346
6347	tr->options = tracefs_create_dir("options", d_tracer);
6348	if (!tr->options) {
6349		pr_warn("Could not create tracefs directory 'options'\n");
6350		return NULL;
6351	}
6352
6353	return tr->options;
6354}
6355
6356static void
6357create_trace_option_file(struct trace_array *tr,
6358			 struct trace_option_dentry *topt,
6359			 struct tracer_flags *flags,
6360			 struct tracer_opt *opt)
6361{
6362	struct dentry *t_options;
6363
6364	t_options = trace_options_init_dentry(tr);
6365	if (!t_options)
6366		return;
6367
6368	topt->flags = flags;
6369	topt->opt = opt;
6370	topt->tr = tr;
6371
6372	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6373				    &trace_options_fops);
6374
6375}
6376
6377static void
6378create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6379{
6380	struct trace_option_dentry *topts;
6381	struct trace_options *tr_topts;
6382	struct tracer_flags *flags;
6383	struct tracer_opt *opts;
6384	int cnt;
6385	int i;
6386
6387	if (!tracer)
6388		return;
6389
6390	flags = tracer->flags;
6391
6392	if (!flags || !flags->opts)
6393		return;
6394
6395	/*
6396	 * If this is an instance, only create flags for tracers
6397	 * the instance may have.
6398	 */
6399	if (!trace_ok_for_array(tracer, tr))
6400		return;
6401
6402	for (i = 0; i < tr->nr_topts; i++) {
6403		/* Make sure there are no duplicate flags. */
6404		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
6405			return;
6406	}
6407
6408	opts = flags->opts;
6409
6410	for (cnt = 0; opts[cnt].name; cnt++)
6411		;
6412
6413	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6414	if (!topts)
6415		return;
6416
6417	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6418			    GFP_KERNEL);
6419	if (!tr_topts) {
6420		kfree(topts);
6421		return;
6422	}
6423
6424	tr->topts = tr_topts;
6425	tr->topts[tr->nr_topts].tracer = tracer;
6426	tr->topts[tr->nr_topts].topts = topts;
6427	tr->nr_topts++;
6428
6429	for (cnt = 0; opts[cnt].name; cnt++) {
6430		create_trace_option_file(tr, &topts[cnt], flags,
6431					 &opts[cnt]);
6432		WARN_ONCE(topts[cnt].entry == NULL,
6433			  "Failed to create trace option: %s",
6434			  opts[cnt].name);
6435	}
6436}
6437
6438static struct dentry *
6439create_trace_option_core_file(struct trace_array *tr,
6440			      const char *option, long index)
6441{
6442	struct dentry *t_options;
6443
6444	t_options = trace_options_init_dentry(tr);
6445	if (!t_options)
6446		return NULL;
6447
6448	return trace_create_file(option, 0644, t_options,
6449				 (void *)&tr->trace_flags_index[index],
6450				 &trace_options_core_fops);
6451}
6452
6453static void create_trace_options_dir(struct trace_array *tr)
6454{
6455	struct dentry *t_options;
6456	bool top_level = tr == &global_trace;
6457	int i;
6458
6459	t_options = trace_options_init_dentry(tr);
6460	if (!t_options)
6461		return;
6462
6463	for (i = 0; trace_options[i]; i++) {
6464		if (top_level ||
6465		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6466			create_trace_option_core_file(tr, trace_options[i], i);
6467	}
6468}
6469
6470static ssize_t
6471rb_simple_read(struct file *filp, char __user *ubuf,
6472	       size_t cnt, loff_t *ppos)
6473{
6474	struct trace_array *tr = filp->private_data;
6475	char buf[64];
6476	int r;
6477
6478	r = tracer_tracing_is_on(tr);
6479	r = sprintf(buf, "%d\n", r);
6480
6481	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6482}
6483
6484static ssize_t
6485rb_simple_write(struct file *filp, const char __user *ubuf,
6486		size_t cnt, loff_t *ppos)
6487{
6488	struct trace_array *tr = filp->private_data;
6489	struct ring_buffer *buffer = tr->trace_buffer.buffer;
6490	unsigned long val;
6491	int ret;
6492
6493	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6494	if (ret)
6495		return ret;
6496
6497	if (buffer) {
6498		mutex_lock(&trace_types_lock);
6499		if (val) {
6500			tracer_tracing_on(tr);
6501			if (tr->current_trace->start)
6502				tr->current_trace->start(tr);
6503		} else {
6504			tracer_tracing_off(tr);
6505			if (tr->current_trace->stop)
6506				tr->current_trace->stop(tr);
6507		}
6508		mutex_unlock(&trace_types_lock);
6509	}
6510
6511	(*ppos)++;
6512
6513	return cnt;
6514}
6515
6516static const struct file_operations rb_simple_fops = {
6517	.open		= tracing_open_generic_tr,
6518	.read		= rb_simple_read,
6519	.write		= rb_simple_write,
6520	.release	= tracing_release_generic_tr,
6521	.llseek		= default_llseek,
6522};
6523
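/*
 * Illustrative sketch bracketing a workload with the tracing_on file
 * served by rb_simple_read()/rb_simple_write() above. The path is an
 * assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);

	if (fd < 0) {
		perror("open tracing_on");
		return 1;
	}
	if (write(fd, "1", 1) < 0)	/* start recording */
		perror("enable");
	/* ... run the workload under test here ... */
	if (write(fd, "0", 1) < 0)	/* freeze the ring buffer */
		perror("disable");
	close(fd);
	return 0;
}
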
6524struct dentry *trace_instance_dir;
6525
6526static void
6527init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6528
6529static int
6530allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6531{
6532	enum ring_buffer_flags rb_flags;
6533
6534	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6535
6536	buf->tr = tr;
6537
6538	buf->buffer = ring_buffer_alloc(size, rb_flags);
6539	if (!buf->buffer)
6540		return -ENOMEM;
6541
6542	buf->data = alloc_percpu(struct trace_array_cpu);
6543	if (!buf->data) {
6544		ring_buffer_free(buf->buffer);
6545		return -ENOMEM;
6546	}
6547
6548	/* Allocate the first page for whichever buffer we were passed */
6549	set_buffer_entries(buf,
6550			   ring_buffer_size(buf->buffer, 0));
6551
6552	return 0;
6553}
6554
6555static int allocate_trace_buffers(struct trace_array *tr, int size)
6556{
6557	int ret;
6558
6559	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6560	if (ret)
6561		return ret;
6562
6563#ifdef CONFIG_TRACER_MAX_TRACE
6564	ret = allocate_trace_buffer(tr, &tr->max_buffer,
6565				    allocate_snapshot ? size : 1);
6566	if (WARN_ON(ret)) {
6567		ring_buffer_free(tr->trace_buffer.buffer);
6568		free_percpu(tr->trace_buffer.data);
6569		return -ENOMEM;
6570	}
6571	tr->allocated_snapshot = allocate_snapshot;
6572
6573	/*
6574	 * Only the top level trace array gets its snapshot allocated
6575	 * from the kernel command line.
6576	 */
6577	allocate_snapshot = false;
6578#endif
6579	return 0;
6580}
6581
6582static void free_trace_buffer(struct trace_buffer *buf)
6583{
6584	if (buf->buffer) {
6585		ring_buffer_free(buf->buffer);
6586		buf->buffer = NULL;
6587		free_percpu(buf->data);
6588		buf->data = NULL;
6589	}
6590}
6591
6592static void free_trace_buffers(struct trace_array *tr)
6593{
6594	if (!tr)
6595		return;
6596
6597	free_trace_buffer(&tr->trace_buffer);
6598
6599#ifdef CONFIG_TRACER_MAX_TRACE
6600	free_trace_buffer(&tr->max_buffer);
6601#endif
6602}
6603
6604static void init_trace_flags_index(struct trace_array *tr)
6605{
6606	int i;
6607
6608	/* Used by the trace options files */
6609	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6610		tr->trace_flags_index[i] = i;
6611}
6612
6613static void __update_tracer_options(struct trace_array *tr)
6614{
6615	struct tracer *t;
6616
6617	for (t = trace_types; t; t = t->next)
6618		add_tracer_options(tr, t);
6619}
6620
6621static void update_tracer_options(struct trace_array *tr)
6622{
6623	mutex_lock(&trace_types_lock);
6624	__update_tracer_options(tr);
6625	mutex_unlock(&trace_types_lock);
6626}
6627
6628static int instance_mkdir(const char *name)
6629{
6630	struct trace_array *tr;
6631	int ret;
6632
6633	mutex_lock(&trace_types_lock);
6634
6635	ret = -EEXIST;
6636	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6637		if (tr->name && strcmp(tr->name, name) == 0)
6638			goto out_unlock;
6639	}
6640
6641	ret = -ENOMEM;
6642	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6643	if (!tr)
6644		goto out_unlock;
6645
6646	tr->name = kstrdup(name, GFP_KERNEL);
6647	if (!tr->name)
6648		goto out_free_tr;
6649
6650	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6651		goto out_free_tr;
6652
6653	tr->trace_flags = global_trace.trace_flags;
6654
6655	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6656
6657	raw_spin_lock_init(&tr->start_lock);
6658
6659	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6660
6661	tr->current_trace = &nop_trace;
6662
6663	INIT_LIST_HEAD(&tr->systems);
6664	INIT_LIST_HEAD(&tr->events);
6665
6666	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6667		goto out_free_tr;
6668
6669	tr->dir = tracefs_create_dir(name, trace_instance_dir);
6670	if (!tr->dir)
6671		goto out_free_tr;
6672
6673	ret = event_trace_add_tracer(tr->dir, tr);
6674	if (ret) {
6675		tracefs_remove_recursive(tr->dir);
6676		goto out_free_tr;
6677	}
6678
6679	init_tracer_tracefs(tr, tr->dir);
6680	init_trace_flags_index(tr);
6681	__update_tracer_options(tr);
6682
6683	list_add(&tr->list, &ftrace_trace_arrays);
6684
6685	mutex_unlock(&trace_types_lock);
6686
6687	return 0;
6688
6689 out_free_tr:
6690	free_trace_buffers(tr);
6691	free_cpumask_var(tr->tracing_cpumask);
6692	kfree(tr->name);
6693	kfree(tr);
6694
6695 out_unlock:
6696	mutex_unlock(&trace_types_lock);
6697
6698	return ret;
6699
6700}
6701
6702static int instance_rmdir(const char *name)
6703{
6704	struct trace_array *tr;
6705	int found = 0;
6706	int ret;
6707	int i;
6708
6709	mutex_lock(&trace_types_lock);
6710
6711	ret = -ENODEV;
6712	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6713		if (tr->name && strcmp(tr->name, name) == 0) {
6714			found = 1;
6715			break;
6716		}
6717	}
6718	if (!found)
6719		goto out_unlock;
6720
6721	ret = -EBUSY;
6722	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6723		goto out_unlock;
6724
6725	list_del(&tr->list);
6726
6727	tracing_set_nop(tr);
6728	event_trace_del_tracer(tr);
6729	ftrace_destroy_function_files(tr);
6730	tracefs_remove_recursive(tr->dir);
6731	free_trace_buffers(tr);
6732
6733	for (i = 0; i < tr->nr_topts; i++) {
6734		kfree(tr->topts[i].topts);
6735	}
6736	kfree(tr->topts);
6737
6738	kfree(tr->name);
6739	kfree(tr);
6740
6741	ret = 0;
6742
6743 out_unlock:
6744	mutex_unlock(&trace_types_lock);
6745
6746	return ret;
6747}
6748
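/*
 * Illustrative sketch of instance management: mkdir() and rmdir() on
 * the instances directory are routed to instance_mkdir() and
 * instance_rmdir() above. Path and instance name are assumptions.
 */
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *inst = "/sys/kernel/debug/tracing/instances/demo";

	if (mkdir(inst, 0755) < 0) {	/* -> instance_mkdir("demo") */
		perror("mkdir instance");
		return 1;
	}
	/* The instance gets its own trace, trace_pipe, options, ... */
	if (rmdir(inst) < 0)		/* -> instance_rmdir("demo") */
		perror("rmdir instance");
	return 0;
}
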
6749static __init void create_trace_instances(struct dentry *d_tracer)
6750{
6751	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6752							 instance_mkdir,
6753							 instance_rmdir);
6754	if (WARN_ON(!trace_instance_dir))
6755		return;
6756}
6757
6758static void
6759init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6760{
6761	int cpu;
6762
6763	trace_create_file("available_tracers", 0444, d_tracer,
6764			tr, &show_traces_fops);
6765
6766	trace_create_file("current_tracer", 0644, d_tracer,
6767			tr, &set_tracer_fops);
6768
6769	trace_create_file("tracing_cpumask", 0644, d_tracer,
6770			  tr, &tracing_cpumask_fops);
6771
6772	trace_create_file("trace_options", 0644, d_tracer,
6773			  tr, &tracing_iter_fops);
6774
6775	trace_create_file("trace", 0644, d_tracer,
6776			  tr, &tracing_fops);
6777
6778	trace_create_file("trace_pipe", 0444, d_tracer,
6779			  tr, &tracing_pipe_fops);
6780
6781	trace_create_file("buffer_size_kb", 0644, d_tracer,
6782			  tr, &tracing_entries_fops);
6783
6784	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6785			  tr, &tracing_total_entries_fops);
6786
6787	trace_create_file("free_buffer", 0200, d_tracer,
6788			  tr, &tracing_free_buffer_fops);
6789
6790	trace_create_file("trace_marker", 0220, d_tracer,
6791			  tr, &tracing_mark_fops);
6792
6793	trace_create_file("trace_clock", 0644, d_tracer, tr,
6794			  &trace_clock_fops);
6795
6796	trace_create_file("tracing_on", 0644, d_tracer,
6797			  tr, &rb_simple_fops);
6798
6799	create_trace_options_dir(tr);
6800
6801#ifdef CONFIG_TRACER_MAX_TRACE
6802	trace_create_file("tracing_max_latency", 0644, d_tracer,
6803			&tr->max_latency, &tracing_max_lat_fops);
6804#endif
6805
6806	if (ftrace_create_function_files(tr, d_tracer))
6807		WARN(1, "Could not allocate function filter files");
6808
6809#ifdef CONFIG_TRACER_SNAPSHOT
6810	trace_create_file("snapshot", 0644, d_tracer,
6811			  tr, &snapshot_fops);
6812#endif
6813
6814	for_each_tracing_cpu(cpu)
6815		tracing_init_tracefs_percpu(tr, cpu);
6816
6817}
6818
6819static struct vfsmount *trace_automount(void *ignore)
6820{
6821	struct vfsmount *mnt;
6822	struct file_system_type *type;
6823
6824	/*
6825	 * To maintain backward compatibility for tools that mount
6826	 * debugfs to get to the tracing facility, tracefs is automatically
6827	 * mounted to the debugfs/tracing directory.
6828	 */
6829	type = get_fs_type("tracefs");
6830	if (!type)
6831		return NULL;
6832	mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6833	put_filesystem(type);
6834	if (IS_ERR(mnt))
6835		return NULL;
6836	mntget(mnt);
6837
6838	return mnt;
6839}
6840
6841/**
6842 * tracing_init_dentry - initialize top level trace array
6843 *
6844 * This is called when creating files or directories in the tracing
6845 * directory. It is called via fs_initcall() by the boot-up code
6846 * and expects to return the dentry of the top level tracing directory.
6847 */
6848struct dentry *tracing_init_dentry(void)
6849{
6850	struct trace_array *tr = &global_trace;
6851
6852	/* The top level trace array uses NULL as parent */
6853	if (tr->dir)
6854		return NULL;
6855
6856	if (WARN_ON(!tracefs_initialized()) ||
6857		(IS_ENABLED(CONFIG_DEBUG_FS) &&
6858		 WARN_ON(!debugfs_initialized())))
6859		return ERR_PTR(-ENODEV);
6860
6861	/*
6862	 * As there may still be users that expect the tracing
6863	 * files to exist in debugfs/tracing, we must automount
6864	 * the tracefs file system there, so older tools still
6865	 * work with the newer kernel.
6866	 */
6867	tr->dir = debugfs_create_automount("tracing", NULL,
6868					   trace_automount, NULL);
6869	if (!tr->dir) {
6870		pr_warn_once("Could not create debugfs directory 'tracing'\n");
6871		return ERR_PTR(-ENOMEM);
6872	}
6873
6874	return NULL;
6875}
6876
6877extern struct trace_enum_map *__start_ftrace_enum_maps[];
6878extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6879
6880static void __init trace_enum_init(void)
6881{
6882	int len;
6883
6884	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6885	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6886}
6887
6888#ifdef CONFIG_MODULES
6889static void trace_module_add_enums(struct module *mod)
6890{
6891	if (!mod->num_trace_enums)
6892		return;
6893
6894	/*
6895	 * Modules with bad taint do not have events created, do
6896	 * not bother with enums either.
6897	 */
6898	if (trace_module_has_bad_taint(mod))
6899		return;
6900
6901	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6902}
6903
6904#ifdef CONFIG_TRACE_ENUM_MAP_FILE
6905static void trace_module_remove_enums(struct module *mod)
6906{
6907	union trace_enum_map_item *map;
6908	union trace_enum_map_item **last = &trace_enum_maps;
6909
6910	if (!mod->num_trace_enums)
6911		return;
6912
6913	mutex_lock(&trace_enum_mutex);
6914
6915	map = trace_enum_maps;
6916
6917	while (map) {
6918		if (map->head.mod == mod)
6919			break;
6920		map = trace_enum_jmp_to_tail(map);
6921		last = &map->tail.next;
6922		map = map->tail.next;
6923	}
6924	if (!map)
6925		goto out;
6926
6927	*last = trace_enum_jmp_to_tail(map)->tail.next;
6928	kfree(map);
6929 out:
6930	mutex_unlock(&trace_enum_mutex);
6931}
6932#else
6933static inline void trace_module_remove_enums(struct module *mod) { }
6934#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6935
6936static int trace_module_notify(struct notifier_block *self,
6937			       unsigned long val, void *data)
6938{
6939	struct module *mod = data;
6940
6941	switch (val) {
6942	case MODULE_STATE_COMING:
6943		trace_module_add_enums(mod);
6944		break;
6945	case MODULE_STATE_GOING:
6946		trace_module_remove_enums(mod);
6947		break;
6948	}
6949
6950	return 0;
6951}
6952
6953static struct notifier_block trace_module_nb = {
6954	.notifier_call = trace_module_notify,
6955	.priority = 0,
6956};
6957#endif /* CONFIG_MODULES */
6958
6959static __init int tracer_init_tracefs(void)
6960{
6961	struct dentry *d_tracer;
6962
6963	trace_access_lock_init();
6964
6965	d_tracer = tracing_init_dentry();
6966	if (IS_ERR(d_tracer))
6967		return 0;
6968
6969	init_tracer_tracefs(&global_trace, d_tracer);
6970
6971	trace_create_file("tracing_thresh", 0644, d_tracer,
6972			&global_trace, &tracing_thresh_fops);
6973
6974	trace_create_file("README", 0444, d_tracer,
6975			NULL, &tracing_readme_fops);
6976
6977	trace_create_file("saved_cmdlines", 0444, d_tracer,
6978			NULL, &tracing_saved_cmdlines_fops);
6979
6980	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6981			  NULL, &tracing_saved_cmdlines_size_fops);
6982
6983	trace_enum_init();
6984
6985	trace_create_enum_file(d_tracer);
6986
6987#ifdef CONFIG_MODULES
6988	register_module_notifier(&trace_module_nb);
6989#endif
6990
6991#ifdef CONFIG_DYNAMIC_FTRACE
6992	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6993			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6994#endif
6995
6996	create_trace_instances(d_tracer);
6997
6998	update_tracer_options(&global_trace);
6999
7000	return 0;
7001}
7002
7003static int trace_panic_handler(struct notifier_block *this,
7004			       unsigned long event, void *unused)
7005{
7006	if (ftrace_dump_on_oops)
7007		ftrace_dump(ftrace_dump_on_oops);
7008	return NOTIFY_OK;
7009}
7010
7011static struct notifier_block trace_panic_notifier = {
7012	.notifier_call  = trace_panic_handler,
7013	.next           = NULL,
7014	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
7015};
7016
7017static int trace_die_handler(struct notifier_block *self,
7018			     unsigned long val,
7019			     void *data)
7020{
7021	switch (val) {
7022	case DIE_OOPS:
7023		if (ftrace_dump_on_oops)
7024			ftrace_dump(ftrace_dump_on_oops);
7025		break;
7026	default:
7027		break;
7028	}
7029	return NOTIFY_OK;
7030}
7031
7032static struct notifier_block trace_die_notifier = {
7033	.notifier_call = trace_die_handler,
7034	.priority = 200
7035};
7036
7037/*
7038 * printk is capped at a max of 1024 characters; we really don't need
7039 * it that big. Nothing should be printing 1000 characters anyway.
7040 */
7041#define TRACE_MAX_PRINT		1000
7042
7043/*
7044 * Define here KERN_TRACE so that we have one place to modify
7045 * it if we decide to change what log level the ftrace dump
7046 * should be at.
7047 */
7048#define KERN_TRACE		KERN_EMERG
7049
7050void
7051trace_printk_seq(struct trace_seq *s)
7052{
7053	/* Probably should print a warning here. */
7054	if (s->seq.len >= TRACE_MAX_PRINT)
7055		s->seq.len = TRACE_MAX_PRINT;
7056
7057	/*
7058	 * More paranoid code. Although the buffer size is set to
7059	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7060	 * an extra layer of protection.
7061	 */
7062	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7063		s->seq.len = s->seq.size - 1;
7064
7065	/* Should already be NUL-terminated, but we are paranoid. */
7066	s->buffer[s->seq.len] = 0;
7067
7068	printk(KERN_TRACE "%s", s->buffer);
7069
7070	trace_seq_init(s);
7071}
7072
7073void trace_init_global_iter(struct trace_iterator *iter)
7074{
7075	iter->tr = &global_trace;
7076	iter->trace = iter->tr->current_trace;
7077	iter->cpu_file = RING_BUFFER_ALL_CPUS;
7078	iter->trace_buffer = &global_trace.trace_buffer;
7079
7080	if (iter->trace && iter->trace->open)
7081		iter->trace->open(iter);
7082
7083	/* Annotate start of buffers if we had overruns */
7084	if (ring_buffer_overruns(iter->trace_buffer->buffer))
7085		iter->iter_flags |= TRACE_FILE_ANNOTATE;
7086
7087	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
7088	if (trace_clocks[iter->tr->clock_id].in_ns)
7089		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
7090}
7091
7092void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
7093{
7094	/* use static because iter can be a bit big for the stack */
7095	static struct trace_iterator iter;
7096	static atomic_t dump_running;
7097	struct trace_array *tr = &global_trace;
7098	unsigned int old_userobj;
7099	unsigned long flags;
7100	int cnt = 0, cpu;
7101
7102	/* Only allow one dump user at a time. */
7103	if (atomic_inc_return(&dump_running) != 1) {
7104		atomic_dec(&dump_running);
7105		return;
7106	}
7107
7108	/*
7109	 * Always turn off tracing when we dump.
7110	 * We don't need to show trace output of what happens
7111	 * between multiple crashes.
7112	 *
7113	 * If the user does a sysrq-z, then they can re-enable
7114	 * tracing with echo 1 > tracing_on.
7115	 */
7116	tracing_off();
7117
7118	local_irq_save(flags);
7119
7120	/* Simulate the iterator */
7121	trace_init_global_iter(&iter);
7122
7123	for_each_tracing_cpu(cpu) {
7124		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7125	}
7126
7127	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
7128
7129	/* don't look at user memory in panic mode */
7130	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7131
7132	switch (oops_dump_mode) {
7133	case DUMP_ALL:
7134		iter.cpu_file = RING_BUFFER_ALL_CPUS;
7135		break;
7136	case DUMP_ORIG:
7137		iter.cpu_file = raw_smp_processor_id();
7138		break;
7139	case DUMP_NONE:
7140		goto out_enable;
7141	default:
7142		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7143		iter.cpu_file = RING_BUFFER_ALL_CPUS;
7144	}
7145
7146	printk(KERN_TRACE "Dumping ftrace buffer:\n");
7147
7148	/* Did function tracer already get disabled? */
7149	if (ftrace_is_dead()) {
7150		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7151		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
7152	}
7153
7154	/*
7155	 * We need to stop all tracing on all CPUs to read
7156	 * the next buffer. This is a bit expensive, but is
7157	 * not done often. We read everything we can,
7158	 * and then release the locks again.
7159	 */
7160
7161	while (!trace_empty(&iter)) {
7162
7163		if (!cnt)
7164			printk(KERN_TRACE "---------------------------------\n");
7165
7166		cnt++;
7167
7168		/* reset all but tr, trace, and overruns */
7169		memset(&iter.seq, 0,
7170		       sizeof(struct trace_iterator) -
7171		       offsetof(struct trace_iterator, seq));
7172		iter.iter_flags |= TRACE_FILE_LAT_FMT;
7173		iter.pos = -1;
7174
7175		if (trace_find_next_entry_inc(&iter) != NULL) {
7176			int ret;
7177
7178			ret = print_trace_line(&iter);
7179			if (ret != TRACE_TYPE_NO_CONSUME)
7180				trace_consume(&iter);
7181		}
7182		touch_nmi_watchdog();
7183
7184		trace_printk_seq(&iter.seq);
7185	}
7186
7187	if (!cnt)
7188		printk(KERN_TRACE "   (ftrace buffer empty)\n");
7189	else
7190		printk(KERN_TRACE "---------------------------------\n");
7191
7192 out_enable:
7193	tr->trace_flags |= old_userobj;
7194
7195	for_each_tracing_cpu(cpu) {
7196		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7197	}
7198	atomic_dec(&dump_running);
7199	local_irq_restore(flags);
7200}
7201EXPORT_SYMBOL_GPL(ftrace_dump);
7202
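/*
 * Illustrative sketch of arming the dump-on-oops path at run time via
 * the kernel.ftrace_dump_on_oops sysctl (the boot parameter of the
 * same name also works). Value 1 maps to DUMP_ALL, 2 to DUMP_ORIG;
 * the sysctl path is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/kernel/ftrace_dump_on_oops", O_WRONLY);

	if (fd < 0) {
		perror("open ftrace_dump_on_oops");
		return 1;
	}
	if (write(fd, "1", 1) < 0)	/* dump all CPU buffers on oops */
		perror("write");
	close(fd);
	return 0;
}
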
7203__init static int tracer_alloc_buffers(void)
7204{
7205	int ring_buf_size;
7206	int ret = -ENOMEM;
7207
7208	/*
7209	 * Make sure we don't accidentally add more trace options
7210	 * than we have bits for.
7211	 */
7212	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
7213
7214	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7215		goto out;
7216
7217	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7218		goto out_free_buffer_mask;
7219
7220	/* Only allocate trace_printk buffers if a trace_printk exists */
7221	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7222		/* Must be called before global_trace.buffer is allocated */
7223		trace_printk_init_buffers();
7224
7225	/* To save memory, keep the ring buffer size to its minimum */
7226	if (ring_buffer_expanded)
7227		ring_buf_size = trace_buf_size;
7228	else
7229		ring_buf_size = 1;
7230
7231	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7232	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7233
7234	raw_spin_lock_init(&global_trace.start_lock);
7235
7236	/* Used for event triggers */
7237	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7238	if (!temp_buffer)
7239		goto out_free_cpumask;
7240
7241	if (trace_create_savedcmd() < 0)
7242		goto out_free_temp_buffer;
7243
7244	/* TODO: make the number of buffers hot pluggable with CPUs */
7245	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7246		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7247		WARN_ON(1);
7248		goto out_free_savedcmd;
7249	}
7250
7251	if (global_trace.buffer_disabled)
7252		tracing_off();
7253
7254	if (trace_boot_clock) {
7255		ret = tracing_set_clock(&global_trace, trace_boot_clock);
7256		if (ret < 0)
7257			pr_warn("Trace clock %s not defined, going back to default\n",
7258				trace_boot_clock);
7259	}
7260
7261	/*
7262	 * register_tracer() might reference current_trace, so it
7263	 * needs to be set before we register anything. This is
7264	 * just a bootstrap of current_trace anyway.
7265	 */
7266	global_trace.current_trace = &nop_trace;
7267
7268	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7269
7270	ftrace_init_global_array_ops(&global_trace);
7271
7272	init_trace_flags_index(&global_trace);
7273
7274	register_tracer(&nop_trace);
7275
7276	/* All seems OK, enable tracing */
7277	tracing_disabled = 0;
7278
7279	atomic_notifier_chain_register(&panic_notifier_list,
7280				       &trace_panic_notifier);
7281
7282	register_die_notifier(&trace_die_notifier);
7283
7284	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7285
7286	INIT_LIST_HEAD(&global_trace.systems);
7287	INIT_LIST_HEAD(&global_trace.events);
7288	list_add(&global_trace.list, &ftrace_trace_arrays);
7289
7290	apply_trace_boot_options();
7291
7292	register_snapshot_cmd();
7293
7294	return 0;
7295
7296out_free_savedcmd:
7297	free_saved_cmdlines_buffer(savedcmd);
7298out_free_temp_buffer:
7299	ring_buffer_free(temp_buffer);
7300out_free_cpumask:
7301	free_cpumask_var(global_trace.tracing_cpumask);
7302out_free_buffer_mask:
7303	free_cpumask_var(tracing_buffer_mask);
7304out:
7305	return ret;
7306}
7307
7308void __init trace_init(void)
7309{
7310	if (tracepoint_printk) {
7311		tracepoint_print_iter =
7312			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7313		if (WARN_ON(!tracepoint_print_iter))
7314			tracepoint_printk = 0;
7315	}
7316	tracer_alloc_buffers();
7317	trace_event_init();
7318}
7319
7320__init static int clear_boot_tracer(void)
7321{
7322	/*
7323	 * The default bootup tracer name lives in an init section and
7324	 * will be freed. This function is called at late_initcall time;
7325	 * if the boot tracer was never registered by then, clear the
7326	 * pointer out, to prevent a later registration from accessing
7327	 * the memory that is about to be freed.
7328	 */
7329	if (!default_bootup_tracer)
7330		return 0;
7331
7332	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7333	       default_bootup_tracer);
7334	default_bootup_tracer = NULL;
7335
7336	return 0;
7337}
7338
7339fs_initcall(tracer_init_tracefs);
7340late_initcall(clear_boot_tracer);