1/*
2 * ring buffer based function tracer
3 *
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
13 */
14#include <linux/ring_buffer.h>
15#include <generated/utsrelease.h>
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
20#include <linux/notifier.h>
21#include <linux/irqflags.h>
22#include <linux/debugfs.h>
23#include <linux/tracefs.h>
24#include <linux/pagemap.h>
25#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
28#include <linux/kprobes.h>
29#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
32#include <linux/splice.h>
33#include <linux/kdebug.h>
34#include <linux/string.h>
35#include <linux/mount.h>
36#include <linux/rwsem.h>
37#include <linux/slab.h>
38#include <linux/ctype.h>
39#include <linux/init.h>
40#include <linux/poll.h>
41#include <linux/nmi.h>
42#include <linux/fs.h>
43#include <linux/sched/rt.h>
44
45#include "trace.h"
46#include "trace_output.h"
47
48/*
49 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
51 */
52bool ring_buffer_expanded;
53
54/*
55 * We need to change this state when a selftest is running.
56 * A selftest will peek into the ring buffer to count the
57 * entries inserted during the selftest, but concurrent insertions
58 * into the ring buffer (such as those from trace_printk) could occur
59 * at the same time, giving false positive or negative results.
60 */
61static bool __read_mostly tracing_selftest_running;
62
63/*
64 * If a tracer is running, we do not want to run SELFTEST.
65 */
66bool __read_mostly tracing_selftest_disabled;
67
68/* Pipe tracepoints to printk */
69struct trace_iterator *tracepoint_print_iter;
70int tracepoint_printk;
71
72/* For tracers that don't implement custom flags */
73static struct tracer_opt dummy_tracer_opt[] = {
74 { }
75};
76
77static int
78dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
79{
80 return 0;
81}
82
83/*
84 * To prevent the comm cache from being overwritten when no
85 * tracing is active, only save the comm when a trace event
86 * occurs.
87 */
88static DEFINE_PER_CPU(bool, trace_cmdline_save);
89
90/*
91 * When set, this kills all tracing for good (it never comes back).
92 * It is initialized to 1 but will turn to zero if the initialization
93 * of the tracer is successful. That is the only place that sets
94 * it back to zero.
95 */
96static int tracing_disabled = 1;
97
98cpumask_var_t __read_mostly tracing_buffer_mask;
99
100/*
101 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
102 *
103 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
104 * is set, then ftrace_dump is called. This will output the contents
105 * of the ftrace buffers to the console. This is very useful for
106 * capturing traces that lead to crashes and outputting them to a
107 * serial console.
108 *
109 * It is off by default, but it can be enabled either by specifying
110 * "ftrace_dump_on_oops" on the kernel command line or by setting
111 * /proc/sys/kernel/ftrace_dump_on_oops.
112 * Set it to 1 to dump the buffers of all CPUs.
113 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
114 */
115
116enum ftrace_dump_mode ftrace_dump_on_oops;
117
118/* When set, tracing will stop when a WARN*() is hit */
119int __disable_trace_on_warning;
120
121#ifdef CONFIG_TRACE_ENUM_MAP_FILE
122/* Map of enums to their values, for "enum_map" file */
123struct trace_enum_map_head {
124 struct module *mod;
125 unsigned long length;
126};
127
128union trace_enum_map_item;
129
130struct trace_enum_map_tail {
131 /*
132 * "end" is first and points to NULL as it must be different
133 * than "mod" or "enum_string"
134 */
135 union trace_enum_map_item *next;
136 const char *end; /* points to NULL */
137};
138
139static DEFINE_MUTEX(trace_enum_mutex);
140
141/*
142 * The trace_enum_maps are saved in an array with two extra elements,
143 * one at the beginning, and one at the end. The beginning item contains
144 * the count of the saved maps (head.length), and the module they
145 * belong to if not built in (head.mod). The ending item contains a
146 * pointer to the next array of saved enum_map items.
147 */
148union trace_enum_map_item {
149 struct trace_enum_map map;
150 struct trace_enum_map_head head;
151 struct trace_enum_map_tail tail;
152};
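/*
 * To make the layout described above concrete: a hypothetical module
 * carrying three enum maps would be saved as a block of five items
 * (map field names follow struct trace_enum_map; values are made up):
 *
 *	[0] head: { .mod = <module>, .length = 3 }
 *	[1] map:  { .enum_string = "ENUM_A", .enum_value = 1 }
 *	[2] map:  { .enum_string = "ENUM_B", .enum_value = 2 }
 *	[3] map:  { .enum_string = "ENUM_C", .enum_value = 4 }
 *	[4] tail: { .next = <next saved block, or NULL>, .end = NULL }
 */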
153
154static union trace_enum_map_item *trace_enum_maps;
155#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
156
157static int tracing_set_tracer(struct trace_array *tr, const char *buf);
158
159#define MAX_TRACER_SIZE 100
160static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
161static char *default_bootup_tracer;
162
163static bool allocate_snapshot;
164
165static int __init set_cmdline_ftrace(char *str)
166{
167 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
168 default_bootup_tracer = bootup_tracer_buf;
169 /* We are using ftrace early, expand it */
170 ring_buffer_expanded = true;
171 return 1;
172}
173__setup("ftrace=", set_cmdline_ftrace);
174
175static int __init set_ftrace_dump_on_oops(char *str)
176{
177 if (*str++ != '=' || !*str) {
178 ftrace_dump_on_oops = DUMP_ALL;
179 return 1;
180 }
181
182 if (!strcmp("orig_cpu", str)) {
183 ftrace_dump_on_oops = DUMP_ORIG;
184 return 1;
185 }
186
187 return 0;
188}
189__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
190
191static int __init stop_trace_on_warning(char *str)
192{
193 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
194 __disable_trace_on_warning = 1;
195 return 1;
196}
197__setup("traceoff_on_warning", stop_trace_on_warning);
198
199static int __init boot_alloc_snapshot(char *str)
200{
201 allocate_snapshot = true;
202 /* We also need the main ring buffer expanded */
203 ring_buffer_expanded = true;
204 return 1;
205}
206__setup("alloc_snapshot", boot_alloc_snapshot);
207
208
209static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
210
211static int __init set_trace_boot_options(char *str)
212{
213 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
214 return 0;
215}
216__setup("trace_options=", set_trace_boot_options);
217
218static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
219static char *trace_boot_clock __initdata;
220
221static int __init set_trace_boot_clock(char *str)
222{
223 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
224 trace_boot_clock = trace_boot_clock_buf;
225 return 0;
226}
227__setup("trace_clock=", set_trace_boot_clock);
228
229static int __init set_tracepoint_printk(char *str)
230{
231 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
232 tracepoint_printk = 1;
233 return 1;
234}
235__setup("tp_printk", set_tracepoint_printk);
236
237unsigned long long ns2usecs(cycle_t nsec)
238{
239 nsec += 500;
240 do_div(nsec, 1000);
241 return nsec;
242}
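/*
 * Worked example of the rounding above: ns2usecs(1499) computes
 * (1499 + 500) / 1000 = 1, while ns2usecs(1500) computes
 * (1500 + 500) / 1000 = 2, i.e. nanoseconds are rounded to the
 * nearest microsecond.
 */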
243
244/* trace_flags holds trace_options default values */
245#define TRACE_DEFAULT_FLAGS \
246 (FUNCTION_DEFAULT_FLAGS | \
247 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
248 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
249 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
250 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
251
252/* trace_options that are only supported by global_trace */
253#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
254 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
255
256
257/*
258 * The global_trace is the descriptor that holds the tracing
259 * buffers for the live tracing. For each CPU, it contains
260 * a linked list of pages that will store trace entries. The
261 * page descriptors of those pages are used to hold the linked
262 * list, by chaining the lru item of each page descriptor to
263 * the other pages of the per-CPU buffer.
264 *
265 * For each active CPU there is a data field that holds the
266 * pages for the buffer for that CPU. Each CPU has the same number
267 * of pages allocated for its buffer.
268 */
269static struct trace_array global_trace = {
270 .trace_flags = TRACE_DEFAULT_FLAGS,
271};
272
273LIST_HEAD(ftrace_trace_arrays);
274
275int trace_array_get(struct trace_array *this_tr)
276{
277 struct trace_array *tr;
278 int ret = -ENODEV;
279
280 mutex_lock(&trace_types_lock);
281 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
282 if (tr == this_tr) {
283 tr->ref++;
284 ret = 0;
285 break;
286 }
287 }
288 mutex_unlock(&trace_types_lock);
289
290 return ret;
291}
292
293static void __trace_array_put(struct trace_array *this_tr)
294{
295 WARN_ON(!this_tr->ref);
296 this_tr->ref--;
297}
298
299void trace_array_put(struct trace_array *this_tr)
300{
301 mutex_lock(&trace_types_lock);
302 __trace_array_put(this_tr);
303 mutex_unlock(&trace_types_lock);
304}
305
306int filter_check_discard(struct trace_event_file *file, void *rec,
307 struct ring_buffer *buffer,
308 struct ring_buffer_event *event)
309{
310 if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
311 !filter_match_preds(file->filter, rec)) {
312 ring_buffer_discard_commit(buffer, event);
313 return 1;
314 }
315
316 return 0;
317}
318EXPORT_SYMBOL_GPL(filter_check_discard);
319
320int call_filter_check_discard(struct trace_event_call *call, void *rec,
321 struct ring_buffer *buffer,
322 struct ring_buffer_event *event)
323{
324 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
325 !filter_match_preds(call->filter, rec)) {
326 ring_buffer_discard_commit(buffer, event);
327 return 1;
328 }
329
330 return 0;
331}
332EXPORT_SYMBOL_GPL(call_filter_check_discard);
333
334static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
335{
336 u64 ts;
337
338 /* Early boot up does not have a buffer yet */
339 if (!buf->buffer)
340 return trace_clock_local();
341
342 ts = ring_buffer_time_stamp(buf->buffer, cpu);
343 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
344
345 return ts;
346}
347
348cycle_t ftrace_now(int cpu)
349{
350 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
351}
352
353/**
354 * tracing_is_enabled - Show if global_trace has been disabled
355 *
356 * Shows if the global trace has been enabled or not. It uses the
357 * mirror flag "buffer_disabled" to be used in fast paths such as for
358 * the irqsoff tracer. But it may be inaccurate due to races. If you
359 * need to know the accurate state, use tracing_is_on() which is a little
360 * slower, but accurate.
361 */
362int tracing_is_enabled(void)
363{
364 /*
365 * For quick access (irqsoff uses this in fast path), just
366 * return the mirror variable of the state of the ring buffer.
367 * It's a little racy, but we don't really care.
368 */
369 smp_rmb();
370 return !global_trace.buffer_disabled;
371}
372
373/*
374 * trace_buf_size is the size in bytes that is allocated
375 * for a buffer. Note, the number of bytes is always rounded
376 * to page size.
377 *
378 * This number is purposely set to a low value of 16384.
379 * If a dump on oops happens, it is much nicer not to have to
380 * wait for a huge amount of output. In any case, this size is
381 * configurable at both boot time and run time.
382 */
383#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
384
385static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
386
387/* trace_types holds a linked list of available tracers. */
388static struct tracer *trace_types __read_mostly;
389
390/*
391 * trace_types_lock is used to protect the trace_types list.
392 */
393DEFINE_MUTEX(trace_types_lock);
394
395/*
396 * serialize the access of the ring buffer
397 *
398 * The ring buffer serializes readers, but that is only low level protection.
399 * The validity of the events (returned by ring_buffer_peek() etc.)
400 * is not protected by the ring buffer.
401 *
402 * The content of events may become garbage if we allow another process to
403 * consume these events concurrently:
404 * A) the page holding the consumed events may become a normal page
405 * (not a reader page) in the ring buffer, and that page will be rewritten
406 * by the event producer.
407 * B) the page holding the consumed events may become a page for splice_read,
408 * and that page will be returned to the system.
409 *
410 * These primitives allow multiple processes to access different per-cpu
411 * ring buffers concurrently.
412 *
413 * These primitives don't distinguish read-only and read-consume access.
414 * Multiple read-only accesses are also serialized.
415 */
416
417#ifdef CONFIG_SMP
418static DECLARE_RWSEM(all_cpu_access_lock);
419static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
420
421static inline void trace_access_lock(int cpu)
422{
423 if (cpu == RING_BUFFER_ALL_CPUS) {
424 /* gain it for accessing the whole ring buffer. */
425 down_write(&all_cpu_access_lock);
426 } else {
427 /* gain it for accessing a cpu ring buffer. */
428
429 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
430 down_read(&all_cpu_access_lock);
431
432 /* Secondly block other access to this @cpu ring buffer. */
433 mutex_lock(&per_cpu(cpu_access_lock, cpu));
434 }
435}
436
437static inline void trace_access_unlock(int cpu)
438{
439 if (cpu == RING_BUFFER_ALL_CPUS) {
440 up_write(&all_cpu_access_lock);
441 } else {
442 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
443 up_read(&all_cpu_access_lock);
444 }
445}
446
447static inline void trace_access_lock_init(void)
448{
449 int cpu;
450
451 for_each_possible_cpu(cpu)
452 mutex_init(&per_cpu(cpu_access_lock, cpu));
453}
454
455#else
456
457static DEFINE_MUTEX(access_lock);
458
459static inline void trace_access_lock(int cpu)
460{
461 (void)cpu;
462 mutex_lock(&access_lock);
463}
464
465static inline void trace_access_unlock(int cpu)
466{
467 (void)cpu;
468 mutex_unlock(&access_lock);
469}
470
471static inline void trace_access_lock_init(void)
472{
473}
474
475#endif
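/*
 * A minimal sketch of how a reader is expected to pair these primitives
 * (the reader body here is illustrative, not taken from this file):
 *
 *	trace_access_lock(cpu);		// cpu or RING_BUFFER_ALL_CPUS
 *	... peek at or consume events of that cpu's ring buffer ...
 *	trace_access_unlock(cpu);
 *
 * On SMP, per-cpu readers take the shared side of all_cpu_access_lock
 * plus the per-cpu mutex, so readers of different CPUs may run in
 * parallel, while a RING_BUFFER_ALL_CPUS reader excludes them all.
 */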
476
477#ifdef CONFIG_STACKTRACE
478static void __ftrace_trace_stack(struct ring_buffer *buffer,
479 unsigned long flags,
480 int skip, int pc, struct pt_regs *regs);
481static inline void ftrace_trace_stack(struct trace_array *tr,
482 struct ring_buffer *buffer,
483 unsigned long flags,
484 int skip, int pc, struct pt_regs *regs);
485
486#else
487static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
488 unsigned long flags,
489 int skip, int pc, struct pt_regs *regs)
490{
491}
492static inline void ftrace_trace_stack(struct trace_array *tr,
493 struct ring_buffer *buffer,
494 unsigned long flags,
495 int skip, int pc, struct pt_regs *regs)
496{
497}
498
499#endif
500
501static void tracer_tracing_on(struct trace_array *tr)
502{
503 if (tr->trace_buffer.buffer)
504 ring_buffer_record_on(tr->trace_buffer.buffer);
505 /*
506 * This flag is looked at when buffers haven't been allocated
507 * yet, or by some tracers (like irqsoff) that just want to
508 * know if the ring buffer has been disabled, but can handle
509 * races where it gets disabled while we still do a record.
510 * As the check is in the fast path of the tracers, it is more
511 * important to be fast than accurate.
512 */
513 tr->buffer_disabled = 0;
514 /* Make the flag seen by readers */
515 smp_wmb();
516}
517
518/**
519 * tracing_on - enable tracing buffers
520 *
521 * This function enables tracing buffers that may have been
522 * disabled with tracing_off.
523 */
524void tracing_on(void)
525{
526 tracer_tracing_on(&global_trace);
527}
528EXPORT_SYMBOL_GPL(tracing_on);
529
530/**
531 * __trace_puts - write a constant string into the trace buffer.
532 * @ip: The address of the caller
533 * @str: The constant string to write
534 * @size: The size of the string.
535 */
536int __trace_puts(unsigned long ip, const char *str, int size)
537{
538 struct ring_buffer_event *event;
539 struct ring_buffer *buffer;
540 struct print_entry *entry;
541 unsigned long irq_flags;
542 int alloc;
543 int pc;
544
545 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
546 return 0;
547
548 pc = preempt_count();
549
550 if (unlikely(tracing_selftest_running || tracing_disabled))
551 return 0;
552
553 alloc = sizeof(*entry) + size + 2; /* possible \n added */
554
555 local_save_flags(irq_flags);
556 buffer = global_trace.trace_buffer.buffer;
557 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
558 irq_flags, pc);
559 if (!event)
560 return 0;
561
562 entry = ring_buffer_event_data(event);
563 entry->ip = ip;
564
565 memcpy(&entry->buf, str, size);
566
567 /* Add a newline if necessary */
568 if (entry->buf[size - 1] != '\n') {
569 entry->buf[size] = '\n';
570 entry->buf[size + 1] = '\0';
571 } else
572 entry->buf[size] = '\0';
573
574 __buffer_unlock_commit(buffer, event);
575 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
576
577 return size;
578}
579EXPORT_SYMBOL_GPL(__trace_puts);
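/*
 * Callers normally reach this through the trace_puts() macro in the
 * tracing headers, which fills in the caller's address and the length.
 * A direct call would look roughly like this (purely illustrative):
 *
 *	__trace_puts(_THIS_IP_, "hello tracing\n", 14);
 */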
580
581/**
582 * __trace_bputs - write the pointer to a constant string into trace buffer
583 * @ip: The address of the caller
584 * @str: The constant string whose address is written into the buffer
585 */
586int __trace_bputs(unsigned long ip, const char *str)
587{
588 struct ring_buffer_event *event;
589 struct ring_buffer *buffer;
590 struct bputs_entry *entry;
591 unsigned long irq_flags;
592 int size = sizeof(struct bputs_entry);
593 int pc;
594
595 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
596 return 0;
597
598 pc = preempt_count();
599
600 if (unlikely(tracing_selftest_running || tracing_disabled))
601 return 0;
602
603 local_save_flags(irq_flags);
604 buffer = global_trace.trace_buffer.buffer;
605 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
606 irq_flags, pc);
607 if (!event)
608 return 0;
609
610 entry = ring_buffer_event_data(event);
611 entry->ip = ip;
612 entry->str = str;
613
614 __buffer_unlock_commit(buffer, event);
615 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
616
617 return 1;
618}
619EXPORT_SYMBOL_GPL(__trace_bputs);
620
621#ifdef CONFIG_TRACER_SNAPSHOT
622/**
623 * tracing_snapshot - take a snapshot of the current buffer.
624 *
625 * This causes a swap between the snapshot buffer and the current live
626 * tracing buffer. You can use this to take snapshots of the live
627 * trace when some condition is triggered, but continue to trace.
628 *
629 * Note, make sure to allocate the snapshot with either
630 * tracing_snapshot_alloc(), or by doing it manually
631 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
632 *
633 * If the snapshot buffer is not allocated, this stops tracing.
634 * Basically, it makes a permanent snapshot.
635 */
636void tracing_snapshot(void)
637{
638 struct trace_array *tr = &global_trace;
639 struct tracer *tracer = tr->current_trace;
640 unsigned long flags;
641
642 if (in_nmi()) {
643 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
644 internal_trace_puts("*** snapshot is being ignored ***\n");
645 return;
646 }
647
648 if (!tr->allocated_snapshot) {
649 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
650 internal_trace_puts("*** stopping trace here! ***\n");
651 tracing_off();
652 return;
653 }
654
655 /* Note, snapshot can not be used when the tracer uses it */
656 if (tracer->use_max_tr) {
657 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
658 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
659 return;
660 }
661
662 local_irq_save(flags);
663 update_max_tr(tr, current, smp_processor_id());
664 local_irq_restore(flags);
665}
666EXPORT_SYMBOL_GPL(tracing_snapshot);
667
668static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
669 struct trace_buffer *size_buf, int cpu_id);
670static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
671
672static int alloc_snapshot(struct trace_array *tr)
673{
674 int ret;
675
676 if (!tr->allocated_snapshot) {
677
678 /* allocate spare buffer */
679 ret = resize_buffer_duplicate_size(&tr->max_buffer,
680 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
681 if (ret < 0)
682 return ret;
683
684 tr->allocated_snapshot = true;
685 }
686
687 return 0;
688}
689
690static void free_snapshot(struct trace_array *tr)
691{
692 /*
693 * We don't free the ring buffer; instead, we resize it because
694 * the max_tr ring buffer has some state (e.g. ring->clock) and
695 * we want to preserve it.
696 */
697 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
698 set_buffer_entries(&tr->max_buffer, 1);
699 tracing_reset_online_cpus(&tr->max_buffer);
700 tr->allocated_snapshot = false;
701}
702
703/**
704 * tracing_alloc_snapshot - allocate snapshot buffer.
705 *
706 * This only allocates the snapshot buffer if it isn't already
707 * allocated - it doesn't also take a snapshot.
708 *
709 * This is meant to be used in cases where the snapshot buffer needs
710 * to be set up for events that can't sleep but need to be able to
711 * trigger a snapshot.
712 */
713int tracing_alloc_snapshot(void)
714{
715 struct trace_array *tr = &global_trace;
716 int ret;
717
718 ret = alloc_snapshot(tr);
719 WARN_ON(ret < 0);
720
721 return ret;
722}
723EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
724
725/**
726 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
727 *
728 * This is similar to tracing_snapshot(), but it will allocate the
729 * snapshot buffer if it isn't already allocated. Use this only
730 * where it is safe to sleep, as the allocation may sleep.
731 *
732 * This causes a swap between the snapshot buffer and the current live
733 * tracing buffer. You can use this to take snapshots of the live
734 * trace when some condition is triggered, but continue to trace.
735 */
736void tracing_snapshot_alloc(void)
737{
738 int ret;
739
740 ret = tracing_alloc_snapshot();
741 if (ret < 0)
742 return;
743
744 tracing_snapshot();
745}
746EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
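/*
 * A minimal usage sketch for the pair above (the surrounding context
 * and condition are hypothetical): allocate the snapshot buffer once
 * from a path that may sleep, then trigger snapshots from the hot path.
 *
 *	// during module/driver init (may sleep):
 *	tracing_alloc_snapshot();
 *
 *	// later, in the code path being debugged:
 *	if (suspicious_condition)
 *		tracing_snapshot();
 */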
747#else
748void tracing_snapshot(void)
749{
750 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
751}
752EXPORT_SYMBOL_GPL(tracing_snapshot);
753int tracing_alloc_snapshot(void)
754{
755 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
756 return -ENODEV;
757}
758EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
759void tracing_snapshot_alloc(void)
760{
761 /* Give warning */
762 tracing_snapshot();
763}
764EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
765#endif /* CONFIG_TRACER_SNAPSHOT */
766
767static void tracer_tracing_off(struct trace_array *tr)
768{
769 if (tr->trace_buffer.buffer)
770 ring_buffer_record_off(tr->trace_buffer.buffer);
771 /*
772 * This flag is looked at when buffers haven't been allocated
773 * yet, or by some tracers (like irqsoff) that just want to
774 * know if the ring buffer has been disabled, but can handle
775 * races where it gets disabled while we still do a record.
776 * As the check is in the fast path of the tracers, it is more
777 * important to be fast than accurate.
778 */
779 tr->buffer_disabled = 1;
780 /* Make the flag seen by readers */
781 smp_wmb();
782}
783
784/**
785 * tracing_off - turn off tracing buffers
786 *
787 * This function stops the tracing buffers from recording data.
788 * It does not disable any overhead the tracers themselves may
789 * be causing. This function simply causes all recording to
790 * the ring buffers to fail.
791 */
792void tracing_off(void)
793{
794 tracer_tracing_off(&global_trace);
795}
796EXPORT_SYMBOL_GPL(tracing_off);
797
798void disable_trace_on_warning(void)
799{
800 if (__disable_trace_on_warning)
801 tracing_off();
802}
803
804/**
805 * tracer_tracing_is_on - show the real state of the ring buffer
806 * @tr: the trace array whose ring buffer state is queried
807 *
808 * Shows whether the ring buffer of @tr is currently enabled or not.
809 */
810static int tracer_tracing_is_on(struct trace_array *tr)
811{
812 if (tr->trace_buffer.buffer)
813 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
814 return !tr->buffer_disabled;
815}
816
817/**
818 * tracing_is_on - show state of ring buffers enabled
819 */
820int tracing_is_on(void)
821{
822 return tracer_tracing_is_on(&global_trace);
823}
824EXPORT_SYMBOL_GPL(tracing_is_on);
825
826static int __init set_buf_size(char *str)
827{
828 unsigned long buf_size;
829
830 if (!str)
831 return 0;
832 buf_size = memparse(str, &str);
833 /* nr_entries can not be zero */
834 if (buf_size == 0)
835 return 0;
836 trace_buf_size = buf_size;
837 return 1;
838}
839__setup("trace_buf_size=", set_buf_size);
840
841static int __init set_tracing_thresh(char *str)
842{
843 unsigned long threshold;
844 int ret;
845
846 if (!str)
847 return 0;
848 ret = kstrtoul(str, 0, &threshold);
849 if (ret < 0)
850 return 0;
851 tracing_thresh = threshold * 1000;
852 return 1;
853}
854__setup("tracing_thresh=", set_tracing_thresh);
855
856unsigned long nsecs_to_usecs(unsigned long nsecs)
857{
858 return nsecs / 1000;
859}
860
861/*
862 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
863 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
864 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
865 * of strings in the order that the enums were defined.
866 */
867#undef C
868#define C(a, b) b
869
870/* These must match the bit positions in trace_iterator_flags */
871static const char *trace_options[] = {
872 TRACE_FLAGS
873 NULL
874};
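/*
 * For illustration: if TRACE_FLAGS in trace.h is built from entries such
 * as C(PRINT_PARENT, "print-parent") and C(SYM_OFFSET, "sym-offset"),
 * then with the "C(a, b) b" definition above the array expands to
 * roughly:
 *
 *	static const char *trace_options[] = {
 *		"print-parent",
 *		"sym-offset",
 *		...
 *		NULL
 *	};
 */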
875
876static struct {
877 u64 (*func)(void);
878 const char *name;
879 int in_ns; /* is this clock in nanoseconds? */
880} trace_clocks[] = {
881 { trace_clock_local, "local", 1 },
882 { trace_clock_global, "global", 1 },
883 { trace_clock_counter, "counter", 0 },
884 { trace_clock_jiffies, "uptime", 0 },
885 { trace_clock, "perf", 1 },
886 { ktime_get_mono_fast_ns, "mono", 1 },
887 { ktime_get_raw_fast_ns, "mono_raw", 1 },
888 ARCH_TRACE_CLOCKS
889};
890
891/*
892 * trace_parser_get_init - gets the buffer for trace parser
893 */
894int trace_parser_get_init(struct trace_parser *parser, int size)
895{
896 memset(parser, 0, sizeof(*parser));
897
898 parser->buffer = kmalloc(size, GFP_KERNEL);
899 if (!parser->buffer)
900 return 1;
901
902 parser->size = size;
903 return 0;
904}
905
906/*
907 * trace_parser_put - frees the buffer for trace parser
908 */
909void trace_parser_put(struct trace_parser *parser)
910{
911 kfree(parser->buffer);
912}
913
914/*
915 * trace_get_user - reads the user input string separated by space
916 * (matched by isspace(ch))
917 *
918 * For each string found, the 'struct trace_parser' is updated,
919 * and the function returns.
920 *
921 * Returns number of bytes read.
922 *
923 * See kernel/trace/trace.h for 'struct trace_parser' details.
924 */
925int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
926 size_t cnt, loff_t *ppos)
927{
928 char ch;
929 size_t read = 0;
930 ssize_t ret;
931
932 if (!*ppos)
933 trace_parser_clear(parser);
934
935 ret = get_user(ch, ubuf++);
936 if (ret)
937 goto out;
938
939 read++;
940 cnt--;
941
942 /*
943 * If the parser is not finished with the last write,
944 * continue reading the user input without skipping spaces.
945 */
946 if (!parser->cont) {
947 /* skip white space */
948 while (cnt && isspace(ch)) {
949 ret = get_user(ch, ubuf++);
950 if (ret)
951 goto out;
952 read++;
953 cnt--;
954 }
955
956 /* only spaces were written */
957 if (isspace(ch)) {
958 *ppos += read;
959 ret = read;
960 goto out;
961 }
962
963 parser->idx = 0;
964 }
965
966 /* read the non-space input */
967 while (cnt && !isspace(ch)) {
968 if (parser->idx < parser->size - 1)
969 parser->buffer[parser->idx++] = ch;
970 else {
971 ret = -EINVAL;
972 goto out;
973 }
974 ret = get_user(ch, ubuf++);
975 if (ret)
976 goto out;
977 read++;
978 cnt--;
979 }
980
981 /* We either got finished input or we have to wait for another call. */
982 if (isspace(ch)) {
983 parser->buffer[parser->idx] = 0;
984 parser->cont = false;
985 } else if (parser->idx < parser->size - 1) {
986 parser->cont = true;
987 parser->buffer[parser->idx++] = ch;
988 } else {
989 ret = -EINVAL;
990 goto out;
991 }
992
993 *ppos += read;
994 ret = read;
995
996out:
997 return ret;
998}
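/*
 * A sketch of the intended calling pattern for the parser helpers above
 * (the caller and process_token() are hypothetical):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		process_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 */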
999
1000/* TODO add a seq_buf_to_buffer() */
1001static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1002{
1003 int len;
1004
1005 if (trace_seq_used(s) <= s->seq.readpos)
1006 return -EBUSY;
1007
1008 len = trace_seq_used(s) - s->seq.readpos;
1009 if (cnt > len)
1010 cnt = len;
1011 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1012
1013 s->seq.readpos += cnt;
1014 return cnt;
1015}
1016
1017unsigned long __read_mostly tracing_thresh;
1018
1019#ifdef CONFIG_TRACER_MAX_TRACE
1020/*
1021 * Copy the new maximum trace into the separate maximum-trace
1022 * structure. (this way the maximum trace is permanently saved,
1023 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1024 */
1025static void
1026__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1027{
1028 struct trace_buffer *trace_buf = &tr->trace_buffer;
1029 struct trace_buffer *max_buf = &tr->max_buffer;
1030 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1031 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1032
1033 max_buf->cpu = cpu;
1034 max_buf->time_start = data->preempt_timestamp;
1035
1036 max_data->saved_latency = tr->max_latency;
1037 max_data->critical_start = data->critical_start;
1038 max_data->critical_end = data->critical_end;
1039
1040 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1041 max_data->pid = tsk->pid;
1042 /*
1043 * If tsk == current, then use current_uid(), as that does not use
1044 * RCU. The irq tracer can be called out of RCU scope.
1045 */
1046 if (tsk == current)
1047 max_data->uid = current_uid();
1048 else
1049 max_data->uid = task_uid(tsk);
1050
1051 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1052 max_data->policy = tsk->policy;
1053 max_data->rt_priority = tsk->rt_priority;
1054
1055 /* record this task's comm */
1056 tracing_record_cmdline(tsk);
1057}
1058
1059/**
1060 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1061 * @tr: tracer
1062 * @tsk: the task with the latency
1063 * @cpu: The cpu that initiated the trace.
1064 *
1065 * Flip the buffers between the @tr and the max_tr and record information
1066 * about which task was the cause of this latency.
1067 */
1068void
1069update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1070{
1071 struct ring_buffer *buf;
1072
1073 if (tr->stop_count)
1074 return;
1075
1076 WARN_ON_ONCE(!irqs_disabled());
1077
1078 if (!tr->allocated_snapshot) {
1079 /* Only the nop tracer should hit this when disabling */
1080 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1081 return;
1082 }
1083
1084 arch_spin_lock(&tr->max_lock);
1085
1086 buf = tr->trace_buffer.buffer;
1087 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1088 tr->max_buffer.buffer = buf;
1089
1090 __update_max_tr(tr, tsk, cpu);
1091 arch_spin_unlock(&tr->max_lock);
1092}
1093
1094/**
1095 * update_max_tr_single - only copy one trace over, and reset the rest
1096 * @tr: tracer
1097 * @tsk: task with the latency
1098 * @cpu: the cpu of the buffer to copy.
1099 *
1100 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1101 */
1102void
1103update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1104{
1105 int ret;
1106
1107 if (tr->stop_count)
1108 return;
1109
1110 WARN_ON_ONCE(!irqs_disabled());
1111 if (!tr->allocated_snapshot) {
1112 /* Only the nop tracer should hit this when disabling */
1113 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1114 return;
1115 }
1116
1117 arch_spin_lock(&tr->max_lock);
1118
1119 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1120
1121 if (ret == -EBUSY) {
1122 /*
1123 * We failed to swap the buffer due to a commit taking
1124 * place on this CPU. We fail to record, but we reset
1125 * the max trace buffer (no one writes directly to it)
1126 * and flag that it failed.
1127 */
1128 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1129 "Failed to swap buffers due to commit in progress\n");
1130 }
1131
1132 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1133
1134 __update_max_tr(tr, tsk, cpu);
1135 arch_spin_unlock(&tr->max_lock);
1136}
1137#endif /* CONFIG_TRACER_MAX_TRACE */
1138
1139static int wait_on_pipe(struct trace_iterator *iter, bool full)
1140{
1141 /* Iterators are static, they should be filled or empty */
1142 if (trace_buffer_iter(iter, iter->cpu_file))
1143 return 0;
1144
1145 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1146 full);
1147}
1148
1149#ifdef CONFIG_FTRACE_STARTUP_TEST
1150static int run_tracer_selftest(struct tracer *type)
1151{
1152 struct trace_array *tr = &global_trace;
1153 struct tracer *saved_tracer = tr->current_trace;
1154 int ret;
1155
1156 if (!type->selftest || tracing_selftest_disabled)
1157 return 0;
1158
1159 /*
1160 * Run a selftest on this tracer.
1161 * Here we reset the trace buffer, and set the current
1162 * tracer to be this tracer. The tracer can then run some
1163 * internal tracing to verify that everything is in order.
1164 * If we fail, we do not register this tracer.
1165 */
1166 tracing_reset_online_cpus(&tr->trace_buffer);
1167
1168 tr->current_trace = type;
1169
1170#ifdef CONFIG_TRACER_MAX_TRACE
1171 if (type->use_max_tr) {
1172 /* If we expanded the buffers, make sure the max is expanded too */
1173 if (ring_buffer_expanded)
1174 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1175 RING_BUFFER_ALL_CPUS);
1176 tr->allocated_snapshot = true;
1177 }
1178#endif
1179
1180 /* the test is responsible for initializing and enabling */
1181 pr_info("Testing tracer %s: ", type->name);
1182 ret = type->selftest(type, tr);
1183 /* the test is responsible for resetting too */
1184 tr->current_trace = saved_tracer;
1185 if (ret) {
1186 printk(KERN_CONT "FAILED!\n");
1187 /* Add the warning after printing 'FAILED' */
1188 WARN_ON(1);
1189 return -1;
1190 }
1191 /* Only reset on passing, to avoid touching corrupted buffers */
1192 tracing_reset_online_cpus(&tr->trace_buffer);
1193
1194#ifdef CONFIG_TRACER_MAX_TRACE
1195 if (type->use_max_tr) {
1196 tr->allocated_snapshot = false;
1197
1198 /* Shrink the max buffer again */
1199 if (ring_buffer_expanded)
1200 ring_buffer_resize(tr->max_buffer.buffer, 1,
1201 RING_BUFFER_ALL_CPUS);
1202 }
1203#endif
1204
1205 printk(KERN_CONT "PASSED\n");
1206 return 0;
1207}
1208#else
1209static inline int run_tracer_selftest(struct tracer *type)
1210{
1211 return 0;
1212}
1213#endif /* CONFIG_FTRACE_STARTUP_TEST */
1214
1215static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1216
1217static void __init apply_trace_boot_options(void);
1218
1219/**
1220 * register_tracer - register a tracer with the ftrace system.
1221 * @type: the plugin for the tracer
1222 *
1223 * Register a new plugin tracer.
1224 */
1225int __init register_tracer(struct tracer *type)
1226{
1227 struct tracer *t;
1228 int ret = 0;
1229
1230 if (!type->name) {
1231 pr_info("Tracer must have a name\n");
1232 return -1;
1233 }
1234
1235 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1236 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1237 return -1;
1238 }
1239
1240 mutex_lock(&trace_types_lock);
1241
1242 tracing_selftest_running = true;
1243
1244 for (t = trace_types; t; t = t->next) {
1245 if (strcmp(type->name, t->name) == 0) {
1246 /* already found */
1247 pr_info("Tracer %s already registered\n",
1248 type->name);
1249 ret = -1;
1250 goto out;
1251 }
1252 }
1253
1254 if (!type->set_flag)
1255 type->set_flag = &dummy_set_flag;
1256 if (!type->flags) {
1257 /* allocate a dummy tracer_flags */
1258 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1259 if (!type->flags) {
1260 ret = -ENOMEM;
1261 goto out;
1262 }
1263 type->flags->val = 0;
1264 type->flags->opts = dummy_tracer_opt;
1265 } else
1266 if (!type->flags->opts)
1267 type->flags->opts = dummy_tracer_opt;
1268
1269 /* store the tracer for __set_tracer_option */
1270 type->flags->trace = type;
1271
1272 ret = run_tracer_selftest(type);
1273 if (ret < 0)
1274 goto out;
1275
1276 type->next = trace_types;
1277 trace_types = type;
1278 add_tracer_options(&global_trace, type);
1279
1280 out:
1281 tracing_selftest_running = false;
1282 mutex_unlock(&trace_types_lock);
1283
1284 if (ret || !default_bootup_tracer)
1285 goto out_unlock;
1286
1287 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1288 goto out_unlock;
1289
1290 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1291 /* Do we want this tracer to start on bootup? */
1292 tracing_set_tracer(&global_trace, type->name);
1293 default_bootup_tracer = NULL;
1294
1295 apply_trace_boot_options();
1296
1297 /* Disable other selftests, since running this tracer will break them. */
1298 tracing_selftest_disabled = true;
1299#ifdef CONFIG_FTRACE_STARTUP_TEST
1300 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1301 type->name);
1302#endif
1303
1304 out_unlock:
1305 return ret;
1306}
1307
1308void tracing_reset(struct trace_buffer *buf, int cpu)
1309{
1310 struct ring_buffer *buffer = buf->buffer;
1311
1312 if (!buffer)
1313 return;
1314
1315 ring_buffer_record_disable(buffer);
1316
1317 /* Make sure all commits have finished */
1318 synchronize_sched();
1319 ring_buffer_reset_cpu(buffer, cpu);
1320
1321 ring_buffer_record_enable(buffer);
1322}
1323
1324void tracing_reset_online_cpus(struct trace_buffer *buf)
1325{
1326 struct ring_buffer *buffer = buf->buffer;
1327 int cpu;
1328
1329 if (!buffer)
1330 return;
1331
1332 ring_buffer_record_disable(buffer);
1333
1334 /* Make sure all commits have finished */
1335 synchronize_sched();
1336
1337 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1338
1339 for_each_online_cpu(cpu)
1340 ring_buffer_reset_cpu(buffer, cpu);
1341
1342 ring_buffer_record_enable(buffer);
1343}
1344
1345/* Must have trace_types_lock held */
1346void tracing_reset_all_online_cpus(void)
1347{
1348 struct trace_array *tr;
1349
1350 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1351 tracing_reset_online_cpus(&tr->trace_buffer);
1352#ifdef CONFIG_TRACER_MAX_TRACE
1353 tracing_reset_online_cpus(&tr->max_buffer);
1354#endif
1355 }
1356}
1357
1358#define SAVED_CMDLINES_DEFAULT 128
1359#define NO_CMDLINE_MAP UINT_MAX
1360static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1361struct saved_cmdlines_buffer {
1362 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1363 unsigned *map_cmdline_to_pid;
1364 unsigned cmdline_num;
1365 int cmdline_idx;
1366 char *saved_cmdlines;
1367};
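/*
 * The lookup path through this structure, as used by the code below, is
 * a small two-way map between pids and cmdline slots:
 *
 *	map_pid_to_cmdline[pid]             -> slot index (or NO_CMDLINE_MAP)
 *	saved_cmdlines[idx * TASK_COMM_LEN] -> the saved comm string
 *	map_cmdline_to_pid[idx]             -> pid that owns the slot, so a
 *	                                       recycled slot can invalidate
 *	                                       the stale pid's forward mapping
 */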
1368static struct saved_cmdlines_buffer *savedcmd;
1369
1370/* temporarily disable recording */
1371static atomic_t trace_record_cmdline_disabled __read_mostly;
1372
1373static inline char *get_saved_cmdlines(int idx)
1374{
1375 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1376}
1377
1378static inline void set_cmdline(int idx, const char *cmdline)
1379{
1380 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1381}
1382
1383static int allocate_cmdlines_buffer(unsigned int val,
1384 struct saved_cmdlines_buffer *s)
1385{
1386 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1387 GFP_KERNEL);
1388 if (!s->map_cmdline_to_pid)
1389 return -ENOMEM;
1390
1391 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1392 if (!s->saved_cmdlines) {
1393 kfree(s->map_cmdline_to_pid);
1394 return -ENOMEM;
1395 }
1396
1397 s->cmdline_idx = 0;
1398 s->cmdline_num = val;
1399 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1400 sizeof(s->map_pid_to_cmdline));
1401 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1402 val * sizeof(*s->map_cmdline_to_pid));
1403
1404 return 0;
1405}
1406
1407static int trace_create_savedcmd(void)
1408{
1409 int ret;
1410
1411 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1412 if (!savedcmd)
1413 return -ENOMEM;
1414
1415 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1416 if (ret < 0) {
1417 kfree(savedcmd);
1418 savedcmd = NULL;
1419 return -ENOMEM;
1420 }
1421
1422 return 0;
1423}
1424
1425int is_tracing_stopped(void)
1426{
1427 return global_trace.stop_count;
1428}
1429
1430/**
1431 * tracing_start - quick start of the tracer
1432 *
1433 * If tracing is enabled but was stopped by tracing_stop,
1434 * this will start the tracer back up.
1435 */
1436void tracing_start(void)
1437{
1438 struct ring_buffer *buffer;
1439 unsigned long flags;
1440
1441 if (tracing_disabled)
1442 return;
1443
1444 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1445 if (--global_trace.stop_count) {
1446 if (global_trace.stop_count < 0) {
1447 /* Someone screwed up their debugging */
1448 WARN_ON_ONCE(1);
1449 global_trace.stop_count = 0;
1450 }
1451 goto out;
1452 }
1453
1454 /* Prevent the buffers from switching */
1455 arch_spin_lock(&global_trace.max_lock);
1456
1457 buffer = global_trace.trace_buffer.buffer;
1458 if (buffer)
1459 ring_buffer_record_enable(buffer);
1460
1461#ifdef CONFIG_TRACER_MAX_TRACE
1462 buffer = global_trace.max_buffer.buffer;
1463 if (buffer)
1464 ring_buffer_record_enable(buffer);
1465#endif
1466
1467 arch_spin_unlock(&global_trace.max_lock);
1468
1469 out:
1470 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1471}
1472
1473static void tracing_start_tr(struct trace_array *tr)
1474{
1475 struct ring_buffer *buffer;
1476 unsigned long flags;
1477
1478 if (tracing_disabled)
1479 return;
1480
1481 /* If global, we need to also start the max tracer */
1482 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1483 return tracing_start();
1484
1485 raw_spin_lock_irqsave(&tr->start_lock, flags);
1486
1487 if (--tr->stop_count) {
1488 if (tr->stop_count < 0) {
1489 /* Someone screwed up their debugging */
1490 WARN_ON_ONCE(1);
1491 tr->stop_count = 0;
1492 }
1493 goto out;
1494 }
1495
1496 buffer = tr->trace_buffer.buffer;
1497 if (buffer)
1498 ring_buffer_record_enable(buffer);
1499
1500 out:
1501 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1502}
1503
1504/**
1505 * tracing_stop - quick stop of the tracer
1506 *
1507 * Lightweight way to stop tracing. Use in conjunction with
1508 * tracing_start.
1509 */
1510void tracing_stop(void)
1511{
1512 struct ring_buffer *buffer;
1513 unsigned long flags;
1514
1515 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1516 if (global_trace.stop_count++)
1517 goto out;
1518
1519 /* Prevent the buffers from switching */
1520 arch_spin_lock(&global_trace.max_lock);
1521
1522 buffer = global_trace.trace_buffer.buffer;
1523 if (buffer)
1524 ring_buffer_record_disable(buffer);
1525
1526#ifdef CONFIG_TRACER_MAX_TRACE
1527 buffer = global_trace.max_buffer.buffer;
1528 if (buffer)
1529 ring_buffer_record_disable(buffer);
1530#endif
1531
1532 arch_spin_unlock(&global_trace.max_lock);
1533
1534 out:
1535 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1536}
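/*
 * tracing_stop() and tracing_start() nest via stop_count, so a
 * hypothetical caller can bracket a region it does not want traced:
 *
 *	tracing_stop();
 *	... do work that should not be recorded ...
 *	tracing_start();
 */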
1537
1538static void tracing_stop_tr(struct trace_array *tr)
1539{
1540 struct ring_buffer *buffer;
1541 unsigned long flags;
1542
1543 /* If global, we need to also stop the max tracer */
1544 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1545 return tracing_stop();
1546
1547 raw_spin_lock_irqsave(&tr->start_lock, flags);
1548 if (tr->stop_count++)
1549 goto out;
1550
1551 buffer = tr->trace_buffer.buffer;
1552 if (buffer)
1553 ring_buffer_record_disable(buffer);
1554
1555 out:
1556 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1557}
1558
1559void trace_stop_cmdline_recording(void);
1560
1561static int trace_save_cmdline(struct task_struct *tsk)
1562{
1563 unsigned pid, idx;
1564
1565 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1566 return 0;
1567
1568 /*
1569 * It's not the end of the world if we don't get
1570 * the lock, but we also don't want to spin
1571 * nor do we want to disable interrupts,
1572 * so if we miss here, then better luck next time.
1573 */
1574 if (!arch_spin_trylock(&trace_cmdline_lock))
1575 return 0;
1576
1577 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1578 if (idx == NO_CMDLINE_MAP) {
1579 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1580
1581 /*
1582 * Check whether the cmdline buffer at idx has a pid
1583 * mapped. We are going to overwrite that entry so we
1584 * need to clear the map_pid_to_cmdline. Otherwise we
1585 * would read the new comm for the old pid.
1586 */
1587 pid = savedcmd->map_cmdline_to_pid[idx];
1588 if (pid != NO_CMDLINE_MAP)
1589 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1590
1591 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1592 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1593
1594 savedcmd->cmdline_idx = idx;
1595 }
1596
1597 set_cmdline(idx, tsk->comm);
1598
1599 arch_spin_unlock(&trace_cmdline_lock);
1600
1601 return 1;
1602}
1603
1604static void __trace_find_cmdline(int pid, char comm[])
1605{
1606 unsigned map;
1607
1608 if (!pid) {
1609 strcpy(comm, "<idle>");
1610 return;
1611 }
1612
1613 if (WARN_ON_ONCE(pid < 0)) {
1614 strcpy(comm, "<XXX>");
1615 return;
1616 }
1617
1618 if (pid > PID_MAX_DEFAULT) {
1619 strcpy(comm, "<...>");
1620 return;
1621 }
1622
1623 map = savedcmd->map_pid_to_cmdline[pid];
1624 if (map != NO_CMDLINE_MAP)
1625 strcpy(comm, get_saved_cmdlines(map));
1626 else
1627 strcpy(comm, "<...>");
1628}
1629
1630void trace_find_cmdline(int pid, char comm[])
1631{
1632 preempt_disable();
1633 arch_spin_lock(&trace_cmdline_lock);
1634
1635 __trace_find_cmdline(pid, comm);
1636
1637 arch_spin_unlock(&trace_cmdline_lock);
1638 preempt_enable();
1639}
1640
1641void tracing_record_cmdline(struct task_struct *tsk)
1642{
1643 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1644 return;
1645
1646 if (!__this_cpu_read(trace_cmdline_save))
1647 return;
1648
1649 if (trace_save_cmdline(tsk))
1650 __this_cpu_write(trace_cmdline_save, false);
1651}
1652
1653void
1654tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1655 int pc)
1656{
1657 struct task_struct *tsk = current;
1658
1659 entry->preempt_count = pc & 0xff;
1660 entry->pid = (tsk) ? tsk->pid : 0;
1661 entry->flags =
1662#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1663 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1664#else
1665 TRACE_FLAG_IRQS_NOSUPPORT |
1666#endif
1667 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
1668 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1669 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1670 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1671 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1672}
1673EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1674
1675struct ring_buffer_event *
1676trace_buffer_lock_reserve(struct ring_buffer *buffer,
1677 int type,
1678 unsigned long len,
1679 unsigned long flags, int pc)
1680{
1681 struct ring_buffer_event *event;
1682
1683 event = ring_buffer_lock_reserve(buffer, len);
1684 if (event != NULL) {
1685 struct trace_entry *ent = ring_buffer_event_data(event);
1686
1687 tracing_generic_entry_update(ent, flags, pc);
1688 ent->type = type;
1689 }
1690
1691 return event;
1692}
1693
1694void
1695__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1696{
1697 __this_cpu_write(trace_cmdline_save, true);
1698 ring_buffer_unlock_commit(buffer, event);
1699}
1700
1701void trace_buffer_unlock_commit(struct trace_array *tr,
1702 struct ring_buffer *buffer,
1703 struct ring_buffer_event *event,
1704 unsigned long flags, int pc)
1705{
1706 __buffer_unlock_commit(buffer, event);
1707
1708 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
1709 ftrace_trace_userstack(buffer, flags, pc);
1710}
1711EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1712
1713static struct ring_buffer *temp_buffer;
1714
1715struct ring_buffer_event *
1716trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1717 struct trace_event_file *trace_file,
1718 int type, unsigned long len,
1719 unsigned long flags, int pc)
1720{
1721 struct ring_buffer_event *entry;
1722
1723 *current_rb = trace_file->tr->trace_buffer.buffer;
1724 entry = trace_buffer_lock_reserve(*current_rb,
1725 type, len, flags, pc);
1726 /*
1727 * If tracing is off, but we have triggers enabled
1728 * we still need to look at the event data. Use the temp_buffer
1729 * to store the trace event for the trigger to use. It's recursion
1730 * safe and will not be recorded anywhere.
1731 */
1732 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
1733 *current_rb = temp_buffer;
1734 entry = trace_buffer_lock_reserve(*current_rb,
1735 type, len, flags, pc);
1736 }
1737 return entry;
1738}
1739EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1740
1741struct ring_buffer_event *
1742trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1743 int type, unsigned long len,
1744 unsigned long flags, int pc)
1745{
1746 *current_rb = global_trace.trace_buffer.buffer;
1747 return trace_buffer_lock_reserve(*current_rb,
1748 type, len, flags, pc);
1749}
1750EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1751
1752void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1753 struct ring_buffer *buffer,
1754 struct ring_buffer_event *event,
1755 unsigned long flags, int pc,
1756 struct pt_regs *regs)
1757{
1758 __buffer_unlock_commit(buffer, event);
1759
1760 ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
1761 ftrace_trace_userstack(buffer, flags, pc);
1762}
1763EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1764
1765void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1766 struct ring_buffer_event *event)
1767{
1768 ring_buffer_discard_commit(buffer, event);
1769}
1770EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1771
1772void
1773trace_function(struct trace_array *tr,
1774 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1775 int pc)
1776{
1777 struct trace_event_call *call = &event_function;
1778 struct ring_buffer *buffer = tr->trace_buffer.buffer;
1779 struct ring_buffer_event *event;
1780 struct ftrace_entry *entry;
1781
1782 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1783 flags, pc);
1784 if (!event)
1785 return;
1786 entry = ring_buffer_event_data(event);
1787 entry->ip = ip;
1788 entry->parent_ip = parent_ip;
1789
1790 if (!call_filter_check_discard(call, entry, buffer, event))
1791 __buffer_unlock_commit(buffer, event);
1792}
1793
1794#ifdef CONFIG_STACKTRACE
1795
1796#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1797struct ftrace_stack {
1798 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1799};
1800
1801static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1802static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1803
1804static void __ftrace_trace_stack(struct ring_buffer *buffer,
1805 unsigned long flags,
1806 int skip, int pc, struct pt_regs *regs)
1807{
1808 struct trace_event_call *call = &event_kernel_stack;
1809 struct ring_buffer_event *event;
1810 struct stack_entry *entry;
1811 struct stack_trace trace;
1812 int use_stack;
1813 int size = FTRACE_STACK_ENTRIES;
1814
1815 trace.nr_entries = 0;
1816 trace.skip = skip;
1817
1818 /*
1819 * Since events can happen in NMIs there's no safe way to
1820 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1821 * or NMI comes in, it will just have to use the default
1822 * FTRACE_STACK_SIZE.
1823 */
1824 preempt_disable_notrace();
1825
1826 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1827 /*
1828 * We don't need any atomic variables, just a barrier.
1829 * If an interrupt comes in, we don't care, because it would
1830 * have exited and put the counter back to what we want.
1831 * We just need a barrier to keep gcc from moving things
1832 * around.
1833 */
1834 barrier();
1835 if (use_stack == 1) {
1836 trace.entries = this_cpu_ptr(ftrace_stack.calls);
1837 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1838
1839 if (regs)
1840 save_stack_trace_regs(regs, &trace);
1841 else
1842 save_stack_trace(&trace);
1843
1844 if (trace.nr_entries > size)
1845 size = trace.nr_entries;
1846 } else
1847 /* From now on, use_stack is a boolean */
1848 use_stack = 0;
1849
1850 size *= sizeof(unsigned long);
1851
1852 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1853 sizeof(*entry) + size, flags, pc);
1854 if (!event)
1855 goto out;
1856 entry = ring_buffer_event_data(event);
1857
1858 memset(&entry->caller, 0, size);
1859
1860 if (use_stack)
1861 memcpy(&entry->caller, trace.entries,
1862 trace.nr_entries * sizeof(unsigned long));
1863 else {
1864 trace.max_entries = FTRACE_STACK_ENTRIES;
1865 trace.entries = entry->caller;
1866 if (regs)
1867 save_stack_trace_regs(regs, &trace);
1868 else
1869 save_stack_trace(&trace);
1870 }
1871
1872 entry->size = trace.nr_entries;
1873
1874 if (!call_filter_check_discard(call, entry, buffer, event))
1875 __buffer_unlock_commit(buffer, event);
1876
1877 out:
1878 /* Again, don't let gcc optimize things here */
1879 barrier();
1880 __this_cpu_dec(ftrace_stack_reserve);
1881 preempt_enable_notrace();
1882
1883}
1884
1885static inline void ftrace_trace_stack(struct trace_array *tr,
1886 struct ring_buffer *buffer,
1887 unsigned long flags,
1888 int skip, int pc, struct pt_regs *regs)
1889{
1890 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
1891 return;
1892
1893 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1894}
1895
1896void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1897 int pc)
1898{
1899 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1900}
1901
1902/**
1903 * trace_dump_stack - record a stack back trace in the trace buffer
1904 * @skip: Number of functions to skip (helper handlers)
1905 */
1906void trace_dump_stack(int skip)
1907{
1908 unsigned long flags;
1909
1910 if (tracing_disabled || tracing_selftest_running)
1911 return;
1912
1913 local_save_flags(flags);
1914
1915 /*
1916 * Skip 3 more; that seems to get us to the caller of
1917 * this function.
1918 */
1919 skip += 3;
1920 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1921 flags, skip, preempt_count(), NULL);
1922}
1923
1924static DEFINE_PER_CPU(int, user_stack_count);
1925
1926void
1927ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1928{
1929 struct trace_event_call *call = &event_user_stack;
1930 struct ring_buffer_event *event;
1931 struct userstack_entry *entry;
1932 struct stack_trace trace;
1933
1934 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
1935 return;
1936
1937 /*
1938 * NMIs can not handle page faults, even with fixups.
1939 * Saving the user stack can (and often does) fault.
1940 */
1941 if (unlikely(in_nmi()))
1942 return;
1943
1944 /*
1945 * prevent recursion, since the user stack tracing may
1946 * trigger other kernel events.
1947 */
1948 preempt_disable();
1949 if (__this_cpu_read(user_stack_count))
1950 goto out;
1951
1952 __this_cpu_inc(user_stack_count);
1953
1954 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1955 sizeof(*entry), flags, pc);
1956 if (!event)
1957 goto out_drop_count;
1958 entry = ring_buffer_event_data(event);
1959
1960 entry->tgid = current->tgid;
1961 memset(&entry->caller, 0, sizeof(entry->caller));
1962
1963 trace.nr_entries = 0;
1964 trace.max_entries = FTRACE_STACK_ENTRIES;
1965 trace.skip = 0;
1966 trace.entries = entry->caller;
1967
1968 save_stack_trace_user(&trace);
1969 if (!call_filter_check_discard(call, entry, buffer, event))
1970 __buffer_unlock_commit(buffer, event);
1971
1972 out_drop_count:
1973 __this_cpu_dec(user_stack_count);
1974 out:
1975 preempt_enable();
1976}
1977
1978#ifdef UNUSED
1979static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1980{
1981 ftrace_trace_userstack(tr, flags, preempt_count());
1982}
1983#endif /* UNUSED */
1984
1985#endif /* CONFIG_STACKTRACE */
1986
1987/* created for use with alloc_percpu */
1988struct trace_buffer_struct {
1989 char buffer[TRACE_BUF_SIZE];
1990};
1991
1992static struct trace_buffer_struct *trace_percpu_buffer;
1993static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1994static struct trace_buffer_struct *trace_percpu_irq_buffer;
1995static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1996
1997/*
1998 * The buffer used depends on the context. There is a per cpu
1999 * buffer for normal context, softirq context, hard irq context and
2000 * for NMI context. This allows for lockless recording.
2001 *
2002 * Note, if the buffers failed to be allocated, then this returns NULL.
2003 */
2004static char *get_trace_buf(void)
2005{
2006 struct trace_buffer_struct *percpu_buffer;
2007
2008 /*
2009 * If we have allocated per cpu buffers, then we do not
2010 * need to do any locking.
2011 */
2012 if (in_nmi())
2013 percpu_buffer = trace_percpu_nmi_buffer;
2014 else if (in_irq())
2015 percpu_buffer = trace_percpu_irq_buffer;
2016 else if (in_softirq())
2017 percpu_buffer = trace_percpu_sirq_buffer;
2018 else
2019 percpu_buffer = trace_percpu_buffer;
2020
2021 if (!percpu_buffer)
2022 return NULL;
2023
2024 return this_cpu_ptr(&percpu_buffer->buffer[0]);
2025}
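
/*
 * A minimal sketch of how the callers below use this buffer (with
 * preemption already disabled, as they all do):
 *
 *	char *tbuffer = get_trace_buf();
 *	if (!tbuffer)
 *		return 0;
 *	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
 *	... reserve a ring buffer event and copy "len" bytes into it ...
 *
 * Because the buffer is picked by context (normal/softirq/irq/NMI), a
 * trace_printk() from an interrupt handler cannot clobber a buffer that
 * was interrupted mid-format in process context.
 */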
2026
2027static int alloc_percpu_trace_buffer(void)
2028{
2029 struct trace_buffer_struct *buffers;
2030 struct trace_buffer_struct *sirq_buffers;
2031 struct trace_buffer_struct *irq_buffers;
2032 struct trace_buffer_struct *nmi_buffers;
2033
2034 buffers = alloc_percpu(struct trace_buffer_struct);
2035 if (!buffers)
2036 goto err_warn;
2037
2038 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2039 if (!sirq_buffers)
2040 goto err_sirq;
2041
2042 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2043 if (!irq_buffers)
2044 goto err_irq;
2045
2046 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2047 if (!nmi_buffers)
2048 goto err_nmi;
2049
2050 trace_percpu_buffer = buffers;
2051 trace_percpu_sirq_buffer = sirq_buffers;
2052 trace_percpu_irq_buffer = irq_buffers;
2053 trace_percpu_nmi_buffer = nmi_buffers;
2054
2055 return 0;
2056
2057 err_nmi:
2058 free_percpu(irq_buffers);
2059 err_irq:
2060 free_percpu(sirq_buffers);
2061 err_sirq:
2062 free_percpu(buffers);
2063 err_warn:
2064 WARN(1, "Could not allocate percpu trace_printk buffer");
2065 return -ENOMEM;
2066}
2067
2068static int buffers_allocated;
2069
2070void trace_printk_init_buffers(void)
2071{
2072 if (buffers_allocated)
2073 return;
2074
2075 if (alloc_percpu_trace_buffer())
2076 return;
2077
2078 /* trace_printk() is for debug use only. Don't use it in production. */
2079
2080 pr_warn("\n");
2081 pr_warn("**********************************************************\n");
2082 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2083 pr_warn("** **\n");
2084 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2085 pr_warn("** **\n");
2086 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2087 pr_warn("** unsafe for production use. **\n");
2088 pr_warn("** **\n");
2089 pr_warn("** If you see this message and you are not debugging **\n");
2090 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2091 pr_warn("** **\n");
2092 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2093 pr_warn("**********************************************************\n");
2094
2095 /* Expand the buffers to set size */
2096 tracing_update_buffers();
2097
2098 buffers_allocated = 1;
2099
	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If global_trace.trace_buffer.buffer is already
	 * allocated at this point, then this was called by module code.
	 */
2106 if (global_trace.trace_buffer.buffer)
2107 tracing_start_cmdline_record();
2108}
2109
2110void trace_printk_start_comm(void)
2111{
2112 /* Start tracing comms if trace printk is set */
2113 if (!buffers_allocated)
2114 return;
2115 tracing_start_cmdline_record();
2116}
2117
2118static void trace_printk_start_stop_comm(int enabled)
2119{
2120 if (!buffers_allocated)
2121 return;
2122
2123 if (enabled)
2124 tracing_start_cmdline_record();
2125 else
2126 tracing_stop_cmdline_record();
2127}
2128
/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip: The address of the caller
 * @fmt: The format string to write in the buffer
 * @args: Arguments for @fmt
 */
2133int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2134{
2135 struct trace_event_call *call = &event_bprint;
2136 struct ring_buffer_event *event;
2137 struct ring_buffer *buffer;
2138 struct trace_array *tr = &global_trace;
2139 struct bprint_entry *entry;
2140 unsigned long flags;
2141 char *tbuffer;
2142 int len = 0, size, pc;
2143
2144 if (unlikely(tracing_selftest_running || tracing_disabled))
2145 return 0;
2146
2147 /* Don't pollute graph traces with trace_vprintk internals */
2148 pause_graph_tracing();
2149
2150 pc = preempt_count();
2151 preempt_disable_notrace();
2152
2153 tbuffer = get_trace_buf();
2154 if (!tbuffer) {
2155 len = 0;
2156 goto out;
2157 }
2158
2159 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2160
2161 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2162 goto out;
2163
2164 local_save_flags(flags);
2165 size = sizeof(*entry) + sizeof(u32) * len;
2166 buffer = tr->trace_buffer.buffer;
2167 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2168 flags, pc);
2169 if (!event)
2170 goto out;
2171 entry = ring_buffer_event_data(event);
2172 entry->ip = ip;
2173 entry->fmt = fmt;
2174
2175 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2176 if (!call_filter_check_discard(call, entry, buffer, event)) {
2177 __buffer_unlock_commit(buffer, event);
2178 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2179 }
2180
2181out:
2182 preempt_enable_notrace();
2183 unpause_graph_tracing();
2184
2185 return len;
2186}
2187EXPORT_SYMBOL_GPL(trace_vbprintk);
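
/*
 * trace_vbprintk() is normally reached through trace_printk(): only the
 * format pointer and the binary arguments are stored in the ring buffer,
 * and the string is rendered at read time. A usage sketch (the call site
 * is hypothetical):
 *
 *	trace_printk("handled vector %d on cpu %d\n",
 *		     vec, smp_processor_id());
 *
 * The formatted text shows up in the "trace" file, not in the printk log.
 */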
2188
2189static int
2190__trace_array_vprintk(struct ring_buffer *buffer,
2191 unsigned long ip, const char *fmt, va_list args)
2192{
2193 struct trace_event_call *call = &event_print;
2194 struct ring_buffer_event *event;
2195 int len = 0, size, pc;
2196 struct print_entry *entry;
2197 unsigned long flags;
2198 char *tbuffer;
2199
2200 if (tracing_disabled || tracing_selftest_running)
2201 return 0;
2202
2203 /* Don't pollute graph traces with trace_vprintk internals */
2204 pause_graph_tracing();
2205
2206 pc = preempt_count();
2207 preempt_disable_notrace();
2208
	tbuffer = get_trace_buf();
2211 if (!tbuffer) {
2212 len = 0;
2213 goto out;
2214 }
2215
2216 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2217
2218 local_save_flags(flags);
2219 size = sizeof(*entry) + len + 1;
2220 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2221 flags, pc);
2222 if (!event)
2223 goto out;
2224 entry = ring_buffer_event_data(event);
2225 entry->ip = ip;
2226
2227 memcpy(&entry->buf, tbuffer, len + 1);
2228 if (!call_filter_check_discard(call, entry, buffer, event)) {
2229 __buffer_unlock_commit(buffer, event);
2230 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2231 }
2232 out:
2233 preempt_enable_notrace();
2234 unpause_graph_tracing();
2235
2236 return len;
2237}
2238
2239int trace_array_vprintk(struct trace_array *tr,
2240 unsigned long ip, const char *fmt, va_list args)
2241{
2242 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2243}
2244
2245int trace_array_printk(struct trace_array *tr,
2246 unsigned long ip, const char *fmt, ...)
2247{
2248 int ret;
2249 va_list ap;
2250
2251 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2252 return 0;
2253
2254 va_start(ap, fmt);
2255 ret = trace_array_vprintk(tr, ip, fmt, ap);
2256 va_end(ap);
2257 return ret;
2258}
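
/*
 * A sketch of writing into a specific trace instance instead of the
 * top-level buffer (the "tr" pointer would come from whatever code
 * created the instance; the message is hypothetical):
 *
 *	trace_array_printk(tr, _THIS_IP_, "reset done, status=%d\n", status);
 *
 * Note that this is gated on the top-level "printk" trace option.
 */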
2259
2260int trace_array_printk_buf(struct ring_buffer *buffer,
2261 unsigned long ip, const char *fmt, ...)
2262{
2263 int ret;
2264 va_list ap;
2265
2266 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2267 return 0;
2268
2269 va_start(ap, fmt);
2270 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2271 va_end(ap);
2272 return ret;
2273}
2274
2275int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2276{
2277 return trace_array_vprintk(&global_trace, ip, fmt, args);
2278}
2279EXPORT_SYMBOL_GPL(trace_vprintk);
2280
2281static void trace_iterator_increment(struct trace_iterator *iter)
2282{
2283 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2284
2285 iter->idx++;
2286 if (buf_iter)
2287 ring_buffer_read(buf_iter, NULL);
2288}
2289
2290static struct trace_entry *
2291peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2292 unsigned long *lost_events)
2293{
2294 struct ring_buffer_event *event;
2295 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2296
2297 if (buf_iter)
2298 event = ring_buffer_iter_peek(buf_iter, ts);
2299 else
2300 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2301 lost_events);
2302
2303 if (event) {
2304 iter->ent_size = ring_buffer_event_length(event);
2305 return ring_buffer_event_data(event);
2306 }
2307 iter->ent_size = 0;
2308 return NULL;
2309}
2310
2311static struct trace_entry *
2312__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2313 unsigned long *missing_events, u64 *ent_ts)
2314{
2315 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2316 struct trace_entry *ent, *next = NULL;
2317 unsigned long lost_events = 0, next_lost = 0;
2318 int cpu_file = iter->cpu_file;
2319 u64 next_ts = 0, ts;
2320 int next_cpu = -1;
2321 int next_size = 0;
2322 int cpu;
2323
	/*
	 * If we are in a per_cpu trace file, don't bother iterating
	 * over all CPUs; peek at that one directly.
	 */
2328 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2329 if (ring_buffer_empty_cpu(buffer, cpu_file))
2330 return NULL;
2331 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2332 if (ent_cpu)
2333 *ent_cpu = cpu_file;
2334
2335 return ent;
2336 }
2337
2338 for_each_tracing_cpu(cpu) {
2339
2340 if (ring_buffer_empty_cpu(buffer, cpu))
2341 continue;
2342
2343 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2344
2345 /*
2346 * Pick the entry with the smallest timestamp:
2347 */
2348 if (ent && (!next || ts < next_ts)) {
2349 next = ent;
2350 next_cpu = cpu;
2351 next_ts = ts;
2352 next_lost = lost_events;
2353 next_size = iter->ent_size;
2354 }
2355 }
2356
2357 iter->ent_size = next_size;
2358
2359 if (ent_cpu)
2360 *ent_cpu = next_cpu;
2361
2362 if (ent_ts)
2363 *ent_ts = next_ts;
2364
2365 if (missing_events)
2366 *missing_events = next_lost;
2367
2368 return next;
2369}
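
/*
 * For example, if the per-cpu buffers currently hold (cpu0: ts=105),
 * (cpu1: ts=98) and (cpu2: empty), __find_next_entry() returns the cpu1
 * entry with *ent_cpu = 1 and *ent_ts = 98; the cpu0 entry is picked up
 * by a later call. This is how the per-cpu buffers are merged into a
 * single time-ordered stream for the "trace" file.
 */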
2370
2371/* Find the next real entry, without updating the iterator itself */
2372struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2373 int *ent_cpu, u64 *ent_ts)
2374{
2375 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2376}
2377
2378/* Find the next real entry, and increment the iterator to the next entry */
2379void *trace_find_next_entry_inc(struct trace_iterator *iter)
2380{
2381 iter->ent = __find_next_entry(iter, &iter->cpu,
2382 &iter->lost_events, &iter->ts);
2383
2384 if (iter->ent)
2385 trace_iterator_increment(iter);
2386
2387 return iter->ent ? iter : NULL;
2388}
2389
2390static void trace_consume(struct trace_iterator *iter)
2391{
2392 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2393 &iter->lost_events);
2394}
2395
2396static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2397{
2398 struct trace_iterator *iter = m->private;
2399 int i = (int)*pos;
2400 void *ent;
2401
2402 WARN_ON_ONCE(iter->leftover);
2403
2404 (*pos)++;
2405
2406 /* can't go backwards */
2407 if (iter->idx > i)
2408 return NULL;
2409
2410 if (iter->idx < 0)
2411 ent = trace_find_next_entry_inc(iter);
2412 else
2413 ent = iter;
2414
2415 while (ent && iter->idx < i)
2416 ent = trace_find_next_entry_inc(iter);
2417
2418 iter->pos = *pos;
2419
2420 return ent;
2421}
2422
2423void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2424{
2425 struct ring_buffer_event *event;
2426 struct ring_buffer_iter *buf_iter;
2427 unsigned long entries = 0;
2428 u64 ts;
2429
2430 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2431
2432 buf_iter = trace_buffer_iter(iter, cpu);
2433 if (!buf_iter)
2434 return;
2435
2436 ring_buffer_iter_reset(buf_iter);
2437
	/*
	 * With the max latency tracers, a reset may never have taken
	 * place on a cpu. This is evident by the timestamp being
	 * before the start of the buffer; skip those entries.
	 */
2443 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2444 if (ts >= iter->trace_buffer->time_start)
2445 break;
2446 entries++;
2447 ring_buffer_read(buf_iter, NULL);
2448 }
2449
2450 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2451}
2452
/*
 * The current tracer is copied to avoid taking a global lock
 * all around.
 */
2457static void *s_start(struct seq_file *m, loff_t *pos)
2458{
2459 struct trace_iterator *iter = m->private;
2460 struct trace_array *tr = iter->tr;
2461 int cpu_file = iter->cpu_file;
2462 void *p = NULL;
2463 loff_t l = 0;
2464 int cpu;
2465
	/*
	 * Copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, so the name pointer
	 * may be compared directly instead of using strcmp(), as
	 * iter->trace->name will point to the same string as
	 * current_trace->name.
	 */
2472 mutex_lock(&trace_types_lock);
2473 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2474 *iter->trace = *tr->current_trace;
2475 mutex_unlock(&trace_types_lock);
2476
2477#ifdef CONFIG_TRACER_MAX_TRACE
2478 if (iter->snapshot && iter->trace->use_max_tr)
2479 return ERR_PTR(-EBUSY);
2480#endif
2481
2482 if (!iter->snapshot)
2483 atomic_inc(&trace_record_cmdline_disabled);
2484
2485 if (*pos != iter->pos) {
2486 iter->ent = NULL;
2487 iter->cpu = 0;
2488 iter->idx = -1;
2489
2490 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2491 for_each_tracing_cpu(cpu)
2492 tracing_iter_reset(iter, cpu);
2493 } else
2494 tracing_iter_reset(iter, cpu_file);
2495
2496 iter->leftover = 0;
2497 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2498 ;
2499
2500 } else {
2501 /*
2502 * If we overflowed the seq_file before, then we want
2503 * to just reuse the trace_seq buffer again.
2504 */
2505 if (iter->leftover)
2506 p = iter;
2507 else {
2508 l = *pos - 1;
2509 p = s_next(m, p, &l);
2510 }
2511 }
2512
2513 trace_event_read_lock();
2514 trace_access_lock(cpu_file);
2515 return p;
2516}
2517
2518static void s_stop(struct seq_file *m, void *p)
2519{
2520 struct trace_iterator *iter = m->private;
2521
2522#ifdef CONFIG_TRACER_MAX_TRACE
2523 if (iter->snapshot && iter->trace->use_max_tr)
2524 return;
2525#endif
2526
2527 if (!iter->snapshot)
2528 atomic_dec(&trace_record_cmdline_disabled);
2529
2530 trace_access_unlock(iter->cpu_file);
2531 trace_event_read_unlock();
2532}
2533
2534static void
2535get_total_entries(struct trace_buffer *buf,
2536 unsigned long *total, unsigned long *entries)
2537{
2538 unsigned long count;
2539 int cpu;
2540
2541 *total = 0;
2542 *entries = 0;
2543
2544 for_each_tracing_cpu(cpu) {
2545 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2546 /*
2547 * If this buffer has skipped entries, then we hold all
2548 * entries for the trace and we need to ignore the
2549 * ones before the time stamp.
2550 */
2551 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2552 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2553 /* total is the same as the entries */
2554 *total += count;
2555 } else
2556 *total += count +
2557 ring_buffer_overrun_cpu(buf->buffer, cpu);
2558 *entries += count;
2559 }
2560}
2561
2562static void print_lat_help_header(struct seq_file *m)
2563{
2564 seq_puts(m, "# _------=> CPU# \n"
2565 "# / _-----=> irqs-off \n"
2566 "# | / _----=> need-resched \n"
2567 "# || / _---=> hardirq/softirq \n"
2568 "# ||| / _--=> preempt-depth \n"
2569 "# |||| / delay \n"
2570 "# cmd pid ||||| time | caller \n"
2571 "# \\ / ||||| \\ | / \n");
2572}
2573
2574static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2575{
2576 unsigned long total;
2577 unsigned long entries;
2578
2579 get_total_entries(buf, &total, &entries);
2580 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2581 entries, total, num_online_cpus());
2582 seq_puts(m, "#\n");
2583}
2584
2585static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2586{
2587 print_event_info(buf, m);
2588 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2589 "# | | | | |\n");
2590}
2591
2592static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2593{
2594 print_event_info(buf, m);
2595 seq_puts(m, "# _-----=> irqs-off\n"
2596 "# / _----=> need-resched\n"
2597 "# | / _---=> hardirq/softirq\n"
2598 "# || / _--=> preempt-depth\n"
2599 "# ||| / delay\n"
2600 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2601 "# | | | |||| | |\n");
2602}
2603
2604void
2605print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2606{
2607 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
2608 struct trace_buffer *buf = iter->trace_buffer;
2609 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2610 struct tracer *type = iter->trace;
2611 unsigned long entries;
2612 unsigned long total;
	const char *name = type->name;
2616
2617 get_total_entries(buf, &total, &entries);
2618
2619 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2620 name, UTS_RELEASE);
2621 seq_puts(m, "# -----------------------------------"
2622 "---------------------------------\n");
2623 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2624 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2625 nsecs_to_usecs(data->saved_latency),
2626 entries,
2627 total,
2628 buf->cpu,
2629#if defined(CONFIG_PREEMPT_NONE)
2630 "server",
2631#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2632 "desktop",
2633#elif defined(CONFIG_PREEMPT)
2634 "preempt",
2635#else
2636 "unknown",
2637#endif
2638 /* These are reserved for later use */
2639 0, 0, 0, 0);
2640#ifdef CONFIG_SMP
2641 seq_printf(m, " #P:%d)\n", num_online_cpus());
2642#else
2643 seq_puts(m, ")\n");
2644#endif
2645 seq_puts(m, "# -----------------\n");
2646 seq_printf(m, "# | task: %.16s-%d "
2647 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2648 data->comm, data->pid,
2649 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2650 data->policy, data->rt_priority);
2651 seq_puts(m, "# -----------------\n");
2652
2653 if (data->critical_start) {
2654 seq_puts(m, "# => started at: ");
2655 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2656 trace_print_seq(m, &iter->seq);
2657 seq_puts(m, "\n# => ended at: ");
2658 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2659 trace_print_seq(m, &iter->seq);
2660 seq_puts(m, "\n#\n");
2661 }
2662
2663 seq_puts(m, "#\n");
2664}
2665
2666static void test_cpu_buff_start(struct trace_iterator *iter)
2667{
2668 struct trace_seq *s = &iter->seq;
2669 struct trace_array *tr = iter->tr;
2670
2671 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
2672 return;
2673
2674 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2675 return;
2676
2677 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
2678 return;
2679
2680 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2681 return;
2682
2683 if (iter->started)
2684 cpumask_set_cpu(iter->cpu, iter->started);
2685
2686 /* Don't print started cpu buffer for the first entry of the trace */
2687 if (iter->idx > 1)
2688 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2689 iter->cpu);
2690}
2691
2692static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2693{
2694 struct trace_array *tr = iter->tr;
2695 struct trace_seq *s = &iter->seq;
2696 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
2697 struct trace_entry *entry;
2698 struct trace_event *event;
2699
2700 entry = iter->ent;
2701
2702 test_cpu_buff_start(iter);
2703
2704 event = ftrace_find_event(entry->type);
2705
2706 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2707 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2708 trace_print_lat_context(iter);
2709 else
2710 trace_print_context(iter);
2711 }
2712
2713 if (trace_seq_has_overflowed(s))
2714 return TRACE_TYPE_PARTIAL_LINE;
2715
2716 if (event)
2717 return event->funcs->trace(iter, sym_flags, event);
2718
2719 trace_seq_printf(s, "Unknown type %d\n", entry->type);
2720
2721 return trace_handle_return(s);
2722}
2723
2724static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2725{
2726 struct trace_array *tr = iter->tr;
2727 struct trace_seq *s = &iter->seq;
2728 struct trace_entry *entry;
2729 struct trace_event *event;
2730
2731 entry = iter->ent;
2732
2733 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
2734 trace_seq_printf(s, "%d %d %llu ",
2735 entry->pid, iter->cpu, iter->ts);
2736
2737 if (trace_seq_has_overflowed(s))
2738 return TRACE_TYPE_PARTIAL_LINE;
2739
2740 event = ftrace_find_event(entry->type);
2741 if (event)
2742 return event->funcs->raw(iter, 0, event);
2743
2744 trace_seq_printf(s, "%d ?\n", entry->type);
2745
2746 return trace_handle_return(s);
2747}
2748
2749static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2750{
2751 struct trace_array *tr = iter->tr;
2752 struct trace_seq *s = &iter->seq;
2753 unsigned char newline = '\n';
2754 struct trace_entry *entry;
2755 struct trace_event *event;
2756
2757 entry = iter->ent;
2758
2759 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2760 SEQ_PUT_HEX_FIELD(s, entry->pid);
2761 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2762 SEQ_PUT_HEX_FIELD(s, iter->ts);
2763 if (trace_seq_has_overflowed(s))
2764 return TRACE_TYPE_PARTIAL_LINE;
2765 }
2766
2767 event = ftrace_find_event(entry->type);
2768 if (event) {
2769 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2770 if (ret != TRACE_TYPE_HANDLED)
2771 return ret;
2772 }
2773
2774 SEQ_PUT_FIELD(s, newline);
2775
2776 return trace_handle_return(s);
2777}
2778
2779static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2780{
2781 struct trace_array *tr = iter->tr;
2782 struct trace_seq *s = &iter->seq;
2783 struct trace_entry *entry;
2784 struct trace_event *event;
2785
2786 entry = iter->ent;
2787
2788 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2789 SEQ_PUT_FIELD(s, entry->pid);
2790 SEQ_PUT_FIELD(s, iter->cpu);
2791 SEQ_PUT_FIELD(s, iter->ts);
2792 if (trace_seq_has_overflowed(s))
2793 return TRACE_TYPE_PARTIAL_LINE;
2794 }
2795
2796 event = ftrace_find_event(entry->type);
2797 return event ? event->funcs->binary(iter, 0, event) :
2798 TRACE_TYPE_HANDLED;
2799}
2800
2801int trace_empty(struct trace_iterator *iter)
2802{
2803 struct ring_buffer_iter *buf_iter;
2804 int cpu;
2805
2806 /* If we are looking at one CPU buffer, only check that one */
2807 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2808 cpu = iter->cpu_file;
2809 buf_iter = trace_buffer_iter(iter, cpu);
2810 if (buf_iter) {
2811 if (!ring_buffer_iter_empty(buf_iter))
2812 return 0;
2813 } else {
2814 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2815 return 0;
2816 }
2817 return 1;
2818 }
2819
2820 for_each_tracing_cpu(cpu) {
2821 buf_iter = trace_buffer_iter(iter, cpu);
2822 if (buf_iter) {
2823 if (!ring_buffer_iter_empty(buf_iter))
2824 return 0;
2825 } else {
2826 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2827 return 0;
2828 }
2829 }
2830
2831 return 1;
2832}
2833
2834/* Called with trace_event_read_lock() held. */
2835enum print_line_t print_trace_line(struct trace_iterator *iter)
2836{
2837 struct trace_array *tr = iter->tr;
2838 unsigned long trace_flags = tr->trace_flags;
2839 enum print_line_t ret;
2840
2841 if (iter->lost_events) {
2842 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2843 iter->cpu, iter->lost_events);
2844 if (trace_seq_has_overflowed(&iter->seq))
2845 return TRACE_TYPE_PARTIAL_LINE;
2846 }
2847
2848 if (iter->trace && iter->trace->print_line) {
2849 ret = iter->trace->print_line(iter);
2850 if (ret != TRACE_TYPE_UNHANDLED)
2851 return ret;
2852 }
2853
2854 if (iter->ent->type == TRACE_BPUTS &&
2855 trace_flags & TRACE_ITER_PRINTK &&
2856 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2857 return trace_print_bputs_msg_only(iter);
2858
2859 if (iter->ent->type == TRACE_BPRINT &&
2860 trace_flags & TRACE_ITER_PRINTK &&
2861 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2862 return trace_print_bprintk_msg_only(iter);
2863
2864 if (iter->ent->type == TRACE_PRINT &&
2865 trace_flags & TRACE_ITER_PRINTK &&
2866 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2867 return trace_print_printk_msg_only(iter);
2868
2869 if (trace_flags & TRACE_ITER_BIN)
2870 return print_bin_fmt(iter);
2871
2872 if (trace_flags & TRACE_ITER_HEX)
2873 return print_hex_fmt(iter);
2874
2875 if (trace_flags & TRACE_ITER_RAW)
2876 return print_raw_fmt(iter);
2877
2878 return print_trace_fmt(iter);
2879}
2880
2881void trace_latency_header(struct seq_file *m)
2882{
2883 struct trace_iterator *iter = m->private;
2884 struct trace_array *tr = iter->tr;
2885
2886 /* print nothing if the buffers are empty */
2887 if (trace_empty(iter))
2888 return;
2889
2890 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2891 print_trace_header(m, iter);
2892
2893 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
2894 print_lat_help_header(m);
2895}
2896
2897void trace_default_header(struct seq_file *m)
2898{
2899 struct trace_iterator *iter = m->private;
2900 struct trace_array *tr = iter->tr;
2901 unsigned long trace_flags = tr->trace_flags;
2902
2903 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2904 return;
2905
2906 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2907 /* print nothing if the buffers are empty */
2908 if (trace_empty(iter))
2909 return;
2910 print_trace_header(m, iter);
2911 if (!(trace_flags & TRACE_ITER_VERBOSE))
2912 print_lat_help_header(m);
2913 } else {
2914 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2915 if (trace_flags & TRACE_ITER_IRQ_INFO)
2916 print_func_help_header_irq(iter->trace_buffer, m);
2917 else
2918 print_func_help_header(iter->trace_buffer, m);
2919 }
2920 }
2921}
2922
2923static void test_ftrace_alive(struct seq_file *m)
2924{
2925 if (!ftrace_is_dead())
2926 return;
2927 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2928 "# MAY BE MISSING FUNCTION EVENTS\n");
2929}
2930
2931#ifdef CONFIG_TRACER_MAX_TRACE
2932static void show_snapshot_main_help(struct seq_file *m)
2933{
2934 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2935 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2936 "# Takes a snapshot of the main buffer.\n"
2937 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		"# (Doesn't have to be '2'; works with any number that\n"
		"# is not a '0' or '1')\n");
2940}
2941
2942static void show_snapshot_percpu_help(struct seq_file *m)
2943{
2944 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2945#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2946 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2947 "# Takes a snapshot of the main buffer for this cpu.\n");
2948#else
2949 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2950 "# Must use main snapshot file to allocate.\n");
2951#endif
2952 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		"# (Doesn't have to be '2'; works with any number that\n"
		"# is not a '0' or '1')\n");
2955}
2956
2957static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2958{
2959 if (iter->tr->allocated_snapshot)
2960 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2961 else
2962 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2963
2964 seq_puts(m, "# Snapshot commands:\n");
2965 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2966 show_snapshot_main_help(m);
2967 else
2968 show_snapshot_percpu_help(m);
2969}
2970#else
2971/* Should never be called */
2972static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2973#endif
2974
2975static int s_show(struct seq_file *m, void *v)
2976{
2977 struct trace_iterator *iter = v;
2978 int ret;
2979
2980 if (iter->ent == NULL) {
2981 if (iter->tr) {
2982 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2983 seq_puts(m, "#\n");
2984 test_ftrace_alive(m);
2985 }
2986 if (iter->snapshot && trace_empty(iter))
2987 print_snapshot_help(m, iter);
2988 else if (iter->trace && iter->trace->print_header)
2989 iter->trace->print_header(m);
2990 else
2991 trace_default_header(m);
2992
2993 } else if (iter->leftover) {
2994 /*
2995 * If we filled the seq_file buffer earlier, we
2996 * want to just show it now.
2997 */
2998 ret = trace_print_seq(m, &iter->seq);
2999
3000 /* ret should this time be zero, but you never know */
3001 iter->leftover = ret;
3002
3003 } else {
3004 print_trace_line(iter);
3005 ret = trace_print_seq(m, &iter->seq);
3006 /*
3007 * If we overflow the seq_file buffer, then it will
3008 * ask us for this data again at start up.
3009 * Use that instead.
3010 * ret is 0 if seq_file write succeeded.
3011 * -1 otherwise.
3012 */
3013 iter->leftover = ret;
3014 }
3015
3016 return 0;
3017}
3018
3019/*
3020 * Should be used after trace_array_get(), trace_types_lock
3021 * ensures that i_cdev was already initialized.
3022 */
3023static inline int tracing_get_cpu(struct inode *inode)
3024{
3025 if (inode->i_cdev) /* See trace_create_cpu_file() */
3026 return (long)inode->i_cdev - 1;
3027 return RING_BUFFER_ALL_CPUS;
3028}
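
/*
 * Per-cpu trace files store "cpu + 1" in inode->i_cdev when they are
 * created, so a NULL i_cdev (the top level files) maps to
 * RING_BUFFER_ALL_CPUS while, for example, the per_cpu/cpu2 files
 * yield 2 here.
 */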
3029
3030static const struct seq_operations tracer_seq_ops = {
3031 .start = s_start,
3032 .next = s_next,
3033 .stop = s_stop,
3034 .show = s_show,
3035};
3036
3037static struct trace_iterator *
3038__tracing_open(struct inode *inode, struct file *file, bool snapshot)
3039{
3040 struct trace_array *tr = inode->i_private;
3041 struct trace_iterator *iter;
3042 int cpu;
3043
3044 if (tracing_disabled)
3045 return ERR_PTR(-ENODEV);
3046
3047 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3048 if (!iter)
3049 return ERR_PTR(-ENOMEM);
3050
3051 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3052 GFP_KERNEL);
3053 if (!iter->buffer_iter)
3054 goto release;
3055
	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes to it while we are reading.
	 */
3060 mutex_lock(&trace_types_lock);
3061 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3062 if (!iter->trace)
3063 goto fail;
3064
3065 *iter->trace = *tr->current_trace;
3066
3067 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3068 goto fail;
3069
3070 iter->tr = tr;
3071
3072#ifdef CONFIG_TRACER_MAX_TRACE
3073 /* Currently only the top directory has a snapshot */
3074 if (tr->current_trace->print_max || snapshot)
3075 iter->trace_buffer = &tr->max_buffer;
3076 else
3077#endif
3078 iter->trace_buffer = &tr->trace_buffer;
3079 iter->snapshot = snapshot;
3080 iter->pos = -1;
3081 iter->cpu_file = tracing_get_cpu(inode);
3082 mutex_init(&iter->mutex);
3083
3084 /* Notify the tracer early; before we stop tracing. */
3085 if (iter->trace && iter->trace->open)
3086 iter->trace->open(iter);
3087
3088 /* Annotate start of buffers if we had overruns */
3089 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3090 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3091
3092 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3093 if (trace_clocks[tr->clock_id].in_ns)
3094 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3095
3096 /* stop the trace while dumping if we are not opening "snapshot" */
3097 if (!iter->snapshot)
3098 tracing_stop_tr(tr);
3099
3100 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3101 for_each_tracing_cpu(cpu) {
3102 iter->buffer_iter[cpu] =
3103 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3104 }
3105 ring_buffer_read_prepare_sync();
3106 for_each_tracing_cpu(cpu) {
3107 ring_buffer_read_start(iter->buffer_iter[cpu]);
3108 tracing_iter_reset(iter, cpu);
3109 }
3110 } else {
3111 cpu = iter->cpu_file;
3112 iter->buffer_iter[cpu] =
3113 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3114 ring_buffer_read_prepare_sync();
3115 ring_buffer_read_start(iter->buffer_iter[cpu]);
3116 tracing_iter_reset(iter, cpu);
3117 }
3118
3119 mutex_unlock(&trace_types_lock);
3120
3121 return iter;
3122
3123 fail:
3124 mutex_unlock(&trace_types_lock);
3125 kfree(iter->trace);
3126 kfree(iter->buffer_iter);
3127release:
3128 seq_release_private(inode, file);
3129 return ERR_PTR(-ENOMEM);
3130}
3131
3132int tracing_open_generic(struct inode *inode, struct file *filp)
3133{
3134 if (tracing_disabled)
3135 return -ENODEV;
3136
3137 filp->private_data = inode->i_private;
3138 return 0;
3139}
3140
3141bool tracing_is_disabled(void)
3142{
	return tracing_disabled;
3144}
3145
3146/*
3147 * Open and update trace_array ref count.
3148 * Must have the current trace_array passed to it.
3149 */
3150static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3151{
3152 struct trace_array *tr = inode->i_private;
3153
3154 if (tracing_disabled)
3155 return -ENODEV;
3156
3157 if (trace_array_get(tr) < 0)
3158 return -ENODEV;
3159
3160 filp->private_data = inode->i_private;
3161
3162 return 0;
3163}
3164
3165static int tracing_release(struct inode *inode, struct file *file)
3166{
3167 struct trace_array *tr = inode->i_private;
3168 struct seq_file *m = file->private_data;
3169 struct trace_iterator *iter;
3170 int cpu;
3171
3172 if (!(file->f_mode & FMODE_READ)) {
3173 trace_array_put(tr);
3174 return 0;
3175 }
3176
3177 /* Writes do not use seq_file */
3178 iter = m->private;
3179 mutex_lock(&trace_types_lock);
3180
3181 for_each_tracing_cpu(cpu) {
3182 if (iter->buffer_iter[cpu])
3183 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3184 }
3185
3186 if (iter->trace && iter->trace->close)
3187 iter->trace->close(iter);
3188
3189 if (!iter->snapshot)
3190 /* reenable tracing if it was previously enabled */
3191 tracing_start_tr(tr);
3192
3193 __trace_array_put(tr);
3194
3195 mutex_unlock(&trace_types_lock);
3196
3197 mutex_destroy(&iter->mutex);
3198 free_cpumask_var(iter->started);
3199 kfree(iter->trace);
3200 kfree(iter->buffer_iter);
3201 seq_release_private(inode, file);
3202
3203 return 0;
3204}
3205
3206static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3207{
3208 struct trace_array *tr = inode->i_private;
3209
3210 trace_array_put(tr);
3211 return 0;
3212}
3213
3214static int tracing_single_release_tr(struct inode *inode, struct file *file)
3215{
3216 struct trace_array *tr = inode->i_private;
3217
3218 trace_array_put(tr);
3219
3220 return single_release(inode, file);
3221}
3222
3223static int tracing_open(struct inode *inode, struct file *file)
3224{
3225 struct trace_array *tr = inode->i_private;
3226 struct trace_iterator *iter;
3227 int ret = 0;
3228
3229 if (trace_array_get(tr) < 0)
3230 return -ENODEV;
3231
3232 /* If this file was open for write, then erase contents */
3233 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3234 int cpu = tracing_get_cpu(inode);
3235
3236 if (cpu == RING_BUFFER_ALL_CPUS)
3237 tracing_reset_online_cpus(&tr->trace_buffer);
3238 else
3239 tracing_reset(&tr->trace_buffer, cpu);
3240 }
3241
3242 if (file->f_mode & FMODE_READ) {
3243 iter = __tracing_open(inode, file, false);
3244 if (IS_ERR(iter))
3245 ret = PTR_ERR(iter);
3246 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
3247 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3248 }
3249
3250 if (ret < 0)
3251 trace_array_put(tr);
3252
3253 return ret;
3254}
3255
/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel),
 * or if the tracer itself explicitly allows instances.
 */
3261static bool
3262trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3263{
3264 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3265}
3266
3267/* Find the next tracer that this trace array may use */
3268static struct tracer *
3269get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3270{
3271 while (t && !trace_ok_for_array(t, tr))
3272 t = t->next;
3273
3274 return t;
3275}
3276
3277static void *
3278t_next(struct seq_file *m, void *v, loff_t *pos)
3279{
3280 struct trace_array *tr = m->private;
3281 struct tracer *t = v;
3282
3283 (*pos)++;
3284
3285 if (t)
3286 t = get_tracer_for_array(tr, t->next);
3287
3288 return t;
3289}
3290
3291static void *t_start(struct seq_file *m, loff_t *pos)
3292{
3293 struct trace_array *tr = m->private;
3294 struct tracer *t;
3295 loff_t l = 0;
3296
3297 mutex_lock(&trace_types_lock);
3298
3299 t = get_tracer_for_array(tr, trace_types);
3300 for (; t && l < *pos; t = t_next(m, t, &l))
3301 ;
3302
3303 return t;
3304}
3305
3306static void t_stop(struct seq_file *m, void *p)
3307{
3308 mutex_unlock(&trace_types_lock);
3309}
3310
3311static int t_show(struct seq_file *m, void *v)
3312{
3313 struct tracer *t = v;
3314
3315 if (!t)
3316 return 0;
3317
3318 seq_puts(m, t->name);
3319 if (t->next)
3320 seq_putc(m, ' ');
3321 else
3322 seq_putc(m, '\n');
3323
3324 return 0;
3325}
3326
3327static const struct seq_operations show_traces_seq_ops = {
3328 .start = t_start,
3329 .next = t_next,
3330 .stop = t_stop,
3331 .show = t_show,
3332};
3333
3334static int show_traces_open(struct inode *inode, struct file *file)
3335{
3336 struct trace_array *tr = inode->i_private;
3337 struct seq_file *m;
3338 int ret;
3339
3340 if (tracing_disabled)
3341 return -ENODEV;
3342
3343 ret = seq_open(file, &show_traces_seq_ops);
3344 if (ret)
3345 return ret;
3346
3347 m = file->private_data;
3348 m->private = tr;
3349
3350 return 0;
3351}
3352
3353static ssize_t
3354tracing_write_stub(struct file *filp, const char __user *ubuf,
3355 size_t count, loff_t *ppos)
3356{
3357 return count;
3358}
3359
3360loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3361{
3362 int ret;
3363
3364 if (file->f_mode & FMODE_READ)
3365 ret = seq_lseek(file, offset, whence);
3366 else
3367 file->f_pos = ret = 0;
3368
3369 return ret;
3370}
3371
3372static const struct file_operations tracing_fops = {
3373 .open = tracing_open,
3374 .read = seq_read,
3375 .write = tracing_write_stub,
3376 .llseek = tracing_lseek,
3377 .release = tracing_release,
3378};
3379
3380static const struct file_operations show_traces_fops = {
3381 .open = show_traces_open,
3382 .read = seq_read,
3383 .release = seq_release,
3384 .llseek = seq_lseek,
3385};
3386
3387/*
3388 * The tracer itself will not take this lock, but still we want
3389 * to provide a consistent cpumask to user-space:
3390 */
3391static DEFINE_MUTEX(tracing_cpumask_update_lock);
3392
3393/*
3394 * Temporary storage for the character representation of the
3395 * CPU bitmask (and one more byte for the newline):
3396 */
3397static char mask_str[NR_CPUS + 1];
3398
3399static ssize_t
3400tracing_cpumask_read(struct file *filp, char __user *ubuf,
3401 size_t count, loff_t *ppos)
3402{
3403 struct trace_array *tr = file_inode(filp)->i_private;
3404 int len;
3405
3406 mutex_lock(&tracing_cpumask_update_lock);
3407
3408 len = snprintf(mask_str, count, "%*pb\n",
3409 cpumask_pr_args(tr->tracing_cpumask));
3410 if (len >= count) {
3411 count = -EINVAL;
3412 goto out_err;
3413 }
3414 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3415
3416out_err:
3417 mutex_unlock(&tracing_cpumask_update_lock);
3418
3419 return count;
3420}
3421
3422static ssize_t
3423tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3424 size_t count, loff_t *ppos)
3425{
3426 struct trace_array *tr = file_inode(filp)->i_private;
3427 cpumask_var_t tracing_cpumask_new;
3428 int err, cpu;
3429
3430 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3431 return -ENOMEM;
3432
3433 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3434 if (err)
3435 goto err_unlock;
3436
3437 mutex_lock(&tracing_cpumask_update_lock);
3438
3439 local_irq_disable();
3440 arch_spin_lock(&tr->max_lock);
3441 for_each_tracing_cpu(cpu) {
3442 /*
3443 * Increase/decrease the disabled counter if we are
3444 * about to flip a bit in the cpumask:
3445 */
3446 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3447 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3448 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3449 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3450 }
3451 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3452 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3453 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3454 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3455 }
3456 }
3457 arch_spin_unlock(&tr->max_lock);
3458 local_irq_enable();
3459
3460 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3461
3462 mutex_unlock(&tracing_cpumask_update_lock);
3463 free_cpumask_var(tracing_cpumask_new);
3464
3465 return count;
3466
3467err_unlock:
3468 free_cpumask_var(tracing_cpumask_new);
3469
3470 return err;
3471}
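
/*
 * The mask is written as a hex cpumask string. For example (assuming the
 * usual tracefs mount point):
 *
 *	# echo 3 > /sys/kernel/tracing/tracing_cpumask
 *
 * limits tracing to CPUs 0 and 1; the buffers of CPUs being masked out
 * are disabled above before the new mask takes effect.
 */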
3472
3473static const struct file_operations tracing_cpumask_fops = {
3474 .open = tracing_open_generic_tr,
3475 .read = tracing_cpumask_read,
3476 .write = tracing_cpumask_write,
3477 .release = tracing_release_generic_tr,
3478 .llseek = generic_file_llseek,
3479};
3480
3481static int tracing_trace_options_show(struct seq_file *m, void *v)
3482{
3483 struct tracer_opt *trace_opts;
3484 struct trace_array *tr = m->private;
3485 u32 tracer_flags;
3486 int i;
3487
3488 mutex_lock(&trace_types_lock);
3489 tracer_flags = tr->current_trace->flags->val;
3490 trace_opts = tr->current_trace->flags->opts;
3491
3492 for (i = 0; trace_options[i]; i++) {
3493 if (tr->trace_flags & (1 << i))
3494 seq_printf(m, "%s\n", trace_options[i]);
3495 else
3496 seq_printf(m, "no%s\n", trace_options[i]);
3497 }
3498
3499 for (i = 0; trace_opts[i].name; i++) {
3500 if (tracer_flags & trace_opts[i].bit)
3501 seq_printf(m, "%s\n", trace_opts[i].name);
3502 else
3503 seq_printf(m, "no%s\n", trace_opts[i].name);
3504 }
3505 mutex_unlock(&trace_types_lock);
3506
3507 return 0;
3508}
3509
3510static int __set_tracer_option(struct trace_array *tr,
3511 struct tracer_flags *tracer_flags,
3512 struct tracer_opt *opts, int neg)
3513{
3514 struct tracer *trace = tracer_flags->trace;
3515 int ret;
3516
3517 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3518 if (ret)
3519 return ret;
3520
3521 if (neg)
3522 tracer_flags->val &= ~opts->bit;
3523 else
3524 tracer_flags->val |= opts->bit;
3525 return 0;
3526}
3527
3528/* Try to assign a tracer specific option */
3529static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3530{
3531 struct tracer *trace = tr->current_trace;
3532 struct tracer_flags *tracer_flags = trace->flags;
3533 struct tracer_opt *opts = NULL;
3534 int i;
3535
3536 for (i = 0; tracer_flags->opts[i].name; i++) {
3537 opts = &tracer_flags->opts[i];
3538
3539 if (strcmp(cmp, opts->name) == 0)
3540 return __set_tracer_option(tr, trace->flags, opts, neg);
3541 }
3542
3543 return -EINVAL;
3544}
3545
3546/* Some tracers require overwrite to stay enabled */
3547int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3548{
3549 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3550 return -1;
3551
3552 return 0;
3553}
3554
3555int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3556{
3557 /* do nothing if flag is already set */
3558 if (!!(tr->trace_flags & mask) == !!enabled)
3559 return 0;
3560
3561 /* Give the tracer a chance to approve the change */
3562 if (tr->current_trace->flag_changed)
3563 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3564 return -EINVAL;
3565
3566 if (enabled)
3567 tr->trace_flags |= mask;
3568 else
3569 tr->trace_flags &= ~mask;
3570
3571 if (mask == TRACE_ITER_RECORD_CMD)
3572 trace_event_enable_cmd_record(enabled);
3573
3574 if (mask == TRACE_ITER_OVERWRITE) {
3575 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3576#ifdef CONFIG_TRACER_MAX_TRACE
3577 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3578#endif
3579 }
3580
3581 if (mask == TRACE_ITER_PRINTK) {
3582 trace_printk_start_stop_comm(enabled);
3583 trace_printk_control(enabled);
3584 }
3585
3586 return 0;
3587}
3588
3589static int trace_set_options(struct trace_array *tr, char *option)
3590{
3591 char *cmp;
3592 int neg = 0;
3593 int ret = -ENODEV;
3594 int i;
3595 size_t orig_len = strlen(option);
3596
3597 cmp = strstrip(option);
3598
3599 if (strncmp(cmp, "no", 2) == 0) {
3600 neg = 1;
3601 cmp += 2;
3602 }
3603
3604 mutex_lock(&trace_types_lock);
3605
3606 for (i = 0; trace_options[i]; i++) {
3607 if (strcmp(cmp, trace_options[i]) == 0) {
3608 ret = set_tracer_flag(tr, 1 << i, !neg);
3609 break;
3610 }
3611 }
3612
3613 /* If no option could be set, test the specific tracer options */
3614 if (!trace_options[i])
3615 ret = set_tracer_option(tr, cmp, neg);
3616
3617 mutex_unlock(&trace_types_lock);
3618
3619 /*
3620 * If the first trailing whitespace is replaced with '\0' by strstrip,
3621 * turn it back into a space.
3622 */
3623 if (orig_len > strlen(option))
3624 option[strlen(option)] = ' ';
3625
3626 return ret;
3627}
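
/*
 * This handles both the core trace options and tracer specific options,
 * with a leading "no" clearing an option. For example (assuming the
 * usual tracefs mount point):
 *
 *	# echo noprint-parent > /sys/kernel/tracing/trace_options
 *	# echo overwrite > /sys/kernel/tracing/trace_options
 *
 * A name that neither table knows falls through to set_tracer_option()
 * and fails with -EINVAL.
 */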
3628
3629static void __init apply_trace_boot_options(void)
3630{
3631 char *buf = trace_boot_options_buf;
3632 char *option;
3633
3634 while (true) {
3635 option = strsep(&buf, ",");
3636
3637 if (!option)
3638 break;
3639
3640 if (*option)
3641 trace_set_options(&global_trace, option);
3642
3643 /* Put back the comma to allow this to be called again */
3644 if (buf)
3645 *(buf - 1) = ',';
3646 }
3647}
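
/*
 * The boot option string is applied one comma separated token at a time,
 * so a command line such as (assuming the trace_options= boot parameter
 * that fills trace_boot_options_buf):
 *
 *	trace_options=sym-addr,noprint-parent
 *
 * behaves the same as writing each word into trace_options after boot.
 */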
3648
3649static ssize_t
3650tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3651 size_t cnt, loff_t *ppos)
3652{
3653 struct seq_file *m = filp->private_data;
3654 struct trace_array *tr = m->private;
3655 char buf[64];
3656 int ret;
3657
3658 if (cnt >= sizeof(buf))
3659 return -EINVAL;
3660
3661 if (copy_from_user(&buf, ubuf, cnt))
3662 return -EFAULT;
3663
3664 buf[cnt] = 0;
3665
3666 ret = trace_set_options(tr, buf);
3667 if (ret < 0)
3668 return ret;
3669
3670 *ppos += cnt;
3671
3672 return cnt;
3673}
3674
3675static int tracing_trace_options_open(struct inode *inode, struct file *file)
3676{
3677 struct trace_array *tr = inode->i_private;
3678 int ret;
3679
3680 if (tracing_disabled)
3681 return -ENODEV;
3682
3683 if (trace_array_get(tr) < 0)
3684 return -ENODEV;
3685
3686 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3687 if (ret < 0)
3688 trace_array_put(tr);
3689
3690 return ret;
3691}
3692
3693static const struct file_operations tracing_iter_fops = {
3694 .open = tracing_trace_options_open,
3695 .read = seq_read,
3696 .llseek = seq_lseek,
3697 .release = tracing_single_release_tr,
3698 .write = tracing_trace_options_write,
3699};
3700
3701static const char readme_msg[] =
3702 "tracing mini-HOWTO:\n\n"
3703 "# echo 0 > tracing_on : quick way to disable tracing\n"
3704 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3705 " Important files:\n"
3706 " trace\t\t\t- The static contents of the buffer\n"
3707 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3708 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3709 " current_tracer\t- function and latency tracers\n"
3710 " available_tracers\t- list of configured tracers for current_tracer\n"
3711 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3712 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
	" trace_clock\t\t- change the clock used to order events\n"
3714 " local: Per cpu clock but may not be synced across CPUs\n"
3715 " global: Synced across CPUs but slows tracing down.\n"
3716 " counter: Not a clock, but just an increment\n"
3717 " uptime: Jiffy counter from time of boot\n"
3718 " perf: Same clock that perf events use\n"
3719#ifdef CONFIG_X86_64
3720 " x86-tsc: TSC cycle counter\n"
3721#endif
3722 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3723 " tracing_cpumask\t- Limit which CPUs to trace\n"
3724 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3725 "\t\t\t Remove sub-buffer with rmdir\n"
3726 " trace_options\t\t- Set format or modify how tracing happens\n"
3727 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3728 "\t\t\t option name\n"
3729 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3730#ifdef CONFIG_DYNAMIC_FTRACE
3731 "\n available_filter_functions - list of functions that can be filtered on\n"
3732 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3733 "\t\t\t functions\n"
3734 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3735 "\t modules: Can select a group via module\n"
3736 "\t Format: :mod:<module-name>\n"
3737 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3738 "\t triggers: a command to perform when function is hit\n"
3739 "\t Format: <function>:<trigger>[:count]\n"
3740 "\t trigger: traceon, traceoff\n"
3741 "\t\t enable_event:<system>:<event>\n"
3742 "\t\t disable_event:<system>:<event>\n"
3743#ifdef CONFIG_STACKTRACE
3744 "\t\t stacktrace\n"
3745#endif
3746#ifdef CONFIG_TRACER_SNAPSHOT
3747 "\t\t snapshot\n"
3748#endif
3749 "\t\t dump\n"
3750 "\t\t cpudump\n"
3751 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3752 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3753 "\t The first one will disable tracing every time do_fault is hit\n"
3754 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t The first time do_trap is hit and it disables tracing, the\n"
3756 "\t counter will decrement to 2. If tracing is already disabled,\n"
3757 "\t the counter will not decrement. It only decrements when the\n"
3758 "\t trigger did work\n"
3759 "\t To remove trigger without count:\n"
	"\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
	"\t To remove trigger with a count:\n"
	"\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3763 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3764 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3765 "\t modules: Can select a group via module command :mod:\n"
3766 "\t Does not accept triggers\n"
3767#endif /* CONFIG_DYNAMIC_FTRACE */
3768#ifdef CONFIG_FUNCTION_TRACER
3769 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3770 "\t\t (function)\n"
3771#endif
3772#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3773 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3774 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3775 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3776#endif
3777#ifdef CONFIG_TRACER_SNAPSHOT
3778 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3779 "\t\t\t snapshot buffer. Read the contents for more\n"
3780 "\t\t\t information\n"
3781#endif
3782#ifdef CONFIG_STACK_TRACER
3783 " stack_trace\t\t- Shows the max stack trace when active\n"
3784 " stack_max_size\t- Shows current max stack size that was traced\n"
3785 "\t\t\t Write into this file to reset the max size (trigger a\n"
3786 "\t\t\t new trace)\n"
3787#ifdef CONFIG_DYNAMIC_FTRACE
3788 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3789 "\t\t\t traces\n"
3790#endif
3791#endif /* CONFIG_STACK_TRACER */
3792 " events/\t\t- Directory containing all trace event subsystems:\n"
3793 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3794 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3795 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3796 "\t\t\t events\n"
3797 " filter\t\t- If set, only events passing filter are traced\n"
3798 " events/<system>/<event>/\t- Directory containing control files for\n"
3799 "\t\t\t <event>:\n"
3800 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3801 " filter\t\t- If set, only events passing filter are traced\n"
3802 " trigger\t\t- If set, a command to perform when event is hit\n"
3803 "\t Format: <trigger>[:count][if <filter>]\n"
3804 "\t trigger: traceon, traceoff\n"
3805 "\t enable_event:<system>:<event>\n"
3806 "\t disable_event:<system>:<event>\n"
3807#ifdef CONFIG_STACKTRACE
3808 "\t\t stacktrace\n"
3809#endif
3810#ifdef CONFIG_TRACER_SNAPSHOT
3811 "\t\t snapshot\n"
3812#endif
3813 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3814 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3815 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3816 "\t events/block/block_unplug/trigger\n"
3817 "\t The first disables tracing every time block_unplug is hit.\n"
3818 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3819 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3820 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3821 "\t Like function triggers, the counter is only decremented if it\n"
3822 "\t enabled or disabled tracing.\n"
3823 "\t To remove a trigger without a count:\n"
	"\t echo '!<trigger>' > <system>/<event>/trigger\n"
	"\t To remove a trigger with a count:\n"
	"\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
3827 "\t Filters can be ignored when removing a trigger.\n"
3828;
3829
3830static ssize_t
3831tracing_readme_read(struct file *filp, char __user *ubuf,
3832 size_t cnt, loff_t *ppos)
3833{
3834 return simple_read_from_buffer(ubuf, cnt, ppos,
3835 readme_msg, strlen(readme_msg));
3836}
3837
3838static const struct file_operations tracing_readme_fops = {
3839 .open = tracing_open_generic,
3840 .read = tracing_readme_read,
3841 .llseek = generic_file_llseek,
3842};
3843
3844static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3845{
3846 unsigned int *ptr = v;
3847
3848 if (*pos || m->count)
3849 ptr++;
3850
3851 (*pos)++;
3852
3853 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3854 ptr++) {
3855 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3856 continue;
3857
3858 return ptr;
3859 }
3860
3861 return NULL;
3862}
3863
3864static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3865{
3866 void *v;
3867 loff_t l = 0;
3868
3869 preempt_disable();
3870 arch_spin_lock(&trace_cmdline_lock);
3871
3872 v = &savedcmd->map_cmdline_to_pid[0];
3873 while (l <= *pos) {
3874 v = saved_cmdlines_next(m, v, &l);
3875 if (!v)
3876 return NULL;
3877 }
3878
3879 return v;
3880}
3881
3882static void saved_cmdlines_stop(struct seq_file *m, void *v)
3883{
3884 arch_spin_unlock(&trace_cmdline_lock);
3885 preempt_enable();
3886}
3887
3888static int saved_cmdlines_show(struct seq_file *m, void *v)
3889{
3890 char buf[TASK_COMM_LEN];
3891 unsigned int *pid = v;
3892
3893 __trace_find_cmdline(*pid, buf);
3894 seq_printf(m, "%d %s\n", *pid, buf);
3895 return 0;
3896}
3897
3898static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3899 .start = saved_cmdlines_start,
3900 .next = saved_cmdlines_next,
3901 .stop = saved_cmdlines_stop,
3902 .show = saved_cmdlines_show,
3903};
3904
3905static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3906{
3907 if (tracing_disabled)
3908 return -ENODEV;
3909
3910 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3911}
3912
3913static const struct file_operations tracing_saved_cmdlines_fops = {
3914 .open = tracing_saved_cmdlines_open,
3915 .read = seq_read,
3916 .llseek = seq_lseek,
3917 .release = seq_release,
3918};
3919
3920static ssize_t
3921tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3922 size_t cnt, loff_t *ppos)
3923{
3924 char buf[64];
3925 int r;
3926
3927 arch_spin_lock(&trace_cmdline_lock);
3928 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3929 arch_spin_unlock(&trace_cmdline_lock);
3930
3931 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3932}
3933
3934static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3935{
3936 kfree(s->saved_cmdlines);
3937 kfree(s->map_cmdline_to_pid);
3938 kfree(s);
3939}
3940
3941static int tracing_resize_saved_cmdlines(unsigned int val)
3942{
3943 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3944
3945 s = kmalloc(sizeof(*s), GFP_KERNEL);
3946 if (!s)
3947 return -ENOMEM;
3948
3949 if (allocate_cmdlines_buffer(val, s) < 0) {
3950 kfree(s);
3951 return -ENOMEM;
3952 }
3953
3954 arch_spin_lock(&trace_cmdline_lock);
3955 savedcmd_temp = savedcmd;
3956 savedcmd = s;
3957 arch_spin_unlock(&trace_cmdline_lock);
3958 free_saved_cmdlines_buffer(savedcmd_temp);
3959
3960 return 0;
3961}
3962
3963static ssize_t
3964tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3965 size_t cnt, loff_t *ppos)
3966{
3967 unsigned long val;
3968 int ret;
3969
3970 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3971 if (ret)
3972 return ret;
3973
	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3975 if (!val || val > PID_MAX_DEFAULT)
3976 return -EINVAL;
3977
3978 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3979 if (ret < 0)
3980 return ret;
3981
3982 *ppos += cnt;
3983
3984 return cnt;
3985}
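
/*
 * For example (assuming the usual tracefs mount point):
 *
 *	# echo 1024 > /sys/kernel/tracing/saved_cmdlines_size
 *
 * grows the pid<->comm map to 1024 entries; the old map is swapped out
 * under trace_cmdline_lock and then freed.
 */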
3986
3987static const struct file_operations tracing_saved_cmdlines_size_fops = {
3988 .open = tracing_open_generic,
3989 .read = tracing_saved_cmdlines_size_read,
3990 .write = tracing_saved_cmdlines_size_write,
3991};
3992
3993#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3994static union trace_enum_map_item *
3995update_enum_map(union trace_enum_map_item *ptr)
3996{
3997 if (!ptr->map.enum_string) {
3998 if (ptr->tail.next) {
3999 ptr = ptr->tail.next;
4000 /* Set ptr to the next real item (skip head) */
4001 ptr++;
4002 } else
4003 return NULL;
4004 }
4005 return ptr;
4006}
4007
4008static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4009{
4010 union trace_enum_map_item *ptr = v;
4011
4012 /*
4013 * Paranoid! If ptr points to end, we don't want to increment past it.
4014 * This really should never happen.
4015 */
4016 ptr = update_enum_map(ptr);
4017 if (WARN_ON_ONCE(!ptr))
4018 return NULL;
4019
4020 ptr++;
4021
4022 (*pos)++;
4023
4024 ptr = update_enum_map(ptr);
4025
4026 return ptr;
4027}
4028
4029static void *enum_map_start(struct seq_file *m, loff_t *pos)
4030{
4031 union trace_enum_map_item *v;
4032 loff_t l = 0;
4033
4034 mutex_lock(&trace_enum_mutex);
4035
4036 v = trace_enum_maps;
4037 if (v)
4038 v++;
4039
4040 while (v && l < *pos) {
4041 v = enum_map_next(m, v, &l);
4042 }
4043
4044 return v;
4045}
4046
4047static void enum_map_stop(struct seq_file *m, void *v)
4048{
4049 mutex_unlock(&trace_enum_mutex);
4050}
4051
4052static int enum_map_show(struct seq_file *m, void *v)
4053{
4054 union trace_enum_map_item *ptr = v;
4055
4056 seq_printf(m, "%s %ld (%s)\n",
4057 ptr->map.enum_string, ptr->map.enum_value,
4058 ptr->map.system);
4059
4060 return 0;
4061}
4062
4063static const struct seq_operations tracing_enum_map_seq_ops = {
4064 .start = enum_map_start,
4065 .next = enum_map_next,
4066 .stop = enum_map_stop,
4067 .show = enum_map_show,
4068};
4069
4070static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4071{
4072 if (tracing_disabled)
4073 return -ENODEV;
4074
4075 return seq_open(filp, &tracing_enum_map_seq_ops);
4076}
4077
4078static const struct file_operations tracing_enum_map_fops = {
4079 .open = tracing_enum_map_open,
4080 .read = seq_read,
4081 .llseek = seq_lseek,
4082 .release = seq_release,
4083};
4084
4085static inline union trace_enum_map_item *
4086trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4087{
4088 /* Return tail of array given the head */
4089 return ptr + ptr->head.length + 1;
4090}
4091
4092static void
4093trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4094 int len)
4095{
4096 struct trace_enum_map **stop;
4097 struct trace_enum_map **map;
4098 union trace_enum_map_item *map_array;
4099 union trace_enum_map_item *ptr;
4100
4101 stop = start + len;
4102
4103 /*
	 * The trace_enum_maps array contains the maps plus a head and a tail
	 * item, where the head holds the module and the length of the array,
	 * and the tail holds a pointer to the next list.
4107 */
4108 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4109 if (!map_array) {
4110 pr_warn("Unable to allocate trace enum mapping\n");
4111 return;
4112 }
4113
4114 mutex_lock(&trace_enum_mutex);
4115
4116 if (!trace_enum_maps)
4117 trace_enum_maps = map_array;
4118 else {
4119 ptr = trace_enum_maps;
4120 for (;;) {
4121 ptr = trace_enum_jmp_to_tail(ptr);
4122 if (!ptr->tail.next)
4123 break;
4124 ptr = ptr->tail.next;
4125
4126 }
4127 ptr->tail.next = map_array;
4128 }
4129 map_array->head.mod = mod;
4130 map_array->head.length = len;
4131 map_array++;
4132
4133 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4134 map_array->map = **map;
4135 map_array++;
4136 }
4137 memset(map_array, 0, sizeof(*map_array));
4138
4139 mutex_unlock(&trace_enum_mutex);
4140}
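/*
 * Layout sketch of one map_array block built above (illustrative only):
 *
 *	[ head: mod, length=len ][ map 0 ] ... [ map len-1 ][ tail: next ]
 *
 * The head and tail entries are what trace_enum_jmp_to_tail() and
 * update_enum_map() step over when the enum_map file is read.
 */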
4141
4142static void trace_create_enum_file(struct dentry *d_tracer)
4143{
4144 trace_create_file("enum_map", 0444, d_tracer,
4145 NULL, &tracing_enum_map_fops);
4146}
4147
4148#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4149static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4150static inline void trace_insert_enum_map_file(struct module *mod,
4151 struct trace_enum_map **start, int len) { }
4152#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4153
4154static void trace_insert_enum_map(struct module *mod,
4155 struct trace_enum_map **start, int len)
4156{
4157 struct trace_enum_map **map;
4158
4159 if (len <= 0)
4160 return;
4161
4162 map = start;
4163
4164 trace_event_enum_update(map, len);
4165
4166 trace_insert_enum_map_file(mod, start, len);
4167}
4168
4169static ssize_t
4170tracing_set_trace_read(struct file *filp, char __user *ubuf,
4171 size_t cnt, loff_t *ppos)
4172{
4173 struct trace_array *tr = filp->private_data;
4174 char buf[MAX_TRACER_SIZE+2];
4175 int r;
4176
4177 mutex_lock(&trace_types_lock);
4178 r = sprintf(buf, "%s\n", tr->current_trace->name);
4179 mutex_unlock(&trace_types_lock);
4180
4181 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4182}
4183
4184int tracer_init(struct tracer *t, struct trace_array *tr)
4185{
4186 tracing_reset_online_cpus(&tr->trace_buffer);
4187 return t->init(tr);
4188}
4189
4190static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
4191{
4192 int cpu;
4193
4194 for_each_tracing_cpu(cpu)
4195 per_cpu_ptr(buf->data, cpu)->entries = val;
4196}
4197
4198#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's buffer to the size of @size_buf's entries */
4200static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4201 struct trace_buffer *size_buf, int cpu_id)
4202{
4203 int cpu, ret = 0;
4204
4205 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4206 for_each_tracing_cpu(cpu) {
4207 ret = ring_buffer_resize(trace_buf->buffer,
4208 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
4209 if (ret < 0)
4210 break;
4211 per_cpu_ptr(trace_buf->data, cpu)->entries =
4212 per_cpu_ptr(size_buf->data, cpu)->entries;
4213 }
4214 } else {
4215 ret = ring_buffer_resize(trace_buf->buffer,
4216 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
4217 if (ret == 0)
4218 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4219 per_cpu_ptr(size_buf->data, cpu_id)->entries;
4220 }
4221
4222 return ret;
4223}
4224#endif /* CONFIG_TRACER_MAX_TRACE */
4225
4226static int __tracing_resize_ring_buffer(struct trace_array *tr,
4227 unsigned long size, int cpu)
4228{
4229 int ret;
4230
4231 /*
	 * If the kernel or the user changes the size of the ring buffer,
	 * we use the size that was given, and we can forget about
4234 * expanding it later.
4235 */
4236 ring_buffer_expanded = true;
4237
4238 /* May be called before buffers are initialized */
4239 if (!tr->trace_buffer.buffer)
4240 return 0;
4241
4242 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
4243 if (ret < 0)
4244 return ret;
4245
4246#ifdef CONFIG_TRACER_MAX_TRACE
4247 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4248 !tr->current_trace->use_max_tr)
4249 goto out;
4250
4251 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4252 if (ret < 0) {
4253 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4254 &tr->trace_buffer, cpu);
4255 if (r < 0) {
			/*
			 * AARGH! We are left with a max buffer of a
			 * different size!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but
			 * failed to update the size of the max buffer.
			 * Then, when we tried to reset the main buffer
			 * to its original size, we failed there too.
			 * This is very unlikely to happen, but if it
			 * does, warn and kill all tracing.
			 */
4270 WARN_ON(1);
4271 tracing_disabled = 1;
4272 }
4273 return ret;
4274 }
4275
4276 if (cpu == RING_BUFFER_ALL_CPUS)
4277 set_buffer_entries(&tr->max_buffer, size);
4278 else
4279 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4280
4281 out:
4282#endif /* CONFIG_TRACER_MAX_TRACE */
4283
4284 if (cpu == RING_BUFFER_ALL_CPUS)
4285 set_buffer_entries(&tr->trace_buffer, size);
4286 else
4287 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4288
4289 return ret;
4290}
4291
4292static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4293 unsigned long size, int cpu_id)
4294{
4295 int ret = size;
4296
4297 mutex_lock(&trace_types_lock);
4298
4299 if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
4301 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4302 ret = -EINVAL;
4303 goto out;
4304 }
4305 }
4306
4307 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4308 if (ret < 0)
4309 ret = -ENOMEM;
4310
4311out:
4312 mutex_unlock(&trace_types_lock);
4313
4314 return ret;
4315}
4316
4317
4318/**
4319 * tracing_update_buffers - used by tracing facility to expand ring buffers
4320 *
 * To save memory when tracing is configured in but never used, the ring
 * buffers are initially set to a minimum size. Once a user starts to use
 * the tracing facility, the buffers need to grow to their default size.
4325 *
4326 * This function is to be called when a tracer is about to be used.
4327 */
4328int tracing_update_buffers(void)
4329{
4330 int ret = 0;
4331
4332 mutex_lock(&trace_types_lock);
4333 if (!ring_buffer_expanded)
4334 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4335 RING_BUFFER_ALL_CPUS);
4336 mutex_unlock(&trace_types_lock);
4337
4338 return ret;
4339}
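/*
 * Caller sketch (hypothetical, not a real call site in this file): a
 * facility that is about to start writing events would typically do:
 *
 *	int ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *	... start using the ring buffer ...
 */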
4340
4341struct trace_option_dentry;
4342
4343static void
4344create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4345
4346/*
4347 * Used to clear out the tracer before deletion of an instance.
4348 * Must have trace_types_lock held.
4349 */
4350static void tracing_set_nop(struct trace_array *tr)
4351{
4352 if (tr->current_trace == &nop_trace)
4353 return;
4354
4355 tr->current_trace->enabled--;
4356
4357 if (tr->current_trace->reset)
4358 tr->current_trace->reset(tr);
4359
4360 tr->current_trace = &nop_trace;
4361}
4362
4363static void add_tracer_options(struct trace_array *tr, struct tracer *t)
4364{
4365 /* Only enable if the directory has been created already. */
4366 if (!tr->dir)
4367 return;
4368
4369 create_trace_option_files(tr, t);
4370}
4371
4372static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4373{
4374 struct tracer *t;
4375#ifdef CONFIG_TRACER_MAX_TRACE
4376 bool had_max_tr;
4377#endif
4378 int ret = 0;
4379
4380 mutex_lock(&trace_types_lock);
4381
4382 if (!ring_buffer_expanded) {
4383 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4384 RING_BUFFER_ALL_CPUS);
4385 if (ret < 0)
4386 goto out;
4387 ret = 0;
4388 }
4389
4390 for (t = trace_types; t; t = t->next) {
4391 if (strcmp(t->name, buf) == 0)
4392 break;
4393 }
4394 if (!t) {
4395 ret = -EINVAL;
4396 goto out;
4397 }
4398 if (t == tr->current_trace)
4399 goto out;
4400
4401 /* Some tracers are only allowed for the top level buffer */
4402 if (!trace_ok_for_array(t, tr)) {
4403 ret = -EINVAL;
4404 goto out;
4405 }
4406
4407 /* If trace pipe files are being read, we can't change the tracer */
4408 if (tr->current_trace->ref) {
4409 ret = -EBUSY;
4410 goto out;
4411 }
4412
4413 trace_branch_disable();
4414
4415 tr->current_trace->enabled--;
4416
4417 if (tr->current_trace->reset)
4418 tr->current_trace->reset(tr);
4419
4420 /* Current trace needs to be nop_trace before synchronize_sched */
4421 tr->current_trace = &nop_trace;
4422
4423#ifdef CONFIG_TRACER_MAX_TRACE
4424 had_max_tr = tr->allocated_snapshot;
4425
4426 if (had_max_tr && !t->use_max_tr) {
4427 /*
4428 * We need to make sure that the update_max_tr sees that
4429 * current_trace changed to nop_trace to keep it from
4430 * swapping the buffers after we resize it.
		 * The update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
4433 */
4434 synchronize_sched();
4435 free_snapshot(tr);
4436 }
4437#endif
4438
4439#ifdef CONFIG_TRACER_MAX_TRACE
4440 if (t->use_max_tr && !had_max_tr) {
4441 ret = alloc_snapshot(tr);
4442 if (ret < 0)
4443 goto out;
4444 }
4445#endif
4446
4447 if (t->init) {
4448 ret = tracer_init(t, tr);
4449 if (ret)
4450 goto out;
4451 }
4452
4453 tr->current_trace = t;
4454 tr->current_trace->enabled++;
4455 trace_branch_enable(tr);
4456 out:
4457 mutex_unlock(&trace_types_lock);
4458
4459 return ret;
4460}
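/*
 * Userspace sketch for switching tracers: tracing_set_tracer() above is
 * reached through writes to the current_tracer file (see
 * tracing_set_trace_write() below). The mount point is an assumption for
 * illustration:
 *
 *	int fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "function", 8);
 *		close(fd);
 *	}
 */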
4461
4462static ssize_t
4463tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4464 size_t cnt, loff_t *ppos)
4465{
4466 struct trace_array *tr = filp->private_data;
4467 char buf[MAX_TRACER_SIZE+1];
4468 int i;
4469 size_t ret;
4470 int err;
4471
4472 ret = cnt;
4473
4474 if (cnt > MAX_TRACER_SIZE)
4475 cnt = MAX_TRACER_SIZE;
4476
4477 if (copy_from_user(&buf, ubuf, cnt))
4478 return -EFAULT;
4479
4480 buf[cnt] = 0;
4481
4482 /* strip ending whitespace. */
4483 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4484 buf[i] = 0;
4485
4486 err = tracing_set_tracer(tr, buf);
4487 if (err)
4488 return err;
4489
4490 *ppos += ret;
4491
4492 return ret;
4493}
4494
4495static ssize_t
4496tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4497 size_t cnt, loff_t *ppos)
4498{
4499 char buf[64];
4500 int r;
4501
4502 r = snprintf(buf, sizeof(buf), "%ld\n",
4503 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4504 if (r > sizeof(buf))
4505 r = sizeof(buf);
4506 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4507}
4508
4509static ssize_t
4510tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4511 size_t cnt, loff_t *ppos)
4512{
4513 unsigned long val;
4514 int ret;
4515
4516 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4517 if (ret)
4518 return ret;
4519
4520 *ptr = val * 1000;
4521
4522 return cnt;
4523}
4524
4525static ssize_t
4526tracing_thresh_read(struct file *filp, char __user *ubuf,
4527 size_t cnt, loff_t *ppos)
4528{
4529 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4530}
4531
4532static ssize_t
4533tracing_thresh_write(struct file *filp, const char __user *ubuf,
4534 size_t cnt, loff_t *ppos)
4535{
4536 struct trace_array *tr = filp->private_data;
4537 int ret;
4538
4539 mutex_lock(&trace_types_lock);
4540 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4541 if (ret < 0)
4542 goto out;
4543
4544 if (tr->current_trace->update_thresh) {
4545 ret = tr->current_trace->update_thresh(tr);
4546 if (ret < 0)
4547 goto out;
4548 }
4549
4550 ret = cnt;
4551out:
4552 mutex_unlock(&trace_types_lock);
4553
4554 return ret;
4555}
4556
4557#ifdef CONFIG_TRACER_MAX_TRACE
4558
4559static ssize_t
4560tracing_max_lat_read(struct file *filp, char __user *ubuf,
4561 size_t cnt, loff_t *ppos)
4562{
4563 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4564}
4565
4566static ssize_t
4567tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4568 size_t cnt, loff_t *ppos)
4569{
4570 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4571}
4572
4573#endif
4574
4575static int tracing_open_pipe(struct inode *inode, struct file *filp)
4576{
4577 struct trace_array *tr = inode->i_private;
4578 struct trace_iterator *iter;
4579 int ret = 0;
4580
4581 if (tracing_disabled)
4582 return -ENODEV;
4583
4584 if (trace_array_get(tr) < 0)
4585 return -ENODEV;
4586
4587 mutex_lock(&trace_types_lock);
4588
4589 /* create a buffer to store the information to pass to userspace */
4590 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4591 if (!iter) {
4592 ret = -ENOMEM;
4593 __trace_array_put(tr);
4594 goto out;
4595 }
4596
4597 trace_seq_init(&iter->seq);
4598 iter->trace = tr->current_trace;
4599
4600 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4601 ret = -ENOMEM;
4602 goto fail;
4603 }
4604
4605 /* trace pipe does not show start of buffer */
4606 cpumask_setall(iter->started);
4607
4608 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4609 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4610
4611 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4612 if (trace_clocks[tr->clock_id].in_ns)
4613 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4614
4615 iter->tr = tr;
4616 iter->trace_buffer = &tr->trace_buffer;
4617 iter->cpu_file = tracing_get_cpu(inode);
4618 mutex_init(&iter->mutex);
4619 filp->private_data = iter;
4620
4621 if (iter->trace->pipe_open)
4622 iter->trace->pipe_open(iter);
4623
4624 nonseekable_open(inode, filp);
4625
4626 tr->current_trace->ref++;
4627out:
4628 mutex_unlock(&trace_types_lock);
4629 return ret;
4630
4631fail:
4632 kfree(iter->trace);
4633 kfree(iter);
4634 __trace_array_put(tr);
4635 mutex_unlock(&trace_types_lock);
4636 return ret;
4637}
4638
4639static int tracing_release_pipe(struct inode *inode, struct file *file)
4640{
4641 struct trace_iterator *iter = file->private_data;
4642 struct trace_array *tr = inode->i_private;
4643
4644 mutex_lock(&trace_types_lock);
4645
4646 tr->current_trace->ref--;
4647
4648 if (iter->trace->pipe_close)
4649 iter->trace->pipe_close(iter);
4650
4651 mutex_unlock(&trace_types_lock);
4652
4653 free_cpumask_var(iter->started);
4654 mutex_destroy(&iter->mutex);
4655 kfree(iter);
4656
4657 trace_array_put(tr);
4658
4659 return 0;
4660}
4661
4662static unsigned int
4663trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4664{
4665 struct trace_array *tr = iter->tr;
4666
4667 /* Iterators are static, they should be filled or empty */
4668 if (trace_buffer_iter(iter, iter->cpu_file))
4669 return POLLIN | POLLRDNORM;
4670
4671 if (tr->trace_flags & TRACE_ITER_BLOCK)
4672 /*
4673 * Always select as readable when in blocking mode
4674 */
4675 return POLLIN | POLLRDNORM;
4676 else
4677 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4678 filp, poll_table);
4679}
4680
4681static unsigned int
4682tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4683{
4684 struct trace_iterator *iter = filp->private_data;
4685
4686 return trace_poll(iter, filp, poll_table);
4687}
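/*
 * Userspace sketch of the poll support above (path is an assumption for
 * illustration; needs <poll.h>, <fcntl.h> and <unistd.h>):
 *
 *	struct pollfd pfd = {
 *		.fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY),
 *		.events = POLLIN,
 *	};
 *	if (pfd.fd >= 0 && poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		char buf[4096];
 *		read(pfd.fd, buf, sizeof(buf));
 *	}
 */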
4688
4689/* Must be called with iter->mutex held. */
4690static int tracing_wait_pipe(struct file *filp)
4691{
4692 struct trace_iterator *iter = filp->private_data;
4693 int ret;
4694
4695 while (trace_empty(iter)) {
4696
4697 if ((filp->f_flags & O_NONBLOCK)) {
4698 return -EAGAIN;
4699 }
4700
4701 /*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled but we have never
		 * read anything. This allows a user to cat this file and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is disabled again.
4707 *
4708 * iter->pos will be 0 if we haven't read anything.
4709 */
4710 if (!tracing_is_on() && iter->pos)
4711 break;
4712
4713 mutex_unlock(&iter->mutex);
4714
4715 ret = wait_on_pipe(iter, false);
4716
4717 mutex_lock(&iter->mutex);
4718
4719 if (ret)
4720 return ret;
4721 }
4722
4723 return 1;
4724}
4725
4726/*
4727 * Consumer reader.
4728 */
4729static ssize_t
4730tracing_read_pipe(struct file *filp, char __user *ubuf,
4731 size_t cnt, loff_t *ppos)
4732{
4733 struct trace_iterator *iter = filp->private_data;
4734 ssize_t sret;
4735
4736 /* return any leftover data */
4737 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4738 if (sret != -EBUSY)
4739 return sret;
4740
4741 trace_seq_init(&iter->seq);
4742
4743 /*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency; the ring buffer itself
	 * is protected.
4747 */
4748 mutex_lock(&iter->mutex);
4749 if (iter->trace->read) {
4750 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4751 if (sret)
4752 goto out;
4753 }
4754
4755waitagain:
4756 sret = tracing_wait_pipe(filp);
4757 if (sret <= 0)
4758 goto out;
4759
4760 /* stop when tracing is finished */
4761 if (trace_empty(iter)) {
4762 sret = 0;
4763 goto out;
4764 }
4765
4766 if (cnt >= PAGE_SIZE)
4767 cnt = PAGE_SIZE - 1;
4768
4769 /* reset all but tr, trace, and overruns */
4770 memset(&iter->seq, 0,
4771 sizeof(struct trace_iterator) -
4772 offsetof(struct trace_iterator, seq));
4773 cpumask_clear(iter->started);
4774 iter->pos = -1;
4775
4776 trace_event_read_lock();
4777 trace_access_lock(iter->cpu_file);
4778 while (trace_find_next_entry_inc(iter) != NULL) {
4779 enum print_line_t ret;
4780 int save_len = iter->seq.seq.len;
4781
4782 ret = print_trace_line(iter);
4783 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4784 /* don't print partial lines */
4785 iter->seq.seq.len = save_len;
4786 break;
4787 }
4788 if (ret != TRACE_TYPE_NO_CONSUME)
4789 trace_consume(iter);
4790
4791 if (trace_seq_used(&iter->seq) >= cnt)
4792 break;
4793
4794 /*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size, and we should have left via the partial-line condition
		 * above. If we get here, one of the trace_seq_* functions is
		 * not being used properly.
4798 */
4799 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4800 iter->ent->type);
4801 }
4802 trace_access_unlock(iter->cpu_file);
4803 trace_event_read_unlock();
4804
4805 /* Now copy what we have to the user */
4806 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4807 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4808 trace_seq_init(&iter->seq);
4809
4810 /*
4811 * If there was nothing to send to user, in spite of consuming trace
4812 * entries, go back to wait for more entries.
4813 */
4814 if (sret == -EBUSY)
4815 goto waitagain;
4816
4817out:
4818 mutex_unlock(&iter->mutex);
4819
4820 return sret;
4821}
4822
4823static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4824 unsigned int idx)
4825{
4826 __free_page(spd->pages[idx]);
4827}
4828
4829static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4830 .can_merge = 0,
4831 .confirm = generic_pipe_buf_confirm,
4832 .release = generic_pipe_buf_release,
4833 .steal = generic_pipe_buf_steal,
4834 .get = generic_pipe_buf_get,
4835};
4836
4837static size_t
4838tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4839{
4840 size_t count;
4841 int save_len;
4842 int ret;
4843
4844 /* Seq buffer is page-sized, exactly what we need. */
4845 for (;;) {
4846 save_len = iter->seq.seq.len;
4847 ret = print_trace_line(iter);
4848
4849 if (trace_seq_has_overflowed(&iter->seq)) {
4850 iter->seq.seq.len = save_len;
4851 break;
4852 }
4853
4854 /*
		 * This should not be hit, because TRACE_TYPE_PARTIAL_LINE
		 * should only be returned if iter->seq overflowed, which is
		 * handled above. But check anyway to be safe.
4858 */
4859 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4860 iter->seq.seq.len = save_len;
4861 break;
4862 }
4863
4864 count = trace_seq_used(&iter->seq) - save_len;
4865 if (rem < count) {
4866 rem = 0;
4867 iter->seq.seq.len = save_len;
4868 break;
4869 }
4870
4871 if (ret != TRACE_TYPE_NO_CONSUME)
4872 trace_consume(iter);
4873 rem -= count;
4874 if (!trace_find_next_entry_inc(iter)) {
4875 rem = 0;
4876 iter->ent = NULL;
4877 break;
4878 }
4879 }
4880
4881 return rem;
4882}
4883
4884static ssize_t tracing_splice_read_pipe(struct file *filp,
4885 loff_t *ppos,
4886 struct pipe_inode_info *pipe,
4887 size_t len,
4888 unsigned int flags)
4889{
4890 struct page *pages_def[PIPE_DEF_BUFFERS];
4891 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4892 struct trace_iterator *iter = filp->private_data;
4893 struct splice_pipe_desc spd = {
4894 .pages = pages_def,
4895 .partial = partial_def,
4896 .nr_pages = 0, /* This gets updated below. */
4897 .nr_pages_max = PIPE_DEF_BUFFERS,
4898 .flags = flags,
4899 .ops = &tracing_pipe_buf_ops,
4900 .spd_release = tracing_spd_release_pipe,
4901 };
4902 ssize_t ret;
4903 size_t rem;
4904 unsigned int i;
4905
4906 if (splice_grow_spd(pipe, &spd))
4907 return -ENOMEM;
4908
4909 mutex_lock(&iter->mutex);
4910
4911 if (iter->trace->splice_read) {
4912 ret = iter->trace->splice_read(iter, filp,
4913 ppos, pipe, len, flags);
4914 if (ret)
4915 goto out_err;
4916 }
4917
4918 ret = tracing_wait_pipe(filp);
4919 if (ret <= 0)
4920 goto out_err;
4921
4922 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4923 ret = -EFAULT;
4924 goto out_err;
4925 }
4926
4927 trace_event_read_lock();
4928 trace_access_lock(iter->cpu_file);
4929
4930 /* Fill as many pages as possible. */
4931 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4932 spd.pages[i] = alloc_page(GFP_KERNEL);
4933 if (!spd.pages[i])
4934 break;
4935
4936 rem = tracing_fill_pipe_page(rem, iter);
4937
4938 /* Copy the data into the page, so we can start over. */
4939 ret = trace_seq_to_buffer(&iter->seq,
4940 page_address(spd.pages[i]),
4941 trace_seq_used(&iter->seq));
4942 if (ret < 0) {
4943 __free_page(spd.pages[i]);
4944 break;
4945 }
4946 spd.partial[i].offset = 0;
4947 spd.partial[i].len = trace_seq_used(&iter->seq);
4948
4949 trace_seq_init(&iter->seq);
4950 }
4951
4952 trace_access_unlock(iter->cpu_file);
4953 trace_event_read_unlock();
4954 mutex_unlock(&iter->mutex);
4955
4956 spd.nr_pages = i;
4957
4958 if (i)
4959 ret = splice_to_pipe(pipe, &spd);
4960 else
4961 ret = 0;
4962out:
4963 splice_shrink_spd(&spd);
4964 return ret;
4965
4966out_err:
4967 mutex_unlock(&iter->mutex);
4968 goto out;
4969}
4970
4971static ssize_t
4972tracing_entries_read(struct file *filp, char __user *ubuf,
4973 size_t cnt, loff_t *ppos)
4974{
4975 struct inode *inode = file_inode(filp);
4976 struct trace_array *tr = inode->i_private;
4977 int cpu = tracing_get_cpu(inode);
4978 char buf[64];
4979 int r = 0;
4980 ssize_t ret;
4981
4982 mutex_lock(&trace_types_lock);
4983
4984 if (cpu == RING_BUFFER_ALL_CPUS) {
4985 int cpu, buf_size_same;
4986 unsigned long size;
4987
4988 size = 0;
4989 buf_size_same = 1;
		/* check if all cpu sizes are the same */
4991 for_each_tracing_cpu(cpu) {
4992 /* fill in the size from first enabled cpu */
4993 if (size == 0)
4994 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4995 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4996 buf_size_same = 0;
4997 break;
4998 }
4999 }
5000
5001 if (buf_size_same) {
5002 if (!ring_buffer_expanded)
5003 r = sprintf(buf, "%lu (expanded: %lu)\n",
5004 size >> 10,
5005 trace_buf_size >> 10);
5006 else
5007 r = sprintf(buf, "%lu\n", size >> 10);
5008 } else
5009 r = sprintf(buf, "X\n");
5010 } else
5011 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5012
5013 mutex_unlock(&trace_types_lock);
5014
5015 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5016 return ret;
5017}
5018
5019static ssize_t
5020tracing_entries_write(struct file *filp, const char __user *ubuf,
5021 size_t cnt, loff_t *ppos)
5022{
5023 struct inode *inode = file_inode(filp);
5024 struct trace_array *tr = inode->i_private;
5025 unsigned long val;
5026 int ret;
5027
5028 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5029 if (ret)
5030 return ret;
5031
5032 /* must have at least 1 entry */
5033 if (!val)
5034 return -EINVAL;
5035
5036 /* value is in KB */
5037 val <<= 10;
5038 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
5039 if (ret < 0)
5040 return ret;
5041
5042 *ppos += cnt;
5043
5044 return cnt;
5045}
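/*
 * Usage sketch: the value written is interpreted in KB (val <<= 10 above).
 * Writing to the top-level buffer_size_kb resizes every CPU's buffer, while
 * the per_cpu/cpuN/buffer_size_kb instances resize a single CPU. The path
 * is an assumption for illustration:
 *
 *	int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "4096", 4);
 *		close(fd);
 *	}
 */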
5046
5047static ssize_t
5048tracing_total_entries_read(struct file *filp, char __user *ubuf,
5049 size_t cnt, loff_t *ppos)
5050{
5051 struct trace_array *tr = filp->private_data;
5052 char buf[64];
5053 int r, cpu;
5054 unsigned long size = 0, expanded_size = 0;
5055
5056 mutex_lock(&trace_types_lock);
5057 for_each_tracing_cpu(cpu) {
5058 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5059 if (!ring_buffer_expanded)
5060 expanded_size += trace_buf_size >> 10;
5061 }
5062 if (ring_buffer_expanded)
5063 r = sprintf(buf, "%lu\n", size);
5064 else
5065 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5066 mutex_unlock(&trace_types_lock);
5067
5068 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5069}
5070
5071static ssize_t
5072tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5073 size_t cnt, loff_t *ppos)
5074{
5075 /*
	 * There is no need to read what the user has written; this function
	 * exists just to make sure that "echo" does not return an error.
5078 */
5079
5080 *ppos += cnt;
5081
5082 return cnt;
5083}
5084
5085static int
5086tracing_free_buffer_release(struct inode *inode, struct file *filp)
5087{
5088 struct trace_array *tr = inode->i_private;
5089
	/* disable tracing? */
5091 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5092 tracer_tracing_off(tr);
5093 /* resize the ring buffer to 0 */
5094 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5095
5096 trace_array_put(tr);
5097
5098 return 0;
5099}
5100
5101static ssize_t
5102tracing_mark_write(struct file *filp, const char __user *ubuf,
5103 size_t cnt, loff_t *fpos)
5104{
5105 unsigned long addr = (unsigned long)ubuf;
5106 struct trace_array *tr = filp->private_data;
5107 struct ring_buffer_event *event;
5108 struct ring_buffer *buffer;
5109 struct print_entry *entry;
5110 unsigned long irq_flags;
5111 struct page *pages[2];
5112 void *map_page[2];
5113 int nr_pages = 1;
5114 ssize_t written;
5115 int offset;
5116 int size;
5117 int len;
5118 int ret;
5119 int i;
5120
5121 if (tracing_disabled)
5122 return -EINVAL;
5123
5124 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5125 return -EINVAL;
5126
5127 if (cnt > TRACE_BUF_SIZE)
5128 cnt = TRACE_BUF_SIZE;
5129
5130 /*
5131 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non-intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory. It most
	 * likely already is, because userspace just referenced it, but
	 * there is no guarantee. By using get_user_pages_fast()
	 * and kmap_atomic()/kunmap_atomic() we can get access to the
	 * pages directly. We then write the data directly into the
	 * ring buffer.
5143 */
5144 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5145
5146 /* check if we cross pages */
5147 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5148 nr_pages = 2;
5149
5150 offset = addr & (PAGE_SIZE - 1);
5151 addr &= PAGE_MASK;
5152
5153 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5154 if (ret < nr_pages) {
5155 while (--ret >= 0)
5156 put_page(pages[ret]);
5157 written = -EFAULT;
5158 goto out;
5159 }
5160
5161 for (i = 0; i < nr_pages; i++)
5162 map_page[i] = kmap_atomic(pages[i]);
5163
5164 local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* room for a possible '\n' and a '\0' */
5166 buffer = tr->trace_buffer.buffer;
5167 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5168 irq_flags, preempt_count());
5169 if (!event) {
5170 /* Ring buffer disabled, return as if not open for write */
5171 written = -EBADF;
5172 goto out_unlock;
5173 }
5174
5175 entry = ring_buffer_event_data(event);
5176 entry->ip = _THIS_IP_;
5177
5178 if (nr_pages == 2) {
5179 len = PAGE_SIZE - offset;
5180 memcpy(&entry->buf, map_page[0] + offset, len);
5181 memcpy(&entry->buf[len], map_page[1], cnt - len);
5182 } else
5183 memcpy(&entry->buf, map_page[0] + offset, cnt);
5184
5185 if (entry->buf[cnt - 1] != '\n') {
5186 entry->buf[cnt] = '\n';
5187 entry->buf[cnt + 1] = '\0';
5188 } else
5189 entry->buf[cnt] = '\0';
5190
5191 __buffer_unlock_commit(buffer, event);
5192
5193 written = cnt;
5194
5195 *fpos += written;
5196
5197 out_unlock:
5198 for (i = nr_pages - 1; i >= 0; i--) {
5199 kunmap_atomic(map_page[i]);
5200 put_page(pages[i]);
5201 }
5202 out:
5203 return written;
5204}
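/*
 * Userspace sketch for the trace_marker write path implemented above
 * (mount point is an assumption for illustration):
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "hello from userspace", 20);
 *		close(fd);
 *	}
 *
 * The string shows up in the trace as a TRACE_PRINT entry, with a newline
 * appended if one was not supplied.
 */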
5205
5206static int tracing_clock_show(struct seq_file *m, void *v)
5207{
5208 struct trace_array *tr = m->private;
5209 int i;
5210
5211 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5212 seq_printf(m,
5213 "%s%s%s%s", i ? " " : "",
5214 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5215 i == tr->clock_id ? "]" : "");
5216 seq_putc(m, '\n');
5217
5218 return 0;
5219}
5220
5221static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5222{
5223 int i;
5224
5225 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5226 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5227 break;
5228 }
5229 if (i == ARRAY_SIZE(trace_clocks))
5230 return -EINVAL;
5231
5232 mutex_lock(&trace_types_lock);
5233
5234 tr->clock_id = i;
5235
5236 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5237
5238 /*
	 * The new clock may not be consistent with the previous clock.
5240 * Reset the buffer so that it doesn't have incomparable timestamps.
5241 */
5242 tracing_reset_online_cpus(&tr->trace_buffer);
5243
5244#ifdef CONFIG_TRACER_MAX_TRACE
5245 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5246 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5247 tracing_reset_online_cpus(&tr->max_buffer);
5248#endif
5249
5250 mutex_unlock(&trace_types_lock);
5251
5252 return 0;
5253}
5254
5255static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5256 size_t cnt, loff_t *fpos)
5257{
5258 struct seq_file *m = filp->private_data;
5259 struct trace_array *tr = m->private;
5260 char buf[64];
5261 const char *clockstr;
5262 int ret;
5263
5264 if (cnt >= sizeof(buf))
5265 return -EINVAL;
5266
5267 if (copy_from_user(&buf, ubuf, cnt))
5268 return -EFAULT;
5269
5270 buf[cnt] = 0;
5271
5272 clockstr = strstrip(buf);
5273
5274 ret = tracing_set_clock(tr, clockstr);
5275 if (ret)
5276 return ret;
5277
5278 *fpos += cnt;
5279
5280 return cnt;
5281}
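/*
 * Usage sketch for the trace_clock file handled above: reading it lists
 * the available clocks with the current one in brackets, and writing a
 * clock name switches to it (and resets the buffers, see
 * tracing_set_clock()). The path is an assumption for illustration:
 *
 *	int fd = open("/sys/kernel/tracing/trace_clock", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "global", 6);
 *		close(fd);
 *	}
 */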
5282
5283static int tracing_clock_open(struct inode *inode, struct file *file)
5284{
5285 struct trace_array *tr = inode->i_private;
5286 int ret;
5287
5288 if (tracing_disabled)
5289 return -ENODEV;
5290
5291 if (trace_array_get(tr))
5292 return -ENODEV;
5293
5294 ret = single_open(file, tracing_clock_show, inode->i_private);
5295 if (ret < 0)
5296 trace_array_put(tr);
5297
5298 return ret;
5299}
5300
5301struct ftrace_buffer_info {
5302 struct trace_iterator iter;
5303 void *spare;
5304 unsigned int read;
5305};
5306
5307#ifdef CONFIG_TRACER_SNAPSHOT
5308static int tracing_snapshot_open(struct inode *inode, struct file *file)
5309{
5310 struct trace_array *tr = inode->i_private;
5311 struct trace_iterator *iter;
5312 struct seq_file *m;
5313 int ret = 0;
5314
5315 if (trace_array_get(tr) < 0)
5316 return -ENODEV;
5317
5318 if (file->f_mode & FMODE_READ) {
5319 iter = __tracing_open(inode, file, true);
5320 if (IS_ERR(iter))
5321 ret = PTR_ERR(iter);
5322 } else {
5323 /* Writes still need the seq_file to hold the private data */
5324 ret = -ENOMEM;
5325 m = kzalloc(sizeof(*m), GFP_KERNEL);
5326 if (!m)
5327 goto out;
5328 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5329 if (!iter) {
5330 kfree(m);
5331 goto out;
5332 }
5333 ret = 0;
5334
5335 iter->tr = tr;
5336 iter->trace_buffer = &tr->max_buffer;
5337 iter->cpu_file = tracing_get_cpu(inode);
5338 m->private = iter;
5339 file->private_data = m;
5340 }
5341out:
5342 if (ret < 0)
5343 trace_array_put(tr);
5344
5345 return ret;
5346}
5347
5348static ssize_t
5349tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5350 loff_t *ppos)
5351{
5352 struct seq_file *m = filp->private_data;
5353 struct trace_iterator *iter = m->private;
5354 struct trace_array *tr = iter->tr;
5355 unsigned long val;
5356 int ret;
5357
5358 ret = tracing_update_buffers();
5359 if (ret < 0)
5360 return ret;
5361
5362 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5363 if (ret)
5364 return ret;
5365
5366 mutex_lock(&trace_types_lock);
5367
5368 if (tr->current_trace->use_max_tr) {
5369 ret = -EBUSY;
5370 goto out;
5371 }
5372
5373 switch (val) {
5374 case 0:
5375 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5376 ret = -EINVAL;
5377 break;
5378 }
5379 if (tr->allocated_snapshot)
5380 free_snapshot(tr);
5381 break;
5382 case 1:
5383/* Only allow per-cpu swap if the ring buffer supports it */
5384#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5385 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5386 ret = -EINVAL;
5387 break;
5388 }
5389#endif
5390 if (!tr->allocated_snapshot) {
5391 ret = alloc_snapshot(tr);
5392 if (ret < 0)
5393 break;
5394 }
5395 local_irq_disable();
5396 /* Now, we're going to swap */
5397 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5398 update_max_tr(tr, current, smp_processor_id());
5399 else
5400 update_max_tr_single(tr, current, iter->cpu_file);
5401 local_irq_enable();
5402 break;
5403 default:
5404 if (tr->allocated_snapshot) {
5405 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5406 tracing_reset_online_cpus(&tr->max_buffer);
5407 else
5408 tracing_reset(&tr->max_buffer, iter->cpu_file);
5409 }
5410 break;
5411 }
5412
5413 if (ret >= 0) {
5414 *ppos += cnt;
5415 ret = cnt;
5416 }
5417out:
5418 mutex_unlock(&trace_types_lock);
5419 return ret;
5420}
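/*
 * Usage sketch for the snapshot file handled above. Per the switch
 * statement: writing 0 frees the snapshot buffer, writing 1 allocates it
 * (if needed) and takes a snapshot, and any other value clears the
 * snapshot contents. The path is an assumption for illustration:
 *
 *	int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);
 *		close(fd);
 *	}
 */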
5421
5422static int tracing_snapshot_release(struct inode *inode, struct file *file)
5423{
5424 struct seq_file *m = file->private_data;
5425 int ret;
5426
5427 ret = tracing_release(inode, file);
5428
5429 if (file->f_mode & FMODE_READ)
5430 return ret;
5431
5432 /* If write only, the seq_file is just a stub */
5433 if (m)
5434 kfree(m->private);
5435 kfree(m);
5436
5437 return 0;
5438}
5439
5440static int tracing_buffers_open(struct inode *inode, struct file *filp);
5441static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5442 size_t count, loff_t *ppos);
5443static int tracing_buffers_release(struct inode *inode, struct file *file);
5444static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5445 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5446
5447static int snapshot_raw_open(struct inode *inode, struct file *filp)
5448{
5449 struct ftrace_buffer_info *info;
5450 int ret;
5451
5452 ret = tracing_buffers_open(inode, filp);
5453 if (ret < 0)
5454 return ret;
5455
5456 info = filp->private_data;
5457
5458 if (info->iter.trace->use_max_tr) {
5459 tracing_buffers_release(inode, filp);
5460 return -EBUSY;
5461 }
5462
5463 info->iter.snapshot = true;
5464 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5465
5466 return ret;
5467}
5468
5469#endif /* CONFIG_TRACER_SNAPSHOT */
5470
5471
5472static const struct file_operations tracing_thresh_fops = {
5473 .open = tracing_open_generic,
5474 .read = tracing_thresh_read,
5475 .write = tracing_thresh_write,
5476 .llseek = generic_file_llseek,
5477};
5478
5479#ifdef CONFIG_TRACER_MAX_TRACE
5480static const struct file_operations tracing_max_lat_fops = {
5481 .open = tracing_open_generic,
5482 .read = tracing_max_lat_read,
5483 .write = tracing_max_lat_write,
5484 .llseek = generic_file_llseek,
5485};
5486#endif
5487
5488static const struct file_operations set_tracer_fops = {
5489 .open = tracing_open_generic,
5490 .read = tracing_set_trace_read,
5491 .write = tracing_set_trace_write,
5492 .llseek = generic_file_llseek,
5493};
5494
5495static const struct file_operations tracing_pipe_fops = {
5496 .open = tracing_open_pipe,
5497 .poll = tracing_poll_pipe,
5498 .read = tracing_read_pipe,
5499 .splice_read = tracing_splice_read_pipe,
5500 .release = tracing_release_pipe,
5501 .llseek = no_llseek,
5502};
5503
5504static const struct file_operations tracing_entries_fops = {
5505 .open = tracing_open_generic_tr,
5506 .read = tracing_entries_read,
5507 .write = tracing_entries_write,
5508 .llseek = generic_file_llseek,
5509 .release = tracing_release_generic_tr,
5510};
5511
5512static const struct file_operations tracing_total_entries_fops = {
5513 .open = tracing_open_generic_tr,
5514 .read = tracing_total_entries_read,
5515 .llseek = generic_file_llseek,
5516 .release = tracing_release_generic_tr,
5517};
5518
5519static const struct file_operations tracing_free_buffer_fops = {
5520 .open = tracing_open_generic_tr,
5521 .write = tracing_free_buffer_write,
5522 .release = tracing_free_buffer_release,
5523};
5524
5525static const struct file_operations tracing_mark_fops = {
5526 .open = tracing_open_generic_tr,
5527 .write = tracing_mark_write,
5528 .llseek = generic_file_llseek,
5529 .release = tracing_release_generic_tr,
5530};
5531
5532static const struct file_operations trace_clock_fops = {
5533 .open = tracing_clock_open,
5534 .read = seq_read,
5535 .llseek = seq_lseek,
5536 .release = tracing_single_release_tr,
5537 .write = tracing_clock_write,
5538};
5539
5540#ifdef CONFIG_TRACER_SNAPSHOT
5541static const struct file_operations snapshot_fops = {
5542 .open = tracing_snapshot_open,
5543 .read = seq_read,
5544 .write = tracing_snapshot_write,
5545 .llseek = tracing_lseek,
5546 .release = tracing_snapshot_release,
5547};
5548
5549static const struct file_operations snapshot_raw_fops = {
5550 .open = snapshot_raw_open,
5551 .read = tracing_buffers_read,
5552 .release = tracing_buffers_release,
5553 .splice_read = tracing_buffers_splice_read,
5554 .llseek = no_llseek,
5555};
5556
5557#endif /* CONFIG_TRACER_SNAPSHOT */
5558
5559static int tracing_buffers_open(struct inode *inode, struct file *filp)
5560{
5561 struct trace_array *tr = inode->i_private;
5562 struct ftrace_buffer_info *info;
5563 int ret;
5564
5565 if (tracing_disabled)
5566 return -ENODEV;
5567
5568 if (trace_array_get(tr) < 0)
5569 return -ENODEV;
5570
5571 info = kzalloc(sizeof(*info), GFP_KERNEL);
5572 if (!info) {
5573 trace_array_put(tr);
5574 return -ENOMEM;
5575 }
5576
5577 mutex_lock(&trace_types_lock);
5578
5579 info->iter.tr = tr;
5580 info->iter.cpu_file = tracing_get_cpu(inode);
5581 info->iter.trace = tr->current_trace;
5582 info->iter.trace_buffer = &tr->trace_buffer;
5583 info->spare = NULL;
5584 /* Force reading ring buffer for first read */
5585 info->read = (unsigned int)-1;
5586
5587 filp->private_data = info;
5588
5589 tr->current_trace->ref++;
5590
5591 mutex_unlock(&trace_types_lock);
5592
5593 ret = nonseekable_open(inode, filp);
5594 if (ret < 0)
5595 trace_array_put(tr);
5596
5597 return ret;
5598}
5599
5600static unsigned int
5601tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5602{
5603 struct ftrace_buffer_info *info = filp->private_data;
5604 struct trace_iterator *iter = &info->iter;
5605
5606 return trace_poll(iter, filp, poll_table);
5607}
5608
5609static ssize_t
5610tracing_buffers_read(struct file *filp, char __user *ubuf,
5611 size_t count, loff_t *ppos)
5612{
5613 struct ftrace_buffer_info *info = filp->private_data;
5614 struct trace_iterator *iter = &info->iter;
5615 ssize_t ret;
5616 ssize_t size;
5617
5618 if (!count)
5619 return 0;
5620
5621#ifdef CONFIG_TRACER_MAX_TRACE
5622 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5623 return -EBUSY;
5624#endif
5625
5626 if (!info->spare)
5627 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5628 iter->cpu_file);
5629 if (!info->spare)
5630 return -ENOMEM;
5631
5632 /* Do we have previous read data to read? */
5633 if (info->read < PAGE_SIZE)
5634 goto read;
5635
5636 again:
5637 trace_access_lock(iter->cpu_file);
5638 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5639 &info->spare,
5640 count,
5641 iter->cpu_file, 0);
5642 trace_access_unlock(iter->cpu_file);
5643
5644 if (ret < 0) {
5645 if (trace_empty(iter)) {
5646 if ((filp->f_flags & O_NONBLOCK))
5647 return -EAGAIN;
5648
5649 ret = wait_on_pipe(iter, false);
5650 if (ret)
5651 return ret;
5652
5653 goto again;
5654 }
5655 return 0;
5656 }
5657
5658 info->read = 0;
5659 read:
5660 size = PAGE_SIZE - info->read;
5661 if (size > count)
5662 size = count;
5663
5664 ret = copy_to_user(ubuf, info->spare + info->read, size);
5665 if (ret == size)
5666 return -EFAULT;
5667
5668 size -= ret;
5669
5670 *ppos += size;
5671 info->read += size;
5672
5673 return size;
5674}
5675
5676static int tracing_buffers_release(struct inode *inode, struct file *file)
5677{
5678 struct ftrace_buffer_info *info = file->private_data;
5679 struct trace_iterator *iter = &info->iter;
5680
5681 mutex_lock(&trace_types_lock);
5682
5683 iter->tr->current_trace->ref--;
5684
5685 __trace_array_put(iter->tr);
5686
5687 if (info->spare)
5688 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5689 kfree(info);
5690
5691 mutex_unlock(&trace_types_lock);
5692
5693 return 0;
5694}
5695
5696struct buffer_ref {
5697 struct ring_buffer *buffer;
5698 void *page;
5699 int ref;
5700};
5701
5702static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5703 struct pipe_buffer *buf)
5704{
5705 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5706
5707 if (--ref->ref)
5708 return;
5709
5710 ring_buffer_free_read_page(ref->buffer, ref->page);
5711 kfree(ref);
5712 buf->private = 0;
5713}
5714
5715static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5716 struct pipe_buffer *buf)
5717{
5718 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5719
5720 ref->ref++;
5721}
5722
5723/* Pipe buffer operations for a buffer. */
5724static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5725 .can_merge = 0,
5726 .confirm = generic_pipe_buf_confirm,
5727 .release = buffer_pipe_buf_release,
5728 .steal = generic_pipe_buf_steal,
5729 .get = buffer_pipe_buf_get,
5730};
5731
5732/*
 * Callback from splice_to_pipe(); releases pages left in the spd
 * in case we errored out while filling the pipe.
5735 */
5736static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5737{
5738 struct buffer_ref *ref =
5739 (struct buffer_ref *)spd->partial[i].private;
5740
5741 if (--ref->ref)
5742 return;
5743
5744 ring_buffer_free_read_page(ref->buffer, ref->page);
5745 kfree(ref);
5746 spd->partial[i].private = 0;
5747}
5748
5749static ssize_t
5750tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5751 struct pipe_inode_info *pipe, size_t len,
5752 unsigned int flags)
5753{
5754 struct ftrace_buffer_info *info = file->private_data;
5755 struct trace_iterator *iter = &info->iter;
5756 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5757 struct page *pages_def[PIPE_DEF_BUFFERS];
5758 struct splice_pipe_desc spd = {
5759 .pages = pages_def,
5760 .partial = partial_def,
5761 .nr_pages_max = PIPE_DEF_BUFFERS,
5762 .flags = flags,
5763 .ops = &buffer_pipe_buf_ops,
5764 .spd_release = buffer_spd_release,
5765 };
5766 struct buffer_ref *ref;
5767 int entries, size, i;
5768 ssize_t ret = 0;
5769
5770#ifdef CONFIG_TRACER_MAX_TRACE
5771 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5772 return -EBUSY;
5773#endif
5774
5775 if (splice_grow_spd(pipe, &spd))
5776 return -ENOMEM;
5777
5778 if (*ppos & (PAGE_SIZE - 1))
5779 return -EINVAL;
5780
5781 if (len & (PAGE_SIZE - 1)) {
5782 if (len < PAGE_SIZE)
5783 return -EINVAL;
5784 len &= PAGE_MASK;
5785 }
5786
5787 again:
5788 trace_access_lock(iter->cpu_file);
5789 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5790
5791 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5792 struct page *page;
5793 int r;
5794
5795 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5796 if (!ref) {
5797 ret = -ENOMEM;
5798 break;
5799 }
5800
5801 ref->ref = 1;
5802 ref->buffer = iter->trace_buffer->buffer;
5803 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5804 if (!ref->page) {
5805 ret = -ENOMEM;
5806 kfree(ref);
5807 break;
5808 }
5809
5810 r = ring_buffer_read_page(ref->buffer, &ref->page,
5811 len, iter->cpu_file, 1);
5812 if (r < 0) {
5813 ring_buffer_free_read_page(ref->buffer, ref->page);
5814 kfree(ref);
5815 break;
5816 }
5817
5818 /*
		 * Zero out any leftover data; this is going to
		 * userland.
5821 */
5822 size = ring_buffer_page_len(ref->page);
5823 if (size < PAGE_SIZE)
5824 memset(ref->page + size, 0, PAGE_SIZE - size);
5825
5826 page = virt_to_page(ref->page);
5827
5828 spd.pages[i] = page;
5829 spd.partial[i].len = PAGE_SIZE;
5830 spd.partial[i].offset = 0;
5831 spd.partial[i].private = (unsigned long)ref;
5832 spd.nr_pages++;
5833 *ppos += PAGE_SIZE;
5834
5835 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5836 }
5837
5838 trace_access_unlock(iter->cpu_file);
5839 spd.nr_pages = i;
5840
5841 /* did we read anything? */
5842 if (!spd.nr_pages) {
5843 if (ret)
5844 return ret;
5845
5846 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5847 return -EAGAIN;
5848
5849 ret = wait_on_pipe(iter, true);
5850 if (ret)
5851 return ret;
5852
5853 goto again;
5854 }
5855
5856 ret = splice_to_pipe(pipe, &spd);
5857 splice_shrink_spd(&spd);
5858
5859 return ret;
5860}
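/*
 * Userspace sketch of the splice path above, which is how tools such as
 * trace-cmd pull raw ring-buffer pages without copying through a read()
 * buffer. Paths and sizes are assumptions for illustration:
 *
 *	int tfd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		       O_RDONLY);
 *	int ofd = open("trace.raw", O_WRONLY | O_CREAT, 0644);
 *	int pfd[2];
 *
 *	if (tfd >= 0 && ofd >= 0 && pipe(pfd) == 0) {
 *		ssize_t n = splice(tfd, NULL, pfd[1], NULL, 4096, 0);
 *		if (n > 0)
 *			splice(pfd[0], NULL, ofd, NULL, n, 0);
 *	}
 */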
5861
5862static const struct file_operations tracing_buffers_fops = {
5863 .open = tracing_buffers_open,
5864 .read = tracing_buffers_read,
5865 .poll = tracing_buffers_poll,
5866 .release = tracing_buffers_release,
5867 .splice_read = tracing_buffers_splice_read,
5868 .llseek = no_llseek,
5869};
5870
5871static ssize_t
5872tracing_stats_read(struct file *filp, char __user *ubuf,
5873 size_t count, loff_t *ppos)
5874{
5875 struct inode *inode = file_inode(filp);
5876 struct trace_array *tr = inode->i_private;
5877 struct trace_buffer *trace_buf = &tr->trace_buffer;
5878 int cpu = tracing_get_cpu(inode);
5879 struct trace_seq *s;
5880 unsigned long cnt;
5881 unsigned long long t;
5882 unsigned long usec_rem;
5883
5884 s = kmalloc(sizeof(*s), GFP_KERNEL);
5885 if (!s)
5886 return -ENOMEM;
5887
5888 trace_seq_init(s);
5889
5890 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5891 trace_seq_printf(s, "entries: %ld\n", cnt);
5892
5893 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5894 trace_seq_printf(s, "overrun: %ld\n", cnt);
5895
5896 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5897 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5898
5899 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5900 trace_seq_printf(s, "bytes: %ld\n", cnt);
5901
5902 if (trace_clocks[tr->clock_id].in_ns) {
5903 /* local or global for trace_clock */
5904 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5905 usec_rem = do_div(t, USEC_PER_SEC);
5906 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5907 t, usec_rem);
5908
5909 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5910 usec_rem = do_div(t, USEC_PER_SEC);
5911 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5912 } else {
5913 /* counter or tsc mode for trace_clock */
5914 trace_seq_printf(s, "oldest event ts: %llu\n",
5915 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5916
5917 trace_seq_printf(s, "now ts: %llu\n",
5918 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5919 }
5920
5921 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5922 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5923
5924 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5925 trace_seq_printf(s, "read events: %ld\n", cnt);
5926
5927 count = simple_read_from_buffer(ubuf, count, ppos,
5928 s->buffer, trace_seq_used(s));
5929
5930 kfree(s);
5931
5932 return count;
5933}
5934
5935static const struct file_operations tracing_stats_fops = {
5936 .open = tracing_open_generic_tr,
5937 .read = tracing_stats_read,
5938 .llseek = generic_file_llseek,
5939 .release = tracing_release_generic_tr,
5940};
5941
5942#ifdef CONFIG_DYNAMIC_FTRACE
5943
5944int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5945{
5946 return 0;
5947}
5948
5949static ssize_t
5950tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5951 size_t cnt, loff_t *ppos)
5952{
5953 static char ftrace_dyn_info_buffer[1024];
5954 static DEFINE_MUTEX(dyn_info_mutex);
5955 unsigned long *p = filp->private_data;
5956 char *buf = ftrace_dyn_info_buffer;
5957 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5958 int r;
5959
5960 mutex_lock(&dyn_info_mutex);
5961 r = sprintf(buf, "%ld ", *p);
5962
5963 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5964 buf[r++] = '\n';
5965
5966 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5967
5968 mutex_unlock(&dyn_info_mutex);
5969
5970 return r;
5971}
5972
5973static const struct file_operations tracing_dyn_info_fops = {
5974 .open = tracing_open_generic,
5975 .read = tracing_read_dyn_info,
5976 .llseek = generic_file_llseek,
5977};
5978#endif /* CONFIG_DYNAMIC_FTRACE */
5979
5980#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5981static void
5982ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5983{
5984 tracing_snapshot();
5985}
5986
5987static void
5988ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5989{
5990 unsigned long *count = (long *)data;
5991
5992 if (!*count)
5993 return;
5994
5995 if (*count != -1)
5996 (*count)--;
5997
5998 tracing_snapshot();
5999}
6000
6001static int
6002ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6003 struct ftrace_probe_ops *ops, void *data)
6004{
6005 long count = (long)data;
6006
6007 seq_printf(m, "%ps:", (void *)ip);
6008
6009 seq_puts(m, "snapshot");
6010
6011 if (count == -1)
6012 seq_puts(m, ":unlimited\n");
6013 else
6014 seq_printf(m, ":count=%ld\n", count);
6015
6016 return 0;
6017}
6018
6019static struct ftrace_probe_ops snapshot_probe_ops = {
6020 .func = ftrace_snapshot,
6021 .print = ftrace_snapshot_print,
6022};
6023
6024static struct ftrace_probe_ops snapshot_count_probe_ops = {
6025 .func = ftrace_count_snapshot,
6026 .print = ftrace_snapshot_print,
6027};
6028
6029static int
6030ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6031 char *glob, char *cmd, char *param, int enable)
6032{
6033 struct ftrace_probe_ops *ops;
6034 void *count = (void *)-1;
6035 char *number;
6036 int ret;
6037
6038 /* hash funcs only work with set_ftrace_filter */
6039 if (!enable)
6040 return -EINVAL;
6041
6042 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6043
6044 if (glob[0] == '!') {
6045 unregister_ftrace_function_probe_func(glob+1, ops);
6046 return 0;
6047 }
6048
6049 if (!param)
6050 goto out_reg;
6051
6052 number = strsep(¶m, ":");
6053
6054 if (!strlen(number))
6055 goto out_reg;
6056
6057 /*
6058 * We use the callback data field (which is a pointer)
6059 * as our counter.
6060 */
6061 ret = kstrtoul(number, 0, (unsigned long *)&count);
6062 if (ret)
6063 return ret;
6064
6065 out_reg:
6066 ret = register_ftrace_function_probe(glob, ops, count);
6067
6068 if (ret >= 0)
6069 alloc_snapshot(&global_trace);
6070
6071 return ret < 0 ? ret : 0;
6072}
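/*
 * Usage sketch for the "snapshot" ftrace command registered below: the
 * command is written into set_ftrace_filter, optionally with a count.
 * The path is an assumption for illustration:
 *
 *	int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "schedule:snapshot:1", 19);
 *		close(fd);
 *	}
 *
 * This arms a snapshot to be taken the first time schedule() is hit.
 */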
6073
6074static struct ftrace_func_command ftrace_snapshot_cmd = {
6075 .name = "snapshot",
6076 .func = ftrace_trace_snapshot_callback,
6077};
6078
6079static __init int register_snapshot_cmd(void)
6080{
6081 return register_ftrace_command(&ftrace_snapshot_cmd);
6082}
6083#else
6084static inline __init int register_snapshot_cmd(void) { return 0; }
6085#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
6086
6087static struct dentry *tracing_get_dentry(struct trace_array *tr)
6088{
6089 if (WARN_ON(!tr->dir))
6090 return ERR_PTR(-ENODEV);
6091
6092 /* Top directory uses NULL as the parent */
6093 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6094 return NULL;
6095
6096 /* All sub buffers have a descriptor */
6097 return tr->dir;
6098}
6099
6100static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6101{
6102 struct dentry *d_tracer;
6103
6104 if (tr->percpu_dir)
6105 return tr->percpu_dir;
6106
6107 d_tracer = tracing_get_dentry(tr);
6108 if (IS_ERR(d_tracer))
6109 return NULL;
6110
6111 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6112
6113 WARN_ONCE(!tr->percpu_dir,
6114 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6115
6116 return tr->percpu_dir;
6117}
6118
6119static struct dentry *
6120trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6121 void *data, long cpu, const struct file_operations *fops)
6122{
6123 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6124
6125 if (ret) /* See tracing_get_cpu() */
6126 d_inode(ret)->i_cdev = (void *)(cpu + 1);
6127 return ret;
6128}
6129
6130static void
6131tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6132{
6133 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6134 struct dentry *d_cpu;
6135 char cpu_dir[30]; /* 30 characters should be more than enough */
6136
6137 if (!d_percpu)
6138 return;
6139
6140 snprintf(cpu_dir, 30, "cpu%ld", cpu);
6141 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6142 if (!d_cpu) {
6143 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
6144 return;
6145 }
6146
6147 /* per cpu trace_pipe */
6148 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6149 tr, cpu, &tracing_pipe_fops);
6150
6151 /* per cpu trace */
6152 trace_create_cpu_file("trace", 0644, d_cpu,
6153 tr, cpu, &tracing_fops);
6154
6155 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6156 tr, cpu, &tracing_buffers_fops);
6157
6158 trace_create_cpu_file("stats", 0444, d_cpu,
6159 tr, cpu, &tracing_stats_fops);
6160
6161 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6162 tr, cpu, &tracing_entries_fops);
6163
6164#ifdef CONFIG_TRACER_SNAPSHOT
6165 trace_create_cpu_file("snapshot", 0644, d_cpu,
6166 tr, cpu, &snapshot_fops);
6167
6168 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6169 tr, cpu, &snapshot_raw_fops);
6170#endif
6171}
6172
6173#ifdef CONFIG_FTRACE_SELFTEST
6174/* Let selftest have access to static functions in this file */
6175#include "trace_selftest.c"
6176#endif
6177
6178static ssize_t
6179trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6180 loff_t *ppos)
6181{
6182 struct trace_option_dentry *topt = filp->private_data;
6183 char *buf;
6184
6185 if (topt->flags->val & topt->opt->bit)
6186 buf = "1\n";
6187 else
6188 buf = "0\n";
6189
6190 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6191}
6192
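/*
 * Write handler for a tracer-specific option file. Only "0" and "1"
 * are accepted; the tracer flag is updated only if the value changed.
 */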
6193static ssize_t
6194trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6195 loff_t *ppos)
6196{
6197 struct trace_option_dentry *topt = filp->private_data;
6198 unsigned long val;
6199 int ret;
6200
6201 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6202 if (ret)
6203 return ret;
6204
6205 if (val != 0 && val != 1)
6206 return -EINVAL;
6207
6208 if (!!(topt->flags->val & topt->opt->bit) != val) {
6209 mutex_lock(&trace_types_lock);
6210 ret = __set_tracer_option(topt->tr, topt->flags,
6211 topt->opt, !val);
6212 mutex_unlock(&trace_types_lock);
6213 if (ret)
6214 return ret;
6215 }
6216
6217 *ppos += cnt;
6218
6219 return cnt;
6220}
6221
6222
6223static const struct file_operations trace_options_fops = {
6224 .open = tracing_open_generic,
6225 .read = trace_options_read,
6226 .write = trace_options_write,
6227 .llseek = generic_file_llseek,
6228};
6229
6230/*
6231 * In order to pass in both the trace_array descriptor as well as the index
6232 * to the flag that the trace option file represents, the trace_array
6233 * has a character array of trace_flags_index[], which holds the index
6234 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6235 * The address of this character array is passed to the flag option file
6236 * read/write callbacks.
6237 *
6238 * In order to extract both the index and the trace_array descriptor,
6239 * get_tr_index() uses the following algorithm.
6240 *
6241 * idx = *ptr;
6242 *
6243 * This works because the pointer itself is the address of an index
6244 * entry, and each entry holds its own index (remember index[1] == 1).
6245 *
6246 * Then, to get the trace_array descriptor, we subtract that index
6247 * from the pointer, which takes us to the start of the index array.
6248 *
6249 * ptr - idx == &index[0]
6250 *
6251 * Then a simple container_of() from that pointer gets us to the
6252 * trace_array descriptor.
6253 */
6254static void get_tr_index(void *data, struct trace_array **ptr,
6255 unsigned int *pindex)
6256{
6257 *pindex = *(unsigned char *)data;
6258
6259 *ptr = container_of(data - *pindex, struct trace_array,
6260 trace_flags_index);
6261}
6262
6263static ssize_t
6264trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6265 loff_t *ppos)
6266{
6267 void *tr_index = filp->private_data;
6268 struct trace_array *tr;
6269 unsigned int index;
6270 char *buf;
6271
6272 get_tr_index(tr_index, &tr, &index);
6273
6274 if (tr->trace_flags & (1 << index))
6275 buf = "1\n";
6276 else
6277 buf = "0\n";
6278
6279 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6280}
6281
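/*
 * Write handler for a core trace option file. Only "0" and "1" are
 * accepted; the corresponding bit of tr->trace_flags is updated via
 * set_tracer_flag() under trace_types_lock.
 */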
6282static ssize_t
6283trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6284 loff_t *ppos)
6285{
6286 void *tr_index = filp->private_data;
6287 struct trace_array *tr;
6288 unsigned int index;
6289 unsigned long val;
6290 int ret;
6291
6292 get_tr_index(tr_index, &tr, &index);
6293
6294 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6295 if (ret)
6296 return ret;
6297
6298 if (val != 0 && val != 1)
6299 return -EINVAL;
6300
6301 mutex_lock(&trace_types_lock);
6302 ret = set_tracer_flag(tr, 1 << index, val);
6303 mutex_unlock(&trace_types_lock);
6304
6305 if (ret < 0)
6306 return ret;
6307
6308 *ppos += cnt;
6309
6310 return cnt;
6311}
6312
6313static const struct file_operations trace_options_core_fops = {
6314 .open = tracing_open_generic,
6315 .read = trace_options_core_read,
6316 .write = trace_options_core_write,
6317 .llseek = generic_file_llseek,
6318};
6319
6320struct dentry *trace_create_file(const char *name,
6321 umode_t mode,
6322 struct dentry *parent,
6323 void *data,
6324 const struct file_operations *fops)
6325{
6326 struct dentry *ret;
6327
6328 ret = tracefs_create_file(name, mode, parent, data, fops);
6329 if (!ret)
6330 pr_warn("Could not create tracefs '%s' entry\n", name);
6331
6332 return ret;
6333}
6334
6335
6336static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6337{
6338 struct dentry *d_tracer;
6339
6340 if (tr->options)
6341 return tr->options;
6342
6343 d_tracer = tracing_get_dentry(tr);
6344 if (IS_ERR(d_tracer))
6345 return NULL;
6346
6347 tr->options = tracefs_create_dir("options", d_tracer);
6348 if (!tr->options) {
6349 pr_warn("Could not create tracefs directory 'options'\n");
6350 return NULL;
6351 }
6352
6353 return tr->options;
6354}
6355
6356static void
6357create_trace_option_file(struct trace_array *tr,
6358 struct trace_option_dentry *topt,
6359 struct tracer_flags *flags,
6360 struct tracer_opt *opt)
6361{
6362 struct dentry *t_options;
6363
6364 t_options = trace_options_init_dentry(tr);
6365 if (!t_options)
6366 return;
6367
6368 topt->flags = flags;
6369 topt->opt = opt;
6370 topt->tr = tr;
6371
6372 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6373 &trace_options_fops);
6374
6375}
6376
6377static void
6378create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6379{
6380 struct trace_option_dentry *topts;
6381 struct trace_options *tr_topts;
6382 struct tracer_flags *flags;
6383 struct tracer_opt *opts;
6384 int cnt;
6385 int i;
6386
6387 if (!tracer)
6388 return;
6389
6390 flags = tracer->flags;
6391
6392 if (!flags || !flags->opts)
6393 return;
6394
6395 /*
6396 * If this is an instance, only create flags for tracers
6397 * the instance may have.
6398 */
6399 if (!trace_ok_for_array(tracer, tr))
6400 return;
6401
6402 for (i = 0; i < tr->nr_topts; i++) {
6403		/* Make sure there are no duplicate flags. */
6404 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
6405 return;
6406 }
6407
6408 opts = flags->opts;
6409
6410 for (cnt = 0; opts[cnt].name; cnt++)
6411 ;
6412
6413 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6414 if (!topts)
6415 return;
6416
6417 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6418 GFP_KERNEL);
6419 if (!tr_topts) {
6420 kfree(topts);
6421 return;
6422 }
6423
6424 tr->topts = tr_topts;
6425 tr->topts[tr->nr_topts].tracer = tracer;
6426 tr->topts[tr->nr_topts].topts = topts;
6427 tr->nr_topts++;
6428
6429 for (cnt = 0; opts[cnt].name; cnt++) {
6430 create_trace_option_file(tr, &topts[cnt], flags,
6431 &opts[cnt]);
6432 WARN_ONCE(topts[cnt].entry == NULL,
6433 "Failed to create trace option: %s",
6434 opts[cnt].name);
6435 }
6436}
6437
6438static struct dentry *
6439create_trace_option_core_file(struct trace_array *tr,
6440 const char *option, long index)
6441{
6442 struct dentry *t_options;
6443
6444 t_options = trace_options_init_dentry(tr);
6445 if (!t_options)
6446 return NULL;
6447
6448 return trace_create_file(option, 0644, t_options,
6449 (void *)&tr->trace_flags_index[index],
6450 &trace_options_core_fops);
6451}
6452
6453static void create_trace_options_dir(struct trace_array *tr)
6454{
6455 struct dentry *t_options;
6456 bool top_level = tr == &global_trace;
6457 int i;
6458
6459 t_options = trace_options_init_dentry(tr);
6460 if (!t_options)
6461 return;
6462
6463 for (i = 0; trace_options[i]; i++) {
6464 if (top_level ||
6465 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6466 create_trace_option_core_file(tr, trace_options[i], i);
6467 }
6468}
6469
6470static ssize_t
6471rb_simple_read(struct file *filp, char __user *ubuf,
6472 size_t cnt, loff_t *ppos)
6473{
6474 struct trace_array *tr = filp->private_data;
6475 char buf[64];
6476 int r;
6477
6478 r = tracer_tracing_is_on(tr);
6479 r = sprintf(buf, "%d\n", r);
6480
6481 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6482}
6483
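/*
 * Write handler for the "tracing_on" file. A non-zero value turns the
 * ring buffer on and calls the current tracer's start callback; zero
 * turns it off and calls the stop callback.
 */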
6484static ssize_t
6485rb_simple_write(struct file *filp, const char __user *ubuf,
6486 size_t cnt, loff_t *ppos)
6487{
6488 struct trace_array *tr = filp->private_data;
6489 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6490 unsigned long val;
6491 int ret;
6492
6493 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6494 if (ret)
6495 return ret;
6496
6497 if (buffer) {
6498 mutex_lock(&trace_types_lock);
6499 if (val) {
6500 tracer_tracing_on(tr);
6501 if (tr->current_trace->start)
6502 tr->current_trace->start(tr);
6503 } else {
6504 tracer_tracing_off(tr);
6505 if (tr->current_trace->stop)
6506 tr->current_trace->stop(tr);
6507 }
6508 mutex_unlock(&trace_types_lock);
6509 }
6510
6511 (*ppos)++;
6512
6513 return cnt;
6514}
6515
6516static const struct file_operations rb_simple_fops = {
6517 .open = tracing_open_generic_tr,
6518 .read = rb_simple_read,
6519 .write = rb_simple_write,
6520 .release = tracing_release_generic_tr,
6521 .llseek = default_llseek,
6522};
6523
6524struct dentry *trace_instance_dir;
6525
6526static void
6527init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6528
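/*
 * Allocate the ring buffer and per-CPU data for a single trace_buffer
 * belonging to @tr. Returns 0 on success or -ENOMEM on failure.
 */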
6529static int
6530allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6531{
6532 enum ring_buffer_flags rb_flags;
6533
6534 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6535
6536 buf->tr = tr;
6537
6538 buf->buffer = ring_buffer_alloc(size, rb_flags);
6539 if (!buf->buffer)
6540 return -ENOMEM;
6541
6542 buf->data = alloc_percpu(struct trace_array_cpu);
6543 if (!buf->data) {
6544 ring_buffer_free(buf->buffer);
6545 return -ENOMEM;
6546 }
6547
6548 /* Allocate the first page for all buffers */
6549 set_buffer_entries(&tr->trace_buffer,
6550 ring_buffer_size(tr->trace_buffer.buffer, 0));
6551
6552 return 0;
6553}
6554
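/*
 * Allocate the main trace buffer for @tr, and the max/snapshot buffer
 * as well when CONFIG_TRACER_MAX_TRACE is enabled.
 */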
6555static int allocate_trace_buffers(struct trace_array *tr, int size)
6556{
6557 int ret;
6558
6559 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6560 if (ret)
6561 return ret;
6562
6563#ifdef CONFIG_TRACER_MAX_TRACE
6564 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6565 allocate_snapshot ? size : 1);
6566 if (WARN_ON(ret)) {
6567 ring_buffer_free(tr->trace_buffer.buffer);
6568 free_percpu(tr->trace_buffer.data);
6569 return -ENOMEM;
6570 }
6571 tr->allocated_snapshot = allocate_snapshot;
6572
6573 /*
6574 * Only the top level trace array gets its snapshot allocated
6575 * from the kernel command line.
6576 */
6577 allocate_snapshot = false;
6578#endif
6579 return 0;
6580}
6581
6582static void free_trace_buffer(struct trace_buffer *buf)
6583{
6584 if (buf->buffer) {
6585 ring_buffer_free(buf->buffer);
6586 buf->buffer = NULL;
6587 free_percpu(buf->data);
6588 buf->data = NULL;
6589 }
6590}
6591
6592static void free_trace_buffers(struct trace_array *tr)
6593{
6594 if (!tr)
6595 return;
6596
6597 free_trace_buffer(&tr->trace_buffer);
6598
6599#ifdef CONFIG_TRACER_MAX_TRACE
6600 free_trace_buffer(&tr->max_buffer);
6601#endif
6602}
6603
6604static void init_trace_flags_index(struct trace_array *tr)
6605{
6606 int i;
6607
6608 /* Used by the trace options files */
6609 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6610 tr->trace_flags_index[i] = i;
6611}
6612
6613static void __update_tracer_options(struct trace_array *tr)
6614{
6615 struct tracer *t;
6616
6617 for (t = trace_types; t; t = t->next)
6618 add_tracer_options(tr, t);
6619}
6620
6621static void update_tracer_options(struct trace_array *tr)
6622{
6623 mutex_lock(&trace_types_lock);
6624 __update_tracer_options(tr);
6625 mutex_unlock(&trace_types_lock);
6626}
6627
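/*
 * mkdir handler for the "instances" directory: allocate a new
 * trace_array with its own buffers and tracefs files, and add it to
 * ftrace_trace_arrays. Fails with -EEXIST if the name is already taken.
 */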
6628static int instance_mkdir(const char *name)
6629{
6630 struct trace_array *tr;
6631 int ret;
6632
6633 mutex_lock(&trace_types_lock);
6634
6635 ret = -EEXIST;
6636 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6637 if (tr->name && strcmp(tr->name, name) == 0)
6638 goto out_unlock;
6639 }
6640
6641 ret = -ENOMEM;
6642 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6643 if (!tr)
6644 goto out_unlock;
6645
6646 tr->name = kstrdup(name, GFP_KERNEL);
6647 if (!tr->name)
6648 goto out_free_tr;
6649
6650 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6651 goto out_free_tr;
6652
6653 tr->trace_flags = global_trace.trace_flags;
6654
6655 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6656
6657 raw_spin_lock_init(&tr->start_lock);
6658
6659 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6660
6661 tr->current_trace = &nop_trace;
6662
6663 INIT_LIST_HEAD(&tr->systems);
6664 INIT_LIST_HEAD(&tr->events);
6665
6666 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6667 goto out_free_tr;
6668
6669 tr->dir = tracefs_create_dir(name, trace_instance_dir);
6670 if (!tr->dir)
6671 goto out_free_tr;
6672
6673 ret = event_trace_add_tracer(tr->dir, tr);
6674 if (ret) {
6675 tracefs_remove_recursive(tr->dir);
6676 goto out_free_tr;
6677 }
6678
6679 init_tracer_tracefs(tr, tr->dir);
6680 init_trace_flags_index(tr);
6681 __update_tracer_options(tr);
6682
6683 list_add(&tr->list, &ftrace_trace_arrays);
6684
6685 mutex_unlock(&trace_types_lock);
6686
6687 return 0;
6688
6689 out_free_tr:
6690 free_trace_buffers(tr);
6691 free_cpumask_var(tr->tracing_cpumask);
6692 kfree(tr->name);
6693 kfree(tr);
6694
6695 out_unlock:
6696 mutex_unlock(&trace_types_lock);
6697
6698 return ret;
6699
6700}
6701
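/*
 * rmdir handler for the "instances" directory: tear down and free the
 * named trace_array. Fails with -EBUSY while the instance still has
 * references.
 */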
6702static int instance_rmdir(const char *name)
6703{
6704 struct trace_array *tr;
6705 int found = 0;
6706 int ret;
6707 int i;
6708
6709 mutex_lock(&trace_types_lock);
6710
6711 ret = -ENODEV;
6712 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6713 if (tr->name && strcmp(tr->name, name) == 0) {
6714 found = 1;
6715 break;
6716 }
6717 }
6718 if (!found)
6719 goto out_unlock;
6720
6721 ret = -EBUSY;
6722 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6723 goto out_unlock;
6724
6725 list_del(&tr->list);
6726
6727 tracing_set_nop(tr);
6728 event_trace_del_tracer(tr);
6729 ftrace_destroy_function_files(tr);
6730 tracefs_remove_recursive(tr->dir);
6731 free_trace_buffers(tr);
6732
6733 for (i = 0; i < tr->nr_topts; i++) {
6734 kfree(tr->topts[i].topts);
6735 }
6736 kfree(tr->topts);
6737
6738 kfree(tr->name);
6739 kfree(tr);
6740
6741 ret = 0;
6742
6743 out_unlock:
6744 mutex_unlock(&trace_types_lock);
6745
6746 return ret;
6747}
6748
6749static __init void create_trace_instances(struct dentry *d_tracer)
6750{
6751 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6752 instance_mkdir,
6753 instance_rmdir);
6754 if (WARN_ON(!trace_instance_dir))
6755 return;
6756}
6757
6758static void
6759init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6760{
6761 int cpu;
6762
6763 trace_create_file("available_tracers", 0444, d_tracer,
6764 tr, &show_traces_fops);
6765
6766 trace_create_file("current_tracer", 0644, d_tracer,
6767 tr, &set_tracer_fops);
6768
6769 trace_create_file("tracing_cpumask", 0644, d_tracer,
6770 tr, &tracing_cpumask_fops);
6771
6772 trace_create_file("trace_options", 0644, d_tracer,
6773 tr, &tracing_iter_fops);
6774
6775 trace_create_file("trace", 0644, d_tracer,
6776 tr, &tracing_fops);
6777
6778 trace_create_file("trace_pipe", 0444, d_tracer,
6779 tr, &tracing_pipe_fops);
6780
6781 trace_create_file("buffer_size_kb", 0644, d_tracer,
6782 tr, &tracing_entries_fops);
6783
6784 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6785 tr, &tracing_total_entries_fops);
6786
6787 trace_create_file("free_buffer", 0200, d_tracer,
6788 tr, &tracing_free_buffer_fops);
6789
6790 trace_create_file("trace_marker", 0220, d_tracer,
6791 tr, &tracing_mark_fops);
6792
6793 trace_create_file("trace_clock", 0644, d_tracer, tr,
6794 &trace_clock_fops);
6795
6796 trace_create_file("tracing_on", 0644, d_tracer,
6797 tr, &rb_simple_fops);
6798
6799 create_trace_options_dir(tr);
6800
6801#ifdef CONFIG_TRACER_MAX_TRACE
6802 trace_create_file("tracing_max_latency", 0644, d_tracer,
6803 &tr->max_latency, &tracing_max_lat_fops);
6804#endif
6805
6806 if (ftrace_create_function_files(tr, d_tracer))
6807 WARN(1, "Could not allocate function filter files");
6808
6809#ifdef CONFIG_TRACER_SNAPSHOT
6810 trace_create_file("snapshot", 0644, d_tracer,
6811 tr, &snapshot_fops);
6812#endif
6813
6814 for_each_tracing_cpu(cpu)
6815 tracing_init_tracefs_percpu(tr, cpu);
6816
6817}
6818
6819static struct vfsmount *trace_automount(void *ignore)
6820{
6821 struct vfsmount *mnt;
6822 struct file_system_type *type;
6823
6824 /*
6825 * To maintain backward compatibility for tools that mount
6826 * debugfs to get to the tracing facility, tracefs is automatically
6827 * mounted to the debugfs/tracing directory.
6828 */
6829 type = get_fs_type("tracefs");
6830 if (!type)
6831 return NULL;
6832 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6833 put_filesystem(type);
6834 if (IS_ERR(mnt))
6835 return NULL;
6836 mntget(mnt);
6837
6838 return mnt;
6839}
6840
6841/**
6842 * tracing_init_dentry - initialize top level trace array
6843 *
6844 * This is called when creating files or directories in the tracing
6845 * directory. It is called via fs_initcall() by any of the boot-up code
6846 * and is expected to return the dentry of the top level tracing directory.
6847 */
6848struct dentry *tracing_init_dentry(void)
6849{
6850 struct trace_array *tr = &global_trace;
6851
6852 /* The top level trace array uses NULL as parent */
6853 if (tr->dir)
6854 return NULL;
6855
6856 if (WARN_ON(!tracefs_initialized()) ||
6857 (IS_ENABLED(CONFIG_DEBUG_FS) &&
6858 WARN_ON(!debugfs_initialized())))
6859 return ERR_PTR(-ENODEV);
6860
6861 /*
6862 * As there may still be users that expect the tracing
6863 * files to exist in debugfs/tracing, we must automount
6864 * the tracefs file system there, so older tools still
6865 * work with the newer kernel.
6866 */
6867 tr->dir = debugfs_create_automount("tracing", NULL,
6868 trace_automount, NULL);
6869 if (!tr->dir) {
6870 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6871 return ERR_PTR(-ENOMEM);
6872 }
6873
6874 return NULL;
6875}
6876
6877extern struct trace_enum_map *__start_ftrace_enum_maps[];
6878extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6879
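/*
 * Register the built-in enum maps that the linker collected between
 * __start_ftrace_enum_maps and __stop_ftrace_enum_maps.
 */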
6880static void __init trace_enum_init(void)
6881{
6882 int len;
6883
6884 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6885 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6886}
6887
6888#ifdef CONFIG_MODULES
6889static void trace_module_add_enums(struct module *mod)
6890{
6891 if (!mod->num_trace_enums)
6892 return;
6893
6894 /*
6895 * Modules with bad taint do not have events created, do
6896 * not bother with enums either.
6897 */
6898 if (trace_module_has_bad_taint(mod))
6899 return;
6900
6901 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6902}
6903
6904#ifdef CONFIG_TRACE_ENUM_MAP_FILE
6905static void trace_module_remove_enums(struct module *mod)
6906{
6907 union trace_enum_map_item *map;
6908 union trace_enum_map_item **last = &trace_enum_maps;
6909
6910 if (!mod->num_trace_enums)
6911 return;
6912
6913 mutex_lock(&trace_enum_mutex);
6914
6915 map = trace_enum_maps;
6916
6917 while (map) {
6918 if (map->head.mod == mod)
6919 break;
6920 map = trace_enum_jmp_to_tail(map);
6921 last = &map->tail.next;
6922 map = map->tail.next;
6923 }
6924 if (!map)
6925 goto out;
6926
6927 *last = trace_enum_jmp_to_tail(map)->tail.next;
6928 kfree(map);
6929 out:
6930 mutex_unlock(&trace_enum_mutex);
6931}
6932#else
6933static inline void trace_module_remove_enums(struct module *mod) { }
6934#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6935
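/*
 * Module notifier: add a module's trace enum maps when it is loaded
 * and remove them when it is unloaded.
 */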
6936static int trace_module_notify(struct notifier_block *self,
6937 unsigned long val, void *data)
6938{
6939 struct module *mod = data;
6940
6941 switch (val) {
6942 case MODULE_STATE_COMING:
6943 trace_module_add_enums(mod);
6944 break;
6945 case MODULE_STATE_GOING:
6946 trace_module_remove_enums(mod);
6947 break;
6948 }
6949
6950 return 0;
6951}
6952
6953static struct notifier_block trace_module_nb = {
6954 .notifier_call = trace_module_notify,
6955 .priority = 0,
6956};
6957#endif /* CONFIG_MODULES */
6958
6959static __init int tracer_init_tracefs(void)
6960{
6961 struct dentry *d_tracer;
6962
6963 trace_access_lock_init();
6964
6965 d_tracer = tracing_init_dentry();
6966 if (IS_ERR(d_tracer))
6967 return 0;
6968
6969 init_tracer_tracefs(&global_trace, d_tracer);
6970
6971 trace_create_file("tracing_thresh", 0644, d_tracer,
6972 &global_trace, &tracing_thresh_fops);
6973
6974 trace_create_file("README", 0444, d_tracer,
6975 NULL, &tracing_readme_fops);
6976
6977 trace_create_file("saved_cmdlines", 0444, d_tracer,
6978 NULL, &tracing_saved_cmdlines_fops);
6979
6980 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6981 NULL, &tracing_saved_cmdlines_size_fops);
6982
6983 trace_enum_init();
6984
6985 trace_create_enum_file(d_tracer);
6986
6987#ifdef CONFIG_MODULES
6988 register_module_notifier(&trace_module_nb);
6989#endif
6990
6991#ifdef CONFIG_DYNAMIC_FTRACE
6992 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6993 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6994#endif
6995
6996 create_trace_instances(d_tracer);
6997
6998 update_tracer_options(&global_trace);
6999
7000 return 0;
7001}
7002
7003static int trace_panic_handler(struct notifier_block *this,
7004 unsigned long event, void *unused)
7005{
7006 if (ftrace_dump_on_oops)
7007 ftrace_dump(ftrace_dump_on_oops);
7008 return NOTIFY_OK;
7009}
7010
7011static struct notifier_block trace_panic_notifier = {
7012 .notifier_call = trace_panic_handler,
7013 .next = NULL,
7014 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7015};
7016
7017static int trace_die_handler(struct notifier_block *self,
7018 unsigned long val,
7019 void *data)
7020{
7021 switch (val) {
7022 case DIE_OOPS:
7023 if (ftrace_dump_on_oops)
7024 ftrace_dump(ftrace_dump_on_oops);
7025 break;
7026 default:
7027 break;
7028 }
7029 return NOTIFY_OK;
7030}
7031
7032static struct notifier_block trace_die_notifier = {
7033 .notifier_call = trace_die_handler,
7034 .priority = 200
7035};
7036
7037/*
7038 * The printk buffer is capped at 1024 bytes; we really don't need it
7039 * that big. Nothing should be printing 1000 characters anyway.
7040 */
7041#define TRACE_MAX_PRINT 1000
7042
7043/*
7044 * Define here KERN_TRACE so that we have one place to modify
7045 * it if we decide to change what log level the ftrace dump
7046 * should be at.
7047 */
7048#define KERN_TRACE KERN_EMERG
7049
7050void
7051trace_printk_seq(struct trace_seq *s)
7052{
7053 /* Probably should print a warning here. */
7054 if (s->seq.len >= TRACE_MAX_PRINT)
7055 s->seq.len = TRACE_MAX_PRINT;
7056
7057 /*
7058 * More paranoid code. Although the buffer size is set to
7059 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7060 * an extra layer of protection.
7061 */
7062 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7063 s->seq.len = s->seq.size - 1;
7064
7065	/* should already be NUL terminated, but we are paranoid. */
7066 s->buffer[s->seq.len] = 0;
7067
7068 printk(KERN_TRACE "%s", s->buffer);
7069
7070 trace_seq_init(s);
7071}
7072
7073void trace_init_global_iter(struct trace_iterator *iter)
7074{
7075 iter->tr = &global_trace;
7076 iter->trace = iter->tr->current_trace;
7077 iter->cpu_file = RING_BUFFER_ALL_CPUS;
7078 iter->trace_buffer = &global_trace.trace_buffer;
7079
7080 if (iter->trace && iter->trace->open)
7081 iter->trace->open(iter);
7082
7083 /* Annotate start of buffers if we had overruns */
7084 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7085 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7086
7087 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7088 if (trace_clocks[iter->tr->clock_id].in_ns)
7089 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
7090}
7091
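/*
 * Dump the contents of the ftrace ring buffer(s) to the console.
 * Called by the panic and die notifiers above when ftrace_dump_on_oops
 * is set. Only one dump may run at a time.
 */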
7092void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
7093{
7094 /* use static because iter can be a bit big for the stack */
7095 static struct trace_iterator iter;
7096 static atomic_t dump_running;
7097 struct trace_array *tr = &global_trace;
7098 unsigned int old_userobj;
7099 unsigned long flags;
7100 int cnt = 0, cpu;
7101
7102 /* Only allow one dump user at a time. */
7103 if (atomic_inc_return(&dump_running) != 1) {
7104 atomic_dec(&dump_running);
7105 return;
7106 }
7107
7108 /*
7109 * Always turn off tracing when we dump.
7110 * We don't need to show trace output of what happens
7111 * between multiple crashes.
7112 *
7113 * If the user does a sysrq-z, then they can re-enable
7114 * tracing with echo 1 > tracing_on.
7115 */
7116 tracing_off();
7117
7118 local_irq_save(flags);
7119
7120 /* Simulate the iterator */
7121 trace_init_global_iter(&iter);
7122
7123 for_each_tracing_cpu(cpu) {
7124 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7125 }
7126
7127 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
7128
7129 /* don't look at user memory in panic mode */
7130 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7131
7132 switch (oops_dump_mode) {
7133 case DUMP_ALL:
7134 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7135 break;
7136 case DUMP_ORIG:
7137 iter.cpu_file = raw_smp_processor_id();
7138 break;
7139 case DUMP_NONE:
7140 goto out_enable;
7141 default:
7142 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7143 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7144 }
7145
7146 printk(KERN_TRACE "Dumping ftrace buffer:\n");
7147
7148 /* Did function tracer already get disabled? */
7149 if (ftrace_is_dead()) {
7150 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7151 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7152 }
7153
7154 /*
7155	 * We need to stop all tracing on all CPUs to read
7156	 * the next buffer. This is a bit expensive, but is
7157	 * not done often. We fill in all that we can read,
7158	 * and then release the locks again.
7159 */
7160
7161 while (!trace_empty(&iter)) {
7162
7163 if (!cnt)
7164 printk(KERN_TRACE "---------------------------------\n");
7165
7166 cnt++;
7167
7168 /* reset all but tr, trace, and overruns */
7169 memset(&iter.seq, 0,
7170 sizeof(struct trace_iterator) -
7171 offsetof(struct trace_iterator, seq));
7172 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7173 iter.pos = -1;
7174
7175 if (trace_find_next_entry_inc(&iter) != NULL) {
7176 int ret;
7177
7178 ret = print_trace_line(&iter);
7179 if (ret != TRACE_TYPE_NO_CONSUME)
7180 trace_consume(&iter);
7181 }
7182 touch_nmi_watchdog();
7183
7184 trace_printk_seq(&iter.seq);
7185 }
7186
7187 if (!cnt)
7188 printk(KERN_TRACE " (ftrace buffer empty)\n");
7189 else
7190 printk(KERN_TRACE "---------------------------------\n");
7191
7192 out_enable:
7193 tr->trace_flags |= old_userobj;
7194
7195 for_each_tracing_cpu(cpu) {
7196 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7197 }
7198 atomic_dec(&dump_running);
7199 local_irq_restore(flags);
7200}
7201EXPORT_SYMBOL_GPL(ftrace_dump);
7202
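/*
 * Boot-time setup: allocate the global trace buffers, register the nop
 * tracer, and hook up the panic and die notifiers.
 */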
7203__init static int tracer_alloc_buffers(void)
7204{
7205 int ring_buf_size;
7206 int ret = -ENOMEM;
7207
7208 /*
7209	 * Make sure we don't accidentally add more trace options
7210 * than we have bits for.
7211 */
7212 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
7213
7214 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7215 goto out;
7216
7217 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7218 goto out_free_buffer_mask;
7219
7220 /* Only allocate trace_printk buffers if a trace_printk exists */
7221 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7222 /* Must be called before global_trace.buffer is allocated */
7223 trace_printk_init_buffers();
7224
7225 /* To save memory, keep the ring buffer size to its minimum */
7226 if (ring_buffer_expanded)
7227 ring_buf_size = trace_buf_size;
7228 else
7229 ring_buf_size = 1;
7230
7231 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7232 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7233
7234 raw_spin_lock_init(&global_trace.start_lock);
7235
7236 /* Used for event triggers */
7237 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7238 if (!temp_buffer)
7239 goto out_free_cpumask;
7240
7241 if (trace_create_savedcmd() < 0)
7242 goto out_free_temp_buffer;
7243
7244	/* TODO: make the number of buffers hot pluggable with CPUs */
7245 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7246 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7247 WARN_ON(1);
7248 goto out_free_savedcmd;
7249 }
7250
7251 if (global_trace.buffer_disabled)
7252 tracing_off();
7253
7254 if (trace_boot_clock) {
7255 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7256 if (ret < 0)
7257 pr_warn("Trace clock %s not defined, going back to default\n",
7258 trace_boot_clock);
7259 }
7260
7261 /*
7262 * register_tracer() might reference current_trace, so it
7263 * needs to be set before we register anything. This is
7264 * just a bootstrap of current_trace anyway.
7265 */
7266 global_trace.current_trace = &nop_trace;
7267
7268 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7269
7270 ftrace_init_global_array_ops(&global_trace);
7271
7272 init_trace_flags_index(&global_trace);
7273
7274 register_tracer(&nop_trace);
7275
7276 /* All seems OK, enable tracing */
7277 tracing_disabled = 0;
7278
7279 atomic_notifier_chain_register(&panic_notifier_list,
7280 &trace_panic_notifier);
7281
7282 register_die_notifier(&trace_die_notifier);
7283
7284 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7285
7286 INIT_LIST_HEAD(&global_trace.systems);
7287 INIT_LIST_HEAD(&global_trace.events);
7288 list_add(&global_trace.list, &ftrace_trace_arrays);
7289
7290 apply_trace_boot_options();
7291
7292 register_snapshot_cmd();
7293
7294 return 0;
7295
7296out_free_savedcmd:
7297 free_saved_cmdlines_buffer(savedcmd);
7298out_free_temp_buffer:
7299 ring_buffer_free(temp_buffer);
7300out_free_cpumask:
7301 free_cpumask_var(global_trace.tracing_cpumask);
7302out_free_buffer_mask:
7303 free_cpumask_var(tracing_buffer_mask);
7304out:
7305 return ret;
7306}
7307
7308void __init trace_init(void)
7309{
7310 if (tracepoint_printk) {
7311 tracepoint_print_iter =
7312 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7313 if (WARN_ON(!tracepoint_print_iter))
7314 tracepoint_printk = 0;
7315 }
7316 tracer_alloc_buffers();
7317 trace_event_init();
7318}
7319
7320__init static int clear_boot_tracer(void)
7321{
7322 /*
7323	 * The buffer holding the default bootup tracer name is in an
7324	 * init section. This function is called at late_initcall time.
7325	 * If we did not find the boot tracer, then clear it out, to prevent
7326 * later registration from accessing the buffer that is
7327 * about to be freed.
7328 */
7329 if (!default_bootup_tracer)
7330 return 0;
7331
7332 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7333 default_bootup_tracer);
7334 default_bootup_tracer = NULL;
7335
7336 return 0;
7337}
7338
7339fs_initcall(tracer_init_tracefs);
7340late_initcall(clear_boot_tracer);
1/*
2 * ring buffer based function tracer
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 William Lee Irwin III
13 */
14#include <linux/ring_buffer.h>
15#include <generated/utsrelease.h>
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
20#include <linux/notifier.h>
21#include <linux/irqflags.h>
22#include <linux/debugfs.h>
23#include <linux/pagemap.h>
24#include <linux/hardirq.h>
25#include <linux/linkage.h>
26#include <linux/uaccess.h>
27#include <linux/kprobes.h>
28#include <linux/ftrace.h>
29#include <linux/module.h>
30#include <linux/percpu.h>
31#include <linux/splice.h>
32#include <linux/kdebug.h>
33#include <linux/string.h>
34#include <linux/rwsem.h>
35#include <linux/slab.h>
36#include <linux/ctype.h>
37#include <linux/init.h>
38#include <linux/poll.h>
39#include <linux/fs.h>
40
41#include "trace.h"
42#include "trace_output.h"
43
44/*
45 * On boot up, the ring buffer is set to the minimum size, so that
46 * we do not waste memory on systems that are not using tracing.
47 */
48int ring_buffer_expanded;
49
50/*
51 * We need to change this state when a selftest is running.
52 * A selftest will lurk into the ring-buffer to count the
53 * entries inserted during the selftest although some concurrent
54 * insertions into the ring-buffer such as trace_printk could occurred
55 * at the same time, giving false positive or negative results.
56 */
57static bool __read_mostly tracing_selftest_running;
58
59/*
60 * If a tracer is running, we do not want to run SELFTEST.
61 */
62bool __read_mostly tracing_selftest_disabled;
63
64/* For tracers that don't implement custom flags */
65static struct tracer_opt dummy_tracer_opt[] = {
66 { }
67};
68
69static struct tracer_flags dummy_tracer_flags = {
70 .val = 0,
71 .opts = dummy_tracer_opt
72};
73
74static int dummy_set_flag(u32 old_flags, u32 bit, int set)
75{
76 return 0;
77}
78
79/*
80 * Kill all tracing for good (never come back).
81 * It is initialized to 1 but will turn to zero if the initialization
82 * of the tracer is successful. But that is the only place that sets
83 * this back to zero.
84 */
85static int tracing_disabled = 1;
86
87DEFINE_PER_CPU(int, ftrace_cpu_disabled);
88
89static inline void ftrace_disable_cpu(void)
90{
91 preempt_disable();
92 __this_cpu_inc(ftrace_cpu_disabled);
93}
94
95static inline void ftrace_enable_cpu(void)
96{
97 __this_cpu_dec(ftrace_cpu_disabled);
98 preempt_enable();
99}
100
101cpumask_var_t __read_mostly tracing_buffer_mask;
102
103/*
104 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
105 *
106 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
107 * is set, then ftrace_dump is called. This will output the contents
108 * of the ftrace buffers to the console. This is very useful for
109 * capturing traces that lead to crashes and outputing it to a
110 * serial console.
111 *
112 * It is default off, but you can enable it with either specifying
113 * "ftrace_dump_on_oops" in the kernel command line, or setting
114 * /proc/sys/kernel/ftrace_dump_on_oops
115 * Set 1 if you want to dump buffers of all CPUs
116 * Set 2 if you want to dump the buffer of the CPU that triggered oops
117 */
118
119enum ftrace_dump_mode ftrace_dump_on_oops;
120
121static int tracing_set_tracer(const char *buf);
122
123#define MAX_TRACER_SIZE 100
124static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
125static char *default_bootup_tracer;
126
127static int __init set_cmdline_ftrace(char *str)
128{
129 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
130 default_bootup_tracer = bootup_tracer_buf;
131 /* We are using ftrace early, expand it */
132 ring_buffer_expanded = 1;
133 return 1;
134}
135__setup("ftrace=", set_cmdline_ftrace);
136
137static int __init set_ftrace_dump_on_oops(char *str)
138{
139 if (*str++ != '=' || !*str) {
140 ftrace_dump_on_oops = DUMP_ALL;
141 return 1;
142 }
143
144 if (!strcmp("orig_cpu", str)) {
145 ftrace_dump_on_oops = DUMP_ORIG;
146 return 1;
147 }
148
149 return 0;
150}
151__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
152
153unsigned long long ns2usecs(cycle_t nsec)
154{
155 nsec += 500;
156 do_div(nsec, 1000);
157 return nsec;
158}
159
160/*
161 * The global_trace is the descriptor that holds the tracing
162 * buffers for the live tracing. For each CPU, it contains
163 * a link list of pages that will store trace entries. The
164 * page descriptor of the pages in the memory is used to hold
165 * the link list by linking the lru item in the page descriptor
166 * to each of the pages in the buffer per CPU.
167 *
168 * For each active CPU there is a data field that holds the
169 * pages for the buffer for that CPU. Each CPU has the same number
170 * of pages allocated for its buffer.
171 */
172static struct trace_array global_trace;
173
174static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
175
176int filter_current_check_discard(struct ring_buffer *buffer,
177 struct ftrace_event_call *call, void *rec,
178 struct ring_buffer_event *event)
179{
180 return filter_check_discard(call, rec, buffer, event);
181}
182EXPORT_SYMBOL_GPL(filter_current_check_discard);
183
184cycle_t ftrace_now(int cpu)
185{
186 u64 ts;
187
188 /* Early boot up does not have a buffer yet */
189 if (!global_trace.buffer)
190 return trace_clock_local();
191
192 ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
193 ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
194
195 return ts;
196}
197
198/*
199 * The max_tr is used to snapshot the global_trace when a maximum
200 * latency is reached. Some tracers will use this to store a maximum
201 * trace while it continues examining live traces.
202 *
203 * The buffers for the max_tr are set up the same as the global_trace.
204 * When a snapshot is taken, the link list of the max_tr is swapped
205 * with the link list of the global_trace and the buffers are reset for
206 * the global_trace so the tracing can continue.
207 */
208static struct trace_array max_tr;
209
210static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
211
212/* tracer_enabled is used to toggle activation of a tracer */
213static int tracer_enabled = 1;
214
215/**
216 * tracing_is_enabled - return tracer_enabled status
217 *
218 * This function is used by other tracers to know the status
219 * of the tracer_enabled flag. Tracers may use this function
220 * to know if it should enable their features when starting
221 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
222 */
223int tracing_is_enabled(void)
224{
225 return tracer_enabled;
226}
227
228/*
229 * trace_buf_size is the size in bytes that is allocated
230 * for a buffer. Note, the number of bytes is always rounded
231 * to page size.
232 *
233 * This number is purposely set to a low number of 16384.
234 * If the dump on oops happens, it will be much appreciated
235 * to not have to wait for all that output. Anyway this can be
236 * boot time and run time configurable.
237 */
238#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
239
240static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
241
242/* trace_types holds a link list of available tracers. */
243static struct tracer *trace_types __read_mostly;
244
245/* current_trace points to the tracer that is currently active */
246static struct tracer *current_trace __read_mostly;
247
248/*
249 * trace_types_lock is used to protect the trace_types list.
250 */
251static DEFINE_MUTEX(trace_types_lock);
252
253/*
254 * serialize the access of the ring buffer
255 *
256 * ring buffer serializes readers, but it is low level protection.
257 * The validity of the events (which returns by ring_buffer_peek() ..etc)
258 * are not protected by ring buffer.
259 *
260 * The content of events may become garbage if we allow other process consumes
261 * these events concurrently:
262 * A) the page of the consumed events may become a normal page
263 * (not reader page) in ring buffer, and this page will be rewrited
264 * by events producer.
265 * B) The page of the consumed events may become a page for splice_read,
266 * and this page will be returned to system.
267 *
268 * These primitives allow multi process access to different cpu ring buffer
269 * concurrently.
270 *
271 * These primitives don't distinguish read-only and read-consume access.
272 * Multi read-only access are also serialized.
273 */
274
275#ifdef CONFIG_SMP
276static DECLARE_RWSEM(all_cpu_access_lock);
277static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
278
279static inline void trace_access_lock(int cpu)
280{
281 if (cpu == TRACE_PIPE_ALL_CPU) {
282 /* gain it for accessing the whole ring buffer. */
283 down_write(&all_cpu_access_lock);
284 } else {
285 /* gain it for accessing a cpu ring buffer. */
286
287 /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
288 down_read(&all_cpu_access_lock);
289
290 /* Secondly block other access to this @cpu ring buffer. */
291 mutex_lock(&per_cpu(cpu_access_lock, cpu));
292 }
293}
294
295static inline void trace_access_unlock(int cpu)
296{
297 if (cpu == TRACE_PIPE_ALL_CPU) {
298 up_write(&all_cpu_access_lock);
299 } else {
300 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
301 up_read(&all_cpu_access_lock);
302 }
303}
304
305static inline void trace_access_lock_init(void)
306{
307 int cpu;
308
309 for_each_possible_cpu(cpu)
310 mutex_init(&per_cpu(cpu_access_lock, cpu));
311}
312
313#else
314
315static DEFINE_MUTEX(access_lock);
316
317static inline void trace_access_lock(int cpu)
318{
319 (void)cpu;
320 mutex_lock(&access_lock);
321}
322
323static inline void trace_access_unlock(int cpu)
324{
325 (void)cpu;
326 mutex_unlock(&access_lock);
327}
328
329static inline void trace_access_lock_init(void)
330{
331}
332
333#endif
334
335/* trace_wait is a waitqueue for tasks blocked on trace_poll */
336static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
337
338/* trace_flags holds trace_options default values */
339unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
340 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
341 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
342
343static int trace_stop_count;
344static DEFINE_SPINLOCK(tracing_start_lock);
345
346static void wakeup_work_handler(struct work_struct *work)
347{
348 wake_up(&trace_wait);
349}
350
351static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
352
353/**
354 * trace_wake_up - wake up tasks waiting for trace input
355 *
356 * Schedules a delayed work to wake up any task that is blocked on the
357 * trace_wait queue. These is used with trace_poll for tasks polling the
358 * trace.
359 */
360void trace_wake_up(void)
361{
362 const unsigned long delay = msecs_to_jiffies(2);
363
364 if (trace_flags & TRACE_ITER_BLOCK)
365 return;
366 schedule_delayed_work(&wakeup_work, delay);
367}
368
369static int __init set_buf_size(char *str)
370{
371 unsigned long buf_size;
372
373 if (!str)
374 return 0;
375 buf_size = memparse(str, &str);
376 /* nr_entries can not be zero */
377 if (buf_size == 0)
378 return 0;
379 trace_buf_size = buf_size;
380 return 1;
381}
382__setup("trace_buf_size=", set_buf_size);
383
384static int __init set_tracing_thresh(char *str)
385{
386 unsigned long threshhold;
387 int ret;
388
389 if (!str)
390 return 0;
391 ret = strict_strtoul(str, 0, &threshhold);
392 if (ret < 0)
393 return 0;
394 tracing_thresh = threshhold * 1000;
395 return 1;
396}
397__setup("tracing_thresh=", set_tracing_thresh);
398
399unsigned long nsecs_to_usecs(unsigned long nsecs)
400{
401 return nsecs / 1000;
402}
403
404/* These must match the bit postions in trace_iterator_flags */
405static const char *trace_options[] = {
406 "print-parent",
407 "sym-offset",
408 "sym-addr",
409 "verbose",
410 "raw",
411 "hex",
412 "bin",
413 "block",
414 "stacktrace",
415 "trace_printk",
416 "ftrace_preempt",
417 "branch",
418 "annotate",
419 "userstacktrace",
420 "sym-userobj",
421 "printk-msg-only",
422 "context-info",
423 "latency-format",
424 "sleep-time",
425 "graph-time",
426 "record-cmd",
427 "overwrite",
428 "disable_on_free",
429 NULL
430};
431
432static struct {
433 u64 (*func)(void);
434 const char *name;
435} trace_clocks[] = {
436 { trace_clock_local, "local" },
437 { trace_clock_global, "global" },
438};
439
440int trace_clock_id;
441
442/*
443 * trace_parser_get_init - gets the buffer for trace parser
444 */
445int trace_parser_get_init(struct trace_parser *parser, int size)
446{
447 memset(parser, 0, sizeof(*parser));
448
449 parser->buffer = kmalloc(size, GFP_KERNEL);
450 if (!parser->buffer)
451 return 1;
452
453 parser->size = size;
454 return 0;
455}
456
457/*
458 * trace_parser_put - frees the buffer for trace parser
459 */
460void trace_parser_put(struct trace_parser *parser)
461{
462 kfree(parser->buffer);
463}
464
465/*
466 * trace_get_user - reads the user input string separated by space
467 * (matched by isspace(ch))
468 *
469 * For each string found the 'struct trace_parser' is updated,
470 * and the function returns.
471 *
472 * Returns number of bytes read.
473 *
474 * See kernel/trace/trace.h for 'struct trace_parser' details.
475 */
476int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
477 size_t cnt, loff_t *ppos)
478{
479 char ch;
480 size_t read = 0;
481 ssize_t ret;
482
483 if (!*ppos)
484 trace_parser_clear(parser);
485
486 ret = get_user(ch, ubuf++);
487 if (ret)
488 goto out;
489
490 read++;
491 cnt--;
492
493 /*
494 * The parser is not finished with the last write,
495 * continue reading the user input without skipping spaces.
496 */
497 if (!parser->cont) {
498 /* skip white space */
499 while (cnt && isspace(ch)) {
500 ret = get_user(ch, ubuf++);
501 if (ret)
502 goto out;
503 read++;
504 cnt--;
505 }
506
507 /* only spaces were written */
508 if (isspace(ch)) {
509 *ppos += read;
510 ret = read;
511 goto out;
512 }
513
514 parser->idx = 0;
515 }
516
517 /* read the non-space input */
518 while (cnt && !isspace(ch)) {
519 if (parser->idx < parser->size - 1)
520 parser->buffer[parser->idx++] = ch;
521 else {
522 ret = -EINVAL;
523 goto out;
524 }
525 ret = get_user(ch, ubuf++);
526 if (ret)
527 goto out;
528 read++;
529 cnt--;
530 }
531
532 /* We either got finished input or we have to wait for another call. */
533 if (isspace(ch)) {
534 parser->buffer[parser->idx] = 0;
535 parser->cont = false;
536 } else {
537 parser->cont = true;
538 parser->buffer[parser->idx++] = ch;
539 }
540
541 *ppos += read;
542 ret = read;
543
544out:
545 return ret;
546}
547
548ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
549{
550 int len;
551 int ret;
552
553 if (!cnt)
554 return 0;
555
556 if (s->len <= s->readpos)
557 return -EBUSY;
558
559 len = s->len - s->readpos;
560 if (cnt > len)
561 cnt = len;
562 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
563 if (ret == cnt)
564 return -EFAULT;
565
566 cnt -= ret;
567
568 s->readpos += cnt;
569 return cnt;
570}
571
572static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
573{
574 int len;
575 void *ret;
576
577 if (s->len <= s->readpos)
578 return -EBUSY;
579
580 len = s->len - s->readpos;
581 if (cnt > len)
582 cnt = len;
583 ret = memcpy(buf, s->buffer + s->readpos, cnt);
584 if (!ret)
585 return -EFAULT;
586
587 s->readpos += cnt;
588 return cnt;
589}
590
591/*
592 * ftrace_max_lock is used to protect the swapping of buffers
593 * when taking a max snapshot. The buffers themselves are
594 * protected by per_cpu spinlocks. But the action of the swap
595 * needs its own lock.
596 *
597 * This is defined as a arch_spinlock_t in order to help
598 * with performance when lockdep debugging is enabled.
599 *
600 * It is also used in other places outside the update_max_tr
601 * so it needs to be defined outside of the
602 * CONFIG_TRACER_MAX_TRACE.
603 */
604static arch_spinlock_t ftrace_max_lock =
605 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
606
607unsigned long __read_mostly tracing_thresh;
608
609#ifdef CONFIG_TRACER_MAX_TRACE
610unsigned long __read_mostly tracing_max_latency;
611
612/*
613 * Copy the new maximum trace into the separate maximum-trace
614 * structure. (this way the maximum trace is permanently saved,
615 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
616 */
617static void
618__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
619{
620 struct trace_array_cpu *data = tr->data[cpu];
621 struct trace_array_cpu *max_data;
622
623 max_tr.cpu = cpu;
624 max_tr.time_start = data->preempt_timestamp;
625
626 max_data = max_tr.data[cpu];
627 max_data->saved_latency = tracing_max_latency;
628 max_data->critical_start = data->critical_start;
629 max_data->critical_end = data->critical_end;
630
631 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
632 max_data->pid = tsk->pid;
633 max_data->uid = task_uid(tsk);
634 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
635 max_data->policy = tsk->policy;
636 max_data->rt_priority = tsk->rt_priority;
637
638 /* record this tasks comm */
639 tracing_record_cmdline(tsk);
640}
641
642/**
643 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
644 * @tr: tracer
645 * @tsk: the task with the latency
646 * @cpu: The cpu that initiated the trace.
647 *
648 * Flip the buffers between the @tr and the max_tr and record information
649 * about which task was the cause of this latency.
650 */
651void
652update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
653{
654 struct ring_buffer *buf = tr->buffer;
655
656 if (trace_stop_count)
657 return;
658
659 WARN_ON_ONCE(!irqs_disabled());
660 if (!current_trace->use_max_tr) {
661 WARN_ON_ONCE(1);
662 return;
663 }
664 arch_spin_lock(&ftrace_max_lock);
665
666 tr->buffer = max_tr.buffer;
667 max_tr.buffer = buf;
668
669 __update_max_tr(tr, tsk, cpu);
670 arch_spin_unlock(&ftrace_max_lock);
671}
672
673/**
674 * update_max_tr_single - only copy one trace over, and reset the rest
675 * @tr - tracer
676 * @tsk - task with the latency
677 * @cpu - the cpu of the buffer to copy.
678 *
679 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
680 */
681void
682update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
683{
684 int ret;
685
686 if (trace_stop_count)
687 return;
688
689 WARN_ON_ONCE(!irqs_disabled());
690 if (!current_trace->use_max_tr) {
691 WARN_ON_ONCE(1);
692 return;
693 }
694
695 arch_spin_lock(&ftrace_max_lock);
696
697 ftrace_disable_cpu();
698
699 ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
700
701 if (ret == -EBUSY) {
702 /*
703 * We failed to swap the buffer due to a commit taking
704 * place on this CPU. We fail to record, but we reset
705 * the max trace buffer (no one writes directly to it)
706 * and flag that it failed.
707 */
708 trace_array_printk(&max_tr, _THIS_IP_,
709 "Failed to swap buffers due to commit in progress\n");
710 }
711
712 ftrace_enable_cpu();
713
714 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
715
716 __update_max_tr(tr, tsk, cpu);
717 arch_spin_unlock(&ftrace_max_lock);
718}
719#endif /* CONFIG_TRACER_MAX_TRACE */
720
721/**
722 * register_tracer - register a tracer with the ftrace system.
723 * @type - the plugin for the tracer
724 *
725 * Register a new plugin tracer.
726 */
727int register_tracer(struct tracer *type)
728__releases(kernel_lock)
729__acquires(kernel_lock)
730{
731 struct tracer *t;
732 int ret = 0;
733
734 if (!type->name) {
735 pr_info("Tracer must have a name\n");
736 return -1;
737 }
738
739 if (strlen(type->name) >= MAX_TRACER_SIZE) {
740 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
741 return -1;
742 }
743
744 mutex_lock(&trace_types_lock);
745
746 tracing_selftest_running = true;
747
748 for (t = trace_types; t; t = t->next) {
749 if (strcmp(type->name, t->name) == 0) {
750 /* already found */
751 pr_info("Tracer %s already registered\n",
752 type->name);
753 ret = -1;
754 goto out;
755 }
756 }
757
758 if (!type->set_flag)
759 type->set_flag = &dummy_set_flag;
760 if (!type->flags)
761 type->flags = &dummy_tracer_flags;
762 else
763 if (!type->flags->opts)
764 type->flags->opts = dummy_tracer_opt;
765 if (!type->wait_pipe)
766 type->wait_pipe = default_wait_pipe;
767
768
769#ifdef CONFIG_FTRACE_STARTUP_TEST
770 if (type->selftest && !tracing_selftest_disabled) {
771 struct tracer *saved_tracer = current_trace;
772 struct trace_array *tr = &global_trace;
773
774 /*
775 * Run a selftest on this tracer.
776 * Here we reset the trace buffer, and set the current
777 * tracer to be this tracer. The tracer can then run some
778 * internal tracing to verify that everything is in order.
779 * If we fail, we do not register this tracer.
780 */
781 tracing_reset_online_cpus(tr);
782
783 current_trace = type;
784
785 /* If we expanded the buffers, make sure the max is expanded too */
786 if (ring_buffer_expanded && type->use_max_tr)
787 ring_buffer_resize(max_tr.buffer, trace_buf_size);
788
789 /* the test is responsible for initializing and enabling */
790 pr_info("Testing tracer %s: ", type->name);
791 ret = type->selftest(type, tr);
792 /* the test is responsible for resetting too */
793 current_trace = saved_tracer;
794 if (ret) {
795 printk(KERN_CONT "FAILED!\n");
796 goto out;
797 }
798 /* Only reset on passing, to avoid touching corrupted buffers */
799 tracing_reset_online_cpus(tr);
800
801 /* Shrink the max buffer again */
802 if (ring_buffer_expanded && type->use_max_tr)
803 ring_buffer_resize(max_tr.buffer, 1);
804
805 printk(KERN_CONT "PASSED\n");
806 }
807#endif
808
809 type->next = trace_types;
810 trace_types = type;
811
812 out:
813 tracing_selftest_running = false;
814 mutex_unlock(&trace_types_lock);
815
816 if (ret || !default_bootup_tracer)
817 goto out_unlock;
818
819 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
820 goto out_unlock;
821
822 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
823 /* Do we want this tracer to start on bootup? */
824 tracing_set_tracer(type->name);
825 default_bootup_tracer = NULL;
826 /* disable other selftests, since this will break it. */
827 tracing_selftest_disabled = 1;
828#ifdef CONFIG_FTRACE_STARTUP_TEST
829 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
830 type->name);
831#endif
832
833 out_unlock:
834 return ret;
835}
836
837void unregister_tracer(struct tracer *type)
838{
839 struct tracer **t;
840
841 mutex_lock(&trace_types_lock);
842 for (t = &trace_types; *t; t = &(*t)->next) {
843 if (*t == type)
844 goto found;
845 }
846 pr_info("Tracer %s not registered\n", type->name);
847 goto out;
848
849 found:
850 *t = (*t)->next;
851
852 if (type == current_trace && tracer_enabled) {
853 tracer_enabled = 0;
854 tracing_stop();
855 if (current_trace->stop)
856 current_trace->stop(&global_trace);
857 current_trace = &nop_trace;
858 }
859out:
860 mutex_unlock(&trace_types_lock);
861}
862
863static void __tracing_reset(struct ring_buffer *buffer, int cpu)
864{
865 ftrace_disable_cpu();
866 ring_buffer_reset_cpu(buffer, cpu);
867 ftrace_enable_cpu();
868}
869
870void tracing_reset(struct trace_array *tr, int cpu)
871{
872 struct ring_buffer *buffer = tr->buffer;
873
874 ring_buffer_record_disable(buffer);
875
876 /* Make sure all commits have finished */
877 synchronize_sched();
878 __tracing_reset(buffer, cpu);
879
880 ring_buffer_record_enable(buffer);
881}
882
883void tracing_reset_online_cpus(struct trace_array *tr)
884{
885 struct ring_buffer *buffer = tr->buffer;
886 int cpu;
887
888 ring_buffer_record_disable(buffer);
889
890 /* Make sure all commits have finished */
891 synchronize_sched();
892
893 tr->time_start = ftrace_now(tr->cpu);
894
895 for_each_online_cpu(cpu)
896 __tracing_reset(buffer, cpu);
897
898 ring_buffer_record_enable(buffer);
899}
900
901void tracing_reset_current(int cpu)
902{
903 tracing_reset(&global_trace, cpu);
904}
905
906void tracing_reset_current_online_cpus(void)
907{
908 tracing_reset_online_cpus(&global_trace);
909}
910
911#define SAVED_CMDLINES 128
912#define NO_CMDLINE_MAP UINT_MAX
913static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
914static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
915static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
916static int cmdline_idx;
917static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
918
919/* temporary disable recording */
920static atomic_t trace_record_cmdline_disabled __read_mostly;
921
922static void trace_init_cmdlines(void)
923{
924 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
925 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
926 cmdline_idx = 0;
927}
928
929int is_tracing_stopped(void)
930{
931 return trace_stop_count;
932}
933
934/**
935 * ftrace_off_permanent - disable all ftrace code permanently
936 *
937 * This should only be called when a serious anomally has
938 * been detected. This will turn off the function tracing,
939 * ring buffers, and other tracing utilites. It takes no
940 * locks and can be called from any context.
941 */
942void ftrace_off_permanent(void)
943{
944 tracing_disabled = 1;
945 ftrace_stop();
946 tracing_off_permanent();
947}
948
949/**
950 * tracing_start - quick start of the tracer
951 *
952 * If tracing is enabled but was stopped by tracing_stop,
953 * this will start the tracer back up.
954 */
955void tracing_start(void)
956{
957 struct ring_buffer *buffer;
958 unsigned long flags;
959
960 if (tracing_disabled)
961 return;
962
963 spin_lock_irqsave(&tracing_start_lock, flags);
964 if (--trace_stop_count) {
965 if (trace_stop_count < 0) {
966 /* Someone screwed up their debugging */
967 WARN_ON_ONCE(1);
968 trace_stop_count = 0;
969 }
970 goto out;
971 }
972
973 /* Prevent the buffers from switching */
974 arch_spin_lock(&ftrace_max_lock);
975
976 buffer = global_trace.buffer;
977 if (buffer)
978 ring_buffer_record_enable(buffer);
979
980 buffer = max_tr.buffer;
981 if (buffer)
982 ring_buffer_record_enable(buffer);
983
984 arch_spin_unlock(&ftrace_max_lock);
985
986 ftrace_start();
987 out:
988 spin_unlock_irqrestore(&tracing_start_lock, flags);
989}
990
991/**
992 * tracing_stop - quick stop of the tracer
993 *
994 * Lightweight way to stop tracing. Use in conjunction with
995 * tracing_start.
996 */
997void tracing_stop(void)
998{
999 struct ring_buffer *buffer;
1000 unsigned long flags;
1001
1002 ftrace_stop();
1003 spin_lock_irqsave(&tracing_start_lock, flags);
1004 if (trace_stop_count++)
1005 goto out;
1006
1007 /* Prevent the buffers from switching */
1008 arch_spin_lock(&ftrace_max_lock);
1009
1010 buffer = global_trace.buffer;
1011 if (buffer)
1012 ring_buffer_record_disable(buffer);
1013
1014 buffer = max_tr.buffer;
1015 if (buffer)
1016 ring_buffer_record_disable(buffer);
1017
1018 arch_spin_unlock(&ftrace_max_lock);
1019
1020 out:
1021 spin_unlock_irqrestore(&tracing_start_lock, flags);
1022}
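
/*
 * Illustrative usage sketch (not from this file): tracing_stop() and
 * tracing_start() nest by way of trace_stop_count, so a debugging hack
 * can bracket a region of interest to keep its events from being
 * overwritten before they are read, e.g.:
 *
 *	tracing_stop();
 *	inspect_state();	(hypothetical helper)
 *	tracing_start();
 *
 * Any caller is safe as long as every stop is paired with a start.
 */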
1023
1024void trace_stop_cmdline_recording(void);
1025
1026static void trace_save_cmdline(struct task_struct *tsk)
1027{
1028 unsigned pid, idx;
1029
1030 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1031 return;
1032
1033 /*
1034 * It's not the end of the world if we don't get
1035 * the lock, but we also don't want to spin
1036 * nor do we want to disable interrupts,
1037 * so if we miss here, then better luck next time.
1038 */
1039 if (!arch_spin_trylock(&trace_cmdline_lock))
1040 return;
1041
1042 idx = map_pid_to_cmdline[tsk->pid];
1043 if (idx == NO_CMDLINE_MAP) {
1044 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1045
1046 /*
1047 * Check whether the cmdline buffer at idx has a pid
1048 * mapped. We are going to overwrite that entry so we
1049 * need to clear the map_pid_to_cmdline. Otherwise we
1050 * would read the new comm for the old pid.
1051 */
1052 pid = map_cmdline_to_pid[idx];
1053 if (pid != NO_CMDLINE_MAP)
1054 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1055
1056 map_cmdline_to_pid[idx] = tsk->pid;
1057 map_pid_to_cmdline[tsk->pid] = idx;
1058
1059 cmdline_idx = idx;
1060 }
1061
1062 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1063
1064 arch_spin_unlock(&trace_cmdline_lock);
1065}
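
/*
 * Worked example (illustrative, the numbers are made up): suppose slot 5
 * currently holds the comm for pid 1234 and is about to be reused for
 * pid 999. map_pid_to_cmdline[1234] must be reset to NO_CMDLINE_MAP
 * before map_cmdline_to_pid[5] is pointed at 999, otherwise
 * trace_find_cmdline(1234) would hand back pid 999's comm.
 */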
1066
1067void trace_find_cmdline(int pid, char comm[])
1068{
1069 unsigned map;
1070
1071 if (!pid) {
1072 strcpy(comm, "<idle>");
1073 return;
1074 }
1075
1076 if (WARN_ON_ONCE(pid < 0)) {
1077 strcpy(comm, "<XXX>");
1078 return;
1079 }
1080
1081 if (pid > PID_MAX_DEFAULT) {
1082 strcpy(comm, "<...>");
1083 return;
1084 }
1085
1086 preempt_disable();
1087 arch_spin_lock(&trace_cmdline_lock);
1088 map = map_pid_to_cmdline[pid];
1089 if (map != NO_CMDLINE_MAP)
1090 strcpy(comm, saved_cmdlines[map]);
1091 else
1092 strcpy(comm, "<...>");
1093
1094 arch_spin_unlock(&trace_cmdline_lock);
1095 preempt_enable();
1096}
1097
1098void tracing_record_cmdline(struct task_struct *tsk)
1099{
1100 if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
1101 !tracing_is_on())
1102 return;
1103
1104 trace_save_cmdline(tsk);
1105}
1106
1107void
1108tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1109 int pc)
1110{
1111 struct task_struct *tsk = current;
1112
1113 entry->preempt_count = pc & 0xff;
1114 entry->pid = (tsk) ? tsk->pid : 0;
1115 entry->padding = 0;
1116 entry->flags =
1117#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1118 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1119#else
1120 TRACE_FLAG_IRQS_NOSUPPORT |
1121#endif
1122 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1123 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1124 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
1125}
1126EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1127
1128struct ring_buffer_event *
1129trace_buffer_lock_reserve(struct ring_buffer *buffer,
1130 int type,
1131 unsigned long len,
1132 unsigned long flags, int pc)
1133{
1134 struct ring_buffer_event *event;
1135
1136 event = ring_buffer_lock_reserve(buffer, len);
1137 if (event != NULL) {
1138 struct trace_entry *ent = ring_buffer_event_data(event);
1139
1140 tracing_generic_entry_update(ent, flags, pc);
1141 ent->type = type;
1142 }
1143
1144 return event;
1145}
1146
1147static inline void
1148__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1149 struct ring_buffer_event *event,
1150 unsigned long flags, int pc,
1151 int wake)
1152{
1153 ring_buffer_unlock_commit(buffer, event);
1154
1155 ftrace_trace_stack(buffer, flags, 6, pc);
1156 ftrace_trace_userstack(buffer, flags, pc);
1157
1158 if (wake)
1159 trace_wake_up();
1160}
1161
1162void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1163 struct ring_buffer_event *event,
1164 unsigned long flags, int pc)
1165{
1166 __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1167}
1168
1169struct ring_buffer_event *
1170trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1171 int type, unsigned long len,
1172 unsigned long flags, int pc)
1173{
1174 *current_rb = global_trace.buffer;
1175 return trace_buffer_lock_reserve(*current_rb,
1176 type, len, flags, pc);
1177}
1178EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1179
1180void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1181 struct ring_buffer_event *event,
1182 unsigned long flags, int pc)
1183{
1184 __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1185}
1186EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1187
1188void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
1189 struct ring_buffer_event *event,
1190 unsigned long flags, int pc)
1191{
1192 __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
1193}
1194EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
1195
1196void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1197 struct ring_buffer_event *event,
1198 unsigned long flags, int pc,
1199 struct pt_regs *regs)
1200{
1201 ring_buffer_unlock_commit(buffer, event);
1202
1203 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1204 ftrace_trace_userstack(buffer, flags, pc);
1205}
1206EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
1207
1208void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1209 struct ring_buffer_event *event)
1210{
1211 ring_buffer_discard_commit(buffer, event);
1212}
1213EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1214
1215void
1216trace_function(struct trace_array *tr,
1217 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1218 int pc)
1219{
1220 struct ftrace_event_call *call = &event_function;
1221 struct ring_buffer *buffer = tr->buffer;
1222 struct ring_buffer_event *event;
1223 struct ftrace_entry *entry;
1224
1225 /* If we are reading the ring buffer, don't trace */
1226 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1227 return;
1228
1229 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1230 flags, pc);
1231 if (!event)
1232 return;
1233 entry = ring_buffer_event_data(event);
1234 entry->ip = ip;
1235 entry->parent_ip = parent_ip;
1236
1237 if (!filter_check_discard(call, entry, buffer, event))
1238 ring_buffer_unlock_commit(buffer, event);
1239}
1240
1241void
1242ftrace(struct trace_array *tr, struct trace_array_cpu *data,
1243 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1244 int pc)
1245{
1246 if (likely(!atomic_read(&data->disabled)))
1247 trace_function(tr, ip, parent_ip, flags, pc);
1248}
1249
1250#ifdef CONFIG_STACKTRACE
1251
1252#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1253struct ftrace_stack {
1254 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1255};
1256
1257static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1258static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1259
1260static void __ftrace_trace_stack(struct ring_buffer *buffer,
1261 unsigned long flags,
1262 int skip, int pc, struct pt_regs *regs)
1263{
1264 struct ftrace_event_call *call = &event_kernel_stack;
1265 struct ring_buffer_event *event;
1266 struct stack_entry *entry;
1267 struct stack_trace trace;
1268 int use_stack;
1269 int size = FTRACE_STACK_ENTRIES;
1270
1271 trace.nr_entries = 0;
1272 trace.skip = skip;
1273
1274 /*
1275	 * Since events can happen in NMIs, there's no safe way to
1276	 * use the per-cpu ftrace_stacks. We reserve it, and if an interrupt
1277	 * or NMI comes in, it will just have to fall back to the default
1278	 * FTRACE_STACK_ENTRIES.
1279 */
1280 preempt_disable_notrace();
1281
1282 use_stack = ++__get_cpu_var(ftrace_stack_reserve);
1283 /*
1284 * We don't need any atomic variables, just a barrier.
1285 * If an interrupt comes in, we don't care, because it would
1286 * have exited and put the counter back to what we want.
1287 * We just need a barrier to keep gcc from moving things
1288 * around.
1289 */
1290 barrier();
1291 if (use_stack == 1) {
1292 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1293 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1294
1295 if (regs)
1296 save_stack_trace_regs(regs, &trace);
1297 else
1298 save_stack_trace(&trace);
1299
1300 if (trace.nr_entries > size)
1301 size = trace.nr_entries;
1302 } else
1303 /* From now on, use_stack is a boolean */
1304 use_stack = 0;
1305
1306 size *= sizeof(unsigned long);
1307
1308 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1309 sizeof(*entry) + size, flags, pc);
1310 if (!event)
1311 goto out;
1312 entry = ring_buffer_event_data(event);
1313
1314 memset(&entry->caller, 0, size);
1315
1316 if (use_stack)
1317 memcpy(&entry->caller, trace.entries,
1318 trace.nr_entries * sizeof(unsigned long));
1319 else {
1320 trace.max_entries = FTRACE_STACK_ENTRIES;
1321 trace.entries = entry->caller;
1322 if (regs)
1323 save_stack_trace_regs(regs, &trace);
1324 else
1325 save_stack_trace(&trace);
1326 }
1327
1328 entry->size = trace.nr_entries;
1329
1330 if (!filter_check_discard(call, entry, buffer, event))
1331 ring_buffer_unlock_commit(buffer, event);
1332
1333 out:
1334 /* Again, don't let gcc optimize things here */
1335 barrier();
1336 __get_cpu_var(ftrace_stack_reserve)--;
1337 preempt_enable_notrace();
1338
1339}
1340
1341void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1342 int skip, int pc, struct pt_regs *regs)
1343{
1344 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1345 return;
1346
1347 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1348}
1349
1350void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1351 int skip, int pc)
1352{
1353 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1354 return;
1355
1356 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1357}
1358
1359void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1360 int pc)
1361{
1362 __ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
1363}
1364
1365/**
1366 * trace_dump_stack - record a stack back trace in the trace buffer
1367 */
1368void trace_dump_stack(void)
1369{
1370 unsigned long flags;
1371
1372 if (tracing_disabled || tracing_selftest_running)
1373 return;
1374
1375 local_save_flags(flags);
1376
1377	/* Skipping 3 frames seems to get us to the caller of this function */
1378 __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
1379}
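
/*
 * Usage sketch (illustrative): a developer chasing a rare path can drop
 *
 *	if (looks_suspicious)		(hypothetical condition)
 *		trace_dump_stack();
 *
 * into the code while tracing is enabled. Note that, unlike
 * ftrace_trace_stack(), this does not check TRACE_ITER_STACKTRACE, so the
 * backtrace is recorded regardless of the stacktrace option.
 */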
1380
1381static DEFINE_PER_CPU(int, user_stack_count);
1382
1383void
1384ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1385{
1386 struct ftrace_event_call *call = &event_user_stack;
1387 struct ring_buffer_event *event;
1388 struct userstack_entry *entry;
1389 struct stack_trace trace;
1390
1391 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1392 return;
1393
1394 /*
1395	 * NMIs cannot handle page faults, even with fixups.
1396	 * Saving the user stack can (and often does) fault.
1397 */
1398 if (unlikely(in_nmi()))
1399 return;
1400
1401 /*
1402 * prevent recursion, since the user stack tracing may
1403 * trigger other kernel events.
1404 */
1405 preempt_disable();
1406 if (__this_cpu_read(user_stack_count))
1407 goto out;
1408
1409 __this_cpu_inc(user_stack_count);
1410
1411 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1412 sizeof(*entry), flags, pc);
1413 if (!event)
1414 goto out_drop_count;
1415 entry = ring_buffer_event_data(event);
1416
1417 entry->tgid = current->tgid;
1418 memset(&entry->caller, 0, sizeof(entry->caller));
1419
1420 trace.nr_entries = 0;
1421 trace.max_entries = FTRACE_STACK_ENTRIES;
1422 trace.skip = 0;
1423 trace.entries = entry->caller;
1424
1425 save_stack_trace_user(&trace);
1426 if (!filter_check_discard(call, entry, buffer, event))
1427 ring_buffer_unlock_commit(buffer, event);
1428
1429 out_drop_count:
1430 __this_cpu_dec(user_stack_count);
1431 out:
1432 preempt_enable();
1433}
1434
1435#ifdef UNUSED
1436static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1437{
1438 ftrace_trace_userstack(tr, flags, preempt_count());
1439}
1440#endif /* UNUSED */
1441
1442#endif /* CONFIG_STACKTRACE */
1443
1444/**
1445 * trace_vbprintk - write binary msg to tracing buffer
1446 *
1447 */
1448int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1449{
1450 static arch_spinlock_t trace_buf_lock =
1451 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1452 static u32 trace_buf[TRACE_BUF_SIZE];
1453
1454 struct ftrace_event_call *call = &event_bprint;
1455 struct ring_buffer_event *event;
1456 struct ring_buffer *buffer;
1457 struct trace_array *tr = &global_trace;
1458 struct trace_array_cpu *data;
1459 struct bprint_entry *entry;
1460 unsigned long flags;
1461 int disable;
1462 int cpu, len = 0, size, pc;
1463
1464 if (unlikely(tracing_selftest_running || tracing_disabled))
1465 return 0;
1466
1467 /* Don't pollute graph traces with trace_vprintk internals */
1468 pause_graph_tracing();
1469
1470 pc = preempt_count();
1471 preempt_disable_notrace();
1472 cpu = raw_smp_processor_id();
1473 data = tr->data[cpu];
1474
1475 disable = atomic_inc_return(&data->disabled);
1476 if (unlikely(disable != 1))
1477 goto out;
1478
1479 /* Lockdep uses trace_printk for lock tracing */
1480 local_irq_save(flags);
1481 arch_spin_lock(&trace_buf_lock);
1482 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1483
1484 if (len > TRACE_BUF_SIZE || len < 0)
1485 goto out_unlock;
1486
1487 size = sizeof(*entry) + sizeof(u32) * len;
1488 buffer = tr->buffer;
1489 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1490 flags, pc);
1491 if (!event)
1492 goto out_unlock;
1493 entry = ring_buffer_event_data(event);
1494 entry->ip = ip;
1495 entry->fmt = fmt;
1496
1497 memcpy(entry->buf, trace_buf, sizeof(u32) * len);
1498 if (!filter_check_discard(call, entry, buffer, event)) {
1499 ring_buffer_unlock_commit(buffer, event);
1500 ftrace_trace_stack(buffer, flags, 6, pc);
1501 }
1502
1503out_unlock:
1504 arch_spin_unlock(&trace_buf_lock);
1505 local_irq_restore(flags);
1506
1507out:
1508 atomic_dec_return(&data->disabled);
1509 preempt_enable_notrace();
1510 unpause_graph_tracing();
1511
1512 return len;
1513}
1514EXPORT_SYMBOL_GPL(trace_vbprintk);
1515
1516int trace_array_printk(struct trace_array *tr,
1517 unsigned long ip, const char *fmt, ...)
1518{
1519 int ret;
1520 va_list ap;
1521
1522 if (!(trace_flags & TRACE_ITER_PRINTK))
1523 return 0;
1524
1525 va_start(ap, fmt);
1526 ret = trace_array_vprintk(tr, ip, fmt, ap);
1527 va_end(ap);
1528 return ret;
1529}
1530
1531int trace_array_vprintk(struct trace_array *tr,
1532 unsigned long ip, const char *fmt, va_list args)
1533{
1534 static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1535 static char trace_buf[TRACE_BUF_SIZE];
1536
1537 struct ftrace_event_call *call = &event_print;
1538 struct ring_buffer_event *event;
1539 struct ring_buffer *buffer;
1540 struct trace_array_cpu *data;
1541 int cpu, len = 0, size, pc;
1542 struct print_entry *entry;
1543 unsigned long irq_flags;
1544 int disable;
1545
1546 if (tracing_disabled || tracing_selftest_running)
1547 return 0;
1548
1549 pc = preempt_count();
1550 preempt_disable_notrace();
1551 cpu = raw_smp_processor_id();
1552 data = tr->data[cpu];
1553
1554 disable = atomic_inc_return(&data->disabled);
1555 if (unlikely(disable != 1))
1556 goto out;
1557
1558 pause_graph_tracing();
1559 raw_local_irq_save(irq_flags);
1560 arch_spin_lock(&trace_buf_lock);
1561 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1562
1563 size = sizeof(*entry) + len + 1;
1564 buffer = tr->buffer;
1565 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
1566 irq_flags, pc);
1567 if (!event)
1568 goto out_unlock;
1569 entry = ring_buffer_event_data(event);
1570 entry->ip = ip;
1571
1572 memcpy(&entry->buf, trace_buf, len);
1573 entry->buf[len] = '\0';
1574 if (!filter_check_discard(call, entry, buffer, event)) {
1575 ring_buffer_unlock_commit(buffer, event);
1576 ftrace_trace_stack(buffer, irq_flags, 6, pc);
1577 }
1578
1579 out_unlock:
1580 arch_spin_unlock(&trace_buf_lock);
1581 raw_local_irq_restore(irq_flags);
1582 unpause_graph_tracing();
1583 out:
1584 atomic_dec_return(&data->disabled);
1585 preempt_enable_notrace();
1586
1587 return len;
1588}
1589
1590int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1591{
1592 return trace_array_vprintk(&global_trace, ip, fmt, args);
1593}
1594EXPORT_SYMBOL_GPL(trace_vprintk);
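
/*
 * Usage note (sketch): kernel code does not normally call trace_vbprintk()
 * or trace_vprintk() directly; the usual entry point is the trace_printk()
 * macro, roughly:
 *
 *	trace_printk("reached %s with x=%d\n", label, x);
 *
 * which lands in one of the two backends above (the binary bprint path when
 * the format can be deferred, the plain print path otherwise). The variable
 * names here are placeholders for the example.
 */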
1595
1596static void trace_iterator_increment(struct trace_iterator *iter)
1597{
1598 /* Don't allow ftrace to trace into the ring buffers */
1599 ftrace_disable_cpu();
1600
1601 iter->idx++;
1602 if (iter->buffer_iter[iter->cpu])
1603 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1604
1605 ftrace_enable_cpu();
1606}
1607
1608static struct trace_entry *
1609peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
1610 unsigned long *lost_events)
1611{
1612 struct ring_buffer_event *event;
1613 struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
1614
1615 /* Don't allow ftrace to trace into the ring buffers */
1616 ftrace_disable_cpu();
1617
1618 if (buf_iter)
1619 event = ring_buffer_iter_peek(buf_iter, ts);
1620 else
1621 event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
1622 lost_events);
1623
1624 ftrace_enable_cpu();
1625
1626 if (event) {
1627 iter->ent_size = ring_buffer_event_length(event);
1628 return ring_buffer_event_data(event);
1629 }
1630 iter->ent_size = 0;
1631 return NULL;
1632}
1633
1634static struct trace_entry *
1635__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1636 unsigned long *missing_events, u64 *ent_ts)
1637{
1638 struct ring_buffer *buffer = iter->tr->buffer;
1639 struct trace_entry *ent, *next = NULL;
1640 unsigned long lost_events = 0, next_lost = 0;
1641 int cpu_file = iter->cpu_file;
1642 u64 next_ts = 0, ts;
1643 int next_cpu = -1;
1644 int cpu;
1645
1646 /*
1647	 * If we are in a per_cpu trace file, don't bother iterating over
1648	 * all the cpus; peek directly at that one.
1649 */
1650 if (cpu_file > TRACE_PIPE_ALL_CPU) {
1651 if (ring_buffer_empty_cpu(buffer, cpu_file))
1652 return NULL;
1653 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
1654 if (ent_cpu)
1655 *ent_cpu = cpu_file;
1656
1657 return ent;
1658 }
1659
1660 for_each_tracing_cpu(cpu) {
1661
1662 if (ring_buffer_empty_cpu(buffer, cpu))
1663 continue;
1664
1665 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
1666
1667 /*
1668 * Pick the entry with the smallest timestamp:
1669 */
1670 if (ent && (!next || ts < next_ts)) {
1671 next = ent;
1672 next_cpu = cpu;
1673 next_ts = ts;
1674 next_lost = lost_events;
1675 }
1676 }
1677
1678 if (ent_cpu)
1679 *ent_cpu = next_cpu;
1680
1681 if (ent_ts)
1682 *ent_ts = next_ts;
1683
1684 if (missing_events)
1685 *missing_events = next_lost;
1686
1687 return next;
1688}
1689
1690/* Find the next real entry, without updating the iterator itself */
1691struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
1692 int *ent_cpu, u64 *ent_ts)
1693{
1694 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
1695}
1696
1697/* Find the next real entry, and increment the iterator to the next entry */
1698void *trace_find_next_entry_inc(struct trace_iterator *iter)
1699{
1700 iter->ent = __find_next_entry(iter, &iter->cpu,
1701 &iter->lost_events, &iter->ts);
1702
1703 if (iter->ent)
1704 trace_iterator_increment(iter);
1705
1706 return iter->ent ? iter : NULL;
1707}
1708
1709static void trace_consume(struct trace_iterator *iter)
1710{
1711 /* Don't allow ftrace to trace into the ring buffers */
1712 ftrace_disable_cpu();
1713 ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
1714 &iter->lost_events);
1715 ftrace_enable_cpu();
1716}
1717
1718static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1719{
1720 struct trace_iterator *iter = m->private;
1721 int i = (int)*pos;
1722 void *ent;
1723
1724 WARN_ON_ONCE(iter->leftover);
1725
1726 (*pos)++;
1727
1728 /* can't go backwards */
1729 if (iter->idx > i)
1730 return NULL;
1731
1732 if (iter->idx < 0)
1733 ent = trace_find_next_entry_inc(iter);
1734 else
1735 ent = iter;
1736
1737 while (ent && iter->idx < i)
1738 ent = trace_find_next_entry_inc(iter);
1739
1740 iter->pos = *pos;
1741
1742 return ent;
1743}
1744
1745void tracing_iter_reset(struct trace_iterator *iter, int cpu)
1746{
1747 struct trace_array *tr = iter->tr;
1748 struct ring_buffer_event *event;
1749 struct ring_buffer_iter *buf_iter;
1750 unsigned long entries = 0;
1751 u64 ts;
1752
1753 tr->data[cpu]->skipped_entries = 0;
1754
1755 if (!iter->buffer_iter[cpu])
1756 return;
1757
1758 buf_iter = iter->buffer_iter[cpu];
1759 ring_buffer_iter_reset(buf_iter);
1760
1761 /*
1762	 * With the max latency tracers, we could have the case that a
1763	 * reset never took place on a cpu. This is evident when the
1764	 * timestamp is before the start of the buffer.
1765 */
1766 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
1767 if (ts >= iter->tr->time_start)
1768 break;
1769 entries++;
1770 ring_buffer_read(buf_iter, NULL);
1771 }
1772
1773 tr->data[cpu]->skipped_entries = entries;
1774}
1775
1776/*
1777 * The current tracer is copied to avoid taking a global lock
1778 * all around.
1779 */
1780static void *s_start(struct seq_file *m, loff_t *pos)
1781{
1782 struct trace_iterator *iter = m->private;
1783 static struct tracer *old_tracer;
1784 int cpu_file = iter->cpu_file;
1785 void *p = NULL;
1786 loff_t l = 0;
1787 int cpu;
1788
1789 /* copy the tracer to avoid using a global lock all around */
1790 mutex_lock(&trace_types_lock);
1791 if (unlikely(old_tracer != current_trace && current_trace)) {
1792 old_tracer = current_trace;
1793 *iter->trace = *current_trace;
1794 }
1795 mutex_unlock(&trace_types_lock);
1796
1797 atomic_inc(&trace_record_cmdline_disabled);
1798
1799 if (*pos != iter->pos) {
1800 iter->ent = NULL;
1801 iter->cpu = 0;
1802 iter->idx = -1;
1803
1804 ftrace_disable_cpu();
1805
1806 if (cpu_file == TRACE_PIPE_ALL_CPU) {
1807 for_each_tracing_cpu(cpu)
1808 tracing_iter_reset(iter, cpu);
1809 } else
1810 tracing_iter_reset(iter, cpu_file);
1811
1812 ftrace_enable_cpu();
1813
1814 iter->leftover = 0;
1815 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1816 ;
1817
1818 } else {
1819 /*
1820 * If we overflowed the seq_file before, then we want
1821 * to just reuse the trace_seq buffer again.
1822 */
1823 if (iter->leftover)
1824 p = iter;
1825 else {
1826 l = *pos - 1;
1827 p = s_next(m, p, &l);
1828 }
1829 }
1830
1831 trace_event_read_lock();
1832 trace_access_lock(cpu_file);
1833 return p;
1834}
1835
1836static void s_stop(struct seq_file *m, void *p)
1837{
1838 struct trace_iterator *iter = m->private;
1839
1840 atomic_dec(&trace_record_cmdline_disabled);
1841 trace_access_unlock(iter->cpu_file);
1842 trace_event_read_unlock();
1843}
1844
1845static void print_lat_help_header(struct seq_file *m)
1846{
1847 seq_puts(m, "# _------=> CPU# \n");
1848 seq_puts(m, "# / _-----=> irqs-off \n");
1849 seq_puts(m, "# | / _----=> need-resched \n");
1850 seq_puts(m, "# || / _---=> hardirq/softirq \n");
1851 seq_puts(m, "# ||| / _--=> preempt-depth \n");
1852 seq_puts(m, "# |||| / delay \n");
1853 seq_puts(m, "# cmd pid ||||| time | caller \n");
1854 seq_puts(m, "# \\ / ||||| \\ | / \n");
1855}
1856
1857static void print_func_help_header(struct seq_file *m)
1858{
1859 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
1860 seq_puts(m, "# | | | | |\n");
1861}
1862
1863
1864void
1865print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1866{
1867 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1868 struct trace_array *tr = iter->tr;
1869 struct trace_array_cpu *data = tr->data[tr->cpu];
1870 struct tracer *type = current_trace;
1871 unsigned long entries = 0;
1872 unsigned long total = 0;
1873 unsigned long count;
1874 const char *name = "preemption";
1875 int cpu;
1876
1877 if (type)
1878 name = type->name;
1879
1880
1881 for_each_tracing_cpu(cpu) {
1882 count = ring_buffer_entries_cpu(tr->buffer, cpu);
1883 /*
1884 * If this buffer has skipped entries, then we hold all
1885 * entries for the trace and we need to ignore the
1886 * ones before the time stamp.
1887 */
1888 if (tr->data[cpu]->skipped_entries) {
1889 count -= tr->data[cpu]->skipped_entries;
1890 /* total is the same as the entries */
1891 total += count;
1892 } else
1893 total += count +
1894 ring_buffer_overrun_cpu(tr->buffer, cpu);
1895 entries += count;
1896 }
1897
1898 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
1899 name, UTS_RELEASE);
1900 seq_puts(m, "# -----------------------------------"
1901 "---------------------------------\n");
1902 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
1903 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
1904 nsecs_to_usecs(data->saved_latency),
1905 entries,
1906 total,
1907 tr->cpu,
1908#if defined(CONFIG_PREEMPT_NONE)
1909 "server",
1910#elif defined(CONFIG_PREEMPT_VOLUNTARY)
1911 "desktop",
1912#elif defined(CONFIG_PREEMPT)
1913 "preempt",
1914#else
1915 "unknown",
1916#endif
1917 /* These are reserved for later use */
1918 0, 0, 0, 0);
1919#ifdef CONFIG_SMP
1920 seq_printf(m, " #P:%d)\n", num_online_cpus());
1921#else
1922 seq_puts(m, ")\n");
1923#endif
1924 seq_puts(m, "# -----------------\n");
1925 seq_printf(m, "# | task: %.16s-%d "
1926 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
1927 data->comm, data->pid, data->uid, data->nice,
1928 data->policy, data->rt_priority);
1929 seq_puts(m, "# -----------------\n");
1930
1931 if (data->critical_start) {
1932 seq_puts(m, "# => started at: ");
1933 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
1934 trace_print_seq(m, &iter->seq);
1935 seq_puts(m, "\n# => ended at: ");
1936 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
1937 trace_print_seq(m, &iter->seq);
1938 seq_puts(m, "\n#\n");
1939 }
1940
1941 seq_puts(m, "#\n");
1942}
1943
1944static void test_cpu_buff_start(struct trace_iterator *iter)
1945{
1946 struct trace_seq *s = &iter->seq;
1947
1948 if (!(trace_flags & TRACE_ITER_ANNOTATE))
1949 return;
1950
1951 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
1952 return;
1953
1954 if (cpumask_test_cpu(iter->cpu, iter->started))
1955 return;
1956
1957 if (iter->tr->data[iter->cpu]->skipped_entries)
1958 return;
1959
1960 cpumask_set_cpu(iter->cpu, iter->started);
1961
1962 /* Don't print started cpu buffer for the first entry of the trace */
1963 if (iter->idx > 1)
1964 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
1965 iter->cpu);
1966}
1967
1968static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1969{
1970 struct trace_seq *s = &iter->seq;
1971 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1972 struct trace_entry *entry;
1973 struct trace_event *event;
1974
1975 entry = iter->ent;
1976
1977 test_cpu_buff_start(iter);
1978
1979 event = ftrace_find_event(entry->type);
1980
1981 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
1982 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1983 if (!trace_print_lat_context(iter))
1984 goto partial;
1985 } else {
1986 if (!trace_print_context(iter))
1987 goto partial;
1988 }
1989 }
1990
1991 if (event)
1992 return event->funcs->trace(iter, sym_flags, event);
1993
1994 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
1995 goto partial;
1996
1997 return TRACE_TYPE_HANDLED;
1998partial:
1999 return TRACE_TYPE_PARTIAL_LINE;
2000}
2001
2002static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2003{
2004 struct trace_seq *s = &iter->seq;
2005 struct trace_entry *entry;
2006 struct trace_event *event;
2007
2008 entry = iter->ent;
2009
2010 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2011 if (!trace_seq_printf(s, "%d %d %llu ",
2012 entry->pid, iter->cpu, iter->ts))
2013 goto partial;
2014 }
2015
2016 event = ftrace_find_event(entry->type);
2017 if (event)
2018 return event->funcs->raw(iter, 0, event);
2019
2020 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2021 goto partial;
2022
2023 return TRACE_TYPE_HANDLED;
2024partial:
2025 return TRACE_TYPE_PARTIAL_LINE;
2026}
2027
2028static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2029{
2030 struct trace_seq *s = &iter->seq;
2031 unsigned char newline = '\n';
2032 struct trace_entry *entry;
2033 struct trace_event *event;
2034
2035 entry = iter->ent;
2036
2037 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2038 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2039 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2040 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2041 }
2042
2043 event = ftrace_find_event(entry->type);
2044 if (event) {
2045 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2046 if (ret != TRACE_TYPE_HANDLED)
2047 return ret;
2048 }
2049
2050 SEQ_PUT_FIELD_RET(s, newline);
2051
2052 return TRACE_TYPE_HANDLED;
2053}
2054
2055static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2056{
2057 struct trace_seq *s = &iter->seq;
2058 struct trace_entry *entry;
2059 struct trace_event *event;
2060
2061 entry = iter->ent;
2062
2063 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2064 SEQ_PUT_FIELD_RET(s, entry->pid);
2065 SEQ_PUT_FIELD_RET(s, iter->cpu);
2066 SEQ_PUT_FIELD_RET(s, iter->ts);
2067 }
2068
2069 event = ftrace_find_event(entry->type);
2070 return event ? event->funcs->binary(iter, 0, event) :
2071 TRACE_TYPE_HANDLED;
2072}
2073
2074int trace_empty(struct trace_iterator *iter)
2075{
2076 int cpu;
2077
2078 /* If we are looking at one CPU buffer, only check that one */
2079 if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
2080 cpu = iter->cpu_file;
2081 if (iter->buffer_iter[cpu]) {
2082 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
2083 return 0;
2084 } else {
2085 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2086 return 0;
2087 }
2088 return 1;
2089 }
2090
2091 for_each_tracing_cpu(cpu) {
2092 if (iter->buffer_iter[cpu]) {
2093 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
2094 return 0;
2095 } else {
2096 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2097 return 0;
2098 }
2099 }
2100
2101 return 1;
2102}
2103
2104/* Called with trace_event_read_lock() held. */
2105enum print_line_t print_trace_line(struct trace_iterator *iter)
2106{
2107 enum print_line_t ret;
2108
2109 if (iter->lost_events &&
2110 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2111 iter->cpu, iter->lost_events))
2112 return TRACE_TYPE_PARTIAL_LINE;
2113
2114 if (iter->trace && iter->trace->print_line) {
2115 ret = iter->trace->print_line(iter);
2116 if (ret != TRACE_TYPE_UNHANDLED)
2117 return ret;
2118 }
2119
2120 if (iter->ent->type == TRACE_BPRINT &&
2121 trace_flags & TRACE_ITER_PRINTK &&
2122 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2123 return trace_print_bprintk_msg_only(iter);
2124
2125 if (iter->ent->type == TRACE_PRINT &&
2126 trace_flags & TRACE_ITER_PRINTK &&
2127 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2128 return trace_print_printk_msg_only(iter);
2129
2130 if (trace_flags & TRACE_ITER_BIN)
2131 return print_bin_fmt(iter);
2132
2133 if (trace_flags & TRACE_ITER_HEX)
2134 return print_hex_fmt(iter);
2135
2136 if (trace_flags & TRACE_ITER_RAW)
2137 return print_raw_fmt(iter);
2138
2139 return print_trace_fmt(iter);
2140}
2141
2142void trace_default_header(struct seq_file *m)
2143{
2144 struct trace_iterator *iter = m->private;
2145
2146 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2147 return;
2148
2149 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2150 /* print nothing if the buffers are empty */
2151 if (trace_empty(iter))
2152 return;
2153 print_trace_header(m, iter);
2154 if (!(trace_flags & TRACE_ITER_VERBOSE))
2155 print_lat_help_header(m);
2156 } else {
2157 if (!(trace_flags & TRACE_ITER_VERBOSE))
2158 print_func_help_header(m);
2159 }
2160}
2161
2162static int s_show(struct seq_file *m, void *v)
2163{
2164 struct trace_iterator *iter = v;
2165 int ret;
2166
2167 if (iter->ent == NULL) {
2168 if (iter->tr) {
2169 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2170 seq_puts(m, "#\n");
2171 }
2172 if (iter->trace && iter->trace->print_header)
2173 iter->trace->print_header(m);
2174 else
2175 trace_default_header(m);
2176
2177 } else if (iter->leftover) {
2178 /*
2179 * If we filled the seq_file buffer earlier, we
2180 * want to just show it now.
2181 */
2182 ret = trace_print_seq(m, &iter->seq);
2183
2184 /* ret should this time be zero, but you never know */
2185 iter->leftover = ret;
2186
2187 } else {
2188 print_trace_line(iter);
2189 ret = trace_print_seq(m, &iter->seq);
2190 /*
2191		 * If we overflow the seq_file buffer, then it will
2192		 * ask us for this data again at the next s_start().
2193		 * Use the leftover trace_seq data instead.
2194 * ret is 0 if seq_file write succeeded.
2195 * -1 otherwise.
2196 */
2197 iter->leftover = ret;
2198 }
2199
2200 return 0;
2201}
2202
2203static const struct seq_operations tracer_seq_ops = {
2204 .start = s_start,
2205 .next = s_next,
2206 .stop = s_stop,
2207 .show = s_show,
2208};
2209
2210static struct trace_iterator *
2211__tracing_open(struct inode *inode, struct file *file)
2212{
2213 long cpu_file = (long) inode->i_private;
2214 void *fail_ret = ERR_PTR(-ENOMEM);
2215 struct trace_iterator *iter;
2216 struct seq_file *m;
2217 int cpu, ret;
2218
2219 if (tracing_disabled)
2220 return ERR_PTR(-ENODEV);
2221
2222 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2223 if (!iter)
2224 return ERR_PTR(-ENOMEM);
2225
2226 /*
2227 * We make a copy of the current tracer to avoid concurrent
2228 * changes on it while we are reading.
2229 */
2230 mutex_lock(&trace_types_lock);
2231 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2232 if (!iter->trace)
2233 goto fail;
2234
2235 if (current_trace)
2236 *iter->trace = *current_trace;
2237
2238 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2239 goto fail;
2240
2241 if (current_trace && current_trace->print_max)
2242 iter->tr = &max_tr;
2243 else
2244 iter->tr = &global_trace;
2245 iter->pos = -1;
2246 mutex_init(&iter->mutex);
2247 iter->cpu_file = cpu_file;
2248
2249 /* Notify the tracer early; before we stop tracing. */
2250 if (iter->trace && iter->trace->open)
2251 iter->trace->open(iter);
2252
2253 /* Annotate start of buffers if we had overruns */
2254 if (ring_buffer_overruns(iter->tr->buffer))
2255 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2256
2257 /* stop the trace while dumping */
2258 tracing_stop();
2259
2260 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
2261 for_each_tracing_cpu(cpu) {
2262 iter->buffer_iter[cpu] =
2263 ring_buffer_read_prepare(iter->tr->buffer, cpu);
2264 }
2265 ring_buffer_read_prepare_sync();
2266 for_each_tracing_cpu(cpu) {
2267 ring_buffer_read_start(iter->buffer_iter[cpu]);
2268 tracing_iter_reset(iter, cpu);
2269 }
2270 } else {
2271 cpu = iter->cpu_file;
2272 iter->buffer_iter[cpu] =
2273 ring_buffer_read_prepare(iter->tr->buffer, cpu);
2274 ring_buffer_read_prepare_sync();
2275 ring_buffer_read_start(iter->buffer_iter[cpu]);
2276 tracing_iter_reset(iter, cpu);
2277 }
2278
2279 ret = seq_open(file, &tracer_seq_ops);
2280 if (ret < 0) {
2281 fail_ret = ERR_PTR(ret);
2282 goto fail_buffer;
2283 }
2284
2285 m = file->private_data;
2286 m->private = iter;
2287
2288 mutex_unlock(&trace_types_lock);
2289
2290 return iter;
2291
2292 fail_buffer:
2293 for_each_tracing_cpu(cpu) {
2294 if (iter->buffer_iter[cpu])
2295 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2296 }
2297 free_cpumask_var(iter->started);
2298 tracing_start();
2299 fail:
2300 mutex_unlock(&trace_types_lock);
2301 kfree(iter->trace);
2302 kfree(iter);
2303
2304 return fail_ret;
2305}
2306
2307int tracing_open_generic(struct inode *inode, struct file *filp)
2308{
2309 if (tracing_disabled)
2310 return -ENODEV;
2311
2312 filp->private_data = inode->i_private;
2313 return 0;
2314}
2315
2316static int tracing_release(struct inode *inode, struct file *file)
2317{
2318 struct seq_file *m = file->private_data;
2319 struct trace_iterator *iter;
2320 int cpu;
2321
2322 if (!(file->f_mode & FMODE_READ))
2323 return 0;
2324
2325 iter = m->private;
2326
2327 mutex_lock(&trace_types_lock);
2328 for_each_tracing_cpu(cpu) {
2329 if (iter->buffer_iter[cpu])
2330 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2331 }
2332
2333 if (iter->trace && iter->trace->close)
2334 iter->trace->close(iter);
2335
2336 /* reenable tracing if it was previously enabled */
2337 tracing_start();
2338 mutex_unlock(&trace_types_lock);
2339
2340 seq_release(inode, file);
2341 mutex_destroy(&iter->mutex);
2342 free_cpumask_var(iter->started);
2343 kfree(iter->trace);
2344 kfree(iter);
2345 return 0;
2346}
2347
2348static int tracing_open(struct inode *inode, struct file *file)
2349{
2350 struct trace_iterator *iter;
2351 int ret = 0;
2352
2353 /* If this file was open for write, then erase contents */
2354 if ((file->f_mode & FMODE_WRITE) &&
2355 (file->f_flags & O_TRUNC)) {
2356 long cpu = (long) inode->i_private;
2357
2358 if (cpu == TRACE_PIPE_ALL_CPU)
2359 tracing_reset_online_cpus(&global_trace);
2360 else
2361 tracing_reset(&global_trace, cpu);
2362 }
2363
2364 if (file->f_mode & FMODE_READ) {
2365 iter = __tracing_open(inode, file);
2366 if (IS_ERR(iter))
2367 ret = PTR_ERR(iter);
2368 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2369 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2370 }
2371 return ret;
2372}
2373
2374static void *
2375t_next(struct seq_file *m, void *v, loff_t *pos)
2376{
2377 struct tracer *t = v;
2378
2379 (*pos)++;
2380
2381 if (t)
2382 t = t->next;
2383
2384 return t;
2385}
2386
2387static void *t_start(struct seq_file *m, loff_t *pos)
2388{
2389 struct tracer *t;
2390 loff_t l = 0;
2391
2392 mutex_lock(&trace_types_lock);
2393 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
2394 ;
2395
2396 return t;
2397}
2398
2399static void t_stop(struct seq_file *m, void *p)
2400{
2401 mutex_unlock(&trace_types_lock);
2402}
2403
2404static int t_show(struct seq_file *m, void *v)
2405{
2406 struct tracer *t = v;
2407
2408 if (!t)
2409 return 0;
2410
2411 seq_printf(m, "%s", t->name);
2412 if (t->next)
2413 seq_putc(m, ' ');
2414 else
2415 seq_putc(m, '\n');
2416
2417 return 0;
2418}
2419
2420static const struct seq_operations show_traces_seq_ops = {
2421 .start = t_start,
2422 .next = t_next,
2423 .stop = t_stop,
2424 .show = t_show,
2425};
2426
2427static int show_traces_open(struct inode *inode, struct file *file)
2428{
2429 if (tracing_disabled)
2430 return -ENODEV;
2431
2432 return seq_open(file, &show_traces_seq_ops);
2433}
2434
2435static ssize_t
2436tracing_write_stub(struct file *filp, const char __user *ubuf,
2437 size_t count, loff_t *ppos)
2438{
2439 return count;
2440}
2441
2442static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
2443{
2444 if (file->f_mode & FMODE_READ)
2445 return seq_lseek(file, offset, origin);
2446 else
2447 return 0;
2448}
2449
2450static const struct file_operations tracing_fops = {
2451 .open = tracing_open,
2452 .read = seq_read,
2453 .write = tracing_write_stub,
2454 .llseek = tracing_seek,
2455 .release = tracing_release,
2456};
2457
2458static const struct file_operations show_traces_fops = {
2459 .open = show_traces_open,
2460 .read = seq_read,
2461 .release = seq_release,
2462 .llseek = seq_lseek,
2463};
2464
2465/*
2466 * Only trace on a CPU if the bitmask is set:
2467 */
2468static cpumask_var_t tracing_cpumask;
2469
2470/*
2471 * The tracer itself will not take this lock, but we still want
2472 * to provide a consistent cpumask to user-space:
2473 */
2474static DEFINE_MUTEX(tracing_cpumask_update_lock);
2475
2476/*
2477 * Temporary storage for the character representation of the
2478 * CPU bitmask (and one more byte for the newline):
2479 */
2480static char mask_str[NR_CPUS + 1];
2481
2482static ssize_t
2483tracing_cpumask_read(struct file *filp, char __user *ubuf,
2484 size_t count, loff_t *ppos)
2485{
2486 int len;
2487
2488 mutex_lock(&tracing_cpumask_update_lock);
2489
2490 len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2491 if (count - len < 2) {
2492 count = -EINVAL;
2493 goto out_err;
2494 }
2495 len += sprintf(mask_str + len, "\n");
2496 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2497
2498out_err:
2499 mutex_unlock(&tracing_cpumask_update_lock);
2500
2501 return count;
2502}
2503
2504static ssize_t
2505tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2506 size_t count, loff_t *ppos)
2507{
2508 int err, cpu;
2509 cpumask_var_t tracing_cpumask_new;
2510
2511 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2512 return -ENOMEM;
2513
2514 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2515 if (err)
2516 goto err_unlock;
2517
2518 mutex_lock(&tracing_cpumask_update_lock);
2519
2520 local_irq_disable();
2521 arch_spin_lock(&ftrace_max_lock);
2522 for_each_tracing_cpu(cpu) {
2523 /*
2524 * Increase/decrease the disabled counter if we are
2525 * about to flip a bit in the cpumask:
2526 */
2527 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2528 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2529 atomic_inc(&global_trace.data[cpu]->disabled);
2530 }
2531 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2532 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2533 atomic_dec(&global_trace.data[cpu]->disabled);
2534 }
2535 }
2536 arch_spin_unlock(&ftrace_max_lock);
2537 local_irq_enable();
2538
2539 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
2540
2541 mutex_unlock(&tracing_cpumask_update_lock);
2542 free_cpumask_var(tracing_cpumask_new);
2543
2544 return count;
2545
2546err_unlock:
2547 free_cpumask_var(tracing_cpumask_new);
2548
2549 return err;
2550}
2551
2552static const struct file_operations tracing_cpumask_fops = {
2553 .open = tracing_open_generic,
2554 .read = tracing_cpumask_read,
2555 .write = tracing_cpumask_write,
2556 .llseek = generic_file_llseek,
2557};
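
/*
 * Example (illustrative, in the style of the mini-HOWTO below): the mask is
 * read and written as a hex cpumask, so on a 4-CPU box
 *
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *
 * restricts tracing to CPUs 0 and 1, and the write path above bumps the
 * disabled counter for CPUs 2 and 3.
 */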
2558
2559static int tracing_trace_options_show(struct seq_file *m, void *v)
2560{
2561 struct tracer_opt *trace_opts;
2562 u32 tracer_flags;
2563 int i;
2564
2565 mutex_lock(&trace_types_lock);
2566 tracer_flags = current_trace->flags->val;
2567 trace_opts = current_trace->flags->opts;
2568
2569 for (i = 0; trace_options[i]; i++) {
2570 if (trace_flags & (1 << i))
2571 seq_printf(m, "%s\n", trace_options[i]);
2572 else
2573 seq_printf(m, "no%s\n", trace_options[i]);
2574 }
2575
2576 for (i = 0; trace_opts[i].name; i++) {
2577 if (tracer_flags & trace_opts[i].bit)
2578 seq_printf(m, "%s\n", trace_opts[i].name);
2579 else
2580 seq_printf(m, "no%s\n", trace_opts[i].name);
2581 }
2582 mutex_unlock(&trace_types_lock);
2583
2584 return 0;
2585}
2586
2587static int __set_tracer_option(struct tracer *trace,
2588 struct tracer_flags *tracer_flags,
2589 struct tracer_opt *opts, int neg)
2590{
2591 int ret;
2592
2593 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
2594 if (ret)
2595 return ret;
2596
2597 if (neg)
2598 tracer_flags->val &= ~opts->bit;
2599 else
2600 tracer_flags->val |= opts->bit;
2601 return 0;
2602}
2603
2604/* Try to assign a tracer specific option */
2605static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2606{
2607 struct tracer_flags *tracer_flags = trace->flags;
2608 struct tracer_opt *opts = NULL;
2609 int i;
2610
2611 for (i = 0; tracer_flags->opts[i].name; i++) {
2612 opts = &tracer_flags->opts[i];
2613
2614 if (strcmp(cmp, opts->name) == 0)
2615 return __set_tracer_option(trace, trace->flags,
2616 opts, neg);
2617 }
2618
2619 return -EINVAL;
2620}
2621
2622static void set_tracer_flags(unsigned int mask, int enabled)
2623{
2624 /* do nothing if flag is already set */
2625 if (!!(trace_flags & mask) == !!enabled)
2626 return;
2627
2628 if (enabled)
2629 trace_flags |= mask;
2630 else
2631 trace_flags &= ~mask;
2632
2633 if (mask == TRACE_ITER_RECORD_CMD)
2634 trace_event_enable_cmd_record(enabled);
2635
2636 if (mask == TRACE_ITER_OVERWRITE)
2637 ring_buffer_change_overwrite(global_trace.buffer, enabled);
2638}
2639
2640static ssize_t
2641tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2642 size_t cnt, loff_t *ppos)
2643{
2644 char buf[64];
2645 char *cmp;
2646 int neg = 0;
2647 int ret;
2648 int i;
2649
2650 if (cnt >= sizeof(buf))
2651 return -EINVAL;
2652
2653 if (copy_from_user(&buf, ubuf, cnt))
2654 return -EFAULT;
2655
2656 buf[cnt] = 0;
2657 cmp = strstrip(buf);
2658
2659 if (strncmp(cmp, "no", 2) == 0) {
2660 neg = 1;
2661 cmp += 2;
2662 }
2663
2664 for (i = 0; trace_options[i]; i++) {
2665 if (strcmp(cmp, trace_options[i]) == 0) {
2666 set_tracer_flags(1 << i, !neg);
2667 break;
2668 }
2669 }
2670
2671 /* If no option could be set, test the specific tracer options */
2672 if (!trace_options[i]) {
2673 mutex_lock(&trace_types_lock);
2674 ret = set_tracer_option(current_trace, cmp, neg);
2675 mutex_unlock(&trace_types_lock);
2676 if (ret)
2677 return ret;
2678 }
2679
2680 *ppos += cnt;
2681
2682 return cnt;
2683}
2684
2685static int tracing_trace_options_open(struct inode *inode, struct file *file)
2686{
2687 if (tracing_disabled)
2688 return -ENODEV;
2689 return single_open(file, tracing_trace_options_show, NULL);
2690}
2691
2692static const struct file_operations tracing_iter_fops = {
2693 .open = tracing_trace_options_open,
2694 .read = seq_read,
2695 .llseek = seq_lseek,
2696 .release = single_release,
2697 .write = tracing_trace_options_write,
2698};
2699
2700static const char readme_msg[] =
2701 "tracing mini-HOWTO:\n\n"
2702 "# mount -t debugfs nodev /sys/kernel/debug\n\n"
2703 "# cat /sys/kernel/debug/tracing/available_tracers\n"
2704 "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
2705 "# cat /sys/kernel/debug/tracing/current_tracer\n"
2706 "nop\n"
2707 "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
2708 "# cat /sys/kernel/debug/tracing/current_tracer\n"
2709 "sched_switch\n"
2710 "# cat /sys/kernel/debug/tracing/trace_options\n"
2711 "noprint-parent nosym-offset nosym-addr noverbose\n"
2712 "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
2713 "# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
2714 "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
2715 "# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
2716;
2717
2718static ssize_t
2719tracing_readme_read(struct file *filp, char __user *ubuf,
2720 size_t cnt, loff_t *ppos)
2721{
2722 return simple_read_from_buffer(ubuf, cnt, ppos,
2723 readme_msg, strlen(readme_msg));
2724}
2725
2726static const struct file_operations tracing_readme_fops = {
2727 .open = tracing_open_generic,
2728 .read = tracing_readme_read,
2729 .llseek = generic_file_llseek,
2730};
2731
2732static ssize_t
2733tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
2734 size_t cnt, loff_t *ppos)
2735{
2736 char *buf_comm;
2737 char *file_buf;
2738 char *buf;
2739 int len = 0;
2740 int pid;
2741 int i;
2742
2743 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
2744 if (!file_buf)
2745 return -ENOMEM;
2746
2747 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
2748 if (!buf_comm) {
2749 kfree(file_buf);
2750 return -ENOMEM;
2751 }
2752
2753 buf = file_buf;
2754
2755 for (i = 0; i < SAVED_CMDLINES; i++) {
2756 int r;
2757
2758 pid = map_cmdline_to_pid[i];
2759 if (pid == -1 || pid == NO_CMDLINE_MAP)
2760 continue;
2761
2762 trace_find_cmdline(pid, buf_comm);
2763 r = sprintf(buf, "%d %s\n", pid, buf_comm);
2764 buf += r;
2765 len += r;
2766 }
2767
2768 len = simple_read_from_buffer(ubuf, cnt, ppos,
2769 file_buf, len);
2770
2771 kfree(file_buf);
2772 kfree(buf_comm);
2773
2774 return len;
2775}
2776
2777static const struct file_operations tracing_saved_cmdlines_fops = {
2778 .open = tracing_open_generic,
2779 .read = tracing_saved_cmdlines_read,
2780 .llseek = generic_file_llseek,
2781};
2782
2783static ssize_t
2784tracing_ctrl_read(struct file *filp, char __user *ubuf,
2785 size_t cnt, loff_t *ppos)
2786{
2787 char buf[64];
2788 int r;
2789
2790 r = sprintf(buf, "%u\n", tracer_enabled);
2791 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2792}
2793
2794static ssize_t
2795tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2796 size_t cnt, loff_t *ppos)
2797{
2798 struct trace_array *tr = filp->private_data;
2799 unsigned long val;
2800 int ret;
2801
2802 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
2803 if (ret)
2804 return ret;
2805
2806 val = !!val;
2807
2808 mutex_lock(&trace_types_lock);
2809 if (tracer_enabled ^ val) {
2810
2811 /* Only need to warn if this is used to change the state */
2812 WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
2813
2814 if (val) {
2815 tracer_enabled = 1;
2816 if (current_trace->start)
2817 current_trace->start(tr);
2818 tracing_start();
2819 } else {
2820 tracer_enabled = 0;
2821 tracing_stop();
2822 if (current_trace->stop)
2823 current_trace->stop(tr);
2824 }
2825 }
2826 mutex_unlock(&trace_types_lock);
2827
2828 *ppos += cnt;
2829
2830 return cnt;
2831}
2832
2833static ssize_t
2834tracing_set_trace_read(struct file *filp, char __user *ubuf,
2835 size_t cnt, loff_t *ppos)
2836{
2837 char buf[MAX_TRACER_SIZE+2];
2838 int r;
2839
2840 mutex_lock(&trace_types_lock);
2841 if (current_trace)
2842 r = sprintf(buf, "%s\n", current_trace->name);
2843 else
2844 r = sprintf(buf, "\n");
2845 mutex_unlock(&trace_types_lock);
2846
2847 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2848}
2849
2850int tracer_init(struct tracer *t, struct trace_array *tr)
2851{
2852 tracing_reset_online_cpus(tr);
2853 return t->init(tr);
2854}
2855
2856static int __tracing_resize_ring_buffer(unsigned long size)
2857{
2858 int ret;
2859
2860 /*
2861	 * If the kernel or the user changes the size of the ring buffer,
2862	 * we use the size that was given, and we can forget about
2863	 * expanding it later.
2864 */
2865 ring_buffer_expanded = 1;
2866
2867 ret = ring_buffer_resize(global_trace.buffer, size);
2868 if (ret < 0)
2869 return ret;
2870
2871 if (!current_trace->use_max_tr)
2872 goto out;
2873
2874 ret = ring_buffer_resize(max_tr.buffer, size);
2875 if (ret < 0) {
2876 int r;
2877
2878 r = ring_buffer_resize(global_trace.buffer,
2879 global_trace.entries);
2880 if (r < 0) {
2881 /*
2882			 * AARGH! We are left with a differently
2883			 * sized max buffer!
2884			 * The max buffer is our "snapshot" buffer.
2885			 * When a tracer needs a snapshot (one of the
2886			 * latency tracers), it swaps the max buffer
2887			 * with the saved snapshot. We succeeded in
2888			 * updating the size of the main buffer, but failed to
2889			 * update the size of the max buffer. But when we tried
2890 * to reset the main buffer to the original size, we
2891 * failed there too. This is very unlikely to
2892 * happen, but if it does, warn and kill all
2893 * tracing.
2894 */
2895 WARN_ON(1);
2896 tracing_disabled = 1;
2897 }
2898 return ret;
2899 }
2900
2901 max_tr.entries = size;
2902 out:
2903 global_trace.entries = size;
2904
2905 return ret;
2906}
2907
2908static ssize_t tracing_resize_ring_buffer(unsigned long size)
2909{
2910 int cpu, ret = size;
2911
2912 mutex_lock(&trace_types_lock);
2913
2914 tracing_stop();
2915
2916 /* disable all cpu buffers */
2917 for_each_tracing_cpu(cpu) {
2918 if (global_trace.data[cpu])
2919 atomic_inc(&global_trace.data[cpu]->disabled);
2920 if (max_tr.data[cpu])
2921 atomic_inc(&max_tr.data[cpu]->disabled);
2922 }
2923
2924 if (size != global_trace.entries)
2925 ret = __tracing_resize_ring_buffer(size);
2926
2927 if (ret < 0)
2928 ret = -ENOMEM;
2929
2930 for_each_tracing_cpu(cpu) {
2931 if (global_trace.data[cpu])
2932 atomic_dec(&global_trace.data[cpu]->disabled);
2933 if (max_tr.data[cpu])
2934 atomic_dec(&max_tr.data[cpu]->disabled);
2935 }
2936
2937 tracing_start();
2938 mutex_unlock(&trace_types_lock);
2939
2940 return ret;
2941}
2942
2943
2944/**
2945 * tracing_update_buffers - used by tracing facility to expand ring buffers
2946 *
2947 * To save memory when tracing is never used on a system that has it
2948 * configured in, the ring buffers are set to a minimum size. But once
2949 * a user starts to use the tracing facility, they need to grow
2950 * to their default size.
2951 *
2952 * This function is to be called when a tracer is about to be used.
2953 */
2954int tracing_update_buffers(void)
2955{
2956 int ret = 0;
2957
2958 mutex_lock(&trace_types_lock);
2959 if (!ring_buffer_expanded)
2960 ret = __tracing_resize_ring_buffer(trace_buf_size);
2961 mutex_unlock(&trace_types_lock);
2962
2963 return ret;
2964}
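
/*
 * Caller-side sketch (illustrative): code that is about to enable a tracer
 * or a trace event is expected to do something like
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *	(then go on to enable the tracer/event)
 *
 * so that the first real user pays the cost of growing the buffers to
 * trace_buf_size, instead of every boot paying it up front.
 */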
2965
2966struct trace_option_dentry;
2967
2968static struct trace_option_dentry *
2969create_trace_option_files(struct tracer *tracer);
2970
2971static void
2972destroy_trace_option_files(struct trace_option_dentry *topts);
2973
2974static int tracing_set_tracer(const char *buf)
2975{
2976 static struct trace_option_dentry *topts;
2977 struct trace_array *tr = &global_trace;
2978 struct tracer *t;
2979 int ret = 0;
2980
2981 mutex_lock(&trace_types_lock);
2982
2983 if (!ring_buffer_expanded) {
2984 ret = __tracing_resize_ring_buffer(trace_buf_size);
2985 if (ret < 0)
2986 goto out;
2987 ret = 0;
2988 }
2989
2990 for (t = trace_types; t; t = t->next) {
2991 if (strcmp(t->name, buf) == 0)
2992 break;
2993 }
2994 if (!t) {
2995 ret = -EINVAL;
2996 goto out;
2997 }
2998 if (t == current_trace)
2999 goto out;
3000
3001 trace_branch_disable();
3002 if (current_trace && current_trace->reset)
3003 current_trace->reset(tr);
3004 if (current_trace && current_trace->use_max_tr) {
3005 /*
3006		 * We don't free the ring buffer; instead, we resize it because
3007		 * the max_tr ring buffer has some state (e.g. ring->clock) and
3008		 * we want to preserve it.
3009 */
3010 ring_buffer_resize(max_tr.buffer, 1);
3011 max_tr.entries = 1;
3012 }
3013 destroy_trace_option_files(topts);
3014
3015 current_trace = t;
3016
3017 topts = create_trace_option_files(current_trace);
3018 if (current_trace->use_max_tr) {
3019 ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
3020 if (ret < 0)
3021 goto out;
3022 max_tr.entries = global_trace.entries;
3023 }
3024
3025 if (t->init) {
3026 ret = tracer_init(t, tr);
3027 if (ret)
3028 goto out;
3029 }
3030
3031 trace_branch_enable(tr);
3032 out:
3033 mutex_unlock(&trace_types_lock);
3034
3035 return ret;
3036}
3037
3038static ssize_t
3039tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3040 size_t cnt, loff_t *ppos)
3041{
3042 char buf[MAX_TRACER_SIZE+1];
3043 int i;
3044 size_t ret;
3045 int err;
3046
3047 ret = cnt;
3048
3049 if (cnt > MAX_TRACER_SIZE)
3050 cnt = MAX_TRACER_SIZE;
3051
3052 if (copy_from_user(&buf, ubuf, cnt))
3053 return -EFAULT;
3054
3055 buf[cnt] = 0;
3056
3057 /* strip ending whitespace. */
3058 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3059 buf[i] = 0;
3060
3061 err = tracing_set_tracer(buf);
3062 if (err)
3063 return err;
3064
3065 *ppos += ret;
3066
3067 return ret;
3068}
3069
3070static ssize_t
3071tracing_max_lat_read(struct file *filp, char __user *ubuf,
3072 size_t cnt, loff_t *ppos)
3073{
3074 unsigned long *ptr = filp->private_data;
3075 char buf[64];
3076 int r;
3077
3078 r = snprintf(buf, sizeof(buf), "%ld\n",
3079 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
3080 if (r > sizeof(buf))
3081 r = sizeof(buf);
3082 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3083}
3084
3085static ssize_t
3086tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3087 size_t cnt, loff_t *ppos)
3088{
3089 unsigned long *ptr = filp->private_data;
3090 unsigned long val;
3091 int ret;
3092
3093 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3094 if (ret)
3095 return ret;
3096
3097 *ptr = val * 1000;
3098
3099 return cnt;
3100}
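
/*
 * Note (sketch, assuming these handlers back the tracing_max_latency file):
 * values are exchanged with user-space in microseconds but stored in
 * nanoseconds, so
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *
 * clears the recorded maximum and lets the latency tracers start measuring
 * again from zero.
 */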
3101
3102static int tracing_open_pipe(struct inode *inode, struct file *filp)
3103{
3104 long cpu_file = (long) inode->i_private;
3105 struct trace_iterator *iter;
3106 int ret = 0;
3107
3108 if (tracing_disabled)
3109 return -ENODEV;
3110
3111 mutex_lock(&trace_types_lock);
3112
3113 /* create a buffer to store the information to pass to userspace */
3114 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3115 if (!iter) {
3116 ret = -ENOMEM;
3117 goto out;
3118 }
3119
3120 /*
3121 * We make a copy of the current tracer to avoid concurrent
3122 * changes on it while we are reading.
3123 */
3124 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3125 if (!iter->trace) {
3126 ret = -ENOMEM;
3127 goto fail;
3128 }
3129 if (current_trace)
3130 *iter->trace = *current_trace;
3131
3132 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3133 ret = -ENOMEM;
3134 goto fail;
3135 }
3136
3137 /* trace pipe does not show start of buffer */
3138 cpumask_setall(iter->started);
3139
3140 if (trace_flags & TRACE_ITER_LATENCY_FMT)
3141 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3142
3143 iter->cpu_file = cpu_file;
3144 iter->tr = &global_trace;
3145 mutex_init(&iter->mutex);
3146 filp->private_data = iter;
3147
3148 if (iter->trace->pipe_open)
3149 iter->trace->pipe_open(iter);
3150
3151 nonseekable_open(inode, filp);
3152out:
3153 mutex_unlock(&trace_types_lock);
3154 return ret;
3155
3156fail:
3157 kfree(iter->trace);
3158 kfree(iter);
3159 mutex_unlock(&trace_types_lock);
3160 return ret;
3161}
3162
3163static int tracing_release_pipe(struct inode *inode, struct file *file)
3164{
3165 struct trace_iterator *iter = file->private_data;
3166
3167 mutex_lock(&trace_types_lock);
3168
3169 if (iter->trace->pipe_close)
3170 iter->trace->pipe_close(iter);
3171
3172 mutex_unlock(&trace_types_lock);
3173
3174 free_cpumask_var(iter->started);
3175 mutex_destroy(&iter->mutex);
3176 kfree(iter->trace);
3177 kfree(iter);
3178
3179 return 0;
3180}
3181
3182static unsigned int
3183tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3184{
3185 struct trace_iterator *iter = filp->private_data;
3186
3187 if (trace_flags & TRACE_ITER_BLOCK) {
3188 /*
3189 * Always select as readable when in blocking mode
3190 */
3191 return POLLIN | POLLRDNORM;
3192 } else {
3193 if (!trace_empty(iter))
3194 return POLLIN | POLLRDNORM;
3195 poll_wait(filp, &trace_wait, poll_table);
3196 if (!trace_empty(iter))
3197 return POLLIN | POLLRDNORM;
3198
3199 return 0;
3200 }
3201}
3202
3203
3204void default_wait_pipe(struct trace_iterator *iter)
3205{
3206 DEFINE_WAIT(wait);
3207
3208 prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
3209
3210 if (trace_empty(iter))
3211 schedule();
3212
3213 finish_wait(&trace_wait, &wait);
3214}
3215
3216/*
3217 * This is a make-shift waitqueue.
3218 * A tracer might use this callback in some rare cases:
3219 *
3220 *  1) the current tracer might hold the runqueue lock when it wakes up
3221 *     a reader, hence a deadlock (sched, function, and function graph tracers)
3222 *  2) the function tracers trace all functions, and we don't want
3223 *     the overhead of calling wake_up and friends
3224 *     (and of tracing them too)
3225 *
3226 * Anyway, this is really a very primitive wakeup.
3227 */
3228void poll_wait_pipe(struct trace_iterator *iter)
3229{
3230 set_current_state(TASK_INTERRUPTIBLE);
3231 /* sleep for 100 msecs, and try again. */
3232 schedule_timeout(HZ / 10);
3233}
3234
3235/* Must be called with trace_types_lock mutex held. */
3236static int tracing_wait_pipe(struct file *filp)
3237{
3238 struct trace_iterator *iter = filp->private_data;
3239
3240 while (trace_empty(iter)) {
3241
3242 if ((filp->f_flags & O_NONBLOCK)) {
3243 return -EAGAIN;
3244 }
3245
3246 mutex_unlock(&iter->mutex);
3247
3248 iter->trace->wait_pipe(iter);
3249
3250 mutex_lock(&iter->mutex);
3251
3252 if (signal_pending(current))
3253 return -EINTR;
3254
3255		/*
3256		 * We keep blocking while the buffer is empty.  We still block
3257		 * if tracing is disabled but we have never read anything: this
3258		 * allows a user to cat this file and then enable tracing.  But
3259		 * once we have read something, we give an EOF when tracing is
3260		 * disabled again.
3261		 *
3262		 * iter->pos will be 0 if we haven't read anything.
3263		 */
3264 if (!tracer_enabled && iter->pos)
3265 break;
3266 }
3267
3268 return 1;
3269}
3270
3271/*
3272 * Consumer reader.
3273 */
3274static ssize_t
3275tracing_read_pipe(struct file *filp, char __user *ubuf,
3276 size_t cnt, loff_t *ppos)
3277{
3278 struct trace_iterator *iter = filp->private_data;
3279 static struct tracer *old_tracer;
3280 ssize_t sret;
3281
3282 /* return any leftover data */
3283 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3284 if (sret != -EBUSY)
3285 return sret;
3286
3287 trace_seq_init(&iter->seq);
3288
3289 /* copy the tracer to avoid using a global lock all around */
3290 mutex_lock(&trace_types_lock);
3291 if (unlikely(old_tracer != current_trace && current_trace)) {
3292 old_tracer = current_trace;
3293 *iter->trace = *current_trace;
3294 }
3295 mutex_unlock(&trace_types_lock);
3296
3297	/*
3298	 * Avoid more than one consumer on a single file descriptor.
3299	 * This is just a matter of trace coherency; the ring buffer itself
3300	 * is protected.
3301	 */
3302 mutex_lock(&iter->mutex);
3303 if (iter->trace->read) {
3304 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
3305 if (sret)
3306 goto out;
3307 }
3308
3309waitagain:
3310 sret = tracing_wait_pipe(filp);
3311 if (sret <= 0)
3312 goto out;
3313
3314 /* stop when tracing is finished */
3315 if (trace_empty(iter)) {
3316 sret = 0;
3317 goto out;
3318 }
3319
3320 if (cnt >= PAGE_SIZE)
3321 cnt = PAGE_SIZE - 1;
3322
3323 /* reset all but tr, trace, and overruns */
3324 memset(&iter->seq, 0,
3325 sizeof(struct trace_iterator) -
3326 offsetof(struct trace_iterator, seq));
3327 iter->pos = -1;
3328
3329 trace_event_read_lock();
3330 trace_access_lock(iter->cpu_file);
3331 while (trace_find_next_entry_inc(iter) != NULL) {
3332 enum print_line_t ret;
3333 int len = iter->seq.len;
3334
3335 ret = print_trace_line(iter);
3336 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3337 /* don't print partial lines */
3338 iter->seq.len = len;
3339 break;
3340 }
3341 if (ret != TRACE_TYPE_NO_CONSUME)
3342 trace_consume(iter);
3343
3344 if (iter->seq.len >= cnt)
3345 break;
3346
3347		/*
3348		 * Setting the full flag means we reached the trace_seq buffer
3349		 * size and should have left via the partial-output condition above.
3350		 * One of the trace_seq_* functions is not being used properly.
3351		 */
3352 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
3353 iter->ent->type);
3354 }
3355 trace_access_unlock(iter->cpu_file);
3356 trace_event_read_unlock();
3357
3358 /* Now copy what we have to the user */
3359 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3360 if (iter->seq.readpos >= iter->seq.len)
3361 trace_seq_init(&iter->seq);
3362
3363	/*
3364	 * If there was nothing to send to the user despite having consumed
3365	 * trace entries, go back and wait for more entries.
3366	 */
3367 if (sret == -EBUSY)
3368 goto waitagain;
3369
3370out:
3371 mutex_unlock(&iter->mutex);
3372
3373 return sret;
3374}
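
/*
 * Usage sketch: tracing_read_pipe() is the consuming reader behind the
 * "trace_pipe" file.  Unlike "trace", entries are removed from the ring
 * buffer as they are read, and the reader blocks for new entries unless
 * the file was opened with O_NONBLOCK, so a simple
 *
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 *
 * streams events as they arrive.  Concurrent readers of one file
 * descriptor are serialized by iter->mutex, as noted above.
 */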
3375
3376static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
3377 struct pipe_buffer *buf)
3378{
3379 __free_page(buf->page);
3380}
3381
3382static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3383 unsigned int idx)
3384{
3385 __free_page(spd->pages[idx]);
3386}
3387
3388static const struct pipe_buf_operations tracing_pipe_buf_ops = {
3389 .can_merge = 0,
3390 .map = generic_pipe_buf_map,
3391 .unmap = generic_pipe_buf_unmap,
3392 .confirm = generic_pipe_buf_confirm,
3393 .release = tracing_pipe_buf_release,
3394 .steal = generic_pipe_buf_steal,
3395 .get = generic_pipe_buf_get,
3396};
3397
3398static size_t
3399tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
3400{
3401 size_t count;
3402 int ret;
3403
3404 /* Seq buffer is page-sized, exactly what we need. */
3405 for (;;) {
3406 count = iter->seq.len;
3407 ret = print_trace_line(iter);
3408 count = iter->seq.len - count;
3409 if (rem < count) {
3410 rem = 0;
3411 iter->seq.len -= count;
3412 break;
3413 }
3414 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3415 iter->seq.len -= count;
3416 break;
3417 }
3418
3419 if (ret != TRACE_TYPE_NO_CONSUME)
3420 trace_consume(iter);
3421 rem -= count;
3422 if (!trace_find_next_entry_inc(iter)) {
3423 rem = 0;
3424 iter->ent = NULL;
3425 break;
3426 }
3427 }
3428
3429 return rem;
3430}
3431
3432static ssize_t tracing_splice_read_pipe(struct file *filp,
3433 loff_t *ppos,
3434 struct pipe_inode_info *pipe,
3435 size_t len,
3436 unsigned int flags)
3437{
3438 struct page *pages_def[PIPE_DEF_BUFFERS];
3439 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3440 struct trace_iterator *iter = filp->private_data;
3441 struct splice_pipe_desc spd = {
3442 .pages = pages_def,
3443 .partial = partial_def,
3444 .nr_pages = 0, /* This gets updated below. */
3445 .flags = flags,
3446 .ops = &tracing_pipe_buf_ops,
3447 .spd_release = tracing_spd_release_pipe,
3448 };
3449 static struct tracer *old_tracer;
3450 ssize_t ret;
3451 size_t rem;
3452 unsigned int i;
3453
3454 if (splice_grow_spd(pipe, &spd))
3455 return -ENOMEM;
3456
3457 /* copy the tracer to avoid using a global lock all around */
3458 mutex_lock(&trace_types_lock);
3459 if (unlikely(old_tracer != current_trace && current_trace)) {
3460 old_tracer = current_trace;
3461 *iter->trace = *current_trace;
3462 }
3463 mutex_unlock(&trace_types_lock);
3464
3465 mutex_lock(&iter->mutex);
3466
3467 if (iter->trace->splice_read) {
3468 ret = iter->trace->splice_read(iter, filp,
3469 ppos, pipe, len, flags);
3470 if (ret)
3471 goto out_err;
3472 }
3473
3474 ret = tracing_wait_pipe(filp);
3475 if (ret <= 0)
3476 goto out_err;
3477
3478 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3479 ret = -EFAULT;
3480 goto out_err;
3481 }
3482
3483 trace_event_read_lock();
3484 trace_access_lock(iter->cpu_file);
3485
3486 /* Fill as many pages as possible. */
3487 for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
3488 spd.pages[i] = alloc_page(GFP_KERNEL);
3489 if (!spd.pages[i])
3490 break;
3491
3492 rem = tracing_fill_pipe_page(rem, iter);
3493
3494 /* Copy the data into the page, so we can start over. */
3495 ret = trace_seq_to_buffer(&iter->seq,
3496 page_address(spd.pages[i]),
3497 iter->seq.len);
3498 if (ret < 0) {
3499 __free_page(spd.pages[i]);
3500 break;
3501 }
3502 spd.partial[i].offset = 0;
3503 spd.partial[i].len = iter->seq.len;
3504
3505 trace_seq_init(&iter->seq);
3506 }
3507
3508 trace_access_unlock(iter->cpu_file);
3509 trace_event_read_unlock();
3510 mutex_unlock(&iter->mutex);
3511
3512 spd.nr_pages = i;
3513
3514 ret = splice_to_pipe(pipe, &spd);
3515out:
3516 splice_shrink_spd(pipe, &spd);
3517 return ret;
3518
3519out_err:
3520 mutex_unlock(&iter->mutex);
3521 goto out;
3522}
3523
3524static ssize_t
3525tracing_entries_read(struct file *filp, char __user *ubuf,
3526 size_t cnt, loff_t *ppos)
3527{
3528 struct trace_array *tr = filp->private_data;
3529 char buf[96];
3530 int r;
3531
3532 mutex_lock(&trace_types_lock);
3533 if (!ring_buffer_expanded)
3534 r = sprintf(buf, "%lu (expanded: %lu)\n",
3535 tr->entries >> 10,
3536 trace_buf_size >> 10);
3537 else
3538 r = sprintf(buf, "%lu\n", tr->entries >> 10);
3539 mutex_unlock(&trace_types_lock);
3540
3541 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3542}
3543
3544static ssize_t
3545tracing_entries_write(struct file *filp, const char __user *ubuf,
3546 size_t cnt, loff_t *ppos)
3547{
3548 unsigned long val;
3549 int ret;
3550
3551 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3552 if (ret)
3553 return ret;
3554
3555 /* must have at least 1 entry */
3556 if (!val)
3557 return -EINVAL;
3558
3559 /* value is in KB */
3560 val <<= 10;
3561
3562 ret = tracing_resize_ring_buffer(val);
3563 if (ret < 0)
3564 return ret;
3565
3566 *ppos += cnt;
3567
3568 return cnt;
3569}
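
/*
 * Usage sketch for the two handlers above, which back "buffer_size_kb":
 * values are in kilobytes (the write path shifts left by 10 before
 * resizing), and the read path shows "(expanded: N)" while the buffer is
 * still at its boot-time minimum size.  Assuming the usual debugfs mount:
 *
 *	# cat /sys/kernel/debug/tracing/buffer_size_kb
 *	# echo 10240 > /sys/kernel/debug/tracing/buffer_size_kb
 */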
3570
3571static ssize_t
3572tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
3573 size_t cnt, loff_t *ppos)
3574{
3575	/*
3576	 * There is no need to read what the user has written; this function
3577	 * exists just so that "echo" does not report an error.
3578	 */
3579
3580 *ppos += cnt;
3581
3582 return cnt;
3583}
3584
3585static int
3586tracing_free_buffer_release(struct inode *inode, struct file *filp)
3587{
3588 /* disable tracing ? */
3589 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
3590 tracing_off();
3591 /* resize the ring buffer to 0 */
3592 tracing_resize_ring_buffer(0);
3593
3594 return 0;
3595}
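
/*
 * Usage sketch: the "free_buffer" file does its real work on release.
 * Any open/write/close cycle shrinks the ring buffer back down, and if
 * the stop-on-free trace option is set, tracing is switched off first.
 * Assuming the usual debugfs mount, something like
 *
 *	# echo > /sys/kernel/debug/tracing/free_buffer
 *
 * is enough to trigger it.
 */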
3596
3597static int mark_printk(const char *fmt, ...)
3598{
3599 int ret;
3600 va_list args;
3601 va_start(args, fmt);
3602 ret = trace_vprintk(0, fmt, args);
3603 va_end(args);
3604 return ret;
3605}
3606
3607static ssize_t
3608tracing_mark_write(struct file *filp, const char __user *ubuf,
3609 size_t cnt, loff_t *fpos)
3610{
3611 char *buf;
3612 size_t written;
3613
3614 if (tracing_disabled)
3615 return -EINVAL;
3616
3617 if (cnt > TRACE_BUF_SIZE)
3618 cnt = TRACE_BUF_SIZE;
3619
3620 buf = kmalloc(cnt + 2, GFP_KERNEL);
3621 if (buf == NULL)
3622 return -ENOMEM;
3623
3624 if (copy_from_user(buf, ubuf, cnt)) {
3625 kfree(buf);
3626 return -EFAULT;
3627 }
3628 if (buf[cnt-1] != '\n') {
3629 buf[cnt] = '\n';
3630 buf[cnt+1] = '\0';
3631 } else
3632 buf[cnt] = '\0';
3633
3634 written = mark_printk("%s", buf);
3635 kfree(buf);
3636 *fpos += written;
3637
3638 /* don't tell userspace we wrote more - it might confuse them */
3639 if (written > cnt)
3640 written = cnt;
3641
3642 return written;
3643}
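
/*
 * Usage sketch: "trace_marker" lets userspace inject its own annotations
 * into the trace stream via trace_vprintk(); a newline is appended if the
 * write did not end with one.  Assuming the usual debugfs mount:
 *
 *	# echo "starting the benchmark" > /sys/kernel/debug/tracing/trace_marker
 */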
3644
3645static int tracing_clock_show(struct seq_file *m, void *v)
3646{
3647 int i;
3648
3649 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3650 seq_printf(m,
3651 "%s%s%s%s", i ? " " : "",
3652 i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3653 i == trace_clock_id ? "]" : "");
3654 seq_putc(m, '\n');
3655
3656 return 0;
3657}
3658
3659static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3660 size_t cnt, loff_t *fpos)
3661{
3662 char buf[64];
3663 const char *clockstr;
3664 int i;
3665
3666 if (cnt >= sizeof(buf))
3667 return -EINVAL;
3668
3669 if (copy_from_user(&buf, ubuf, cnt))
3670 return -EFAULT;
3671
3672 buf[cnt] = 0;
3673
3674 clockstr = strstrip(buf);
3675
3676 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
3677 if (strcmp(trace_clocks[i].name, clockstr) == 0)
3678 break;
3679 }
3680 if (i == ARRAY_SIZE(trace_clocks))
3681 return -EINVAL;
3682
3683 trace_clock_id = i;
3684
3685 mutex_lock(&trace_types_lock);
3686
3687 ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
3688 if (max_tr.buffer)
3689 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
3690
3691 mutex_unlock(&trace_types_lock);
3692
3693 *fpos += cnt;
3694
3695 return cnt;
3696}
3697
3698static int tracing_clock_open(struct inode *inode, struct file *file)
3699{
3700 if (tracing_disabled)
3701 return -ENODEV;
3702 return single_open(file, tracing_clock_show, NULL);
3703}
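
/*
 * Usage sketch: "trace_clock" lists the available ring buffer clocks with
 * the current one in brackets, and writing a name switches both the global
 * and the max_tr buffers to that clock.  The exact list depends on the
 * kernel, but it looks roughly like
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter
 *	# echo global > /sys/kernel/debug/tracing/trace_clock
 */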
3704
3705static const struct file_operations tracing_max_lat_fops = {
3706 .open = tracing_open_generic,
3707 .read = tracing_max_lat_read,
3708 .write = tracing_max_lat_write,
3709 .llseek = generic_file_llseek,
3710};
3711
3712static const struct file_operations tracing_ctrl_fops = {
3713 .open = tracing_open_generic,
3714 .read = tracing_ctrl_read,
3715 .write = tracing_ctrl_write,
3716 .llseek = generic_file_llseek,
3717};
3718
3719static const struct file_operations set_tracer_fops = {
3720 .open = tracing_open_generic,
3721 .read = tracing_set_trace_read,
3722 .write = tracing_set_trace_write,
3723 .llseek = generic_file_llseek,
3724};
3725
3726static const struct file_operations tracing_pipe_fops = {
3727 .open = tracing_open_pipe,
3728 .poll = tracing_poll_pipe,
3729 .read = tracing_read_pipe,
3730 .splice_read = tracing_splice_read_pipe,
3731 .release = tracing_release_pipe,
3732 .llseek = no_llseek,
3733};
3734
3735static const struct file_operations tracing_entries_fops = {
3736 .open = tracing_open_generic,
3737 .read = tracing_entries_read,
3738 .write = tracing_entries_write,
3739 .llseek = generic_file_llseek,
3740};
3741
3742static const struct file_operations tracing_free_buffer_fops = {
3743 .write = tracing_free_buffer_write,
3744 .release = tracing_free_buffer_release,
3745};
3746
3747static const struct file_operations tracing_mark_fops = {
3748 .open = tracing_open_generic,
3749 .write = tracing_mark_write,
3750 .llseek = generic_file_llseek,
3751};
3752
3753static const struct file_operations trace_clock_fops = {
3754 .open = tracing_clock_open,
3755 .read = seq_read,
3756 .llseek = seq_lseek,
3757 .release = single_release,
3758 .write = tracing_clock_write,
3759};
3760
3761struct ftrace_buffer_info {
3762 struct trace_array *tr;
3763 void *spare;
3764 int cpu;
3765 unsigned int read;
3766};
3767
3768static int tracing_buffers_open(struct inode *inode, struct file *filp)
3769{
3770 int cpu = (int)(long)inode->i_private;
3771 struct ftrace_buffer_info *info;
3772
3773 if (tracing_disabled)
3774 return -ENODEV;
3775
3776 info = kzalloc(sizeof(*info), GFP_KERNEL);
3777 if (!info)
3778 return -ENOMEM;
3779
3780 info->tr = &global_trace;
3781 info->cpu = cpu;
3782 info->spare = NULL;
3783 /* Force reading ring buffer for first read */
3784 info->read = (unsigned int)-1;
3785
3786 filp->private_data = info;
3787
3788 return nonseekable_open(inode, filp);
3789}
3790
3791static ssize_t
3792tracing_buffers_read(struct file *filp, char __user *ubuf,
3793 size_t count, loff_t *ppos)
3794{
3795 struct ftrace_buffer_info *info = filp->private_data;
3796 ssize_t ret;
3797 size_t size;
3798
3799 if (!count)
3800 return 0;
3801
3802 if (!info->spare)
3803 info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
3804 if (!info->spare)
3805 return -ENOMEM;
3806
3807	/* Is there data left over from a previous read? */
3808 if (info->read < PAGE_SIZE)
3809 goto read;
3810
3811 info->read = 0;
3812
3813 trace_access_lock(info->cpu);
3814 ret = ring_buffer_read_page(info->tr->buffer,
3815 &info->spare,
3816 count,
3817 info->cpu, 0);
3818 trace_access_unlock(info->cpu);
3819 if (ret < 0)
3820 return 0;
3821
3822read:
3823 size = PAGE_SIZE - info->read;
3824 if (size > count)
3825 size = count;
3826
3827 ret = copy_to_user(ubuf, info->spare + info->read, size);
3828 if (ret == size)
3829 return -EFAULT;
3830 size -= ret;
3831
3832 *ppos += size;
3833 info->read += size;
3834
3835 return size;
3836}
3837
3838static int tracing_buffers_release(struct inode *inode, struct file *file)
3839{
3840 struct ftrace_buffer_info *info = file->private_data;
3841
3842 if (info->spare)
3843 ring_buffer_free_read_page(info->tr->buffer, info->spare);
3844 kfree(info);
3845
3846 return 0;
3847}
3848
3849struct buffer_ref {
3850 struct ring_buffer *buffer;
3851 void *page;
3852 int ref;
3853};
3854
3855static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
3856 struct pipe_buffer *buf)
3857{
3858 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3859
3860 if (--ref->ref)
3861 return;
3862
3863 ring_buffer_free_read_page(ref->buffer, ref->page);
3864 kfree(ref);
3865 buf->private = 0;
3866}
3867
3868static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
3869 struct pipe_buffer *buf)
3870{
3871 return 1;
3872}
3873
3874static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
3875 struct pipe_buffer *buf)
3876{
3877 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
3878
3879 ref->ref++;
3880}
3881
3882/* Pipe buffer operations for ring buffer pages handed to a pipe. */
3883static const struct pipe_buf_operations buffer_pipe_buf_ops = {
3884 .can_merge = 0,
3885 .map = generic_pipe_buf_map,
3886 .unmap = generic_pipe_buf_unmap,
3887 .confirm = generic_pipe_buf_confirm,
3888 .release = buffer_pipe_buf_release,
3889 .steal = buffer_pipe_buf_steal,
3890 .get = buffer_pipe_buf_get,
3891};
3892
3893/*
3894 * Callback from splice_to_pipe(); used to release any remaining pages
3895 * at the end of the spd if we errored out while filling the pipe.
3896 */
3897static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
3898{
3899 struct buffer_ref *ref =
3900 (struct buffer_ref *)spd->partial[i].private;
3901
3902 if (--ref->ref)
3903 return;
3904
3905 ring_buffer_free_read_page(ref->buffer, ref->page);
3906 kfree(ref);
3907 spd->partial[i].private = 0;
3908}
3909
3910static ssize_t
3911tracing_buffers_splice_read(struct file *file, loff_t *ppos,
3912 struct pipe_inode_info *pipe, size_t len,
3913 unsigned int flags)
3914{
3915 struct ftrace_buffer_info *info = file->private_data;
3916 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3917 struct page *pages_def[PIPE_DEF_BUFFERS];
3918 struct splice_pipe_desc spd = {
3919 .pages = pages_def,
3920 .partial = partial_def,
3921 .flags = flags,
3922 .ops = &buffer_pipe_buf_ops,
3923 .spd_release = buffer_spd_release,
3924 };
3925 struct buffer_ref *ref;
3926 int entries, size, i;
3927 size_t ret;
3928
3929 if (splice_grow_spd(pipe, &spd))
3930 return -ENOMEM;
3931
3932 if (*ppos & (PAGE_SIZE - 1)) {
3933 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
3934 ret = -EINVAL;
3935 goto out;
3936 }
3937
3938 if (len & (PAGE_SIZE - 1)) {
3939 WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
3940 if (len < PAGE_SIZE) {
3941 ret = -EINVAL;
3942 goto out;
3943 }
3944 len &= PAGE_MASK;
3945 }
3946
3947 trace_access_lock(info->cpu);
3948 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
3949
3950 for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
3951 struct page *page;
3952 int r;
3953
3954 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
3955 if (!ref)
3956 break;
3957
3958 ref->ref = 1;
3959 ref->buffer = info->tr->buffer;
3960 ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
3961 if (!ref->page) {
3962 kfree(ref);
3963 break;
3964 }
3965
3966 r = ring_buffer_read_page(ref->buffer, &ref->page,
3967 len, info->cpu, 1);
3968 if (r < 0) {
3969 ring_buffer_free_read_page(ref->buffer, ref->page);
3970 kfree(ref);
3971 break;
3972 }
3973
3974		/*
3975		 * Zero out any leftover data; this page is going to
3976		 * user land.
3977		 */
3978 size = ring_buffer_page_len(ref->page);
3979 if (size < PAGE_SIZE)
3980 memset(ref->page + size, 0, PAGE_SIZE - size);
3981
3982 page = virt_to_page(ref->page);
3983
3984 spd.pages[i] = page;
3985 spd.partial[i].len = PAGE_SIZE;
3986 spd.partial[i].offset = 0;
3987 spd.partial[i].private = (unsigned long)ref;
3988 spd.nr_pages++;
3989 *ppos += PAGE_SIZE;
3990
3991 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
3992 }
3993
3994 trace_access_unlock(info->cpu);
3995 spd.nr_pages = i;
3996
3997 /* did we read anything? */
3998 if (!spd.nr_pages) {
3999 if (flags & SPLICE_F_NONBLOCK)
4000 ret = -EAGAIN;
4001 else
4002 ret = 0;
4003 /* TODO: block */
4004 goto out;
4005 }
4006
4007 ret = splice_to_pipe(pipe, &spd);
4008 splice_shrink_spd(pipe, &spd);
4009out:
4010 return ret;
4011}
4012
4013static const struct file_operations tracing_buffers_fops = {
4014 .open = tracing_buffers_open,
4015 .read = tracing_buffers_read,
4016 .release = tracing_buffers_release,
4017 .splice_read = tracing_buffers_splice_read,
4018 .llseek = no_llseek,
4019};
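
/*
 * These fops back the per-CPU "trace_pipe_raw" files.  In short: read()
 * copies ring buffer pages out through a single spare page, while
 * splice_read() hands whole ring buffer pages to the pipe without copying,
 * which is why the offset and length must be page aligned there.
 */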
4020
4021static ssize_t
4022tracing_stats_read(struct file *filp, char __user *ubuf,
4023 size_t count, loff_t *ppos)
4024{
4025 unsigned long cpu = (unsigned long)filp->private_data;
4026 struct trace_array *tr = &global_trace;
4027 struct trace_seq *s;
4028 unsigned long cnt;
4029
4030 s = kmalloc(sizeof(*s), GFP_KERNEL);
4031 if (!s)
4032 return -ENOMEM;
4033
4034 trace_seq_init(s);
4035
4036 cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
4037 trace_seq_printf(s, "entries: %ld\n", cnt);
4038
4039 cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
4040 trace_seq_printf(s, "overrun: %ld\n", cnt);
4041
4042 cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
4043 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
4044
4045 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
4046
4047 kfree(s);
4048
4049 return count;
4050}
4051
4052static const struct file_operations tracing_stats_fops = {
4053 .open = tracing_open_generic,
4054 .read = tracing_stats_read,
4055 .llseek = generic_file_llseek,
4056};
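
/*
 * For reference, tracing_stats_read() emits one line per counter for a
 * single CPU, so each per_cpu/cpuN/stats file looks roughly like
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *
 * with the numbers coming straight from the ring buffer accessors above
 * (the values here are only illustrative).
 */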
4057
4058#ifdef CONFIG_DYNAMIC_FTRACE
4059
4060int __weak ftrace_arch_read_dyn_info(char *buf, int size)
4061{
4062 return 0;
4063}
4064
4065static ssize_t
4066tracing_read_dyn_info(struct file *filp, char __user *ubuf,
4067 size_t cnt, loff_t *ppos)
4068{
4069 static char ftrace_dyn_info_buffer[1024];
4070 static DEFINE_MUTEX(dyn_info_mutex);
4071 unsigned long *p = filp->private_data;
4072 char *buf = ftrace_dyn_info_buffer;
4073 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
4074 int r;
4075
4076 mutex_lock(&dyn_info_mutex);
4077 r = sprintf(buf, "%ld ", *p);
4078
4079 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
4080 buf[r++] = '\n';
4081
4082 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4083
4084 mutex_unlock(&dyn_info_mutex);
4085
4086 return r;
4087}
4088
4089static const struct file_operations tracing_dyn_info_fops = {
4090 .open = tracing_open_generic,
4091 .read = tracing_read_dyn_info,
4092 .llseek = generic_file_llseek,
4093};
4094#endif
4095
4096static struct dentry *d_tracer;
4097
4098struct dentry *tracing_init_dentry(void)
4099{
4100 static int once;
4101
4102 if (d_tracer)
4103 return d_tracer;
4104
4105 if (!debugfs_initialized())
4106 return NULL;
4107
4108 d_tracer = debugfs_create_dir("tracing", NULL);
4109
4110 if (!d_tracer && !once) {
4111 once = 1;
4112 pr_warning("Could not create debugfs directory 'tracing'\n");
4113 return NULL;
4114 }
4115
4116 return d_tracer;
4117}
4118
4119static struct dentry *d_percpu;
4120
4121struct dentry *tracing_dentry_percpu(void)
4122{
4123 static int once;
4124 struct dentry *d_tracer;
4125
4126 if (d_percpu)
4127 return d_percpu;
4128
4129 d_tracer = tracing_init_dentry();
4130
4131 if (!d_tracer)
4132 return NULL;
4133
4134 d_percpu = debugfs_create_dir("per_cpu", d_tracer);
4135
4136 if (!d_percpu && !once) {
4137 once = 1;
4138 pr_warning("Could not create debugfs directory 'per_cpu'\n");
4139 return NULL;
4140 }
4141
4142 return d_percpu;
4143}
4144
4145static void tracing_init_debugfs_percpu(long cpu)
4146{
4147 struct dentry *d_percpu = tracing_dentry_percpu();
4148 struct dentry *d_cpu;
4149 char cpu_dir[30]; /* 30 characters should be more than enough */
4150
4151 snprintf(cpu_dir, 30, "cpu%ld", cpu);
4152 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
4153 if (!d_cpu) {
4154 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
4155 return;
4156 }
4157
4158 /* per cpu trace_pipe */
4159 trace_create_file("trace_pipe", 0444, d_cpu,
4160 (void *) cpu, &tracing_pipe_fops);
4161
4162 /* per cpu trace */
4163 trace_create_file("trace", 0644, d_cpu,
4164 (void *) cpu, &tracing_fops);
4165
4166 trace_create_file("trace_pipe_raw", 0444, d_cpu,
4167 (void *) cpu, &tracing_buffers_fops);
4168
4169 trace_create_file("stats", 0444, d_cpu,
4170 (void *) cpu, &tracing_stats_fops);
4171}
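
/*
 * After the function above has run for every tracing CPU, the debugfs tree
 * gains one directory per CPU, e.g. for CPU 0:
 *
 *	tracing/per_cpu/cpu0/trace_pipe
 *	tracing/per_cpu/cpu0/trace
 *	tracing/per_cpu/cpu0/trace_pipe_raw
 *	tracing/per_cpu/cpu0/stats
 */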
4172
4173#ifdef CONFIG_FTRACE_SELFTEST
4174/* Let selftest have access to static functions in this file */
4175#include "trace_selftest.c"
4176#endif
4177
4178struct trace_option_dentry {
4179 struct tracer_opt *opt;
4180 struct tracer_flags *flags;
4181 struct dentry *entry;
4182};
4183
4184static ssize_t
4185trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
4186 loff_t *ppos)
4187{
4188 struct trace_option_dentry *topt = filp->private_data;
4189 char *buf;
4190
4191 if (topt->flags->val & topt->opt->bit)
4192 buf = "1\n";
4193 else
4194 buf = "0\n";
4195
4196 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4197}
4198
4199static ssize_t
4200trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
4201 loff_t *ppos)
4202{
4203 struct trace_option_dentry *topt = filp->private_data;
4204 unsigned long val;
4205 int ret;
4206
4207 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4208 if (ret)
4209 return ret;
4210
4211 if (val != 0 && val != 1)
4212 return -EINVAL;
4213
4214 if (!!(topt->flags->val & topt->opt->bit) != val) {
4215 mutex_lock(&trace_types_lock);
4216 ret = __set_tracer_option(current_trace, topt->flags,
4217 topt->opt, !val);
4218 mutex_unlock(&trace_types_lock);
4219 if (ret)
4220 return ret;
4221 }
4222
4223 *ppos += cnt;
4224
4225 return cnt;
4226}
4227
4228
4229static const struct file_operations trace_options_fops = {
4230 .open = tracing_open_generic,
4231 .read = trace_options_read,
4232 .write = trace_options_write,
4233 .llseek = generic_file_llseek,
4234};
4235
4236static ssize_t
4237trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
4238 loff_t *ppos)
4239{
4240 long index = (long)filp->private_data;
4241 char *buf;
4242
4243 if (trace_flags & (1 << index))
4244 buf = "1\n";
4245 else
4246 buf = "0\n";
4247
4248 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4249}
4250
4251static ssize_t
4252trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
4253 loff_t *ppos)
4254{
4255 long index = (long)filp->private_data;
4256 unsigned long val;
4257 int ret;
4258
4259 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4260 if (ret)
4261 return ret;
4262
4263 if (val != 0 && val != 1)
4264 return -EINVAL;
4265 set_tracer_flags(1 << index, val);
4266
4267 *ppos += cnt;
4268
4269 return cnt;
4270}
4271
4272static const struct file_operations trace_options_core_fops = {
4273 .open = tracing_open_generic,
4274 .read = trace_options_core_read,
4275 .write = trace_options_core_write,
4276 .llseek = generic_file_llseek,
4277};
4278
4279struct dentry *trace_create_file(const char *name,
4280 mode_t mode,
4281 struct dentry *parent,
4282 void *data,
4283 const struct file_operations *fops)
4284{
4285 struct dentry *ret;
4286
4287 ret = debugfs_create_file(name, mode, parent, data, fops);
4288 if (!ret)
4289 pr_warning("Could not create debugfs '%s' entry\n", name);
4290
4291 return ret;
4292}
4293
4294
4295static struct dentry *trace_options_init_dentry(void)
4296{
4297 struct dentry *d_tracer;
4298 static struct dentry *t_options;
4299
4300 if (t_options)
4301 return t_options;
4302
4303 d_tracer = tracing_init_dentry();
4304 if (!d_tracer)
4305 return NULL;
4306
4307 t_options = debugfs_create_dir("options", d_tracer);
4308 if (!t_options) {
4309 pr_warning("Could not create debugfs directory 'options'\n");
4310 return NULL;
4311 }
4312
4313 return t_options;
4314}
4315
4316static void
4317create_trace_option_file(struct trace_option_dentry *topt,
4318 struct tracer_flags *flags,
4319 struct tracer_opt *opt)
4320{
4321 struct dentry *t_options;
4322
4323 t_options = trace_options_init_dentry();
4324 if (!t_options)
4325 return;
4326
4327 topt->flags = flags;
4328 topt->opt = opt;
4329
4330 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
4331 &trace_options_fops);
4332
4333}
4334
4335static struct trace_option_dentry *
4336create_trace_option_files(struct tracer *tracer)
4337{
4338 struct trace_option_dentry *topts;
4339 struct tracer_flags *flags;
4340 struct tracer_opt *opts;
4341 int cnt;
4342
4343 if (!tracer)
4344 return NULL;
4345
4346 flags = tracer->flags;
4347
4348 if (!flags || !flags->opts)
4349 return NULL;
4350
4351 opts = flags->opts;
4352
4353 for (cnt = 0; opts[cnt].name; cnt++)
4354 ;
4355
4356 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
4357 if (!topts)
4358 return NULL;
4359
4360 for (cnt = 0; opts[cnt].name; cnt++)
4361 create_trace_option_file(&topts[cnt], flags,
4362 &opts[cnt]);
4363
4364 return topts;
4365}
4366
4367static void
4368destroy_trace_option_files(struct trace_option_dentry *topts)
4369{
4370 int cnt;
4371
4372 if (!topts)
4373 return;
4374
4375 for (cnt = 0; topts[cnt].opt; cnt++) {
4376 if (topts[cnt].entry)
4377 debugfs_remove(topts[cnt].entry);
4378 }
4379
4380 kfree(topts);
4381}
4382
4383static struct dentry *
4384create_trace_option_core_file(const char *option, long index)
4385{
4386 struct dentry *t_options;
4387
4388 t_options = trace_options_init_dentry();
4389 if (!t_options)
4390 return NULL;
4391
4392 return trace_create_file(option, 0644, t_options, (void *)index,
4393 &trace_options_core_fops);
4394}
4395
4396static __init void create_trace_options_dir(void)
4397{
4398 struct dentry *t_options;
4399 int i;
4400
4401 t_options = trace_options_init_dentry();
4402 if (!t_options)
4403 return;
4404
4405 for (i = 0; trace_options[i]; i++)
4406 create_trace_option_core_file(trace_options[i], i);
4407}
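
/*
 * The net result of the helpers above is an "options" directory with one
 * boolean file per core trace flag (plus one per tracer-specific flag
 * while that tracer is current).  Each file accepts only 0 or 1, e.g.
 *
 *	# echo 1 > /sys/kernel/debug/tracing/options/<flag>
 *
 * where <flag> is one of the names in the trace_options[] array.
 */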
4408
4409static __init int tracer_init_debugfs(void)
4410{
4411 struct dentry *d_tracer;
4412 int cpu;
4413
4414 trace_access_lock_init();
4415
4416 d_tracer = tracing_init_dentry();
4417
4418 trace_create_file("tracing_enabled", 0644, d_tracer,
4419 &global_trace, &tracing_ctrl_fops);
4420
4421 trace_create_file("trace_options", 0644, d_tracer,
4422 NULL, &tracing_iter_fops);
4423
4424 trace_create_file("tracing_cpumask", 0644, d_tracer,
4425 NULL, &tracing_cpumask_fops);
4426
4427 trace_create_file("trace", 0644, d_tracer,
4428 (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
4429
4430 trace_create_file("available_tracers", 0444, d_tracer,
4431 &global_trace, &show_traces_fops);
4432
4433 trace_create_file("current_tracer", 0644, d_tracer,
4434 &global_trace, &set_tracer_fops);
4435
4436#ifdef CONFIG_TRACER_MAX_TRACE
4437 trace_create_file("tracing_max_latency", 0644, d_tracer,
4438 &tracing_max_latency, &tracing_max_lat_fops);
4439#endif
4440
4441 trace_create_file("tracing_thresh", 0644, d_tracer,
4442 &tracing_thresh, &tracing_max_lat_fops);
4443
4444 trace_create_file("README", 0444, d_tracer,
4445 NULL, &tracing_readme_fops);
4446
4447 trace_create_file("trace_pipe", 0444, d_tracer,
4448 (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
4449
4450 trace_create_file("buffer_size_kb", 0644, d_tracer,
4451 &global_trace, &tracing_entries_fops);
4452
4453 trace_create_file("free_buffer", 0644, d_tracer,
4454 &global_trace, &tracing_free_buffer_fops);
4455
4456 trace_create_file("trace_marker", 0220, d_tracer,
4457 NULL, &tracing_mark_fops);
4458
4459 trace_create_file("saved_cmdlines", 0444, d_tracer,
4460 NULL, &tracing_saved_cmdlines_fops);
4461
4462 trace_create_file("trace_clock", 0644, d_tracer, NULL,
4463 &trace_clock_fops);
4464
4465#ifdef CONFIG_DYNAMIC_FTRACE
4466 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4467 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
4468#endif
4469
4470 create_trace_options_dir();
4471
4472 for_each_tracing_cpu(cpu)
4473 tracing_init_debugfs_percpu(cpu);
4474
4475 return 0;
4476}
4477
4478static int trace_panic_handler(struct notifier_block *this,
4479 unsigned long event, void *unused)
4480{
4481 if (ftrace_dump_on_oops)
4482 ftrace_dump(ftrace_dump_on_oops);
4483 return NOTIFY_OK;
4484}
4485
4486static struct notifier_block trace_panic_notifier = {
4487 .notifier_call = trace_panic_handler,
4488 .next = NULL,
4489 .priority = 150 /* priority: INT_MAX >= x >= 0 */
4490};
4491
4492static int trace_die_handler(struct notifier_block *self,
4493 unsigned long val,
4494 void *data)
4495{
4496 switch (val) {
4497 case DIE_OOPS:
4498 if (ftrace_dump_on_oops)
4499 ftrace_dump(ftrace_dump_on_oops);
4500 break;
4501 default:
4502 break;
4503 }
4504 return NOTIFY_OK;
4505}
4506
4507static struct notifier_block trace_die_notifier = {
4508 .notifier_call = trace_die_handler,
4509 .priority = 200
4510};
4511
4512/*
4513 * printk is set to a max of 1024, but we really don't need it that big.
4514 * Nothing should be printing 1000 characters anyway.
4515 */
4516#define TRACE_MAX_PRINT 1000
4517
4518/*
4519 * Define KERN_TRACE here so that we have one place to modify
4520 * it if we decide to change what log level the ftrace dump
4521 * should be at.
4522 */
4523#define KERN_TRACE KERN_EMERG
4524
4525void
4526trace_printk_seq(struct trace_seq *s)
4527{
4528 /* Probably should print a warning here. */
4529 if (s->len >= 1000)
4530 s->len = 1000;
4531
4532	/* Should be zero-terminated already, but we are paranoid. */
4533 s->buffer[s->len] = 0;
4534
4535 printk(KERN_TRACE "%s", s->buffer);
4536
4537 trace_seq_init(s);
4538}
4539
4540void trace_init_global_iter(struct trace_iterator *iter)
4541{
4542 iter->tr = &global_trace;
4543 iter->trace = current_trace;
4544 iter->cpu_file = TRACE_PIPE_ALL_CPU;
4545}
4546
4547static void
4548__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
4549{
4550 static arch_spinlock_t ftrace_dump_lock =
4551 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
4552 /* use static because iter can be a bit big for the stack */
4553 static struct trace_iterator iter;
4554 unsigned int old_userobj;
4555 static int dump_ran;
4556 unsigned long flags;
4557 int cnt = 0, cpu;
4558
4559 /* only one dump */
4560 local_irq_save(flags);
4561 arch_spin_lock(&ftrace_dump_lock);
4562 if (dump_ran)
4563 goto out;
4564
4565 dump_ran = 1;
4566
4567 tracing_off();
4568
4569 if (disable_tracing)
4570 ftrace_kill();
4571
4572 trace_init_global_iter(&iter);
4573
4574 for_each_tracing_cpu(cpu) {
4575 atomic_inc(&iter.tr->data[cpu]->disabled);
4576 }
4577
4578 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
4579
4580 /* don't look at user memory in panic mode */
4581 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
4582
4583 /* Simulate the iterator */
4584 iter.tr = &global_trace;
4585 iter.trace = current_trace;
4586
4587 switch (oops_dump_mode) {
4588 case DUMP_ALL:
4589 iter.cpu_file = TRACE_PIPE_ALL_CPU;
4590 break;
4591 case DUMP_ORIG:
4592 iter.cpu_file = raw_smp_processor_id();
4593 break;
4594 case DUMP_NONE:
4595 goto out_enable;
4596 default:
4597 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
4598 iter.cpu_file = TRACE_PIPE_ALL_CPU;
4599 }
4600
4601 printk(KERN_TRACE "Dumping ftrace buffer:\n");
4602
4603	/*
4604	 * We need to stop all tracing on all CPUs to read the
4605	 * next buffer.  This is a bit expensive, but it is not
4606	 * done often.  We fill in everything we can read, and
4607	 * then release the locks again.
4608	 */
4609
4610 while (!trace_empty(&iter)) {
4611
4612 if (!cnt)
4613 printk(KERN_TRACE "---------------------------------\n");
4614
4615 cnt++;
4616
4617 /* reset all but tr, trace, and overruns */
4618 memset(&iter.seq, 0,
4619 sizeof(struct trace_iterator) -
4620 offsetof(struct trace_iterator, seq));
4621 iter.iter_flags |= TRACE_FILE_LAT_FMT;
4622 iter.pos = -1;
4623
4624 if (trace_find_next_entry_inc(&iter) != NULL) {
4625 int ret;
4626
4627 ret = print_trace_line(&iter);
4628 if (ret != TRACE_TYPE_NO_CONSUME)
4629 trace_consume(&iter);
4630 }
4631
4632 trace_printk_seq(&iter.seq);
4633 }
4634
4635 if (!cnt)
4636 printk(KERN_TRACE " (ftrace buffer empty)\n");
4637 else
4638 printk(KERN_TRACE "---------------------------------\n");
4639
4640 out_enable:
4641 /* Re-enable tracing if requested */
4642 if (!disable_tracing) {
4643 trace_flags |= old_userobj;
4644
4645 for_each_tracing_cpu(cpu) {
4646 atomic_dec(&iter.tr->data[cpu]->disabled);
4647 }
4648 tracing_on();
4649 }
4650
4651 out:
4652 arch_spin_unlock(&ftrace_dump_lock);
4653 local_irq_restore(flags);
4654}
4655
4656/* By default: disable tracing after the dump */
4657void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
4658{
4659 __ftrace_dump(true, oops_dump_mode);
4660}
4661
4662__init static int tracer_alloc_buffers(void)
4663{
4664 int ring_buf_size;
4665 enum ring_buffer_flags rb_flags;
4666 int i;
4667 int ret = -ENOMEM;
4668
4669
4670 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
4671 goto out;
4672
4673 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
4674 goto out_free_buffer_mask;
4675
4676	/* To save memory, keep the ring buffer at its minimum size until it is expanded */
4677 if (ring_buffer_expanded)
4678 ring_buf_size = trace_buf_size;
4679 else
4680 ring_buf_size = 1;
4681
4682 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
4683
4684 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
4685 cpumask_copy(tracing_cpumask, cpu_all_mask);
4686
4687	/* TODO: make the number of buffers hot pluggable with CPUs */
4688 global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
4689 if (!global_trace.buffer) {
4690 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
4691 WARN_ON(1);
4692 goto out_free_cpumask;
4693 }
4694 global_trace.entries = ring_buffer_size(global_trace.buffer);
4695
4696
4697#ifdef CONFIG_TRACER_MAX_TRACE
4698 max_tr.buffer = ring_buffer_alloc(1, rb_flags);
4699 if (!max_tr.buffer) {
4700 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
4701 WARN_ON(1);
4702 ring_buffer_free(global_trace.buffer);
4703 goto out_free_cpumask;
4704 }
4705 max_tr.entries = 1;
4706#endif
4707
4708	/* Point the per-CPU trace data at the static per-CPU structures */
4709 for_each_tracing_cpu(i) {
4710 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
4711 max_tr.data[i] = &per_cpu(max_tr_data, i);
4712 }
4713
4714 trace_init_cmdlines();
4715
4716 register_tracer(&nop_trace);
4717 current_trace = &nop_trace;
4718 /* All seems OK, enable tracing */
4719 tracing_disabled = 0;
4720
4721 atomic_notifier_chain_register(&panic_notifier_list,
4722 &trace_panic_notifier);
4723
4724 register_die_notifier(&trace_die_notifier);
4725
4726 return 0;
4727
4728out_free_cpumask:
4729 free_cpumask_var(tracing_cpumask);
4730out_free_buffer_mask:
4731 free_cpumask_var(tracing_buffer_mask);
4732out:
4733 return ret;
4734}
4735
4736__init static int clear_boot_tracer(void)
4737{
4738	/*
4739	 * The default bootup tracer string lives in an init section.
4740	 * This function is called at late_initcall time; if the boot
4741	 * tracer was never found and registered, clear the pointer to
4742	 * prevent a later registration from accessing the buffer that
4743	 * is about to be freed.
4744	 */
4745 if (!default_bootup_tracer)
4746 return 0;
4747
4748 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
4749 default_bootup_tracer);
4750 default_bootup_tracer = NULL;
4751
4752 return 0;
4753}
4754
4755early_initcall(tracer_alloc_buffers);
4756fs_initcall(tracer_init_debugfs);
4757late_initcall(clear_boot_tracer);