1/*
2 * ring buffer based function tracer
3 *
4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 Nadia Yvette Chambers
13 */
14#include <linux/ring_buffer.h>
15#include <generated/utsrelease.h>
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
20#include <linux/notifier.h>
21#include <linux/irqflags.h>
22#include <linux/debugfs.h>
23#include <linux/tracefs.h>
24#include <linux/pagemap.h>
25#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
28#include <linux/kprobes.h>
29#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
32#include <linux/splice.h>
33#include <linux/kdebug.h>
34#include <linux/string.h>
35#include <linux/mount.h>
36#include <linux/rwsem.h>
37#include <linux/slab.h>
38#include <linux/ctype.h>
39#include <linux/init.h>
40#include <linux/poll.h>
41#include <linux/nmi.h>
42#include <linux/fs.h>
43#include <linux/sched/rt.h>
44
45#include "trace.h"
46#include "trace_output.h"
47
48/*
49 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
51 */
52bool ring_buffer_expanded;
53
54/*
55 * We need to change this state when a selftest is running.
56 * A selftest will peek into the ring buffer to count the
57 * entries inserted during the selftest, but concurrent
58 * insertions into the ring buffer (such as trace_printk) could occur
59 * at the same time, giving false positive or negative results.
60 */
61static bool __read_mostly tracing_selftest_running;
62
63/*
64 * If a tracer is running, we do not want to run SELFTEST.
65 */
66bool __read_mostly tracing_selftest_disabled;
67
68/* Pipe tracepoints to printk */
69struct trace_iterator *tracepoint_print_iter;
70int tracepoint_printk;
71
72/* For tracers that don't implement custom flags */
73static struct tracer_opt dummy_tracer_opt[] = {
74 { }
75};
76
77static int
78dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
79{
80 return 0;
81}
82
83/*
84 * To prevent the comm cache from being overwritten when no
85 * tracing is active, only save the comm when a trace event
86 * occurred.
87 */
88static DEFINE_PER_CPU(bool, trace_cmdline_save);
89
90/*
91 * Kill all tracing for good (never come back).
92 * It is initialized to 1 but will turn to zero if the initialization
93 * of the tracer is successful. But that is the only place that sets
94 * this back to zero.
95 */
96static int tracing_disabled = 1;
97
98cpumask_var_t __read_mostly tracing_buffer_mask;
99
100/*
101 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
102 *
103 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
104 * is set, then ftrace_dump is called. This will output the contents
105 * of the ftrace buffers to the console. This is very useful for
106 * capturing traces that lead to crashes and outputting them to a
107 * serial console.
108 *
109 * It defaults to off, but you can enable it either by specifying
110 * "ftrace_dump_on_oops" on the kernel command line, or by setting
111 * /proc/sys/kernel/ftrace_dump_on_oops
112 * Set it to 1 to dump the buffers of all CPUs
113 * Set it to 2 to dump only the buffer of the CPU that triggered the oops
114 */
115
116enum ftrace_dump_mode ftrace_dump_on_oops;
117
118/* When set, tracing will stop when a WARN*() is hit */
119int __disable_trace_on_warning;
120
121#ifdef CONFIG_TRACE_ENUM_MAP_FILE
122/* Map of enums to their values, for "enum_map" file */
123struct trace_enum_map_head {
124 struct module *mod;
125 unsigned long length;
126};
127
128union trace_enum_map_item;
129
130struct trace_enum_map_tail {
131 /*
132 * "end" is first and points to NULL as it must be different
133 * than "mod" or "enum_string"
134 */
135 union trace_enum_map_item *next;
136 const char *end; /* points to NULL */
137};
138
139static DEFINE_MUTEX(trace_enum_mutex);
140
141/*
142 * The trace_enum_maps are saved in an array with two extra elements,
143 * one at the beginning, and one at the end. The beginning item contains
144 * the count of the saved maps (head.length), and the module they
145 * belong to if not built in (head.mod). The ending item contains a
146 * pointer to the next array of saved enum_map items.
147 */
148union trace_enum_map_item {
149 struct trace_enum_map map;
150 struct trace_enum_map_head head;
151 struct trace_enum_map_tail tail;
152};
153
154static union trace_enum_map_item *trace_enum_maps;
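/*
 * Illustrative sketch of walking one block with the layout described
 * above.  The helper and its callback are hypothetical and not part of
 * the tracing code; they only show how head, maps and tail line up.
 */
static inline void
example_walk_enum_block(union trace_enum_map_item *block,
			void (*cb)(const struct trace_enum_map *map))
{
	union trace_enum_map_item *ptr = block;
	unsigned long i, len = ptr->head.length;

	ptr++;				/* the saved maps follow the head */
	for (i = 0; i < len; i++, ptr++)
		cb(&ptr->map);
	/* ptr is now the tail; ptr->tail.next points to the next block */
}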
155#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
156
157static int tracing_set_tracer(struct trace_array *tr, const char *buf);
158
159#define MAX_TRACER_SIZE 100
160static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
161static char *default_bootup_tracer;
162
163static bool allocate_snapshot;
164
165static int __init set_cmdline_ftrace(char *str)
166{
167 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
168 default_bootup_tracer = bootup_tracer_buf;
169 /* We are using ftrace early, expand it */
170 ring_buffer_expanded = true;
171 return 1;
172}
173__setup("ftrace=", set_cmdline_ftrace);
174
175static int __init set_ftrace_dump_on_oops(char *str)
176{
177 if (*str++ != '=' || !*str) {
178 ftrace_dump_on_oops = DUMP_ALL;
179 return 1;
180 }
181
182 if (!strcmp("orig_cpu", str)) {
183 ftrace_dump_on_oops = DUMP_ORIG;
184 return 1;
185 }
186
187 return 0;
188}
189__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
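/*
 * Example usage of the handler above (documentation only; the values
 * come from the code above, they are not additional options):
 *
 *	ftrace_dump_on_oops		dump the buffers of all CPUs
 *	ftrace_dump_on_oops=orig_cpu	dump only the CPU that oopsed
 *
 * or at run time:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */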
190
191static int __init stop_trace_on_warning(char *str)
192{
193 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
194 __disable_trace_on_warning = 1;
195 return 1;
196}
197__setup("traceoff_on_warning", stop_trace_on_warning);
198
199static int __init boot_alloc_snapshot(char *str)
200{
201 allocate_snapshot = true;
202 /* We also need the main ring buffer expanded */
203 ring_buffer_expanded = true;
204 return 1;
205}
206__setup("alloc_snapshot", boot_alloc_snapshot);
207
208
209static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
210
211static int __init set_trace_boot_options(char *str)
212{
213 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
214 return 0;
215}
216__setup("trace_options=", set_trace_boot_options);
217
218static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
219static char *trace_boot_clock __initdata;
220
221static int __init set_trace_boot_clock(char *str)
222{
223 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
224 trace_boot_clock = trace_boot_clock_buf;
225 return 0;
226}
227__setup("trace_clock=", set_trace_boot_clock);
228
229static int __init set_tracepoint_printk(char *str)
230{
231 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
232 tracepoint_printk = 1;
233 return 1;
234}
235__setup("tp_printk", set_tracepoint_printk);
236
237unsigned long long ns2usecs(cycle_t nsec)
238{
239 nsec += 500;
240 do_div(nsec, 1000);
241 return nsec;
242}
243
244/* trace_flags holds trace_options default values */
245#define TRACE_DEFAULT_FLAGS \
246 (FUNCTION_DEFAULT_FLAGS | \
247 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
248 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
249 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
250 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
251
252/* trace_options that are only supported by global_trace */
253#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
254 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
255
256
257/*
258 * The global_trace is the descriptor that holds the tracing
259 * buffers for the live tracing. For each CPU, it contains
260 * a linked list of pages that will store trace entries. The
261 * page descriptor of those pages is used to hold
262 * the linked list, by linking the lru item in the page descriptor
263 * to each of the pages in the per-CPU buffer.
264 *
265 * For each active CPU there is a data field that holds the
266 * pages for the buffer for that CPU. Each CPU has the same number
267 * of pages allocated for its buffer.
268 */
269static struct trace_array global_trace = {
270 .trace_flags = TRACE_DEFAULT_FLAGS,
271};
272
273LIST_HEAD(ftrace_trace_arrays);
274
275int trace_array_get(struct trace_array *this_tr)
276{
277 struct trace_array *tr;
278 int ret = -ENODEV;
279
280 mutex_lock(&trace_types_lock);
281 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
282 if (tr == this_tr) {
283 tr->ref++;
284 ret = 0;
285 break;
286 }
287 }
288 mutex_unlock(&trace_types_lock);
289
290 return ret;
291}
292
293static void __trace_array_put(struct trace_array *this_tr)
294{
295 WARN_ON(!this_tr->ref);
296 this_tr->ref--;
297}
298
299void trace_array_put(struct trace_array *this_tr)
300{
301 mutex_lock(&trace_types_lock);
302 __trace_array_put(this_tr);
303 mutex_unlock(&trace_types_lock);
304}
305
306int filter_check_discard(struct trace_event_file *file, void *rec,
307 struct ring_buffer *buffer,
308 struct ring_buffer_event *event)
309{
310 if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
311 !filter_match_preds(file->filter, rec)) {
312 ring_buffer_discard_commit(buffer, event);
313 return 1;
314 }
315
316 return 0;
317}
318EXPORT_SYMBOL_GPL(filter_check_discard);
319
320int call_filter_check_discard(struct trace_event_call *call, void *rec,
321 struct ring_buffer *buffer,
322 struct ring_buffer_event *event)
323{
324 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
325 !filter_match_preds(call->filter, rec)) {
326 ring_buffer_discard_commit(buffer, event);
327 return 1;
328 }
329
330 return 0;
331}
332EXPORT_SYMBOL_GPL(call_filter_check_discard);
333
334static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
335{
336 u64 ts;
337
338 /* Early boot up does not have a buffer yet */
339 if (!buf->buffer)
340 return trace_clock_local();
341
342 ts = ring_buffer_time_stamp(buf->buffer, cpu);
343 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
344
345 return ts;
346}
347
348cycle_t ftrace_now(int cpu)
349{
350 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
351}
352
353/**
354 * tracing_is_enabled - Show if global_trace has been disabled
355 *
356 * Shows if the global trace has been enabled or not. It uses the
357 * mirror flag "buffer_disabled" to be used in fast paths such as for
358 * the irqsoff tracer. But it may be inaccurate due to races. If you
359 * need to know the accurate state, use tracing_is_on() which is a little
360 * slower, but accurate.
361 */
362int tracing_is_enabled(void)
363{
364 /*
365 * For quick access (irqsoff uses this in fast path), just
366 * return the mirror variable of the state of the ring buffer.
367 * It's a little racy, but we don't really care.
368 */
369 smp_rmb();
370 return !global_trace.buffer_disabled;
371}
372
373/*
374 * trace_buf_size is the size in bytes that is allocated
375 * for a buffer. Note, the number of bytes is always rounded
376 * to page size.
377 *
378 * This number is deliberately set to a low 16384 entries,
379 * so that if a dump on oops happens we do not have to wait
380 * for a huge amount of output. It is configurable at boot
381 * time and at run time anyway.
382 */
383#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
384
385static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
386
387/* trace_types holds a linked list of available tracers. */
388static struct tracer *trace_types __read_mostly;
389
390/*
391 * trace_types_lock is used to protect the trace_types list.
392 */
393DEFINE_MUTEX(trace_types_lock);
394
395/*
396 * serialize the access of the ring buffer
397 *
398 * The ring buffer serializes readers, but that is only low-level
399 * protection. The validity of the events (returned by
400 * ring_buffer_peek() and friends) is not protected by the ring buffer.
401 *
402 * The content of events may become garbage if we allow another process
403 * to consume these events concurrently:
404 *   A) the page of the consumed events may become a normal page
405 *      (not a reader page) in the ring buffer, and this page will be
406 *      rewritten by the event producer.
407 *   B) The page of the consumed events may become a page for
408 *      splice_read, and this page will be returned to the system.
409 *
410 * These primitives allow multiple processes to access different CPU
411 * ring buffers concurrently.
412 *
413 * These primitives don't distinguish read-only and read-consume access.
414 * Multiple read-only accesses are also serialized.
415 */
416
417#ifdef CONFIG_SMP
418static DECLARE_RWSEM(all_cpu_access_lock);
419static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
420
421static inline void trace_access_lock(int cpu)
422{
423 if (cpu == RING_BUFFER_ALL_CPUS) {
424 /* gain it for accessing the whole ring buffer. */
425 down_write(&all_cpu_access_lock);
426 } else {
427 /* gain it for accessing a cpu ring buffer. */
428
429 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
430 down_read(&all_cpu_access_lock);
431
432 /* Secondly block other access to this @cpu ring buffer. */
433 mutex_lock(&per_cpu(cpu_access_lock, cpu));
434 }
435}
436
437static inline void trace_access_unlock(int cpu)
438{
439 if (cpu == RING_BUFFER_ALL_CPUS) {
440 up_write(&all_cpu_access_lock);
441 } else {
442 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
443 up_read(&all_cpu_access_lock);
444 }
445}
446
447static inline void trace_access_lock_init(void)
448{
449 int cpu;
450
451 for_each_possible_cpu(cpu)
452 mutex_init(&per_cpu(cpu_access_lock, cpu));
453}
454
455#else
456
457static DEFINE_MUTEX(access_lock);
458
459static inline void trace_access_lock(int cpu)
460{
461 (void)cpu;
462 mutex_lock(&access_lock);
463}
464
465static inline void trace_access_unlock(int cpu)
466{
467 (void)cpu;
468 mutex_unlock(&access_lock);
469}
470
471static inline void trace_access_lock_init(void)
472{
473}
474
475#endif
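/*
 * Illustrative sketch only (not used by the tracer): a reader of a
 * single CPU buffer brackets its accesses with the primitives above,
 * while a reader of the whole buffer passes RING_BUFFER_ALL_CPUS.
 * The helper name is hypothetical.
 */
static inline void example_consume_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	trace_access_lock(cpu);
	while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL)))
		;	/* hand the event to the output path here */
	trace_access_unlock(cpu);
}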
476
477#ifdef CONFIG_STACKTRACE
478static void __ftrace_trace_stack(struct ring_buffer *buffer,
479 unsigned long flags,
480 int skip, int pc, struct pt_regs *regs);
481static inline void ftrace_trace_stack(struct trace_array *tr,
482 struct ring_buffer *buffer,
483 unsigned long flags,
484 int skip, int pc, struct pt_regs *regs);
485
486#else
487static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
488 unsigned long flags,
489 int skip, int pc, struct pt_regs *regs)
490{
491}
492static inline void ftrace_trace_stack(struct trace_array *tr,
493 struct ring_buffer *buffer,
494 unsigned long flags,
495 int skip, int pc, struct pt_regs *regs)
496{
497}
498
499#endif
500
501static void tracer_tracing_on(struct trace_array *tr)
502{
503 if (tr->trace_buffer.buffer)
504 ring_buffer_record_on(tr->trace_buffer.buffer);
505 /*
506 * This flag is looked at when buffers haven't been allocated
507 * yet, or by some tracers (like irqsoff), that just want to
508 * know if the ring buffer has been disabled, but it can handle
509 * races of where it gets disabled but we still do a record.
510 * As the check is in the fast path of the tracers, it is more
511 * important to be fast than accurate.
512 */
513 tr->buffer_disabled = 0;
514 /* Make the flag seen by readers */
515 smp_wmb();
516}
517
518/**
519 * tracing_on - enable tracing buffers
520 *
521 * This function enables tracing buffers that may have been
522 * disabled with tracing_off.
523 */
524void tracing_on(void)
525{
526 tracer_tracing_on(&global_trace);
527}
528EXPORT_SYMBOL_GPL(tracing_on);
529
530/**
531 * __trace_puts - write a constant string into the trace buffer.
532 * @ip: The address of the caller
533 * @str: The constant string to write
534 * @size: The size of the string.
535 */
536int __trace_puts(unsigned long ip, const char *str, int size)
537{
538 struct ring_buffer_event *event;
539 struct ring_buffer *buffer;
540 struct print_entry *entry;
541 unsigned long irq_flags;
542 int alloc;
543 int pc;
544
545 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
546 return 0;
547
548 pc = preempt_count();
549
550 if (unlikely(tracing_selftest_running || tracing_disabled))
551 return 0;
552
553 alloc = sizeof(*entry) + size + 2; /* possible \n added */
554
555 local_save_flags(irq_flags);
556 buffer = global_trace.trace_buffer.buffer;
557 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
558 irq_flags, pc);
559 if (!event)
560 return 0;
561
562 entry = ring_buffer_event_data(event);
563 entry->ip = ip;
564
565 memcpy(&entry->buf, str, size);
566
567 /* Add a newline if necessary */
568 if (entry->buf[size - 1] != '\n') {
569 entry->buf[size] = '\n';
570 entry->buf[size + 1] = '\0';
571 } else
572 entry->buf[size] = '\0';
573
574 __buffer_unlock_commit(buffer, event);
575 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
576
577 return size;
578}
579EXPORT_SYMBOL_GPL(__trace_puts);
580
581/**
582 * __trace_bputs - write the pointer to a constant string into trace buffer
583 * @ip: The address of the caller
584 * @str: The constant string to write to the buffer to
585 */
586int __trace_bputs(unsigned long ip, const char *str)
587{
588 struct ring_buffer_event *event;
589 struct ring_buffer *buffer;
590 struct bputs_entry *entry;
591 unsigned long irq_flags;
592 int size = sizeof(struct bputs_entry);
593 int pc;
594
595 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
596 return 0;
597
598 pc = preempt_count();
599
600 if (unlikely(tracing_selftest_running || tracing_disabled))
601 return 0;
602
603 local_save_flags(irq_flags);
604 buffer = global_trace.trace_buffer.buffer;
605 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
606 irq_flags, pc);
607 if (!event)
608 return 0;
609
610 entry = ring_buffer_event_data(event);
611 entry->ip = ip;
612 entry->str = str;
613
614 __buffer_unlock_commit(buffer, event);
615 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
616
617 return 1;
618}
619EXPORT_SYMBOL_GPL(__trace_bputs);
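/*
 * Note: callers normally do not use the two functions above directly.
 * The trace_puts() macro in linux/kernel.h picks __trace_bputs() for
 * compile-time constant strings and falls back to __trace_puts()
 * otherwise, passing _THIS_IP_ and the string length itself, e.g.
 *
 *	trace_puts("reached the slow path\n");
 */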
620
621#ifdef CONFIG_TRACER_SNAPSHOT
622/**
623 * trace_snapshot - take a snapshot of the current buffer.
624 *
625 * This causes a swap between the snapshot buffer and the current live
626 * tracing buffer. You can use this to take snapshots of the live
627 * trace when some condition is triggered, but continue to trace.
628 *
629 * Note, make sure to allocate the snapshot with either
630 * a tracing_snapshot_alloc(), or by doing it manually
631 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
632 *
633 * If the snapshot buffer is not allocated, it will stop tracing,
634 * basically making a permanent snapshot.
635 */
636void tracing_snapshot(void)
637{
638 struct trace_array *tr = &global_trace;
639 struct tracer *tracer = tr->current_trace;
640 unsigned long flags;
641
642 if (in_nmi()) {
643 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
644 internal_trace_puts("*** snapshot is being ignored ***\n");
645 return;
646 }
647
648 if (!tr->allocated_snapshot) {
649 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
650 internal_trace_puts("*** stopping trace here! ***\n");
651 tracing_off();
652 return;
653 }
654
655 /* Note, snapshot can not be used when the tracer uses it */
656 if (tracer->use_max_tr) {
657 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
658 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
659 return;
660 }
661
662 local_irq_save(flags);
663 update_max_tr(tr, current, smp_processor_id());
664 local_irq_restore(flags);
665}
666EXPORT_SYMBOL_GPL(tracing_snapshot);
667
668static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
669 struct trace_buffer *size_buf, int cpu_id);
670static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
671
672static int alloc_snapshot(struct trace_array *tr)
673{
674 int ret;
675
676 if (!tr->allocated_snapshot) {
677
678 /* allocate spare buffer */
679 ret = resize_buffer_duplicate_size(&tr->max_buffer,
680 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
681 if (ret < 0)
682 return ret;
683
684 tr->allocated_snapshot = true;
685 }
686
687 return 0;
688}
689
690static void free_snapshot(struct trace_array *tr)
691{
692 /*
693 * We don't free the ring buffer; instead, we resize it, because
694 * the max_tr ring buffer has some state (e.g. ring->clock) that
695 * we want to preserve.
696 */
697 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
698 set_buffer_entries(&tr->max_buffer, 1);
699 tracing_reset_online_cpus(&tr->max_buffer);
700 tr->allocated_snapshot = false;
701}
702
703/**
704 * tracing_alloc_snapshot - allocate snapshot buffer.
705 *
706 * This only allocates the snapshot buffer if it isn't already
707 * allocated - it doesn't also take a snapshot.
708 *
709 * This is meant to be used in cases where the snapshot buffer needs
710 * to be set up for events that can't sleep but need to be able to
711 * trigger a snapshot.
712 */
713int tracing_alloc_snapshot(void)
714{
715 struct trace_array *tr = &global_trace;
716 int ret;
717
718 ret = alloc_snapshot(tr);
719 WARN_ON(ret < 0);
720
721 return ret;
722}
723EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
724
725/**
726 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
727 *
728 * This is similar to trace_snapshot(), but it will allocate the
729 * snapshot buffer if it isn't already allocated. Use this only
730 * where it is safe to sleep, as the allocation may sleep.
731 *
732 * This causes a swap between the snapshot buffer and the current live
733 * tracing buffer. You can use this to take snapshots of the live
734 * trace when some condition is triggered, but continue to trace.
735 */
736void tracing_snapshot_alloc(void)
737{
738 int ret;
739
740 ret = tracing_alloc_snapshot();
741 if (ret < 0)
742 return;
743
744 tracing_snapshot();
745}
746EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
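/*
 * Illustrative sketch (hypothetical callers, not part of the tracer):
 * a sleepable path can use tracing_snapshot_alloc() directly, while an
 * atomic path allocates up front and only triggers the swap later.
 */
static inline int example_arm_snapshot(void)
{
	/* may sleep: make sure the spare buffer exists */
	return tracing_alloc_snapshot();
}

static inline void example_snapshot_if(bool cond)
{
	/* safe in atomic context once the spare buffer is allocated */
	if (cond)
		tracing_snapshot();
}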
747#else
748void tracing_snapshot(void)
749{
750 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
751}
752EXPORT_SYMBOL_GPL(tracing_snapshot);
753int tracing_alloc_snapshot(void)
754{
755 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
756 return -ENODEV;
757}
758EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
759void tracing_snapshot_alloc(void)
760{
761 /* Give warning */
762 tracing_snapshot();
763}
764EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
765#endif /* CONFIG_TRACER_SNAPSHOT */
766
767static void tracer_tracing_off(struct trace_array *tr)
768{
769 if (tr->trace_buffer.buffer)
770 ring_buffer_record_off(tr->trace_buffer.buffer);
771 /*
772 * This flag is looked at when buffers haven't been allocated
773 * yet, or by some tracers (like irqsoff), that just want to
774 * know if the ring buffer has been disabled, but it can handle
775 * races of where it gets disabled but we still do a record.
776 * As the check is in the fast path of the tracers, it is more
777 * important to be fast than accurate.
778 */
779 tr->buffer_disabled = 1;
780 /* Make the flag seen by readers */
781 smp_wmb();
782}
783
784/**
785 * tracing_off - turn off tracing buffers
786 *
787 * This function stops the tracing buffers from recording data.
788 * It does not disable any overhead the tracers themselves may
789 * be causing. This function simply causes all recording to
790 * the ring buffers to fail.
791 */
792void tracing_off(void)
793{
794 tracer_tracing_off(&global_trace);
795}
796EXPORT_SYMBOL_GPL(tracing_off);
797
798void disable_trace_on_warning(void)
799{
800 if (__disable_trace_on_warning)
801 tracing_off();
802}
803
804/**
805 * tracer_tracing_is_on - show real state of ring buffer enabled
806 * @tr : the trace array to know if ring buffer is enabled
807 *
808 * Shows real state of the ring buffer if it is enabled or not.
809 */
810static int tracer_tracing_is_on(struct trace_array *tr)
811{
812 if (tr->trace_buffer.buffer)
813 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
814 return !tr->buffer_disabled;
815}
816
817/**
818 * tracing_is_on - show state of ring buffers enabled
819 */
820int tracing_is_on(void)
821{
822 return tracer_tracing_is_on(&global_trace);
823}
824EXPORT_SYMBOL_GPL(tracing_is_on);
825
826static int __init set_buf_size(char *str)
827{
828 unsigned long buf_size;
829
830 if (!str)
831 return 0;
832 buf_size = memparse(str, &str);
833 /* nr_entries can not be zero */
834 if (buf_size == 0)
835 return 0;
836 trace_buf_size = buf_size;
837 return 1;
838}
839__setup("trace_buf_size=", set_buf_size);
840
841static int __init set_tracing_thresh(char *str)
842{
843 unsigned long threshold;
844 int ret;
845
846 if (!str)
847 return 0;
848 ret = kstrtoul(str, 0, &threshold);
849 if (ret < 0)
850 return 0;
851 tracing_thresh = threshold * 1000;
852 return 1;
853}
854__setup("tracing_thresh=", set_tracing_thresh);
855
856unsigned long nsecs_to_usecs(unsigned long nsecs)
857{
858 return nsecs / 1000;
859}
860
861/*
862 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
863 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
864 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
865 * of strings in the order that the enums were defined.
866 */
867#undef C
868#define C(a, b) b
869
870/* These must match the bit positions in trace_iterator_flags */
871static const char *trace_options[] = {
872 TRACE_FLAGS
873 NULL
874};
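/*
 * A minimal sketch of the same X-macro pattern with a made-up
 * two-entry list (EXAMPLE_FLAGS is purely illustrative; the real list
 * is TRACE_FLAGS from trace.h):
 *
 *	#define EXAMPLE_FLAGS	C(FOO, "foo") C(BAR, "bar")
 *
 *	#define C(a, b) b,
 *	static const char *example_names[] = { EXAMPLE_FLAGS };
 *	#undef C
 *
 * Redefining C() lets the very same list also generate the matching
 * enum of bit names, keeping the two in sync by construction.
 */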
875
876static struct {
877 u64 (*func)(void);
878 const char *name;
879 int in_ns; /* is this clock in nanoseconds? */
880} trace_clocks[] = {
881 { trace_clock_local, "local", 1 },
882 { trace_clock_global, "global", 1 },
883 { trace_clock_counter, "counter", 0 },
884 { trace_clock_jiffies, "uptime", 0 },
885 { trace_clock, "perf", 1 },
886 { ktime_get_mono_fast_ns, "mono", 1 },
887 { ktime_get_raw_fast_ns, "mono_raw", 1 },
888 ARCH_TRACE_CLOCKS
889};
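/*
 * Example (documentation only): a clock from the table above can be
 * selected at boot time with "trace_clock=global" (see
 * set_trace_boot_clock() above) or at run time with:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */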
890
891/*
892 * trace_parser_get_init - gets the buffer for trace parser
893 */
894int trace_parser_get_init(struct trace_parser *parser, int size)
895{
896 memset(parser, 0, sizeof(*parser));
897
898 parser->buffer = kmalloc(size, GFP_KERNEL);
899 if (!parser->buffer)
900 return 1;
901
902 parser->size = size;
903 return 0;
904}
905
906/*
907 * trace_parser_put - frees the buffer for trace parser
908 */
909void trace_parser_put(struct trace_parser *parser)
910{
911 kfree(parser->buffer);
912}
913
914/*
915 * trace_get_user - reads the user input string separated by space
916 * (matched by isspace(ch))
917 *
918 * For each string found the 'struct trace_parser' is updated,
919 * and the function returns.
920 *
921 * Returns number of bytes read.
922 *
923 * See kernel/trace/trace.h for 'struct trace_parser' details.
924 */
925int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
926 size_t cnt, loff_t *ppos)
927{
928 char ch;
929 size_t read = 0;
930 ssize_t ret;
931
932 if (!*ppos)
933 trace_parser_clear(parser);
934
935 ret = get_user(ch, ubuf++);
936 if (ret)
937 goto out;
938
939 read++;
940 cnt--;
941
942 /*
943 * The parser is not finished with the last write,
944 * continue reading the user input without skipping spaces.
945 */
946 if (!parser->cont) {
947 /* skip white space */
948 while (cnt && isspace(ch)) {
949 ret = get_user(ch, ubuf++);
950 if (ret)
951 goto out;
952 read++;
953 cnt--;
954 }
955
956 /* only spaces were written */
957 if (isspace(ch)) {
958 *ppos += read;
959 ret = read;
960 goto out;
961 }
962
963 parser->idx = 0;
964 }
965
966 /* read the non-space input */
967 while (cnt && !isspace(ch)) {
968 if (parser->idx < parser->size - 1)
969 parser->buffer[parser->idx++] = ch;
970 else {
971 ret = -EINVAL;
972 goto out;
973 }
974 ret = get_user(ch, ubuf++);
975 if (ret)
976 goto out;
977 read++;
978 cnt--;
979 }
980
981 /* We either got finished input or we have to wait for another call. */
982 if (isspace(ch)) {
983 parser->buffer[parser->idx] = 0;
984 parser->cont = false;
985 } else if (parser->idx < parser->size - 1) {
986 parser->cont = true;
987 parser->buffer[parser->idx++] = ch;
988 } else {
989 ret = -EINVAL;
990 goto out;
991 }
992
993 *ppos += read;
994 ret = read;
995
996out:
997 return ret;
998}
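/*
 * Illustrative sketch (not used by the tracer): a typical ->write()
 * handler drives the parser like this.  "example_apply_token" is a
 * hypothetical callback standing in for whatever consumes the token.
 */
static inline ssize_t
example_parse_write(const char __user *ubuf, size_t cnt, loff_t *ppos,
		    int (*example_apply_token)(const char *tok))
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, PAGE_SIZE))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read >= 0 && trace_parser_loaded(&parser))
		example_apply_token(parser.buffer);

	trace_parser_put(&parser);
	return read;
}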
999
1000/* TODO add a seq_buf_to_buffer() */
1001static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1002{
1003 int len;
1004
1005 if (trace_seq_used(s) <= s->seq.readpos)
1006 return -EBUSY;
1007
1008 len = trace_seq_used(s) - s->seq.readpos;
1009 if (cnt > len)
1010 cnt = len;
1011 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1012
1013 s->seq.readpos += cnt;
1014 return cnt;
1015}
1016
1017unsigned long __read_mostly tracing_thresh;
1018
1019#ifdef CONFIG_TRACER_MAX_TRACE
1020/*
1021 * Copy the new maximum trace into the separate maximum-trace
1022 * structure. (this way the maximum trace is permanently saved,
1023 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1024 */
1025static void
1026__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1027{
1028 struct trace_buffer *trace_buf = &tr->trace_buffer;
1029 struct trace_buffer *max_buf = &tr->max_buffer;
1030 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1031 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1032
1033 max_buf->cpu = cpu;
1034 max_buf->time_start = data->preempt_timestamp;
1035
1036 max_data->saved_latency = tr->max_latency;
1037 max_data->critical_start = data->critical_start;
1038 max_data->critical_end = data->critical_end;
1039
1040 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1041 max_data->pid = tsk->pid;
1042 /*
1043 * If tsk == current, then use current_uid(), as that does not use
1044 * RCU. The irq tracer can be called out of RCU scope.
1045 */
1046 if (tsk == current)
1047 max_data->uid = current_uid();
1048 else
1049 max_data->uid = task_uid(tsk);
1050
1051 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1052 max_data->policy = tsk->policy;
1053 max_data->rt_priority = tsk->rt_priority;
1054
1055 /* record this task's comm */
1056 tracing_record_cmdline(tsk);
1057}
1058
1059/**
1060 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1061 * @tr: tracer
1062 * @tsk: the task with the latency
1063 * @cpu: The cpu that initiated the trace.
1064 *
1065 * Flip the buffers between the @tr and the max_tr and record information
1066 * about which task was the cause of this latency.
1067 */
1068void
1069update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1070{
1071 struct ring_buffer *buf;
1072
1073 if (tr->stop_count)
1074 return;
1075
1076 WARN_ON_ONCE(!irqs_disabled());
1077
1078 if (!tr->allocated_snapshot) {
1079 /* Only the nop tracer should hit this when disabling */
1080 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1081 return;
1082 }
1083
1084 arch_spin_lock(&tr->max_lock);
1085
1086 buf = tr->trace_buffer.buffer;
1087 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1088 tr->max_buffer.buffer = buf;
1089
1090 __update_max_tr(tr, tsk, cpu);
1091 arch_spin_unlock(&tr->max_lock);
1092}
1093
1094/**
1095 * update_max_tr_single - only copy one trace over, and reset the rest
1096 * @tr: tracer
1097 * @tsk: task with the latency
1098 * @cpu: the cpu of the buffer to copy.
1099 *
1100 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1101 */
1102void
1103update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1104{
1105 int ret;
1106
1107 if (tr->stop_count)
1108 return;
1109
1110 WARN_ON_ONCE(!irqs_disabled());
1111 if (!tr->allocated_snapshot) {
1112 /* Only the nop tracer should hit this when disabling */
1113 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1114 return;
1115 }
1116
1117 arch_spin_lock(&tr->max_lock);
1118
1119 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1120
1121 if (ret == -EBUSY) {
1122 /*
1123 * We failed to swap the buffer due to a commit taking
1124 * place on this CPU. We fail to record, but we reset
1125 * the max trace buffer (no one writes directly to it)
1126 * and flag that it failed.
1127 */
1128 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1129 "Failed to swap buffers due to commit in progress\n");
1130 }
1131
1132 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1133
1134 __update_max_tr(tr, tsk, cpu);
1135 arch_spin_unlock(&tr->max_lock);
1136}
1137#endif /* CONFIG_TRACER_MAX_TRACE */
1138
1139static int wait_on_pipe(struct trace_iterator *iter, bool full)
1140{
1141 /* Iterators are static, they should be filled or empty */
1142 if (trace_buffer_iter(iter, iter->cpu_file))
1143 return 0;
1144
1145 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1146 full);
1147}
1148
1149#ifdef CONFIG_FTRACE_STARTUP_TEST
1150static int run_tracer_selftest(struct tracer *type)
1151{
1152 struct trace_array *tr = &global_trace;
1153 struct tracer *saved_tracer = tr->current_trace;
1154 int ret;
1155
1156 if (!type->selftest || tracing_selftest_disabled)
1157 return 0;
1158
1159 /*
1160 * Run a selftest on this tracer.
1161 * Here we reset the trace buffer, and set the current
1162 * tracer to be this tracer. The tracer can then run some
1163 * internal tracing to verify that everything is in order.
1164 * If we fail, we do not register this tracer.
1165 */
1166 tracing_reset_online_cpus(&tr->trace_buffer);
1167
1168 tr->current_trace = type;
1169
1170#ifdef CONFIG_TRACER_MAX_TRACE
1171 if (type->use_max_tr) {
1172 /* If we expanded the buffers, make sure the max is expanded too */
1173 if (ring_buffer_expanded)
1174 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1175 RING_BUFFER_ALL_CPUS);
1176 tr->allocated_snapshot = true;
1177 }
1178#endif
1179
1180 /* the test is responsible for initializing and enabling */
1181 pr_info("Testing tracer %s: ", type->name);
1182 ret = type->selftest(type, tr);
1183 /* the test is responsible for resetting too */
1184 tr->current_trace = saved_tracer;
1185 if (ret) {
1186 printk(KERN_CONT "FAILED!\n");
1187 /* Add the warning after printing 'FAILED' */
1188 WARN_ON(1);
1189 return -1;
1190 }
1191 /* Only reset on passing, to avoid touching corrupted buffers */
1192 tracing_reset_online_cpus(&tr->trace_buffer);
1193
1194#ifdef CONFIG_TRACER_MAX_TRACE
1195 if (type->use_max_tr) {
1196 tr->allocated_snapshot = false;
1197
1198 /* Shrink the max buffer again */
1199 if (ring_buffer_expanded)
1200 ring_buffer_resize(tr->max_buffer.buffer, 1,
1201 RING_BUFFER_ALL_CPUS);
1202 }
1203#endif
1204
1205 printk(KERN_CONT "PASSED\n");
1206 return 0;
1207}
1208#else
1209static inline int run_tracer_selftest(struct tracer *type)
1210{
1211 return 0;
1212}
1213#endif /* CONFIG_FTRACE_STARTUP_TEST */
1214
1215static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1216
1217static void __init apply_trace_boot_options(void);
1218
1219/**
1220 * register_tracer - register a tracer with the ftrace system.
1221 * @type - the plugin for the tracer
1222 *
1223 * Register a new plugin tracer.
1224 */
1225int __init register_tracer(struct tracer *type)
1226{
1227 struct tracer *t;
1228 int ret = 0;
1229
1230 if (!type->name) {
1231 pr_info("Tracer must have a name\n");
1232 return -1;
1233 }
1234
1235 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1236 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1237 return -1;
1238 }
1239
1240 mutex_lock(&trace_types_lock);
1241
1242 tracing_selftest_running = true;
1243
1244 for (t = trace_types; t; t = t->next) {
1245 if (strcmp(type->name, t->name) == 0) {
1246 /* already found */
1247 pr_info("Tracer %s already registered\n",
1248 type->name);
1249 ret = -1;
1250 goto out;
1251 }
1252 }
1253
1254 if (!type->set_flag)
1255 type->set_flag = &dummy_set_flag;
1256 if (!type->flags) {
1257 /*allocate a dummy tracer_flags*/
1258 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1259 if (!type->flags) {
1260 ret = -ENOMEM;
1261 goto out;
1262 }
1263 type->flags->val = 0;
1264 type->flags->opts = dummy_tracer_opt;
1265 } else
1266 if (!type->flags->opts)
1267 type->flags->opts = dummy_tracer_opt;
1268
1269 /* store the tracer for __set_tracer_option */
1270 type->flags->trace = type;
1271
1272 ret = run_tracer_selftest(type);
1273 if (ret < 0)
1274 goto out;
1275
1276 type->next = trace_types;
1277 trace_types = type;
1278 add_tracer_options(&global_trace, type);
1279
1280 out:
1281 tracing_selftest_running = false;
1282 mutex_unlock(&trace_types_lock);
1283
1284 if (ret || !default_bootup_tracer)
1285 goto out_unlock;
1286
1287 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1288 goto out_unlock;
1289
1290 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1291 /* Do we want this tracer to start on bootup? */
1292 tracing_set_tracer(&global_trace, type->name);
1293 default_bootup_tracer = NULL;
1294
1295 apply_trace_boot_options();
1296
1297 /* disable other selftests, since this will break it. */
1298 tracing_selftest_disabled = true;
1299#ifdef CONFIG_FTRACE_STARTUP_TEST
1300 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1301 type->name);
1302#endif
1303
1304 out_unlock:
1305 return ret;
1306}
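/*
 * Illustrative sketch of a minimal client of register_tracer() (the
 * "example" tracer below is hypothetical and not built):
 *
 *	static int example_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *	};
 *
 *	static __init int init_example_tracer(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 *	core_initcall(init_example_tracer);
 */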
1307
1308void tracing_reset(struct trace_buffer *buf, int cpu)
1309{
1310 struct ring_buffer *buffer = buf->buffer;
1311
1312 if (!buffer)
1313 return;
1314
1315 ring_buffer_record_disable(buffer);
1316
1317 /* Make sure all commits have finished */
1318 synchronize_sched();
1319 ring_buffer_reset_cpu(buffer, cpu);
1320
1321 ring_buffer_record_enable(buffer);
1322}
1323
1324void tracing_reset_online_cpus(struct trace_buffer *buf)
1325{
1326 struct ring_buffer *buffer = buf->buffer;
1327 int cpu;
1328
1329 if (!buffer)
1330 return;
1331
1332 ring_buffer_record_disable(buffer);
1333
1334 /* Make sure all commits have finished */
1335 synchronize_sched();
1336
1337 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1338
1339 for_each_online_cpu(cpu)
1340 ring_buffer_reset_cpu(buffer, cpu);
1341
1342 ring_buffer_record_enable(buffer);
1343}
1344
1345/* Must have trace_types_lock held */
1346void tracing_reset_all_online_cpus(void)
1347{
1348 struct trace_array *tr;
1349
1350 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1351 tracing_reset_online_cpus(&tr->trace_buffer);
1352#ifdef CONFIG_TRACER_MAX_TRACE
1353 tracing_reset_online_cpus(&tr->max_buffer);
1354#endif
1355 }
1356}
1357
1358#define SAVED_CMDLINES_DEFAULT 128
1359#define NO_CMDLINE_MAP UINT_MAX
1360static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1361struct saved_cmdlines_buffer {
1362 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1363 unsigned *map_cmdline_to_pid;
1364 unsigned cmdline_num;
1365 int cmdline_idx;
1366 char *saved_cmdlines;
1367};
1368static struct saved_cmdlines_buffer *savedcmd;
1369
1370/* temporary disable recording */
1371static atomic_t trace_record_cmdline_disabled __read_mostly;
1372
1373static inline char *get_saved_cmdlines(int idx)
1374{
1375 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1376}
1377
1378static inline void set_cmdline(int idx, const char *cmdline)
1379{
1380 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1381}
1382
1383static int allocate_cmdlines_buffer(unsigned int val,
1384 struct saved_cmdlines_buffer *s)
1385{
1386 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1387 GFP_KERNEL);
1388 if (!s->map_cmdline_to_pid)
1389 return -ENOMEM;
1390
1391 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1392 if (!s->saved_cmdlines) {
1393 kfree(s->map_cmdline_to_pid);
1394 return -ENOMEM;
1395 }
1396
1397 s->cmdline_idx = 0;
1398 s->cmdline_num = val;
1399 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1400 sizeof(s->map_pid_to_cmdline));
1401 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1402 val * sizeof(*s->map_cmdline_to_pid));
1403
1404 return 0;
1405}
1406
1407static int trace_create_savedcmd(void)
1408{
1409 int ret;
1410
1411 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1412 if (!savedcmd)
1413 return -ENOMEM;
1414
1415 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1416 if (ret < 0) {
1417 kfree(savedcmd);
1418 savedcmd = NULL;
1419 return -ENOMEM;
1420 }
1421
1422 return 0;
1423}
1424
1425int is_tracing_stopped(void)
1426{
1427 return global_trace.stop_count;
1428}
1429
1430/**
1431 * tracing_start - quick start of the tracer
1432 *
1433 * If tracing is enabled but was stopped by tracing_stop,
1434 * this will start the tracer back up.
1435 */
1436void tracing_start(void)
1437{
1438 struct ring_buffer *buffer;
1439 unsigned long flags;
1440
1441 if (tracing_disabled)
1442 return;
1443
1444 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1445 if (--global_trace.stop_count) {
1446 if (global_trace.stop_count < 0) {
1447 /* Someone screwed up their debugging */
1448 WARN_ON_ONCE(1);
1449 global_trace.stop_count = 0;
1450 }
1451 goto out;
1452 }
1453
1454 /* Prevent the buffers from switching */
1455 arch_spin_lock(&global_trace.max_lock);
1456
1457 buffer = global_trace.trace_buffer.buffer;
1458 if (buffer)
1459 ring_buffer_record_enable(buffer);
1460
1461#ifdef CONFIG_TRACER_MAX_TRACE
1462 buffer = global_trace.max_buffer.buffer;
1463 if (buffer)
1464 ring_buffer_record_enable(buffer);
1465#endif
1466
1467 arch_spin_unlock(&global_trace.max_lock);
1468
1469 out:
1470 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1471}
1472
1473static void tracing_start_tr(struct trace_array *tr)
1474{
1475 struct ring_buffer *buffer;
1476 unsigned long flags;
1477
1478 if (tracing_disabled)
1479 return;
1480
1481 /* If global, we need to also start the max tracer */
1482 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1483 return tracing_start();
1484
1485 raw_spin_lock_irqsave(&tr->start_lock, flags);
1486
1487 if (--tr->stop_count) {
1488 if (tr->stop_count < 0) {
1489 /* Someone screwed up their debugging */
1490 WARN_ON_ONCE(1);
1491 tr->stop_count = 0;
1492 }
1493 goto out;
1494 }
1495
1496 buffer = tr->trace_buffer.buffer;
1497 if (buffer)
1498 ring_buffer_record_enable(buffer);
1499
1500 out:
1501 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1502}
1503
1504/**
1505 * tracing_stop - quick stop of the tracer
1506 *
1507 * Light weight way to stop tracing. Use in conjunction with
1508 * tracing_start.
1509 */
1510void tracing_stop(void)
1511{
1512 struct ring_buffer *buffer;
1513 unsigned long flags;
1514
1515 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1516 if (global_trace.stop_count++)
1517 goto out;
1518
1519 /* Prevent the buffers from switching */
1520 arch_spin_lock(&global_trace.max_lock);
1521
1522 buffer = global_trace.trace_buffer.buffer;
1523 if (buffer)
1524 ring_buffer_record_disable(buffer);
1525
1526#ifdef CONFIG_TRACER_MAX_TRACE
1527 buffer = global_trace.max_buffer.buffer;
1528 if (buffer)
1529 ring_buffer_record_disable(buffer);
1530#endif
1531
1532 arch_spin_unlock(&global_trace.max_lock);
1533
1534 out:
1535 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1536}
1537
1538static void tracing_stop_tr(struct trace_array *tr)
1539{
1540 struct ring_buffer *buffer;
1541 unsigned long flags;
1542
1543 /* If global, we need to also stop the max tracer */
1544 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1545 return tracing_stop();
1546
1547 raw_spin_lock_irqsave(&tr->start_lock, flags);
1548 if (tr->stop_count++)
1549 goto out;
1550
1551 buffer = tr->trace_buffer.buffer;
1552 if (buffer)
1553 ring_buffer_record_disable(buffer);
1554
1555 out:
1556 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1557}
1558
1559void trace_stop_cmdline_recording(void);
1560
1561static int trace_save_cmdline(struct task_struct *tsk)
1562{
1563 unsigned pid, idx;
1564
1565 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1566 return 0;
1567
1568 /*
1569 * It's not the end of the world if we don't get
1570 * the lock, but we also don't want to spin
1571 * nor do we want to disable interrupts,
1572 * so if we miss here, then better luck next time.
1573 */
1574 if (!arch_spin_trylock(&trace_cmdline_lock))
1575 return 0;
1576
1577 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1578 if (idx == NO_CMDLINE_MAP) {
1579 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1580
1581 /*
1582 * Check whether the cmdline buffer at idx has a pid
1583 * mapped. We are going to overwrite that entry so we
1584 * need to clear the map_pid_to_cmdline. Otherwise we
1585 * would read the new comm for the old pid.
1586 */
1587 pid = savedcmd->map_cmdline_to_pid[idx];
1588 if (pid != NO_CMDLINE_MAP)
1589 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1590
1591 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1592 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1593
1594 savedcmd->cmdline_idx = idx;
1595 }
1596
1597 set_cmdline(idx, tsk->comm);
1598
1599 arch_spin_unlock(&trace_cmdline_lock);
1600
1601 return 1;
1602}
1603
1604static void __trace_find_cmdline(int pid, char comm[])
1605{
1606 unsigned map;
1607
1608 if (!pid) {
1609 strcpy(comm, "<idle>");
1610 return;
1611 }
1612
1613 if (WARN_ON_ONCE(pid < 0)) {
1614 strcpy(comm, "<XXX>");
1615 return;
1616 }
1617
1618 if (pid > PID_MAX_DEFAULT) {
1619 strcpy(comm, "<...>");
1620 return;
1621 }
1622
1623 map = savedcmd->map_pid_to_cmdline[pid];
1624 if (map != NO_CMDLINE_MAP)
1625 strcpy(comm, get_saved_cmdlines(map));
1626 else
1627 strcpy(comm, "<...>");
1628}
1629
1630void trace_find_cmdline(int pid, char comm[])
1631{
1632 preempt_disable();
1633 arch_spin_lock(&trace_cmdline_lock);
1634
1635 __trace_find_cmdline(pid, comm);
1636
1637 arch_spin_unlock(&trace_cmdline_lock);
1638 preempt_enable();
1639}
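/*
 * Illustrative sketch (hypothetical helper): output code resolves a
 * recorded pid back to a comm with trace_find_cmdline() above.
 */
static inline void example_print_comm(struct trace_seq *s, int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	trace_seq_printf(s, "%s-%d", comm, pid);
}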
1640
1641void tracing_record_cmdline(struct task_struct *tsk)
1642{
1643 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
1644 return;
1645
1646 if (!__this_cpu_read(trace_cmdline_save))
1647 return;
1648
1649 if (trace_save_cmdline(tsk))
1650 __this_cpu_write(trace_cmdline_save, false);
1651}
1652
1653void
1654tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1655 int pc)
1656{
1657 struct task_struct *tsk = current;
1658
1659 entry->preempt_count = pc & 0xff;
1660 entry->pid = (tsk) ? tsk->pid : 0;
1661 entry->flags =
1662#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1663 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1664#else
1665 TRACE_FLAG_IRQS_NOSUPPORT |
1666#endif
1667 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
1668 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1669 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1670 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1671 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
1672}
1673EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1674
1675struct ring_buffer_event *
1676trace_buffer_lock_reserve(struct ring_buffer *buffer,
1677 int type,
1678 unsigned long len,
1679 unsigned long flags, int pc)
1680{
1681 struct ring_buffer_event *event;
1682
1683 event = ring_buffer_lock_reserve(buffer, len);
1684 if (event != NULL) {
1685 struct trace_entry *ent = ring_buffer_event_data(event);
1686
1687 tracing_generic_entry_update(ent, flags, pc);
1688 ent->type = type;
1689 }
1690
1691 return event;
1692}
1693
1694void
1695__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1696{
1697 __this_cpu_write(trace_cmdline_save, true);
1698 ring_buffer_unlock_commit(buffer, event);
1699}
1700
1701void trace_buffer_unlock_commit(struct trace_array *tr,
1702 struct ring_buffer *buffer,
1703 struct ring_buffer_event *event,
1704 unsigned long flags, int pc)
1705{
1706 __buffer_unlock_commit(buffer, event);
1707
1708 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
1709 ftrace_trace_userstack(buffer, flags, pc);
1710}
1711EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1712
1713static struct ring_buffer *temp_buffer;
1714
1715struct ring_buffer_event *
1716trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1717 struct trace_event_file *trace_file,
1718 int type, unsigned long len,
1719 unsigned long flags, int pc)
1720{
1721 struct ring_buffer_event *entry;
1722
1723 *current_rb = trace_file->tr->trace_buffer.buffer;
1724 entry = trace_buffer_lock_reserve(*current_rb,
1725 type, len, flags, pc);
1726 /*
1727 * If tracing is off, but we have triggers enabled,
1728 * we still need to look at the event data. Use the temp_buffer
1729 * to store the trace event for the trigger to use. It's recursion
1730 * safe and will not be recorded anywhere.
1731 */
1732 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
1733 *current_rb = temp_buffer;
1734 entry = trace_buffer_lock_reserve(*current_rb,
1735 type, len, flags, pc);
1736 }
1737 return entry;
1738}
1739EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1740
1741struct ring_buffer_event *
1742trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1743 int type, unsigned long len,
1744 unsigned long flags, int pc)
1745{
1746 *current_rb = global_trace.trace_buffer.buffer;
1747 return trace_buffer_lock_reserve(*current_rb,
1748 type, len, flags, pc);
1749}
1750EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1751
1752void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1753 struct ring_buffer *buffer,
1754 struct ring_buffer_event *event,
1755 unsigned long flags, int pc,
1756 struct pt_regs *regs)
1757{
1758 __buffer_unlock_commit(buffer, event);
1759
1760 ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
1761 ftrace_trace_userstack(buffer, flags, pc);
1762}
1763EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1764
1765void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1766 struct ring_buffer_event *event)
1767{
1768 ring_buffer_discard_commit(buffer, event);
1769}
1770EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1771
1772void
1773trace_function(struct trace_array *tr,
1774 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1775 int pc)
1776{
1777 struct trace_event_call *call = &event_function;
1778 struct ring_buffer *buffer = tr->trace_buffer.buffer;
1779 struct ring_buffer_event *event;
1780 struct ftrace_entry *entry;
1781
1782 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1783 flags, pc);
1784 if (!event)
1785 return;
1786 entry = ring_buffer_event_data(event);
1787 entry->ip = ip;
1788 entry->parent_ip = parent_ip;
1789
1790 if (!call_filter_check_discard(call, entry, buffer, event))
1791 __buffer_unlock_commit(buffer, event);
1792}
1793
1794#ifdef CONFIG_STACKTRACE
1795
1796#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1797struct ftrace_stack {
1798 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1799};
1800
1801static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1802static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1803
1804static void __ftrace_trace_stack(struct ring_buffer *buffer,
1805 unsigned long flags,
1806 int skip, int pc, struct pt_regs *regs)
1807{
1808 struct trace_event_call *call = &event_kernel_stack;
1809 struct ring_buffer_event *event;
1810 struct stack_entry *entry;
1811 struct stack_trace trace;
1812 int use_stack;
1813 int size = FTRACE_STACK_ENTRIES;
1814
1815 trace.nr_entries = 0;
1816 trace.skip = skip;
1817
1818 /*
1819 * Since events can happen in NMIs there's no safe way to
1820 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1821 * or NMI comes in, it will just have to use the default
1822 * FTRACE_STACK_SIZE.
1823 */
1824 preempt_disable_notrace();
1825
1826 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
1827 /*
1828 * We don't need any atomic variables, just a barrier.
1829 * If an interrupt comes in, we don't care, because it would
1830 * have exited and put the counter back to what we want.
1831 * We just need a barrier to keep gcc from moving things
1832 * around.
1833 */
1834 barrier();
1835 if (use_stack == 1) {
1836 trace.entries = this_cpu_ptr(ftrace_stack.calls);
1837 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1838
1839 if (regs)
1840 save_stack_trace_regs(regs, &trace);
1841 else
1842 save_stack_trace(&trace);
1843
1844 if (trace.nr_entries > size)
1845 size = trace.nr_entries;
1846 } else
1847 /* From now on, use_stack is a boolean */
1848 use_stack = 0;
1849
1850 size *= sizeof(unsigned long);
1851
1852 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1853 sizeof(*entry) + size, flags, pc);
1854 if (!event)
1855 goto out;
1856 entry = ring_buffer_event_data(event);
1857
1858 memset(&entry->caller, 0, size);
1859
1860 if (use_stack)
1861 memcpy(&entry->caller, trace.entries,
1862 trace.nr_entries * sizeof(unsigned long));
1863 else {
1864 trace.max_entries = FTRACE_STACK_ENTRIES;
1865 trace.entries = entry->caller;
1866 if (regs)
1867 save_stack_trace_regs(regs, &trace);
1868 else
1869 save_stack_trace(&trace);
1870 }
1871
1872 entry->size = trace.nr_entries;
1873
1874 if (!call_filter_check_discard(call, entry, buffer, event))
1875 __buffer_unlock_commit(buffer, event);
1876
1877 out:
1878 /* Again, don't let gcc optimize things here */
1879 barrier();
1880 __this_cpu_dec(ftrace_stack_reserve);
1881 preempt_enable_notrace();
1882
1883}
1884
1885static inline void ftrace_trace_stack(struct trace_array *tr,
1886 struct ring_buffer *buffer,
1887 unsigned long flags,
1888 int skip, int pc, struct pt_regs *regs)
1889{
1890 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
1891 return;
1892
1893 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1894}
1895
1896void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1897 int pc)
1898{
1899 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
1900}
1901
1902/**
1903 * trace_dump_stack - record a stack back trace in the trace buffer
1904 * @skip: Number of functions to skip (helper handlers)
1905 */
1906void trace_dump_stack(int skip)
1907{
1908 unsigned long flags;
1909
1910 if (tracing_disabled || tracing_selftest_running)
1911 return;
1912
1913 local_save_flags(flags);
1914
1915 /*
1916 * Skip 3 more; that seems to get us to the caller of
1917 * this function.
1918 */
1919 skip += 3;
1920 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1921 flags, skip, preempt_count(), NULL);
1922}
1923
1924static DEFINE_PER_CPU(int, user_stack_count);
1925
1926void
1927ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1928{
1929 struct trace_event_call *call = &event_user_stack;
1930 struct ring_buffer_event *event;
1931 struct userstack_entry *entry;
1932 struct stack_trace trace;
1933
1934 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
1935 return;
1936
1937 /*
1938 * NMIs cannot handle page faults, even with fixups.
1939 * Saving the user stack can (and often does) fault.
1940 */
1941 if (unlikely(in_nmi()))
1942 return;
1943
1944 /*
1945 * prevent recursion, since the user stack tracing may
1946 * trigger other kernel events.
1947 */
1948 preempt_disable();
1949 if (__this_cpu_read(user_stack_count))
1950 goto out;
1951
1952 __this_cpu_inc(user_stack_count);
1953
1954 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1955 sizeof(*entry), flags, pc);
1956 if (!event)
1957 goto out_drop_count;
1958 entry = ring_buffer_event_data(event);
1959
1960 entry->tgid = current->tgid;
1961 memset(&entry->caller, 0, sizeof(entry->caller));
1962
1963 trace.nr_entries = 0;
1964 trace.max_entries = FTRACE_STACK_ENTRIES;
1965 trace.skip = 0;
1966 trace.entries = entry->caller;
1967
1968 save_stack_trace_user(&trace);
1969 if (!call_filter_check_discard(call, entry, buffer, event))
1970 __buffer_unlock_commit(buffer, event);
1971
1972 out_drop_count:
1973 __this_cpu_dec(user_stack_count);
1974 out:
1975 preempt_enable();
1976}
1977
1978#ifdef UNUSED
1979static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1980{
1981 ftrace_trace_userstack(tr, flags, preempt_count());
1982}
1983#endif /* UNUSED */
1984
1985#endif /* CONFIG_STACKTRACE */
1986
1987/* created for use with alloc_percpu */
1988struct trace_buffer_struct {
1989 char buffer[TRACE_BUF_SIZE];
1990};
1991
1992static struct trace_buffer_struct *trace_percpu_buffer;
1993static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1994static struct trace_buffer_struct *trace_percpu_irq_buffer;
1995static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1996
1997/*
1998 * The buffer used is dependent on the context. There is a per cpu
1999 * buffer for normal context, softirq context, hard irq context and
2000 * for NMI context. This allows for lockless recording.
2001 *
2002 * Note, if the buffers failed to be allocated, then this returns NULL
2003 */
2004static char *get_trace_buf(void)
2005{
2006 struct trace_buffer_struct *percpu_buffer;
2007
2008 /*
2009 * If we have allocated per cpu buffers, then we do not
2010 * need to do any locking.
2011 */
2012 if (in_nmi())
2013 percpu_buffer = trace_percpu_nmi_buffer;
2014 else if (in_irq())
2015 percpu_buffer = trace_percpu_irq_buffer;
2016 else if (in_softirq())
2017 percpu_buffer = trace_percpu_sirq_buffer;
2018 else
2019 percpu_buffer = trace_percpu_buffer;
2020
2021 if (!percpu_buffer)
2022 return NULL;
2023
2024 return this_cpu_ptr(&percpu_buffer->buffer[0]);
2025}
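/*
 * Editor's note: a worked example of why the per-context split above makes
 * the buffers safe without a lock. If trace_vbprintk() is formatting into
 * the normal-context buffer and an interrupt arrives whose handler also
 * calls trace_printk(), the handler formats into trace_percpu_irq_buffer
 * instead, so neither write clobbers the other. The callers below keep
 * preemption disabled while the buffer is in use, which keeps each buffer
 * exclusive to its CPU for the duration.
 */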
2026
2027static int alloc_percpu_trace_buffer(void)
2028{
2029 struct trace_buffer_struct *buffers;
2030 struct trace_buffer_struct *sirq_buffers;
2031 struct trace_buffer_struct *irq_buffers;
2032 struct trace_buffer_struct *nmi_buffers;
2033
2034 buffers = alloc_percpu(struct trace_buffer_struct);
2035 if (!buffers)
2036 goto err_warn;
2037
2038 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2039 if (!sirq_buffers)
2040 goto err_sirq;
2041
2042 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2043 if (!irq_buffers)
2044 goto err_irq;
2045
2046 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2047 if (!nmi_buffers)
2048 goto err_nmi;
2049
2050 trace_percpu_buffer = buffers;
2051 trace_percpu_sirq_buffer = sirq_buffers;
2052 trace_percpu_irq_buffer = irq_buffers;
2053 trace_percpu_nmi_buffer = nmi_buffers;
2054
2055 return 0;
2056
2057 err_nmi:
2058 free_percpu(irq_buffers);
2059 err_irq:
2060 free_percpu(sirq_buffers);
2061 err_sirq:
2062 free_percpu(buffers);
2063 err_warn:
2064 WARN(1, "Could not allocate percpu trace_printk buffer");
2065 return -ENOMEM;
2066}
2067
2068static int buffers_allocated;
2069
2070void trace_printk_init_buffers(void)
2071{
2072 if (buffers_allocated)
2073 return;
2074
2075 if (alloc_percpu_trace_buffer())
2076 return;
2077
2078 /* trace_printk() is for debug use only. Don't use it in production. */
2079
2080 pr_warn("\n");
2081 pr_warn("**********************************************************\n");
2082 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2083 pr_warn("** **\n");
2084 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2085 pr_warn("** **\n");
2086 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2087 pr_warn("** unsafe for production use. **\n");
2088 pr_warn("** **\n");
2089 pr_warn("** If you see this message and you are not debugging **\n");
2090 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2091 pr_warn("** **\n");
2092 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2093 pr_warn("**********************************************************\n");
2094
2095	/* Expand the buffers to their configured size */
2096 tracing_update_buffers();
2097
2098 buffers_allocated = 1;
2099
2100 /*
2101 * trace_printk_init_buffers() can be called by modules.
2102 * If that happens, then we need to start cmdline recording
2103 * directly here. If the global_trace.buffer is already
2104 * allocated here, then this was called by module code.
2105 */
2106 if (global_trace.trace_buffer.buffer)
2107 tracing_start_cmdline_record();
2108}
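/*
 * Editor's note: a minimal trace_printk() sketch (hypothetical module code,
 * not part of this file). The first use is what triggers the allocation and
 * the banner above:
 *
 *	trace_printk("queue depth now %d\n", depth);
 *
 * The output is read from the tracefs "trace" or "trace_pipe" files rather
 * than the printk log, which keeps the overhead low enough for hot paths
 * while debugging.
 */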
2109
2110void trace_printk_start_comm(void)
2111{
2112 /* Start tracing comms if trace printk is set */
2113 if (!buffers_allocated)
2114 return;
2115 tracing_start_cmdline_record();
2116}
2117
2118static void trace_printk_start_stop_comm(int enabled)
2119{
2120 if (!buffers_allocated)
2121 return;
2122
2123 if (enabled)
2124 tracing_start_cmdline_record();
2125 else
2126 tracing_stop_cmdline_record();
2127}
2128
2129/**
2130 * trace_vbprintk - write a binary printk message into the tracing buffer
2131 * @ip: the address of the caller
2132 * @fmt: the printk-style format string
2133 * @args: arguments for @fmt, stored in binary form
2134 */
2133int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2134{
2135 struct trace_event_call *call = &event_bprint;
2136 struct ring_buffer_event *event;
2137 struct ring_buffer *buffer;
2138 struct trace_array *tr = &global_trace;
2139 struct bprint_entry *entry;
2140 unsigned long flags;
2141 char *tbuffer;
2142 int len = 0, size, pc;
2143
2144 if (unlikely(tracing_selftest_running || tracing_disabled))
2145 return 0;
2146
2147 /* Don't pollute graph traces with trace_vprintk internals */
2148 pause_graph_tracing();
2149
2150 pc = preempt_count();
2151 preempt_disable_notrace();
2152
2153 tbuffer = get_trace_buf();
2154 if (!tbuffer) {
2155 len = 0;
2156 goto out;
2157 }
2158
2159 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2160
2161 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2162 goto out;
2163
2164 local_save_flags(flags);
2165 size = sizeof(*entry) + sizeof(u32) * len;
2166 buffer = tr->trace_buffer.buffer;
2167 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2168 flags, pc);
2169 if (!event)
2170 goto out;
2171 entry = ring_buffer_event_data(event);
2172 entry->ip = ip;
2173 entry->fmt = fmt;
2174
2175 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2176 if (!call_filter_check_discard(call, entry, buffer, event)) {
2177 __buffer_unlock_commit(buffer, event);
2178 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2179 }
2180
2181out:
2182 preempt_enable_notrace();
2183 unpause_graph_tracing();
2184
2185 return len;
2186}
2187EXPORT_SYMBOL_GPL(trace_vbprintk);
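/*
 * Editor's note: the bprint entry written above stores only the format
 * pointer plus the binary arguments packed by vbin_printf(), so for
 * trace_printk("x=%d y=%d\n", x, y) the ring buffer holds roughly:
 *
 *	struct bprint_entry { ip, fmt -> "x=%d y=%d\n" }
 *	followed by two u32 argument words
 *
 * The string is only rendered when the buffer is read, which is why this
 * path is cheaper than the full string copy done by trace_vprintk() below.
 */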
2188
2189static int
2190__trace_array_vprintk(struct ring_buffer *buffer,
2191 unsigned long ip, const char *fmt, va_list args)
2192{
2193 struct trace_event_call *call = &event_print;
2194 struct ring_buffer_event *event;
2195 int len = 0, size, pc;
2196 struct print_entry *entry;
2197 unsigned long flags;
2198 char *tbuffer;
2199
2200 if (tracing_disabled || tracing_selftest_running)
2201 return 0;
2202
2203 /* Don't pollute graph traces with trace_vprintk internals */
2204 pause_graph_tracing();
2205
2206 pc = preempt_count();
2207 preempt_disable_notrace();
2208
2210 tbuffer = get_trace_buf();
2211 if (!tbuffer) {
2212 len = 0;
2213 goto out;
2214 }
2215
2216 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2217
2218 local_save_flags(flags);
2219 size = sizeof(*entry) + len + 1;
2220 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2221 flags, pc);
2222 if (!event)
2223 goto out;
2224 entry = ring_buffer_event_data(event);
2225 entry->ip = ip;
2226
2227 memcpy(&entry->buf, tbuffer, len + 1);
2228 if (!call_filter_check_discard(call, entry, buffer, event)) {
2229 __buffer_unlock_commit(buffer, event);
2230 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
2231 }
2232 out:
2233 preempt_enable_notrace();
2234 unpause_graph_tracing();
2235
2236 return len;
2237}
2238
2239int trace_array_vprintk(struct trace_array *tr,
2240 unsigned long ip, const char *fmt, va_list args)
2241{
2242 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2243}
2244
2245int trace_array_printk(struct trace_array *tr,
2246 unsigned long ip, const char *fmt, ...)
2247{
2248 int ret;
2249 va_list ap;
2250
2251 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2252 return 0;
2253
2254 va_start(ap, fmt);
2255 ret = trace_array_vprintk(tr, ip, fmt, ap);
2256 va_end(ap);
2257 return ret;
2258}
2259
2260int trace_array_printk_buf(struct ring_buffer *buffer,
2261 unsigned long ip, const char *fmt, ...)
2262{
2263 int ret;
2264 va_list ap;
2265
2266 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
2267 return 0;
2268
2269 va_start(ap, fmt);
2270 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2271 va_end(ap);
2272 return ret;
2273}
2274
2275int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2276{
2277 return trace_array_vprintk(&global_trace, ip, fmt, args);
2278}
2279EXPORT_SYMBOL_GPL(trace_vprintk);
2280
2281static void trace_iterator_increment(struct trace_iterator *iter)
2282{
2283 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2284
2285 iter->idx++;
2286 if (buf_iter)
2287 ring_buffer_read(buf_iter, NULL);
2288}
2289
2290static struct trace_entry *
2291peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2292 unsigned long *lost_events)
2293{
2294 struct ring_buffer_event *event;
2295 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
2296
2297 if (buf_iter)
2298 event = ring_buffer_iter_peek(buf_iter, ts);
2299 else
2300 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
2301 lost_events);
2302
2303 if (event) {
2304 iter->ent_size = ring_buffer_event_length(event);
2305 return ring_buffer_event_data(event);
2306 }
2307 iter->ent_size = 0;
2308 return NULL;
2309}
2310
2311static struct trace_entry *
2312__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2313 unsigned long *missing_events, u64 *ent_ts)
2314{
2315 struct ring_buffer *buffer = iter->trace_buffer->buffer;
2316 struct trace_entry *ent, *next = NULL;
2317 unsigned long lost_events = 0, next_lost = 0;
2318 int cpu_file = iter->cpu_file;
2319 u64 next_ts = 0, ts;
2320 int next_cpu = -1;
2321 int next_size = 0;
2322 int cpu;
2323
2324 /*
2325	 * If we are in a per_cpu trace file, don't bother iterating
2326	 * over all CPUs; peek at that CPU directly.
2327 */
2328 if (cpu_file > RING_BUFFER_ALL_CPUS) {
2329 if (ring_buffer_empty_cpu(buffer, cpu_file))
2330 return NULL;
2331 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
2332 if (ent_cpu)
2333 *ent_cpu = cpu_file;
2334
2335 return ent;
2336 }
2337
2338 for_each_tracing_cpu(cpu) {
2339
2340 if (ring_buffer_empty_cpu(buffer, cpu))
2341 continue;
2342
2343 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
2344
2345 /*
2346 * Pick the entry with the smallest timestamp:
2347 */
2348 if (ent && (!next || ts < next_ts)) {
2349 next = ent;
2350 next_cpu = cpu;
2351 next_ts = ts;
2352 next_lost = lost_events;
2353 next_size = iter->ent_size;
2354 }
2355 }
2356
2357 iter->ent_size = next_size;
2358
2359 if (ent_cpu)
2360 *ent_cpu = next_cpu;
2361
2362 if (ent_ts)
2363 *ent_ts = next_ts;
2364
2365 if (missing_events)
2366 *missing_events = next_lost;
2367
2368 return next;
2369}
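/*
 * Editor's note: a small worked example of the merge above. With three CPUs
 * whose next pending entries carry timestamps
 *
 *	CPU0: 1005   CPU1: 990   CPU2: (empty)
 *
 * the loop skips CPU2, picks CPU1's entry (smallest ts), and reports
 * next_cpu = 1 and next_ts = 990. Repeated calls therefore interleave the
 * per-CPU buffers into one globally time-ordered stream.
 */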
2370
2371/* Find the next real entry, without updating the iterator itself */
2372struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2373 int *ent_cpu, u64 *ent_ts)
2374{
2375 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
2376}
2377
2378/* Find the next real entry, and increment the iterator to the next entry */
2379void *trace_find_next_entry_inc(struct trace_iterator *iter)
2380{
2381 iter->ent = __find_next_entry(iter, &iter->cpu,
2382 &iter->lost_events, &iter->ts);
2383
2384 if (iter->ent)
2385 trace_iterator_increment(iter);
2386
2387 return iter->ent ? iter : NULL;
2388}
2389
2390static void trace_consume(struct trace_iterator *iter)
2391{
2392 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
2393 &iter->lost_events);
2394}
2395
2396static void *s_next(struct seq_file *m, void *v, loff_t *pos)
2397{
2398 struct trace_iterator *iter = m->private;
2399 int i = (int)*pos;
2400 void *ent;
2401
2402 WARN_ON_ONCE(iter->leftover);
2403
2404 (*pos)++;
2405
2406 /* can't go backwards */
2407 if (iter->idx > i)
2408 return NULL;
2409
2410 if (iter->idx < 0)
2411 ent = trace_find_next_entry_inc(iter);
2412 else
2413 ent = iter;
2414
2415 while (ent && iter->idx < i)
2416 ent = trace_find_next_entry_inc(iter);
2417
2418 iter->pos = *pos;
2419
2420 return ent;
2421}
2422
2423void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2424{
2425 struct ring_buffer_event *event;
2426 struct ring_buffer_iter *buf_iter;
2427 unsigned long entries = 0;
2428 u64 ts;
2429
2430 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2431
2432 buf_iter = trace_buffer_iter(iter, cpu);
2433 if (!buf_iter)
2434 return;
2435
2436 ring_buffer_iter_reset(buf_iter);
2437
2438 /*
2439	 * With the max latency tracers, a reset may never have taken
2440	 * place on a cpu. This is evident when an entry's timestamp is
2441	 * before the start of the buffer.
2442 */
2443 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
2444 if (ts >= iter->trace_buffer->time_start)
2445 break;
2446 entries++;
2447 ring_buffer_read(buf_iter, NULL);
2448 }
2449
2450 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2451}
2452
2453/*
2454 * The current tracer is copied to avoid holding a global lock
2455 * the whole time.
2456 */
2457static void *s_start(struct seq_file *m, loff_t *pos)
2458{
2459 struct trace_iterator *iter = m->private;
2460 struct trace_array *tr = iter->tr;
2461 int cpu_file = iter->cpu_file;
2462 void *p = NULL;
2463 loff_t l = 0;
2464 int cpu;
2465
2466 /*
2467	 * Copy the tracer to avoid using a global lock all around.
2468	 * iter->trace is a copy of current_trace; the name pointer can
2469	 * be compared instead of using strcmp(), as iter->trace->name
2470	 * will point to the same string as current_trace->name.
2471 */
2472 mutex_lock(&trace_types_lock);
2473 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2474 *iter->trace = *tr->current_trace;
2475 mutex_unlock(&trace_types_lock);
2476
2477#ifdef CONFIG_TRACER_MAX_TRACE
2478 if (iter->snapshot && iter->trace->use_max_tr)
2479 return ERR_PTR(-EBUSY);
2480#endif
2481
2482 if (!iter->snapshot)
2483 atomic_inc(&trace_record_cmdline_disabled);
2484
2485 if (*pos != iter->pos) {
2486 iter->ent = NULL;
2487 iter->cpu = 0;
2488 iter->idx = -1;
2489
2490 if (cpu_file == RING_BUFFER_ALL_CPUS) {
2491 for_each_tracing_cpu(cpu)
2492 tracing_iter_reset(iter, cpu);
2493 } else
2494 tracing_iter_reset(iter, cpu_file);
2495
2496 iter->leftover = 0;
2497 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2498 ;
2499
2500 } else {
2501 /*
2502 * If we overflowed the seq_file before, then we want
2503 * to just reuse the trace_seq buffer again.
2504 */
2505 if (iter->leftover)
2506 p = iter;
2507 else {
2508 l = *pos - 1;
2509 p = s_next(m, p, &l);
2510 }
2511 }
2512
2513 trace_event_read_lock();
2514 trace_access_lock(cpu_file);
2515 return p;
2516}
2517
2518static void s_stop(struct seq_file *m, void *p)
2519{
2520 struct trace_iterator *iter = m->private;
2521
2522#ifdef CONFIG_TRACER_MAX_TRACE
2523 if (iter->snapshot && iter->trace->use_max_tr)
2524 return;
2525#endif
2526
2527 if (!iter->snapshot)
2528 atomic_dec(&trace_record_cmdline_disabled);
2529
2530 trace_access_unlock(iter->cpu_file);
2531 trace_event_read_unlock();
2532}
2533
2534static void
2535get_total_entries(struct trace_buffer *buf,
2536 unsigned long *total, unsigned long *entries)
2537{
2538 unsigned long count;
2539 int cpu;
2540
2541 *total = 0;
2542 *entries = 0;
2543
2544 for_each_tracing_cpu(cpu) {
2545 count = ring_buffer_entries_cpu(buf->buffer, cpu);
2546 /*
2547		 * If this buffer has skipped entries, then we hold all
2548		 * entries for the trace and need to ignore the ones
2549		 * before the buffer's start timestamp.
2550 */
2551 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2552 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
2553 /* total is the same as the entries */
2554 *total += count;
2555 } else
2556 *total += count +
2557 ring_buffer_overrun_cpu(buf->buffer, cpu);
2558 *entries += count;
2559 }
2560}
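/*
 * Editor's note: a quick numeric example of the totals above. If a CPU
 * buffer currently holds 800 events and 200 older events were overwritten,
 * then *entries accumulates 800 and *total accumulates 1000; these are the
 * two numbers printed later as "entries-in-buffer/entries-written" in the
 * trace header.
 */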
2561
2562static void print_lat_help_header(struct seq_file *m)
2563{
2564 seq_puts(m, "# _------=> CPU# \n"
2565 "# / _-----=> irqs-off \n"
2566 "# | / _----=> need-resched \n"
2567 "# || / _---=> hardirq/softirq \n"
2568 "# ||| / _--=> preempt-depth \n"
2569 "# |||| / delay \n"
2570 "# cmd pid ||||| time | caller \n"
2571 "# \\ / ||||| \\ | / \n");
2572}
2573
2574static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
2575{
2576 unsigned long total;
2577 unsigned long entries;
2578
2579 get_total_entries(buf, &total, &entries);
2580 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2581 entries, total, num_online_cpus());
2582 seq_puts(m, "#\n");
2583}
2584
2585static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
2586{
2587 print_event_info(buf, m);
2588 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2589 "# | | | | |\n");
2590}
2591
2592static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
2593{
2594 print_event_info(buf, m);
2595 seq_puts(m, "# _-----=> irqs-off\n"
2596 "# / _----=> need-resched\n"
2597 "# | / _---=> hardirq/softirq\n"
2598 "# || / _--=> preempt-depth\n"
2599 "# ||| / delay\n"
2600 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2601 "# | | | |||| | |\n");
2602}
2603
2604void
2605print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2606{
2607 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
2608 struct trace_buffer *buf = iter->trace_buffer;
2609 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2610 struct tracer *type = iter->trace;
2611 unsigned long entries;
2612 unsigned long total;
2613	const char *name = type->name;
2614
2617 get_total_entries(buf, &total, &entries);
2618
2619 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2620 name, UTS_RELEASE);
2621 seq_puts(m, "# -----------------------------------"
2622 "---------------------------------\n");
2623 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2624 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2625 nsecs_to_usecs(data->saved_latency),
2626 entries,
2627 total,
2628 buf->cpu,
2629#if defined(CONFIG_PREEMPT_NONE)
2630 "server",
2631#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2632 "desktop",
2633#elif defined(CONFIG_PREEMPT)
2634 "preempt",
2635#else
2636 "unknown",
2637#endif
2638 /* These are reserved for later use */
2639 0, 0, 0, 0);
2640#ifdef CONFIG_SMP
2641 seq_printf(m, " #P:%d)\n", num_online_cpus());
2642#else
2643 seq_puts(m, ")\n");
2644#endif
2645 seq_puts(m, "# -----------------\n");
2646 seq_printf(m, "# | task: %.16s-%d "
2647 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2648 data->comm, data->pid,
2649 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2650 data->policy, data->rt_priority);
2651 seq_puts(m, "# -----------------\n");
2652
2653 if (data->critical_start) {
2654 seq_puts(m, "# => started at: ");
2655 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2656 trace_print_seq(m, &iter->seq);
2657 seq_puts(m, "\n# => ended at: ");
2658 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2659 trace_print_seq(m, &iter->seq);
2660 seq_puts(m, "\n#\n");
2661 }
2662
2663 seq_puts(m, "#\n");
2664}
2665
2666static void test_cpu_buff_start(struct trace_iterator *iter)
2667{
2668 struct trace_seq *s = &iter->seq;
2669 struct trace_array *tr = iter->tr;
2670
2671 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
2672 return;
2673
2674 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2675 return;
2676
2677 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
2678 return;
2679
2680 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2681 return;
2682
2683 if (iter->started)
2684 cpumask_set_cpu(iter->cpu, iter->started);
2685
2686	/* Don't print the "buffer started" annotation for the first entry of the trace */
2687 if (iter->idx > 1)
2688 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2689 iter->cpu);
2690}
2691
2692static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2693{
2694 struct trace_array *tr = iter->tr;
2695 struct trace_seq *s = &iter->seq;
2696 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
2697 struct trace_entry *entry;
2698 struct trace_event *event;
2699
2700 entry = iter->ent;
2701
2702 test_cpu_buff_start(iter);
2703
2704 event = ftrace_find_event(entry->type);
2705
2706 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2707 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2708 trace_print_lat_context(iter);
2709 else
2710 trace_print_context(iter);
2711 }
2712
2713 if (trace_seq_has_overflowed(s))
2714 return TRACE_TYPE_PARTIAL_LINE;
2715
2716 if (event)
2717 return event->funcs->trace(iter, sym_flags, event);
2718
2719 trace_seq_printf(s, "Unknown type %d\n", entry->type);
2720
2721 return trace_handle_return(s);
2722}
2723
2724static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2725{
2726 struct trace_array *tr = iter->tr;
2727 struct trace_seq *s = &iter->seq;
2728 struct trace_entry *entry;
2729 struct trace_event *event;
2730
2731 entry = iter->ent;
2732
2733 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
2734 trace_seq_printf(s, "%d %d %llu ",
2735 entry->pid, iter->cpu, iter->ts);
2736
2737 if (trace_seq_has_overflowed(s))
2738 return TRACE_TYPE_PARTIAL_LINE;
2739
2740 event = ftrace_find_event(entry->type);
2741 if (event)
2742 return event->funcs->raw(iter, 0, event);
2743
2744 trace_seq_printf(s, "%d ?\n", entry->type);
2745
2746 return trace_handle_return(s);
2747}
2748
2749static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2750{
2751 struct trace_array *tr = iter->tr;
2752 struct trace_seq *s = &iter->seq;
2753 unsigned char newline = '\n';
2754 struct trace_entry *entry;
2755 struct trace_event *event;
2756
2757 entry = iter->ent;
2758
2759 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2760 SEQ_PUT_HEX_FIELD(s, entry->pid);
2761 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2762 SEQ_PUT_HEX_FIELD(s, iter->ts);
2763 if (trace_seq_has_overflowed(s))
2764 return TRACE_TYPE_PARTIAL_LINE;
2765 }
2766
2767 event = ftrace_find_event(entry->type);
2768 if (event) {
2769 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2770 if (ret != TRACE_TYPE_HANDLED)
2771 return ret;
2772 }
2773
2774 SEQ_PUT_FIELD(s, newline);
2775
2776 return trace_handle_return(s);
2777}
2778
2779static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2780{
2781 struct trace_array *tr = iter->tr;
2782 struct trace_seq *s = &iter->seq;
2783 struct trace_entry *entry;
2784 struct trace_event *event;
2785
2786 entry = iter->ent;
2787
2788 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
2789 SEQ_PUT_FIELD(s, entry->pid);
2790 SEQ_PUT_FIELD(s, iter->cpu);
2791 SEQ_PUT_FIELD(s, iter->ts);
2792 if (trace_seq_has_overflowed(s))
2793 return TRACE_TYPE_PARTIAL_LINE;
2794 }
2795
2796 event = ftrace_find_event(entry->type);
2797 return event ? event->funcs->binary(iter, 0, event) :
2798 TRACE_TYPE_HANDLED;
2799}
2800
2801int trace_empty(struct trace_iterator *iter)
2802{
2803 struct ring_buffer_iter *buf_iter;
2804 int cpu;
2805
2806 /* If we are looking at one CPU buffer, only check that one */
2807 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
2808 cpu = iter->cpu_file;
2809 buf_iter = trace_buffer_iter(iter, cpu);
2810 if (buf_iter) {
2811 if (!ring_buffer_iter_empty(buf_iter))
2812 return 0;
2813 } else {
2814 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2815 return 0;
2816 }
2817 return 1;
2818 }
2819
2820 for_each_tracing_cpu(cpu) {
2821 buf_iter = trace_buffer_iter(iter, cpu);
2822 if (buf_iter) {
2823 if (!ring_buffer_iter_empty(buf_iter))
2824 return 0;
2825 } else {
2826 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
2827 return 0;
2828 }
2829 }
2830
2831 return 1;
2832}
2833
2834/* Called with trace_event_read_lock() held. */
2835enum print_line_t print_trace_line(struct trace_iterator *iter)
2836{
2837 struct trace_array *tr = iter->tr;
2838 unsigned long trace_flags = tr->trace_flags;
2839 enum print_line_t ret;
2840
2841 if (iter->lost_events) {
2842 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2843 iter->cpu, iter->lost_events);
2844 if (trace_seq_has_overflowed(&iter->seq))
2845 return TRACE_TYPE_PARTIAL_LINE;
2846 }
2847
2848 if (iter->trace && iter->trace->print_line) {
2849 ret = iter->trace->print_line(iter);
2850 if (ret != TRACE_TYPE_UNHANDLED)
2851 return ret;
2852 }
2853
2854 if (iter->ent->type == TRACE_BPUTS &&
2855 trace_flags & TRACE_ITER_PRINTK &&
2856 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2857 return trace_print_bputs_msg_only(iter);
2858
2859 if (iter->ent->type == TRACE_BPRINT &&
2860 trace_flags & TRACE_ITER_PRINTK &&
2861 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2862 return trace_print_bprintk_msg_only(iter);
2863
2864 if (iter->ent->type == TRACE_PRINT &&
2865 trace_flags & TRACE_ITER_PRINTK &&
2866 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2867 return trace_print_printk_msg_only(iter);
2868
2869 if (trace_flags & TRACE_ITER_BIN)
2870 return print_bin_fmt(iter);
2871
2872 if (trace_flags & TRACE_ITER_HEX)
2873 return print_hex_fmt(iter);
2874
2875 if (trace_flags & TRACE_ITER_RAW)
2876 return print_raw_fmt(iter);
2877
2878 return print_trace_fmt(iter);
2879}
2880
2881void trace_latency_header(struct seq_file *m)
2882{
2883 struct trace_iterator *iter = m->private;
2884 struct trace_array *tr = iter->tr;
2885
2886 /* print nothing if the buffers are empty */
2887 if (trace_empty(iter))
2888 return;
2889
2890 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2891 print_trace_header(m, iter);
2892
2893 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
2894 print_lat_help_header(m);
2895}
2896
2897void trace_default_header(struct seq_file *m)
2898{
2899 struct trace_iterator *iter = m->private;
2900 struct trace_array *tr = iter->tr;
2901 unsigned long trace_flags = tr->trace_flags;
2902
2903 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2904 return;
2905
2906 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2907 /* print nothing if the buffers are empty */
2908 if (trace_empty(iter))
2909 return;
2910 print_trace_header(m, iter);
2911 if (!(trace_flags & TRACE_ITER_VERBOSE))
2912 print_lat_help_header(m);
2913 } else {
2914 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2915 if (trace_flags & TRACE_ITER_IRQ_INFO)
2916 print_func_help_header_irq(iter->trace_buffer, m);
2917 else
2918 print_func_help_header(iter->trace_buffer, m);
2919 }
2920 }
2921}
2922
2923static void test_ftrace_alive(struct seq_file *m)
2924{
2925 if (!ftrace_is_dead())
2926 return;
2927 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2928 "# MAY BE MISSING FUNCTION EVENTS\n");
2929}
2930
2931#ifdef CONFIG_TRACER_MAX_TRACE
2932static void show_snapshot_main_help(struct seq_file *m)
2933{
2934 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2935 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2936 "# Takes a snapshot of the main buffer.\n"
2937 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2938 "# (Doesn't have to be '2'; works with any number that\n"
2939 "# is not a '0' or '1')\n");
2940}
2941
2942static void show_snapshot_percpu_help(struct seq_file *m)
2943{
2944 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2945#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2946 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2947 "# Takes a snapshot of the main buffer for this cpu.\n");
2948#else
2949 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2950 "# Must use main snapshot file to allocate.\n");
2951#endif
2952 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2953 "# (Doesn't have to be '2'; works with any number that\n"
2954 "# is not a '0' or '1')\n");
2955}
2956
2957static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2958{
2959 if (iter->tr->allocated_snapshot)
2960 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
2961 else
2962 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
2963
2964 seq_puts(m, "# Snapshot commands:\n");
2965 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2966 show_snapshot_main_help(m);
2967 else
2968 show_snapshot_percpu_help(m);
2969}
2970#else
2971/* Should never be called */
2972static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2973#endif
2974
2975static int s_show(struct seq_file *m, void *v)
2976{
2977 struct trace_iterator *iter = v;
2978 int ret;
2979
2980 if (iter->ent == NULL) {
2981 if (iter->tr) {
2982 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2983 seq_puts(m, "#\n");
2984 test_ftrace_alive(m);
2985 }
2986 if (iter->snapshot && trace_empty(iter))
2987 print_snapshot_help(m, iter);
2988 else if (iter->trace && iter->trace->print_header)
2989 iter->trace->print_header(m);
2990 else
2991 trace_default_header(m);
2992
2993 } else if (iter->leftover) {
2994 /*
2995 * If we filled the seq_file buffer earlier, we
2996 * want to just show it now.
2997 */
2998 ret = trace_print_seq(m, &iter->seq);
2999
3000 /* ret should this time be zero, but you never know */
3001 iter->leftover = ret;
3002
3003 } else {
3004 print_trace_line(iter);
3005 ret = trace_print_seq(m, &iter->seq);
3006 /*
3007 * If we overflow the seq_file buffer, then it will
3008 * ask us for this data again at start up.
3009 * Use that instead.
3010 * ret is 0 if seq_file write succeeded.
3011 * -1 otherwise.
3012 */
3013 iter->leftover = ret;
3014 }
3015
3016 return 0;
3017}
3018
3019/*
3020 * Should be used after trace_array_get(); trace_types_lock
3021 * ensures that i_cdev was already initialized.
3022 */
3023static inline int tracing_get_cpu(struct inode *inode)
3024{
3025 if (inode->i_cdev) /* See trace_create_cpu_file() */
3026 return (long)inode->i_cdev - 1;
3027 return RING_BUFFER_ALL_CPUS;
3028}
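/*
 * Editor's note: the "- 1" above undoes the "+ 1" used when the per-CPU
 * files are created (a hypothetical file for CPU 2 stores 3 in i_cdev), so
 * a NULL i_cdev unambiguously means the top-level file and maps to
 * RING_BUFFER_ALL_CPUS.
 */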
3029
3030static const struct seq_operations tracer_seq_ops = {
3031 .start = s_start,
3032 .next = s_next,
3033 .stop = s_stop,
3034 .show = s_show,
3035};
3036
3037static struct trace_iterator *
3038__tracing_open(struct inode *inode, struct file *file, bool snapshot)
3039{
3040 struct trace_array *tr = inode->i_private;
3041 struct trace_iterator *iter;
3042 int cpu;
3043
3044 if (tracing_disabled)
3045 return ERR_PTR(-ENODEV);
3046
3047 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3048 if (!iter)
3049 return ERR_PTR(-ENOMEM);
3050
3051 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3052 GFP_KERNEL);
3053 if (!iter->buffer_iter)
3054 goto release;
3055
3056 /*
3057	 * We make a copy of the current tracer to avoid concurrent
3058	 * changes to it while we are reading.
3059 */
3060 mutex_lock(&trace_types_lock);
3061 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3062 if (!iter->trace)
3063 goto fail;
3064
3065 *iter->trace = *tr->current_trace;
3066
3067 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3068 goto fail;
3069
3070 iter->tr = tr;
3071
3072#ifdef CONFIG_TRACER_MAX_TRACE
3073 /* Currently only the top directory has a snapshot */
3074 if (tr->current_trace->print_max || snapshot)
3075 iter->trace_buffer = &tr->max_buffer;
3076 else
3077#endif
3078 iter->trace_buffer = &tr->trace_buffer;
3079 iter->snapshot = snapshot;
3080 iter->pos = -1;
3081 iter->cpu_file = tracing_get_cpu(inode);
3082 mutex_init(&iter->mutex);
3083
3084 /* Notify the tracer early; before we stop tracing. */
3085 if (iter->trace && iter->trace->open)
3086 iter->trace->open(iter);
3087
3088 /* Annotate start of buffers if we had overruns */
3089 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3090 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3091
3092 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3093 if (trace_clocks[tr->clock_id].in_ns)
3094 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3095
3096 /* stop the trace while dumping if we are not opening "snapshot" */
3097 if (!iter->snapshot)
3098 tracing_stop_tr(tr);
3099
3100 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3101 for_each_tracing_cpu(cpu) {
3102 iter->buffer_iter[cpu] =
3103 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3104 }
3105 ring_buffer_read_prepare_sync();
3106 for_each_tracing_cpu(cpu) {
3107 ring_buffer_read_start(iter->buffer_iter[cpu]);
3108 tracing_iter_reset(iter, cpu);
3109 }
3110 } else {
3111 cpu = iter->cpu_file;
3112 iter->buffer_iter[cpu] =
3113 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3114 ring_buffer_read_prepare_sync();
3115 ring_buffer_read_start(iter->buffer_iter[cpu]);
3116 tracing_iter_reset(iter, cpu);
3117 }
3118
3119 mutex_unlock(&trace_types_lock);
3120
3121 return iter;
3122
3123 fail:
3124 mutex_unlock(&trace_types_lock);
3125 kfree(iter->trace);
3126 kfree(iter->buffer_iter);
3127release:
3128 seq_release_private(inode, file);
3129 return ERR_PTR(-ENOMEM);
3130}
3131
3132int tracing_open_generic(struct inode *inode, struct file *filp)
3133{
3134 if (tracing_disabled)
3135 return -ENODEV;
3136
3137 filp->private_data = inode->i_private;
3138 return 0;
3139}
3140
3141bool tracing_is_disabled(void)
3142{
3143	return tracing_disabled ? true : false;
3144}
3145
3146/*
3147 * Open and update trace_array ref count.
3148 * Must have the current trace_array passed to it.
3149 */
3150static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3151{
3152 struct trace_array *tr = inode->i_private;
3153
3154 if (tracing_disabled)
3155 return -ENODEV;
3156
3157 if (trace_array_get(tr) < 0)
3158 return -ENODEV;
3159
3160 filp->private_data = inode->i_private;
3161
3162 return 0;
3163}
3164
3165static int tracing_release(struct inode *inode, struct file *file)
3166{
3167 struct trace_array *tr = inode->i_private;
3168 struct seq_file *m = file->private_data;
3169 struct trace_iterator *iter;
3170 int cpu;
3171
3172 if (!(file->f_mode & FMODE_READ)) {
3173 trace_array_put(tr);
3174 return 0;
3175 }
3176
3177 /* Writes do not use seq_file */
3178 iter = m->private;
3179 mutex_lock(&trace_types_lock);
3180
3181 for_each_tracing_cpu(cpu) {
3182 if (iter->buffer_iter[cpu])
3183 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3184 }
3185
3186 if (iter->trace && iter->trace->close)
3187 iter->trace->close(iter);
3188
3189 if (!iter->snapshot)
3190 /* reenable tracing if it was previously enabled */
3191 tracing_start_tr(tr);
3192
3193 __trace_array_put(tr);
3194
3195 mutex_unlock(&trace_types_lock);
3196
3197 mutex_destroy(&iter->mutex);
3198 free_cpumask_var(iter->started);
3199 kfree(iter->trace);
3200 kfree(iter->buffer_iter);
3201 seq_release_private(inode, file);
3202
3203 return 0;
3204}
3205
3206static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3207{
3208 struct trace_array *tr = inode->i_private;
3209
3210 trace_array_put(tr);
3211 return 0;
3212}
3213
3214static int tracing_single_release_tr(struct inode *inode, struct file *file)
3215{
3216 struct trace_array *tr = inode->i_private;
3217
3218 trace_array_put(tr);
3219
3220 return single_release(inode, file);
3221}
3222
3223static int tracing_open(struct inode *inode, struct file *file)
3224{
3225 struct trace_array *tr = inode->i_private;
3226 struct trace_iterator *iter;
3227 int ret = 0;
3228
3229 if (trace_array_get(tr) < 0)
3230 return -ENODEV;
3231
3232 /* If this file was open for write, then erase contents */
3233 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3234 int cpu = tracing_get_cpu(inode);
3235
3236 if (cpu == RING_BUFFER_ALL_CPUS)
3237 tracing_reset_online_cpus(&tr->trace_buffer);
3238 else
3239 tracing_reset(&tr->trace_buffer, cpu);
3240 }
3241
3242 if (file->f_mode & FMODE_READ) {
3243 iter = __tracing_open(inode, file, false);
3244 if (IS_ERR(iter))
3245 ret = PTR_ERR(iter);
3246 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
3247 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3248 }
3249
3250 if (ret < 0)
3251 trace_array_put(tr);
3252
3253 return ret;
3254}
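/*
 * Editor's note: the O_TRUNC branch above is what makes
 *
 *	echo > trace			(clear every CPU)
 *	echo > per_cpu/cpu1/trace	(clear only CPU 1)
 *
 * reset the buffers from user space: the shell opens the file for write
 * with truncation, and the newline it then writes is ignored by
 * tracing_write_stub().
 */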
3255
3256/*
3257 * Some tracers are not suitable for instance buffers.
3258 * A tracer is always available for the global array (toplevel)
3259 * or if it explicitly states that it is.
3260 */
3261static bool
3262trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3263{
3264 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3265}
3266
3267/* Find the next tracer that this trace array may use */
3268static struct tracer *
3269get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3270{
3271 while (t && !trace_ok_for_array(t, tr))
3272 t = t->next;
3273
3274 return t;
3275}
3276
3277static void *
3278t_next(struct seq_file *m, void *v, loff_t *pos)
3279{
3280 struct trace_array *tr = m->private;
3281 struct tracer *t = v;
3282
3283 (*pos)++;
3284
3285 if (t)
3286 t = get_tracer_for_array(tr, t->next);
3287
3288 return t;
3289}
3290
3291static void *t_start(struct seq_file *m, loff_t *pos)
3292{
3293 struct trace_array *tr = m->private;
3294 struct tracer *t;
3295 loff_t l = 0;
3296
3297 mutex_lock(&trace_types_lock);
3298
3299 t = get_tracer_for_array(tr, trace_types);
3300 for (; t && l < *pos; t = t_next(m, t, &l))
3301 ;
3302
3303 return t;
3304}
3305
3306static void t_stop(struct seq_file *m, void *p)
3307{
3308 mutex_unlock(&trace_types_lock);
3309}
3310
3311static int t_show(struct seq_file *m, void *v)
3312{
3313 struct tracer *t = v;
3314
3315 if (!t)
3316 return 0;
3317
3318 seq_puts(m, t->name);
3319 if (t->next)
3320 seq_putc(m, ' ');
3321 else
3322 seq_putc(m, '\n');
3323
3324 return 0;
3325}
3326
3327static const struct seq_operations show_traces_seq_ops = {
3328 .start = t_start,
3329 .next = t_next,
3330 .stop = t_stop,
3331 .show = t_show,
3332};
3333
3334static int show_traces_open(struct inode *inode, struct file *file)
3335{
3336 struct trace_array *tr = inode->i_private;
3337 struct seq_file *m;
3338 int ret;
3339
3340 if (tracing_disabled)
3341 return -ENODEV;
3342
3343 ret = seq_open(file, &show_traces_seq_ops);
3344 if (ret)
3345 return ret;
3346
3347 m = file->private_data;
3348 m->private = tr;
3349
3350 return 0;
3351}
3352
3353static ssize_t
3354tracing_write_stub(struct file *filp, const char __user *ubuf,
3355 size_t count, loff_t *ppos)
3356{
3357 return count;
3358}
3359
3360loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
3361{
3362 int ret;
3363
3364 if (file->f_mode & FMODE_READ)
3365 ret = seq_lseek(file, offset, whence);
3366 else
3367 file->f_pos = ret = 0;
3368
3369 return ret;
3370}
3371
3372static const struct file_operations tracing_fops = {
3373 .open = tracing_open,
3374 .read = seq_read,
3375 .write = tracing_write_stub,
3376 .llseek = tracing_lseek,
3377 .release = tracing_release,
3378};
3379
3380static const struct file_operations show_traces_fops = {
3381 .open = show_traces_open,
3382 .read = seq_read,
3383 .release = seq_release,
3384 .llseek = seq_lseek,
3385};
3386
3387/*
3388 * The tracer itself will not take this lock, but we still want
3389 * to provide a consistent cpumask to user-space:
3390 */
3391static DEFINE_MUTEX(tracing_cpumask_update_lock);
3392
3393/*
3394 * Temporary storage for the character representation of the
3395 * CPU bitmask (and one more byte for the newline):
3396 */
3397static char mask_str[NR_CPUS + 1];
3398
3399static ssize_t
3400tracing_cpumask_read(struct file *filp, char __user *ubuf,
3401 size_t count, loff_t *ppos)
3402{
3403 struct trace_array *tr = file_inode(filp)->i_private;
3404 int len;
3405
3406 mutex_lock(&tracing_cpumask_update_lock);
3407
3408 len = snprintf(mask_str, count, "%*pb\n",
3409 cpumask_pr_args(tr->tracing_cpumask));
3410 if (len >= count) {
3411 count = -EINVAL;
3412 goto out_err;
3413 }
3414 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3415
3416out_err:
3417 mutex_unlock(&tracing_cpumask_update_lock);
3418
3419 return count;
3420}
3421
3422static ssize_t
3423tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3424 size_t count, loff_t *ppos)
3425{
3426 struct trace_array *tr = file_inode(filp)->i_private;
3427 cpumask_var_t tracing_cpumask_new;
3428 int err, cpu;
3429
3430 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3431 return -ENOMEM;
3432
3433 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
3434 if (err)
3435 goto err_unlock;
3436
3437 mutex_lock(&tracing_cpumask_update_lock);
3438
3439 local_irq_disable();
3440 arch_spin_lock(&tr->max_lock);
3441 for_each_tracing_cpu(cpu) {
3442 /*
3443 * Increase/decrease the disabled counter if we are
3444 * about to flip a bit in the cpumask:
3445 */
3446 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3447 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3448 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3449 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
3450 }
3451 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
3452 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
3453 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3454 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
3455 }
3456 }
3457 arch_spin_unlock(&tr->max_lock);
3458 local_irq_enable();
3459
3460 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
3461
3462 mutex_unlock(&tracing_cpumask_update_lock);
3463 free_cpumask_var(tracing_cpumask_new);
3464
3465 return count;
3466
3467err_unlock:
3468 free_cpumask_var(tracing_cpumask_new);
3469
3470 return err;
3471}
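/*
 * Editor's note: a usage sketch for the cpumask file handled above. Writing
 * a hex mask limits which CPUs record events, e.g.
 *
 *	echo 3 > tracing_cpumask
 *
 * keeps tracing on CPUs 0 and 1, and disables recording (bumping the
 * per-cpu disabled counter) on every CPU whose bit was cleared.
 */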
3472
3473static const struct file_operations tracing_cpumask_fops = {
3474 .open = tracing_open_generic_tr,
3475 .read = tracing_cpumask_read,
3476 .write = tracing_cpumask_write,
3477 .release = tracing_release_generic_tr,
3478 .llseek = generic_file_llseek,
3479};
3480
3481static int tracing_trace_options_show(struct seq_file *m, void *v)
3482{
3483 struct tracer_opt *trace_opts;
3484 struct trace_array *tr = m->private;
3485 u32 tracer_flags;
3486 int i;
3487
3488 mutex_lock(&trace_types_lock);
3489 tracer_flags = tr->current_trace->flags->val;
3490 trace_opts = tr->current_trace->flags->opts;
3491
3492 for (i = 0; trace_options[i]; i++) {
3493 if (tr->trace_flags & (1 << i))
3494 seq_printf(m, "%s\n", trace_options[i]);
3495 else
3496 seq_printf(m, "no%s\n", trace_options[i]);
3497 }
3498
3499 for (i = 0; trace_opts[i].name; i++) {
3500 if (tracer_flags & trace_opts[i].bit)
3501 seq_printf(m, "%s\n", trace_opts[i].name);
3502 else
3503 seq_printf(m, "no%s\n", trace_opts[i].name);
3504 }
3505 mutex_unlock(&trace_types_lock);
3506
3507 return 0;
3508}
3509
3510static int __set_tracer_option(struct trace_array *tr,
3511 struct tracer_flags *tracer_flags,
3512 struct tracer_opt *opts, int neg)
3513{
3514 struct tracer *trace = tracer_flags->trace;
3515 int ret;
3516
3517 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
3518 if (ret)
3519 return ret;
3520
3521 if (neg)
3522 tracer_flags->val &= ~opts->bit;
3523 else
3524 tracer_flags->val |= opts->bit;
3525 return 0;
3526}
3527
3528/* Try to assign a tracer specific option */
3529static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
3530{
3531 struct tracer *trace = tr->current_trace;
3532 struct tracer_flags *tracer_flags = trace->flags;
3533 struct tracer_opt *opts = NULL;
3534 int i;
3535
3536 for (i = 0; tracer_flags->opts[i].name; i++) {
3537 opts = &tracer_flags->opts[i];
3538
3539 if (strcmp(cmp, opts->name) == 0)
3540 return __set_tracer_option(tr, trace->flags, opts, neg);
3541 }
3542
3543 return -EINVAL;
3544}
3545
3546/* Some tracers require overwrite to stay enabled */
3547int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3548{
3549 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3550 return -1;
3551
3552 return 0;
3553}
3554
3555int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
3556{
3557 /* do nothing if flag is already set */
3558 if (!!(tr->trace_flags & mask) == !!enabled)
3559 return 0;
3560
3561 /* Give the tracer a chance to approve the change */
3562 if (tr->current_trace->flag_changed)
3563 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
3564 return -EINVAL;
3565
3566 if (enabled)
3567 tr->trace_flags |= mask;
3568 else
3569 tr->trace_flags &= ~mask;
3570
3571 if (mask == TRACE_ITER_RECORD_CMD)
3572 trace_event_enable_cmd_record(enabled);
3573
3574 if (mask == TRACE_ITER_OVERWRITE) {
3575 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
3576#ifdef CONFIG_TRACER_MAX_TRACE
3577 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
3578#endif
3579 }
3580
3581 if (mask == TRACE_ITER_PRINTK) {
3582 trace_printk_start_stop_comm(enabled);
3583 trace_printk_control(enabled);
3584 }
3585
3586 return 0;
3587}
3588
3589static int trace_set_options(struct trace_array *tr, char *option)
3590{
3591 char *cmp;
3592 int neg = 0;
3593 int ret = -ENODEV;
3594 int i;
3595 size_t orig_len = strlen(option);
3596
3597 cmp = strstrip(option);
3598
3599 if (strncmp(cmp, "no", 2) == 0) {
3600 neg = 1;
3601 cmp += 2;
3602 }
3603
3604 mutex_lock(&trace_types_lock);
3605
3606 for (i = 0; trace_options[i]; i++) {
3607 if (strcmp(cmp, trace_options[i]) == 0) {
3608 ret = set_tracer_flag(tr, 1 << i, !neg);
3609 break;
3610 }
3611 }
3612
3613 /* If no option could be set, test the specific tracer options */
3614 if (!trace_options[i])
3615 ret = set_tracer_option(tr, cmp, neg);
3616
3617 mutex_unlock(&trace_types_lock);
3618
3619 /*
3620 * If the first trailing whitespace is replaced with '\0' by strstrip,
3621 * turn it back into a space.
3622 */
3623 if (orig_len > strlen(option))
3624 option[strlen(option)] = ' ';
3625
3626 return ret;
3627}
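/*
 * Editor's note: a usage sketch for the option parsing above. The same
 * strings listed by the trace_options file can be toggled with, e.g.
 *
 *	echo sym-offset > trace_options
 *	echo nosym-offset > trace_options
 *
 * where the "no" prefix stripped above selects the clear path of
 * set_tracer_flag(). Unrecognized core options fall through to the
 * tracer-specific options of the current tracer.
 */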
3628
3629static void __init apply_trace_boot_options(void)
3630{
3631 char *buf = trace_boot_options_buf;
3632 char *option;
3633
3634 while (true) {
3635 option = strsep(&buf, ",");
3636
3637 if (!option)
3638 break;
3639
3640 if (*option)
3641 trace_set_options(&global_trace, option);
3642
3643 /* Put back the comma to allow this to be called again */
3644 if (buf)
3645 *(buf - 1) = ',';
3646 }
3647}
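/*
 * Editor's note: the comma-separated list parsed above comes from the
 * trace_options= boot parameter, so something like
 *
 *	trace_options=sym-offset,noirq-info
 *
 * on the kernel command line applies the same toggles before user space is
 * up; the comma is put back so the buffer can be parsed again later.
 */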
3648
3649static ssize_t
3650tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3651 size_t cnt, loff_t *ppos)
3652{
3653 struct seq_file *m = filp->private_data;
3654 struct trace_array *tr = m->private;
3655 char buf[64];
3656 int ret;
3657
3658 if (cnt >= sizeof(buf))
3659 return -EINVAL;
3660
3661 if (copy_from_user(&buf, ubuf, cnt))
3662 return -EFAULT;
3663
3664 buf[cnt] = 0;
3665
3666 ret = trace_set_options(tr, buf);
3667 if (ret < 0)
3668 return ret;
3669
3670 *ppos += cnt;
3671
3672 return cnt;
3673}
3674
3675static int tracing_trace_options_open(struct inode *inode, struct file *file)
3676{
3677 struct trace_array *tr = inode->i_private;
3678 int ret;
3679
3680 if (tracing_disabled)
3681 return -ENODEV;
3682
3683 if (trace_array_get(tr) < 0)
3684 return -ENODEV;
3685
3686 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3687 if (ret < 0)
3688 trace_array_put(tr);
3689
3690 return ret;
3691}
3692
3693static const struct file_operations tracing_iter_fops = {
3694 .open = tracing_trace_options_open,
3695 .read = seq_read,
3696 .llseek = seq_lseek,
3697 .release = tracing_single_release_tr,
3698 .write = tracing_trace_options_write,
3699};
3700
3701static const char readme_msg[] =
3702 "tracing mini-HOWTO:\n\n"
3703 "# echo 0 > tracing_on : quick way to disable tracing\n"
3704 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3705 " Important files:\n"
3706 " trace\t\t\t- The static contents of the buffer\n"
3707 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3708 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3709 " current_tracer\t- function and latency tracers\n"
3710 " available_tracers\t- list of configured tracers for current_tracer\n"
3711 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3712 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3713 " trace_clock\t\t- change the clock used to order events\n"
3714 " local: Per cpu clock but may not be synced across CPUs\n"
3715 " global: Synced across CPUs but slows tracing down.\n"
3716 " counter: Not a clock, but just an increment\n"
3717 " uptime: Jiffy counter from time of boot\n"
3718 " perf: Same clock that perf events use\n"
3719#ifdef CONFIG_X86_64
3720 " x86-tsc: TSC cycle counter\n"
3721#endif
3722 "\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
3723 " tracing_cpumask\t- Limit which CPUs to trace\n"
3724 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3725 "\t\t\t Remove sub-buffer with rmdir\n"
3726 " trace_options\t\t- Set format or modify how tracing happens\n"
3727 "\t\t\t Disable an option by prefixing the option name\n"
3728 "\t\t\t with 'no'\n"
3729 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
3730#ifdef CONFIG_DYNAMIC_FTRACE
3731 "\n available_filter_functions - list of functions that can be filtered on\n"
3732 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3733 "\t\t\t functions\n"
3734 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3735 "\t modules: Can select a group via module\n"
3736 "\t Format: :mod:<module-name>\n"
3737 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3738 "\t triggers: a command to perform when function is hit\n"
3739 "\t Format: <function>:<trigger>[:count]\n"
3740 "\t trigger: traceon, traceoff\n"
3741 "\t\t enable_event:<system>:<event>\n"
3742 "\t\t disable_event:<system>:<event>\n"
3743#ifdef CONFIG_STACKTRACE
3744 "\t\t stacktrace\n"
3745#endif
3746#ifdef CONFIG_TRACER_SNAPSHOT
3747 "\t\t snapshot\n"
3748#endif
3749 "\t\t dump\n"
3750 "\t\t cpudump\n"
3751 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3752 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3753 "\t The first one will disable tracing every time do_fault is hit\n"
3754 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3755 "\t The first time do_trap is hit and it disables tracing, the\n"
3756 "\t counter will decrement to 2. If tracing is already disabled,\n"
3757 "\t the counter will not decrement. It only decrements when the\n"
3758 "\t trigger did work\n"
3759 "\t To remove a trigger without a count:\n"
3760 "\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
3761 "\t To remove a trigger with a count:\n"
3762 "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
3763 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
3764 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3765 "\t modules: Can select a group via module command :mod:\n"
3766 "\t Does not accept triggers\n"
3767#endif /* CONFIG_DYNAMIC_FTRACE */
3768#ifdef CONFIG_FUNCTION_TRACER
3769 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3770 "\t\t (function)\n"
3771#endif
3772#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3773 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
3774 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
3775 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3776#endif
3777#ifdef CONFIG_TRACER_SNAPSHOT
3778 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3779 "\t\t\t snapshot buffer. Read the contents for more\n"
3780 "\t\t\t information\n"
3781#endif
3782#ifdef CONFIG_STACK_TRACER
3783 " stack_trace\t\t- Shows the max stack trace when active\n"
3784 " stack_max_size\t- Shows current max stack size that was traced\n"
3785 "\t\t\t Write into this file to reset the max size (trigger a\n"
3786 "\t\t\t new trace)\n"
3787#ifdef CONFIG_DYNAMIC_FTRACE
3788 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3789 "\t\t\t traces\n"
3790#endif
3791#endif /* CONFIG_STACK_TRACER */
3792 " events/\t\t- Directory containing all trace event subsystems:\n"
3793 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3794 " events/<system>/\t- Directory containing all trace events for <system>:\n"
3795 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3796 "\t\t\t events\n"
3797 " filter\t\t- If set, only events passing filter are traced\n"
3798 " events/<system>/<event>/\t- Directory containing control files for\n"
3799 "\t\t\t <event>:\n"
3800 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3801 " filter\t\t- If set, only events passing filter are traced\n"
3802 " trigger\t\t- If set, a command to perform when event is hit\n"
3803 "\t Format: <trigger>[:count][if <filter>]\n"
3804 "\t trigger: traceon, traceoff\n"
3805 "\t enable_event:<system>:<event>\n"
3806 "\t disable_event:<system>:<event>\n"
3807#ifdef CONFIG_STACKTRACE
3808 "\t\t stacktrace\n"
3809#endif
3810#ifdef CONFIG_TRACER_SNAPSHOT
3811 "\t\t snapshot\n"
3812#endif
3813 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3814 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3815 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3816 "\t events/block/block_unplug/trigger\n"
3817 "\t The first disables tracing every time block_unplug is hit.\n"
3818 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3819 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3820 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3821 "\t Like function triggers, the counter is only decremented if it\n"
3822 "\t enabled or disabled tracing.\n"
3823 "\t To remove a trigger without a count:\n"
3824 "\t echo '!<trigger>' > <system>/<event>/trigger\n"
3825 "\t To remove a trigger with a count:\n"
3826 "\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
3827 "\t Filters can be ignored when removing a trigger.\n"
3828;
3829
3830static ssize_t
3831tracing_readme_read(struct file *filp, char __user *ubuf,
3832 size_t cnt, loff_t *ppos)
3833{
3834 return simple_read_from_buffer(ubuf, cnt, ppos,
3835 readme_msg, strlen(readme_msg));
3836}
3837
3838static const struct file_operations tracing_readme_fops = {
3839 .open = tracing_open_generic,
3840 .read = tracing_readme_read,
3841 .llseek = generic_file_llseek,
3842};
3843
3844static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3845{
3846 unsigned int *ptr = v;
3847
3848 if (*pos || m->count)
3849 ptr++;
3850
3851 (*pos)++;
3852
3853 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3854 ptr++) {
3855 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3856 continue;
3857
3858 return ptr;
3859 }
3860
3861 return NULL;
3862}
3863
3864static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3865{
3866 void *v;
3867 loff_t l = 0;
3868
3869 preempt_disable();
3870 arch_spin_lock(&trace_cmdline_lock);
3871
3872 v = &savedcmd->map_cmdline_to_pid[0];
3873 while (l <= *pos) {
3874 v = saved_cmdlines_next(m, v, &l);
3875 if (!v)
3876 return NULL;
3877 }
3878
3879 return v;
3880}
3881
3882static void saved_cmdlines_stop(struct seq_file *m, void *v)
3883{
3884 arch_spin_unlock(&trace_cmdline_lock);
3885 preempt_enable();
3886}
3887
3888static int saved_cmdlines_show(struct seq_file *m, void *v)
3889{
3890 char buf[TASK_COMM_LEN];
3891 unsigned int *pid = v;
3892
3893 __trace_find_cmdline(*pid, buf);
3894 seq_printf(m, "%d %s\n", *pid, buf);
3895 return 0;
3896}
3897
3898static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3899 .start = saved_cmdlines_start,
3900 .next = saved_cmdlines_next,
3901 .stop = saved_cmdlines_stop,
3902 .show = saved_cmdlines_show,
3903};
3904
3905static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3906{
3907 if (tracing_disabled)
3908 return -ENODEV;
3909
3910 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
3911}
3912
3913static const struct file_operations tracing_saved_cmdlines_fops = {
3914 .open = tracing_saved_cmdlines_open,
3915 .read = seq_read,
3916 .llseek = seq_lseek,
3917 .release = seq_release,
3918};
3919
3920static ssize_t
3921tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3922 size_t cnt, loff_t *ppos)
3923{
3924 char buf[64];
3925 int r;
3926
3927 arch_spin_lock(&trace_cmdline_lock);
3928 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
3929 arch_spin_unlock(&trace_cmdline_lock);
3930
3931 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3932}
3933
3934static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3935{
3936 kfree(s->saved_cmdlines);
3937 kfree(s->map_cmdline_to_pid);
3938 kfree(s);
3939}
3940
3941static int tracing_resize_saved_cmdlines(unsigned int val)
3942{
3943 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3944
3945 s = kmalloc(sizeof(*s), GFP_KERNEL);
3946 if (!s)
3947 return -ENOMEM;
3948
3949 if (allocate_cmdlines_buffer(val, s) < 0) {
3950 kfree(s);
3951 return -ENOMEM;
3952 }
3953
3954 arch_spin_lock(&trace_cmdline_lock);
3955 savedcmd_temp = savedcmd;
3956 savedcmd = s;
3957 arch_spin_unlock(&trace_cmdline_lock);
3958 free_saved_cmdlines_buffer(savedcmd_temp);
3959
3960 return 0;
3961}
3962
3963static ssize_t
3964tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3965 size_t cnt, loff_t *ppos)
3966{
3967 unsigned long val;
3968 int ret;
3969
3970 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3971 if (ret)
3972 return ret;
3973
3974	/* must have at least 1 entry and no more than PID_MAX_DEFAULT */
3975 if (!val || val > PID_MAX_DEFAULT)
3976 return -EINVAL;
3977
3978 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3979 if (ret < 0)
3980 return ret;
3981
3982 *ppos += cnt;
3983
3984 return cnt;
3985}
3986
3987static const struct file_operations tracing_saved_cmdlines_size_fops = {
3988 .open = tracing_open_generic,
3989 .read = tracing_saved_cmdlines_size_read,
3990 .write = tracing_saved_cmdlines_size_write,
3991};
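/*
 * Editor's note: a usage sketch for the size file above. The default comm
 * cache is fairly small, so when traces show "<...>" instead of task names
 * it can be grown with, e.g.
 *
 *	echo 8192 > saved_cmdlines_size
 *
 * which swaps in a freshly allocated cmdline map via
 * tracing_resize_saved_cmdlines().
 */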
3992
3993#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3994static union trace_enum_map_item *
3995update_enum_map(union trace_enum_map_item *ptr)
3996{
3997 if (!ptr->map.enum_string) {
3998 if (ptr->tail.next) {
3999 ptr = ptr->tail.next;
4000 /* Set ptr to the next real item (skip head) */
4001 ptr++;
4002 } else
4003 return NULL;
4004 }
4005 return ptr;
4006}
4007
4008static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4009{
4010 union trace_enum_map_item *ptr = v;
4011
4012 /*
4013 * Paranoid! If ptr points to end, we don't want to increment past it.
4014 * This really should never happen.
4015 */
4016 ptr = update_enum_map(ptr);
4017 if (WARN_ON_ONCE(!ptr))
4018 return NULL;
4019
4020 ptr++;
4021
4022 (*pos)++;
4023
4024 ptr = update_enum_map(ptr);
4025
4026 return ptr;
4027}
4028
4029static void *enum_map_start(struct seq_file *m, loff_t *pos)
4030{
4031 union trace_enum_map_item *v;
4032 loff_t l = 0;
4033
4034 mutex_lock(&trace_enum_mutex);
4035
4036 v = trace_enum_maps;
4037 if (v)
4038 v++;
4039
4040 while (v && l < *pos) {
4041 v = enum_map_next(m, v, &l);
4042 }
4043
4044 return v;
4045}
4046
4047static void enum_map_stop(struct seq_file *m, void *v)
4048{
4049 mutex_unlock(&trace_enum_mutex);
4050}
4051
4052static int enum_map_show(struct seq_file *m, void *v)
4053{
4054 union trace_enum_map_item *ptr = v;
4055
4056 seq_printf(m, "%s %ld (%s)\n",
4057 ptr->map.enum_string, ptr->map.enum_value,
4058 ptr->map.system);
4059
4060 return 0;
4061}
4062
4063static const struct seq_operations tracing_enum_map_seq_ops = {
4064 .start = enum_map_start,
4065 .next = enum_map_next,
4066 .stop = enum_map_stop,
4067 .show = enum_map_show,
4068};
4069
4070static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4071{
4072 if (tracing_disabled)
4073 return -ENODEV;
4074
4075 return seq_open(filp, &tracing_enum_map_seq_ops);
4076}
4077
4078static const struct file_operations tracing_enum_map_fops = {
4079 .open = tracing_enum_map_open,
4080 .read = seq_read,
4081 .llseek = seq_lseek,
4082 .release = seq_release,
4083};
4084
4085static inline union trace_enum_map_item *
4086trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4087{
4088 /* Return tail of array given the head */
4089 return ptr + ptr->head.length + 1;
4090}
4091
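/*
 * Append @mod's enum maps to the global trace_enum_maps list that backs the
 * "enum_map" file. Each block is allocated as [head][map 0]...[map len-1][tail],
 * where the head records the owning module and length, and the zeroed tail
 * doubles as the link to the next block.
 */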
4092static void
4093trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4094 int len)
4095{
4096 struct trace_enum_map **stop;
4097 struct trace_enum_map **map;
4098 union trace_enum_map_item *map_array;
4099 union trace_enum_map_item *ptr;
4100
4101 stop = start + len;
4102
4103 /*
4104 * The trace_enum_maps contains the map plus a head and tail item,
4105	 * where the head holds the module and the length of the array, and the
4106 * tail holds a pointer to the next list.
4107 */
4108 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4109 if (!map_array) {
4110 pr_warn("Unable to allocate trace enum mapping\n");
4111 return;
4112 }
4113
4114 mutex_lock(&trace_enum_mutex);
4115
4116 if (!trace_enum_maps)
4117 trace_enum_maps = map_array;
4118 else {
4119 ptr = trace_enum_maps;
4120 for (;;) {
4121 ptr = trace_enum_jmp_to_tail(ptr);
4122 if (!ptr->tail.next)
4123 break;
4124 ptr = ptr->tail.next;
4125
4126 }
4127 ptr->tail.next = map_array;
4128 }
4129 map_array->head.mod = mod;
4130 map_array->head.length = len;
4131 map_array++;
4132
4133 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4134 map_array->map = **map;
4135 map_array++;
4136 }
4137 memset(map_array, 0, sizeof(*map_array));
4138
4139 mutex_unlock(&trace_enum_mutex);
4140}
4141
4142static void trace_create_enum_file(struct dentry *d_tracer)
4143{
4144 trace_create_file("enum_map", 0444, d_tracer,
4145 NULL, &tracing_enum_map_fops);
4146}
4147
4148#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4149static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4150static inline void trace_insert_enum_map_file(struct module *mod,
4151 struct trace_enum_map **start, int len) { }
4152#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4153
4154static void trace_insert_enum_map(struct module *mod,
4155 struct trace_enum_map **start, int len)
4156{
4157 struct trace_enum_map **map;
4158
4159 if (len <= 0)
4160 return;
4161
4162 map = start;
4163
4164 trace_event_enum_update(map, len);
4165
4166 trace_insert_enum_map_file(mod, start, len);
4167}
4168
4169static ssize_t
4170tracing_set_trace_read(struct file *filp, char __user *ubuf,
4171 size_t cnt, loff_t *ppos)
4172{
4173 struct trace_array *tr = filp->private_data;
4174 char buf[MAX_TRACER_SIZE+2];
4175 int r;
4176
4177 mutex_lock(&trace_types_lock);
4178 r = sprintf(buf, "%s\n", tr->current_trace->name);
4179 mutex_unlock(&trace_types_lock);
4180
4181 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4182}
4183
4184int tracer_init(struct tracer *t, struct trace_array *tr)
4185{
4186 tracing_reset_online_cpus(&tr->trace_buffer);
4187 return t->init(tr);
4188}
4189
4190static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
4191{
4192 int cpu;
4193
4194 for_each_tracing_cpu(cpu)
4195 per_cpu_ptr(buf->data, cpu)->entries = val;
4196}
4197
4198#ifdef CONFIG_TRACER_MAX_TRACE
4199/* resize @trace_buf's buffer to the size of @size_buf's entries */
4200static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4201 struct trace_buffer *size_buf, int cpu_id)
4202{
4203 int cpu, ret = 0;
4204
4205 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4206 for_each_tracing_cpu(cpu) {
4207 ret = ring_buffer_resize(trace_buf->buffer,
4208 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
4209 if (ret < 0)
4210 break;
4211 per_cpu_ptr(trace_buf->data, cpu)->entries =
4212 per_cpu_ptr(size_buf->data, cpu)->entries;
4213 }
4214 } else {
4215 ret = ring_buffer_resize(trace_buf->buffer,
4216 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
4217 if (ret == 0)
4218 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4219 per_cpu_ptr(size_buf->data, cpu_id)->entries;
4220 }
4221
4222 return ret;
4223}
4224#endif /* CONFIG_TRACER_MAX_TRACE */
4225
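/*
 * Resize @tr's main ring buffer for one CPU or for all CPUs. On the global
 * trace array, when the current tracer uses the max/snapshot buffer, that
 * buffer is resized as well; if resizing it fails, the main buffer is put
 * back to its previous size, and if even that fails, tracing is disabled.
 */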
4226static int __tracing_resize_ring_buffer(struct trace_array *tr,
4227 unsigned long size, int cpu)
4228{
4229 int ret;
4230
4231 /*
4232 * If kernel or user changes the size of the ring buffer
4233 * we use the size that was given, and we can forget about
4234 * expanding it later.
4235 */
4236 ring_buffer_expanded = true;
4237
4238 /* May be called before buffers are initialized */
4239 if (!tr->trace_buffer.buffer)
4240 return 0;
4241
4242 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
4243 if (ret < 0)
4244 return ret;
4245
4246#ifdef CONFIG_TRACER_MAX_TRACE
4247 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4248 !tr->current_trace->use_max_tr)
4249 goto out;
4250
4251 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
4252 if (ret < 0) {
4253 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4254 &tr->trace_buffer, cpu);
4255 if (r < 0) {
4256 /*
4257 * AARGH! We are left with different
4258 * size max buffer!!!!
4259 * The max buffer is our "snapshot" buffer.
4260 * When a tracer needs a snapshot (one of the
4261 * latency tracers), it swaps the max buffer
4262			 * with the saved snapshot. We succeeded in
4263			 * updating the size of the main buffer, but failed to
4264 * update the size of the max buffer. But when we tried
4265 * to reset the main buffer to the original size, we
4266 * failed there too. This is very unlikely to
4267 * happen, but if it does, warn and kill all
4268 * tracing.
4269 */
4270 WARN_ON(1);
4271 tracing_disabled = 1;
4272 }
4273 return ret;
4274 }
4275
4276 if (cpu == RING_BUFFER_ALL_CPUS)
4277 set_buffer_entries(&tr->max_buffer, size);
4278 else
4279 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
4280
4281 out:
4282#endif /* CONFIG_TRACER_MAX_TRACE */
4283
4284 if (cpu == RING_BUFFER_ALL_CPUS)
4285 set_buffer_entries(&tr->trace_buffer, size);
4286 else
4287 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
4288
4289 return ret;
4290}
4291
4292static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4293 unsigned long size, int cpu_id)
4294{
4295 int ret = size;
4296
4297 mutex_lock(&trace_types_lock);
4298
4299 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4300		/* make sure this cpu is enabled in the mask */
4301 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4302 ret = -EINVAL;
4303 goto out;
4304 }
4305 }
4306
4307 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4308 if (ret < 0)
4309 ret = -ENOMEM;
4310
4311out:
4312 mutex_unlock(&trace_types_lock);
4313
4314 return ret;
4315}
4316
4317
4318/**
4319 * tracing_update_buffers - used by tracing facility to expand ring buffers
4320 *
4321 * To save memory when tracing is configured in but never used, the
4322 * ring buffers are set to a minimum size. But once a user starts to
4323 * use the tracing facility, they need to grow to their default
4324 * size.
4325 *
4326 * This function is to be called when a tracer is about to be used.
4327 */
4328int tracing_update_buffers(void)
4329{
4330 int ret = 0;
4331
4332 mutex_lock(&trace_types_lock);
4333 if (!ring_buffer_expanded)
4334 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
4335 RING_BUFFER_ALL_CPUS);
4336 mutex_unlock(&trace_types_lock);
4337
4338 return ret;
4339}
4340
4341struct trace_option_dentry;
4342
4343static void
4344create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
4345
4346/*
4347 * Used to clear out the tracer before deletion of an instance.
4348 * Must have trace_types_lock held.
4349 */
4350static void tracing_set_nop(struct trace_array *tr)
4351{
4352 if (tr->current_trace == &nop_trace)
4353 return;
4354
4355 tr->current_trace->enabled--;
4356
4357 if (tr->current_trace->reset)
4358 tr->current_trace->reset(tr);
4359
4360 tr->current_trace = &nop_trace;
4361}
4362
4363static void add_tracer_options(struct trace_array *tr, struct tracer *t)
4364{
4365 /* Only enable if the directory has been created already. */
4366 if (!tr->dir)
4367 return;
4368
4369 create_trace_option_files(tr, t);
4370}
4371
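/*
 * Switch @tr to the tracer named @buf: expand the ring buffer if it is still
 * at its boot-time minimum, tear down the current tracer, allocate or free
 * the snapshot buffer as the new tracer requires, then run the new tracer's
 * init callback. Fails with -EBUSY while trace_pipe readers hold a
 * reference to the current tracer.
 */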
4372static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4373{
4374 struct tracer *t;
4375#ifdef CONFIG_TRACER_MAX_TRACE
4376 bool had_max_tr;
4377#endif
4378 int ret = 0;
4379
4380 mutex_lock(&trace_types_lock);
4381
4382 if (!ring_buffer_expanded) {
4383 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
4384 RING_BUFFER_ALL_CPUS);
4385 if (ret < 0)
4386 goto out;
4387 ret = 0;
4388 }
4389
4390 for (t = trace_types; t; t = t->next) {
4391 if (strcmp(t->name, buf) == 0)
4392 break;
4393 }
4394 if (!t) {
4395 ret = -EINVAL;
4396 goto out;
4397 }
4398 if (t == tr->current_trace)
4399 goto out;
4400
4401 /* Some tracers are only allowed for the top level buffer */
4402 if (!trace_ok_for_array(t, tr)) {
4403 ret = -EINVAL;
4404 goto out;
4405 }
4406
4407 /* If trace pipe files are being read, we can't change the tracer */
4408 if (tr->current_trace->ref) {
4409 ret = -EBUSY;
4410 goto out;
4411 }
4412
4413 trace_branch_disable();
4414
4415 tr->current_trace->enabled--;
4416
4417 if (tr->current_trace->reset)
4418 tr->current_trace->reset(tr);
4419
4420 /* Current trace needs to be nop_trace before synchronize_sched */
4421 tr->current_trace = &nop_trace;
4422
4423#ifdef CONFIG_TRACER_MAX_TRACE
4424 had_max_tr = tr->allocated_snapshot;
4425
4426 if (had_max_tr && !t->use_max_tr) {
4427 /*
4428 * We need to make sure that the update_max_tr sees that
4429 * current_trace changed to nop_trace to keep it from
4430 * swapping the buffers after we resize it.
4431		 * update_max_tr() is called with interrupts disabled,
4432		 * so a synchronize_sched() is sufficient.
4433 */
4434 synchronize_sched();
4435 free_snapshot(tr);
4436 }
4437#endif
4438
4439#ifdef CONFIG_TRACER_MAX_TRACE
4440 if (t->use_max_tr && !had_max_tr) {
4441 ret = alloc_snapshot(tr);
4442 if (ret < 0)
4443 goto out;
4444 }
4445#endif
4446
4447 if (t->init) {
4448 ret = tracer_init(t, tr);
4449 if (ret)
4450 goto out;
4451 }
4452
4453 tr->current_trace = t;
4454 tr->current_trace->enabled++;
4455 trace_branch_enable(tr);
4456 out:
4457 mutex_unlock(&trace_types_lock);
4458
4459 return ret;
4460}
4461
4462static ssize_t
4463tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4464 size_t cnt, loff_t *ppos)
4465{
4466 struct trace_array *tr = filp->private_data;
4467 char buf[MAX_TRACER_SIZE+1];
4468 int i;
4469 size_t ret;
4470 int err;
4471
4472 ret = cnt;
4473
4474 if (cnt > MAX_TRACER_SIZE)
4475 cnt = MAX_TRACER_SIZE;
4476
4477 if (copy_from_user(&buf, ubuf, cnt))
4478 return -EFAULT;
4479
4480 buf[cnt] = 0;
4481
4482 /* strip ending whitespace. */
4483 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4484 buf[i] = 0;
4485
4486 err = tracing_set_tracer(tr, buf);
4487 if (err)
4488 return err;
4489
4490 *ppos += ret;
4491
4492 return ret;
4493}
4494
4495static ssize_t
4496tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4497 size_t cnt, loff_t *ppos)
4498{
4499 char buf[64];
4500 int r;
4501
4502 r = snprintf(buf, sizeof(buf), "%ld\n",
4503 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
4504 if (r > sizeof(buf))
4505 r = sizeof(buf);
4506 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4507}
4508
4509static ssize_t
4510tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4511 size_t cnt, loff_t *ppos)
4512{
4513 unsigned long val;
4514 int ret;
4515
4516 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4517 if (ret)
4518 return ret;
4519
4520 *ptr = val * 1000;
4521
4522 return cnt;
4523}
4524
4525static ssize_t
4526tracing_thresh_read(struct file *filp, char __user *ubuf,
4527 size_t cnt, loff_t *ppos)
4528{
4529 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4530}
4531
4532static ssize_t
4533tracing_thresh_write(struct file *filp, const char __user *ubuf,
4534 size_t cnt, loff_t *ppos)
4535{
4536 struct trace_array *tr = filp->private_data;
4537 int ret;
4538
4539 mutex_lock(&trace_types_lock);
4540 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4541 if (ret < 0)
4542 goto out;
4543
4544 if (tr->current_trace->update_thresh) {
4545 ret = tr->current_trace->update_thresh(tr);
4546 if (ret < 0)
4547 goto out;
4548 }
4549
4550 ret = cnt;
4551out:
4552 mutex_unlock(&trace_types_lock);
4553
4554 return ret;
4555}
4556
4557#ifdef CONFIG_TRACER_MAX_TRACE
4558
4559static ssize_t
4560tracing_max_lat_read(struct file *filp, char __user *ubuf,
4561 size_t cnt, loff_t *ppos)
4562{
4563 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4564}
4565
4566static ssize_t
4567tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4568 size_t cnt, loff_t *ppos)
4569{
4570 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4571}
4572
4573#endif
4574
4575static int tracing_open_pipe(struct inode *inode, struct file *filp)
4576{
4577 struct trace_array *tr = inode->i_private;
4578 struct trace_iterator *iter;
4579 int ret = 0;
4580
4581 if (tracing_disabled)
4582 return -ENODEV;
4583
4584 if (trace_array_get(tr) < 0)
4585 return -ENODEV;
4586
4587 mutex_lock(&trace_types_lock);
4588
4589 /* create a buffer to store the information to pass to userspace */
4590 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
4591 if (!iter) {
4592 ret = -ENOMEM;
4593 __trace_array_put(tr);
4594 goto out;
4595 }
4596
4597 trace_seq_init(&iter->seq);
4598 iter->trace = tr->current_trace;
4599
4600 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4601 ret = -ENOMEM;
4602 goto fail;
4603 }
4604
4605 /* trace pipe does not show start of buffer */
4606 cpumask_setall(iter->started);
4607
4608 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4609 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4610
4611 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4612 if (trace_clocks[tr->clock_id].in_ns)
4613 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4614
4615 iter->tr = tr;
4616 iter->trace_buffer = &tr->trace_buffer;
4617 iter->cpu_file = tracing_get_cpu(inode);
4618 mutex_init(&iter->mutex);
4619 filp->private_data = iter;
4620
4621 if (iter->trace->pipe_open)
4622 iter->trace->pipe_open(iter);
4623
4624 nonseekable_open(inode, filp);
4625
4626 tr->current_trace->ref++;
4627out:
4628 mutex_unlock(&trace_types_lock);
4629 return ret;
4630
4631fail:
4632 kfree(iter->trace);
4633 kfree(iter);
4634 __trace_array_put(tr);
4635 mutex_unlock(&trace_types_lock);
4636 return ret;
4637}
4638
4639static int tracing_release_pipe(struct inode *inode, struct file *file)
4640{
4641 struct trace_iterator *iter = file->private_data;
4642 struct trace_array *tr = inode->i_private;
4643
4644 mutex_lock(&trace_types_lock);
4645
4646 tr->current_trace->ref--;
4647
4648 if (iter->trace->pipe_close)
4649 iter->trace->pipe_close(iter);
4650
4651 mutex_unlock(&trace_types_lock);
4652
4653 free_cpumask_var(iter->started);
4654 mutex_destroy(&iter->mutex);
4655 kfree(iter);
4656
4657 trace_array_put(tr);
4658
4659 return 0;
4660}
4661
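/*
 * Poll helper shared by trace_pipe and the raw buffer files: report readable
 * immediately when the iterator has static buffer iterators or when the
 * TRACE_ITER_BLOCK flag is set; otherwise let the ring buffer's poll wait
 * decide for the selected CPU.
 */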
4662static unsigned int
4663trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
4664{
4665 struct trace_array *tr = iter->tr;
4666
4667 /* Iterators are static, they should be filled or empty */
4668 if (trace_buffer_iter(iter, iter->cpu_file))
4669 return POLLIN | POLLRDNORM;
4670
4671 if (tr->trace_flags & TRACE_ITER_BLOCK)
4672 /*
4673 * Always select as readable when in blocking mode
4674 */
4675 return POLLIN | POLLRDNORM;
4676 else
4677 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
4678 filp, poll_table);
4679}
4680
4681static unsigned int
4682tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4683{
4684 struct trace_iterator *iter = filp->private_data;
4685
4686 return trace_poll(iter, filp, poll_table);
4687}
4688
4689/* Must be called with iter->mutex held. */
4690static int tracing_wait_pipe(struct file *filp)
4691{
4692 struct trace_iterator *iter = filp->private_data;
4693 int ret;
4694
4695 while (trace_empty(iter)) {
4696
4697 if ((filp->f_flags & O_NONBLOCK)) {
4698 return -EAGAIN;
4699 }
4700
4701 /*
4702		 * We block until we read something or until tracing is disabled.
4703 * We still block if tracing is disabled, but we have never
4704 * read anything. This allows a user to cat this file, and
4705 * then enable tracing. But after we have read something,
4706 * we give an EOF when tracing is again disabled.
4707 *
4708 * iter->pos will be 0 if we haven't read anything.
4709 */
4710 if (!tracing_is_on() && iter->pos)
4711 break;
4712
4713 mutex_unlock(&iter->mutex);
4714
4715 ret = wait_on_pipe(iter, false);
4716
4717 mutex_lock(&iter->mutex);
4718
4719 if (ret)
4720 return ret;
4721 }
4722
4723 return 1;
4724}
4725
4726/*
4727 * Consumer reader.
4728 */
4729static ssize_t
4730tracing_read_pipe(struct file *filp, char __user *ubuf,
4731 size_t cnt, loff_t *ppos)
4732{
4733 struct trace_iterator *iter = filp->private_data;
4734 ssize_t sret;
4735
4736 /* return any leftover data */
4737 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4738 if (sret != -EBUSY)
4739 return sret;
4740
4741 trace_seq_init(&iter->seq);
4742
4743 /*
4744	 * Avoid more than one consumer on a single file descriptor.
4745	 * This is just a matter of trace coherency; the ring buffer itself
4746 * is protected.
4747 */
4748 mutex_lock(&iter->mutex);
4749 if (iter->trace->read) {
4750 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4751 if (sret)
4752 goto out;
4753 }
4754
4755waitagain:
4756 sret = tracing_wait_pipe(filp);
4757 if (sret <= 0)
4758 goto out;
4759
4760 /* stop when tracing is finished */
4761 if (trace_empty(iter)) {
4762 sret = 0;
4763 goto out;
4764 }
4765
4766 if (cnt >= PAGE_SIZE)
4767 cnt = PAGE_SIZE - 1;
4768
4769 /* reset all but tr, trace, and overruns */
4770 memset(&iter->seq, 0,
4771 sizeof(struct trace_iterator) -
4772 offsetof(struct trace_iterator, seq));
4773 cpumask_clear(iter->started);
4774 iter->pos = -1;
4775
4776 trace_event_read_lock();
4777 trace_access_lock(iter->cpu_file);
4778 while (trace_find_next_entry_inc(iter) != NULL) {
4779 enum print_line_t ret;
4780 int save_len = iter->seq.seq.len;
4781
4782 ret = print_trace_line(iter);
4783 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4784 /* don't print partial lines */
4785 iter->seq.seq.len = save_len;
4786 break;
4787 }
4788 if (ret != TRACE_TYPE_NO_CONSUME)
4789 trace_consume(iter);
4790
4791 if (trace_seq_used(&iter->seq) >= cnt)
4792 break;
4793
4794 /*
4795 * Setting the full flag means we reached the trace_seq buffer
4796		 * size, and we should have left via the partial-output condition above.
4797		 * If we get here, one of the trace_seq_* functions was not used properly.
4798 */
4799 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4800 iter->ent->type);
4801 }
4802 trace_access_unlock(iter->cpu_file);
4803 trace_event_read_unlock();
4804
4805 /* Now copy what we have to the user */
4806 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4807 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
4808 trace_seq_init(&iter->seq);
4809
4810 /*
4811 * If there was nothing to send to user, in spite of consuming trace
4812 * entries, go back to wait for more entries.
4813 */
4814 if (sret == -EBUSY)
4815 goto waitagain;
4816
4817out:
4818 mutex_unlock(&iter->mutex);
4819
4820 return sret;
4821}
4822
4823static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4824 unsigned int idx)
4825{
4826 __free_page(spd->pages[idx]);
4827}
4828
4829static const struct pipe_buf_operations tracing_pipe_buf_ops = {
4830 .can_merge = 0,
4831 .confirm = generic_pipe_buf_confirm,
4832 .release = generic_pipe_buf_release,
4833 .steal = generic_pipe_buf_steal,
4834 .get = generic_pipe_buf_get,
4835};
4836
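/*
 * Format trace entries into iter->seq until the page-sized seq buffer or the
 * @rem bytes still wanted by the splice caller are exhausted. Fully copied
 * entries are consumed from the ring buffer; the return value is the number
 * of bytes that remain to be spliced.
 */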
4837static size_t
4838tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
4839{
4840 size_t count;
4841 int save_len;
4842 int ret;
4843
4844 /* Seq buffer is page-sized, exactly what we need. */
4845 for (;;) {
4846 save_len = iter->seq.seq.len;
4847 ret = print_trace_line(iter);
4848
4849 if (trace_seq_has_overflowed(&iter->seq)) {
4850 iter->seq.seq.len = save_len;
4851 break;
4852 }
4853
4854 /*
4855		 * This should not be hit, because TRACE_TYPE_PARTIAL_LINE should
4856		 * only be returned if the iter->seq overflowed. But check it
4857 * anyway to be safe.
4858 */
4859 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4860 iter->seq.seq.len = save_len;
4861 break;
4862 }
4863
4864 count = trace_seq_used(&iter->seq) - save_len;
4865 if (rem < count) {
4866 rem = 0;
4867 iter->seq.seq.len = save_len;
4868 break;
4869 }
4870
4871 if (ret != TRACE_TYPE_NO_CONSUME)
4872 trace_consume(iter);
4873 rem -= count;
4874 if (!trace_find_next_entry_inc(iter)) {
4875 rem = 0;
4876 iter->ent = NULL;
4877 break;
4878 }
4879 }
4880
4881 return rem;
4882}
4883
4884static ssize_t tracing_splice_read_pipe(struct file *filp,
4885 loff_t *ppos,
4886 struct pipe_inode_info *pipe,
4887 size_t len,
4888 unsigned int flags)
4889{
4890 struct page *pages_def[PIPE_DEF_BUFFERS];
4891 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4892 struct trace_iterator *iter = filp->private_data;
4893 struct splice_pipe_desc spd = {
4894 .pages = pages_def,
4895 .partial = partial_def,
4896 .nr_pages = 0, /* This gets updated below. */
4897 .nr_pages_max = PIPE_DEF_BUFFERS,
4898 .flags = flags,
4899 .ops = &tracing_pipe_buf_ops,
4900 .spd_release = tracing_spd_release_pipe,
4901 };
4902 ssize_t ret;
4903 size_t rem;
4904 unsigned int i;
4905
4906 if (splice_grow_spd(pipe, &spd))
4907 return -ENOMEM;
4908
4909 mutex_lock(&iter->mutex);
4910
4911 if (iter->trace->splice_read) {
4912 ret = iter->trace->splice_read(iter, filp,
4913 ppos, pipe, len, flags);
4914 if (ret)
4915 goto out_err;
4916 }
4917
4918 ret = tracing_wait_pipe(filp);
4919 if (ret <= 0)
4920 goto out_err;
4921
4922 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
4923 ret = -EFAULT;
4924 goto out_err;
4925 }
4926
4927 trace_event_read_lock();
4928 trace_access_lock(iter->cpu_file);
4929
4930 /* Fill as many pages as possible. */
4931 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
4932 spd.pages[i] = alloc_page(GFP_KERNEL);
4933 if (!spd.pages[i])
4934 break;
4935
4936 rem = tracing_fill_pipe_page(rem, iter);
4937
4938 /* Copy the data into the page, so we can start over. */
4939 ret = trace_seq_to_buffer(&iter->seq,
4940 page_address(spd.pages[i]),
4941 trace_seq_used(&iter->seq));
4942 if (ret < 0) {
4943 __free_page(spd.pages[i]);
4944 break;
4945 }
4946 spd.partial[i].offset = 0;
4947 spd.partial[i].len = trace_seq_used(&iter->seq);
4948
4949 trace_seq_init(&iter->seq);
4950 }
4951
4952 trace_access_unlock(iter->cpu_file);
4953 trace_event_read_unlock();
4954 mutex_unlock(&iter->mutex);
4955
4956 spd.nr_pages = i;
4957
4958 if (i)
4959 ret = splice_to_pipe(pipe, &spd);
4960 else
4961 ret = 0;
4962out:
4963 splice_shrink_spd(&spd);
4964 return ret;
4965
4966out_err:
4967 mutex_unlock(&iter->mutex);
4968 goto out;
4969}
4970
4971static ssize_t
4972tracing_entries_read(struct file *filp, char __user *ubuf,
4973 size_t cnt, loff_t *ppos)
4974{
4975 struct inode *inode = file_inode(filp);
4976 struct trace_array *tr = inode->i_private;
4977 int cpu = tracing_get_cpu(inode);
4978 char buf[64];
4979 int r = 0;
4980 ssize_t ret;
4981
4982 mutex_lock(&trace_types_lock);
4983
4984 if (cpu == RING_BUFFER_ALL_CPUS) {
4985 int cpu, buf_size_same;
4986 unsigned long size;
4987
4988 size = 0;
4989 buf_size_same = 1;
4990 /* check if all cpu sizes are same */
4991 for_each_tracing_cpu(cpu) {
4992 /* fill in the size from first enabled cpu */
4993 if (size == 0)
4994 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4995 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
4996 buf_size_same = 0;
4997 break;
4998 }
4999 }
5000
5001 if (buf_size_same) {
5002 if (!ring_buffer_expanded)
5003 r = sprintf(buf, "%lu (expanded: %lu)\n",
5004 size >> 10,
5005 trace_buf_size >> 10);
5006 else
5007 r = sprintf(buf, "%lu\n", size >> 10);
5008 } else
5009 r = sprintf(buf, "X\n");
5010 } else
5011 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5012
5013 mutex_unlock(&trace_types_lock);
5014
5015 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5016 return ret;
5017}
5018
5019static ssize_t
5020tracing_entries_write(struct file *filp, const char __user *ubuf,
5021 size_t cnt, loff_t *ppos)
5022{
5023 struct inode *inode = file_inode(filp);
5024 struct trace_array *tr = inode->i_private;
5025 unsigned long val;
5026 int ret;
5027
5028 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5029 if (ret)
5030 return ret;
5031
5032 /* must have at least 1 entry */
5033 if (!val)
5034 return -EINVAL;
5035
5036 /* value is in KB */
5037 val <<= 10;
5038 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
5039 if (ret < 0)
5040 return ret;
5041
5042 *ppos += cnt;
5043
5044 return cnt;
5045}
5046
5047static ssize_t
5048tracing_total_entries_read(struct file *filp, char __user *ubuf,
5049 size_t cnt, loff_t *ppos)
5050{
5051 struct trace_array *tr = filp->private_data;
5052 char buf[64];
5053 int r, cpu;
5054 unsigned long size = 0, expanded_size = 0;
5055
5056 mutex_lock(&trace_types_lock);
5057 for_each_tracing_cpu(cpu) {
5058 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
5059 if (!ring_buffer_expanded)
5060 expanded_size += trace_buf_size >> 10;
5061 }
5062 if (ring_buffer_expanded)
5063 r = sprintf(buf, "%lu\n", size);
5064 else
5065 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5066 mutex_unlock(&trace_types_lock);
5067
5068 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5069}
5070
5071static ssize_t
5072tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5073 size_t cnt, loff_t *ppos)
5074{
5075 /*
5076	 * There is no need to read what the user has written; this function
5077	 * exists only so that "echo" into this file does not return an error.
5078 */
5079
5080 *ppos += cnt;
5081
5082 return cnt;
5083}
5084
5085static int
5086tracing_free_buffer_release(struct inode *inode, struct file *filp)
5087{
5088 struct trace_array *tr = inode->i_private;
5089
5090 /* disable tracing ? */
5091 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
5092 tracer_tracing_off(tr);
5093 /* resize the ring buffer to 0 */
5094 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
5095
5096 trace_array_put(tr);
5097
5098 return 0;
5099}
5100
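/*
 * Write handler for the trace_marker file: the user's text is copied straight
 * from its (pinned) user pages into a TRACE_PRINT event in the ring buffer,
 * with a newline appended if one is missing.
 */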
5101static ssize_t
5102tracing_mark_write(struct file *filp, const char __user *ubuf,
5103 size_t cnt, loff_t *fpos)
5104{
5105 unsigned long addr = (unsigned long)ubuf;
5106 struct trace_array *tr = filp->private_data;
5107 struct ring_buffer_event *event;
5108 struct ring_buffer *buffer;
5109 struct print_entry *entry;
5110 unsigned long irq_flags;
5111 struct page *pages[2];
5112 void *map_page[2];
5113 int nr_pages = 1;
5114 ssize_t written;
5115 int offset;
5116 int size;
5117 int len;
5118 int ret;
5119 int i;
5120
5121 if (tracing_disabled)
5122 return -EINVAL;
5123
5124 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5125 return -EINVAL;
5126
5127 if (cnt > TRACE_BUF_SIZE)
5128 cnt = TRACE_BUF_SIZE;
5129
5130 /*
5131 * Userspace is injecting traces into the kernel trace buffer.
5132	 * We want to be as non-intrusive as possible.
5133 * To do so, we do not want to allocate any special buffers
5134 * or take any locks, but instead write the userspace data
5135 * straight into the ring buffer.
5136 *
5137 * First we need to pin the userspace buffer into memory,
5138	 * which most likely it already is, because the process just referenced it,
5139	 * but there is no guarantee that it is. By using get_user_pages_fast()
5140 * and kmap_atomic/kunmap_atomic() we can get access to the
5141 * pages directly. We then write the data directly into the
5142 * ring buffer.
5143 */
5144 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5145
5146 /* check if we cross pages */
5147 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5148 nr_pages = 2;
5149
5150 offset = addr & (PAGE_SIZE - 1);
5151 addr &= PAGE_MASK;
5152
5153 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5154 if (ret < nr_pages) {
5155 while (--ret >= 0)
5156 put_page(pages[ret]);
5157 written = -EFAULT;
5158 goto out;
5159 }
5160
5161 for (i = 0; i < nr_pages; i++)
5162 map_page[i] = kmap_atomic(pages[i]);
5163
5164 local_save_flags(irq_flags);
5165 size = sizeof(*entry) + cnt + 2; /* possible \n added */
5166 buffer = tr->trace_buffer.buffer;
5167 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5168 irq_flags, preempt_count());
5169 if (!event) {
5170 /* Ring buffer disabled, return as if not open for write */
5171 written = -EBADF;
5172 goto out_unlock;
5173 }
5174
5175 entry = ring_buffer_event_data(event);
5176 entry->ip = _THIS_IP_;
5177
5178 if (nr_pages == 2) {
5179 len = PAGE_SIZE - offset;
5180 memcpy(&entry->buf, map_page[0] + offset, len);
5181 memcpy(&entry->buf[len], map_page[1], cnt - len);
5182 } else
5183 memcpy(&entry->buf, map_page[0] + offset, cnt);
5184
5185 if (entry->buf[cnt - 1] != '\n') {
5186 entry->buf[cnt] = '\n';
5187 entry->buf[cnt + 1] = '\0';
5188 } else
5189 entry->buf[cnt] = '\0';
5190
5191 __buffer_unlock_commit(buffer, event);
5192
5193 written = cnt;
5194
5195 *fpos += written;
5196
5197 out_unlock:
5198 for (i = nr_pages - 1; i >= 0; i--) {
5199 kunmap_atomic(map_page[i]);
5200 put_page(pages[i]);
5201 }
5202 out:
5203 return written;
5204}
5205
5206static int tracing_clock_show(struct seq_file *m, void *v)
5207{
5208 struct trace_array *tr = m->private;
5209 int i;
5210
5211 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
5212 seq_printf(m,
5213 "%s%s%s%s", i ? " " : "",
5214 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5215 i == tr->clock_id ? "]" : "");
5216 seq_putc(m, '\n');
5217
5218 return 0;
5219}
5220
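/*
 * Look up @clockstr in trace_clocks[] and install it as the clock for @tr's
 * buffers. The buffers are reset afterwards because timestamps taken with
 * different clocks are not comparable.
 */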
5221static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5222{
5223 int i;
5224
5225 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5226 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5227 break;
5228 }
5229 if (i == ARRAY_SIZE(trace_clocks))
5230 return -EINVAL;
5231
5232 mutex_lock(&trace_types_lock);
5233
5234 tr->clock_id = i;
5235
5236 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5237
5238 /*
5239 * New clock may not be consistent with the previous clock.
5240 * Reset the buffer so that it doesn't have incomparable timestamps.
5241 */
5242 tracing_reset_online_cpus(&tr->trace_buffer);
5243
5244#ifdef CONFIG_TRACER_MAX_TRACE
5245 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5246 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
5247 tracing_reset_online_cpus(&tr->max_buffer);
5248#endif
5249
5250 mutex_unlock(&trace_types_lock);
5251
5252 return 0;
5253}
5254
5255static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5256 size_t cnt, loff_t *fpos)
5257{
5258 struct seq_file *m = filp->private_data;
5259 struct trace_array *tr = m->private;
5260 char buf[64];
5261 const char *clockstr;
5262 int ret;
5263
5264 if (cnt >= sizeof(buf))
5265 return -EINVAL;
5266
5267 if (copy_from_user(&buf, ubuf, cnt))
5268 return -EFAULT;
5269
5270 buf[cnt] = 0;
5271
5272 clockstr = strstrip(buf);
5273
5274 ret = tracing_set_clock(tr, clockstr);
5275 if (ret)
5276 return ret;
5277
5278 *fpos += cnt;
5279
5280 return cnt;
5281}
5282
5283static int tracing_clock_open(struct inode *inode, struct file *file)
5284{
5285 struct trace_array *tr = inode->i_private;
5286 int ret;
5287
5288 if (tracing_disabled)
5289 return -ENODEV;
5290
5291 if (trace_array_get(tr))
5292 return -ENODEV;
5293
5294 ret = single_open(file, tracing_clock_show, inode->i_private);
5295 if (ret < 0)
5296 trace_array_put(tr);
5297
5298 return ret;
5299}
5300
5301struct ftrace_buffer_info {
5302 struct trace_iterator iter;
5303 void *spare;
5304 unsigned int read;
5305};
5306
5307#ifdef CONFIG_TRACER_SNAPSHOT
5308static int tracing_snapshot_open(struct inode *inode, struct file *file)
5309{
5310 struct trace_array *tr = inode->i_private;
5311 struct trace_iterator *iter;
5312 struct seq_file *m;
5313 int ret = 0;
5314
5315 if (trace_array_get(tr) < 0)
5316 return -ENODEV;
5317
5318 if (file->f_mode & FMODE_READ) {
5319 iter = __tracing_open(inode, file, true);
5320 if (IS_ERR(iter))
5321 ret = PTR_ERR(iter);
5322 } else {
5323 /* Writes still need the seq_file to hold the private data */
5324 ret = -ENOMEM;
5325 m = kzalloc(sizeof(*m), GFP_KERNEL);
5326 if (!m)
5327 goto out;
5328 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5329 if (!iter) {
5330 kfree(m);
5331 goto out;
5332 }
5333 ret = 0;
5334
5335 iter->tr = tr;
5336 iter->trace_buffer = &tr->max_buffer;
5337 iter->cpu_file = tracing_get_cpu(inode);
5338 m->private = iter;
5339 file->private_data = m;
5340 }
5341out:
5342 if (ret < 0)
5343 trace_array_put(tr);
5344
5345 return ret;
5346}
5347
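/*
 * Writes to the snapshot file:
 *   0  - free the snapshot buffer (only valid for the all-CPU file)
 *   1  - allocate the snapshot buffer if needed and swap it with the main
 *        buffer (per-CPU swap only if the ring buffer supports it)
 *   >1 - clear the snapshot buffer without swapping
 */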
5348static ssize_t
5349tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5350 loff_t *ppos)
5351{
5352 struct seq_file *m = filp->private_data;
5353 struct trace_iterator *iter = m->private;
5354 struct trace_array *tr = iter->tr;
5355 unsigned long val;
5356 int ret;
5357
5358 ret = tracing_update_buffers();
5359 if (ret < 0)
5360 return ret;
5361
5362 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5363 if (ret)
5364 return ret;
5365
5366 mutex_lock(&trace_types_lock);
5367
5368 if (tr->current_trace->use_max_tr) {
5369 ret = -EBUSY;
5370 goto out;
5371 }
5372
5373 switch (val) {
5374 case 0:
5375 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5376 ret = -EINVAL;
5377 break;
5378 }
5379 if (tr->allocated_snapshot)
5380 free_snapshot(tr);
5381 break;
5382 case 1:
5383/* Only allow per-cpu swap if the ring buffer supports it */
5384#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5385 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5386 ret = -EINVAL;
5387 break;
5388 }
5389#endif
5390 if (!tr->allocated_snapshot) {
5391 ret = alloc_snapshot(tr);
5392 if (ret < 0)
5393 break;
5394 }
5395 local_irq_disable();
5396 /* Now, we're going to swap */
5397 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5398 update_max_tr(tr, current, smp_processor_id());
5399 else
5400 update_max_tr_single(tr, current, iter->cpu_file);
5401 local_irq_enable();
5402 break;
5403 default:
5404 if (tr->allocated_snapshot) {
5405 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5406 tracing_reset_online_cpus(&tr->max_buffer);
5407 else
5408 tracing_reset(&tr->max_buffer, iter->cpu_file);
5409 }
5410 break;
5411 }
5412
5413 if (ret >= 0) {
5414 *ppos += cnt;
5415 ret = cnt;
5416 }
5417out:
5418 mutex_unlock(&trace_types_lock);
5419 return ret;
5420}
5421
5422static int tracing_snapshot_release(struct inode *inode, struct file *file)
5423{
5424 struct seq_file *m = file->private_data;
5425 int ret;
5426
5427 ret = tracing_release(inode, file);
5428
5429 if (file->f_mode & FMODE_READ)
5430 return ret;
5431
5432 /* If write only, the seq_file is just a stub */
5433 if (m)
5434 kfree(m->private);
5435 kfree(m);
5436
5437 return 0;
5438}
5439
5440static int tracing_buffers_open(struct inode *inode, struct file *filp);
5441static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5442 size_t count, loff_t *ppos);
5443static int tracing_buffers_release(struct inode *inode, struct file *file);
5444static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5445 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5446
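/*
 * Open the per-cpu snapshot_raw file: reuse tracing_buffers_open() and then
 * point the iterator at the max (snapshot) buffer. Refused with -EBUSY if
 * the current tracer itself uses the max buffer.
 */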
5447static int snapshot_raw_open(struct inode *inode, struct file *filp)
5448{
5449 struct ftrace_buffer_info *info;
5450 int ret;
5451
5452 ret = tracing_buffers_open(inode, filp);
5453 if (ret < 0)
5454 return ret;
5455
5456 info = filp->private_data;
5457
5458 if (info->iter.trace->use_max_tr) {
5459 tracing_buffers_release(inode, filp);
5460 return -EBUSY;
5461 }
5462
5463 info->iter.snapshot = true;
5464 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5465
5466 return ret;
5467}
5468
5469#endif /* CONFIG_TRACER_SNAPSHOT */
5470
5471
5472static const struct file_operations tracing_thresh_fops = {
5473 .open = tracing_open_generic,
5474 .read = tracing_thresh_read,
5475 .write = tracing_thresh_write,
5476 .llseek = generic_file_llseek,
5477};
5478
5479#ifdef CONFIG_TRACER_MAX_TRACE
5480static const struct file_operations tracing_max_lat_fops = {
5481 .open = tracing_open_generic,
5482 .read = tracing_max_lat_read,
5483 .write = tracing_max_lat_write,
5484 .llseek = generic_file_llseek,
5485};
5486#endif
5487
5488static const struct file_operations set_tracer_fops = {
5489 .open = tracing_open_generic,
5490 .read = tracing_set_trace_read,
5491 .write = tracing_set_trace_write,
5492 .llseek = generic_file_llseek,
5493};
5494
5495static const struct file_operations tracing_pipe_fops = {
5496 .open = tracing_open_pipe,
5497 .poll = tracing_poll_pipe,
5498 .read = tracing_read_pipe,
5499 .splice_read = tracing_splice_read_pipe,
5500 .release = tracing_release_pipe,
5501 .llseek = no_llseek,
5502};
5503
5504static const struct file_operations tracing_entries_fops = {
5505 .open = tracing_open_generic_tr,
5506 .read = tracing_entries_read,
5507 .write = tracing_entries_write,
5508 .llseek = generic_file_llseek,
5509 .release = tracing_release_generic_tr,
5510};
5511
5512static const struct file_operations tracing_total_entries_fops = {
5513 .open = tracing_open_generic_tr,
5514 .read = tracing_total_entries_read,
5515 .llseek = generic_file_llseek,
5516 .release = tracing_release_generic_tr,
5517};
5518
5519static const struct file_operations tracing_free_buffer_fops = {
5520 .open = tracing_open_generic_tr,
5521 .write = tracing_free_buffer_write,
5522 .release = tracing_free_buffer_release,
5523};
5524
5525static const struct file_operations tracing_mark_fops = {
5526 .open = tracing_open_generic_tr,
5527 .write = tracing_mark_write,
5528 .llseek = generic_file_llseek,
5529 .release = tracing_release_generic_tr,
5530};
5531
5532static const struct file_operations trace_clock_fops = {
5533 .open = tracing_clock_open,
5534 .read = seq_read,
5535 .llseek = seq_lseek,
5536 .release = tracing_single_release_tr,
5537 .write = tracing_clock_write,
5538};
5539
5540#ifdef CONFIG_TRACER_SNAPSHOT
5541static const struct file_operations snapshot_fops = {
5542 .open = tracing_snapshot_open,
5543 .read = seq_read,
5544 .write = tracing_snapshot_write,
5545 .llseek = tracing_lseek,
5546 .release = tracing_snapshot_release,
5547};
5548
5549static const struct file_operations snapshot_raw_fops = {
5550 .open = snapshot_raw_open,
5551 .read = tracing_buffers_read,
5552 .release = tracing_buffers_release,
5553 .splice_read = tracing_buffers_splice_read,
5554 .llseek = no_llseek,
5555};
5556
5557#endif /* CONFIG_TRACER_SNAPSHOT */
5558
5559static int tracing_buffers_open(struct inode *inode, struct file *filp)
5560{
5561 struct trace_array *tr = inode->i_private;
5562 struct ftrace_buffer_info *info;
5563 int ret;
5564
5565 if (tracing_disabled)
5566 return -ENODEV;
5567
5568 if (trace_array_get(tr) < 0)
5569 return -ENODEV;
5570
5571 info = kzalloc(sizeof(*info), GFP_KERNEL);
5572 if (!info) {
5573 trace_array_put(tr);
5574 return -ENOMEM;
5575 }
5576
5577 mutex_lock(&trace_types_lock);
5578
5579 info->iter.tr = tr;
5580 info->iter.cpu_file = tracing_get_cpu(inode);
5581 info->iter.trace = tr->current_trace;
5582 info->iter.trace_buffer = &tr->trace_buffer;
5583 info->spare = NULL;
5584 /* Force reading ring buffer for first read */
5585 info->read = (unsigned int)-1;
5586
5587 filp->private_data = info;
5588
5589 tr->current_trace->ref++;
5590
5591 mutex_unlock(&trace_types_lock);
5592
5593 ret = nonseekable_open(inode, filp);
5594 if (ret < 0)
5595 trace_array_put(tr);
5596
5597 return ret;
5598}
5599
5600static unsigned int
5601tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5602{
5603 struct ftrace_buffer_info *info = filp->private_data;
5604 struct trace_iterator *iter = &info->iter;
5605
5606 return trace_poll(iter, filp, poll_table);
5607}
5608
5609static ssize_t
5610tracing_buffers_read(struct file *filp, char __user *ubuf,
5611 size_t count, loff_t *ppos)
5612{
5613 struct ftrace_buffer_info *info = filp->private_data;
5614 struct trace_iterator *iter = &info->iter;
5615 ssize_t ret;
5616 ssize_t size;
5617
5618 if (!count)
5619 return 0;
5620
5621#ifdef CONFIG_TRACER_MAX_TRACE
5622 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5623 return -EBUSY;
5624#endif
5625
5626 if (!info->spare)
5627 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5628 iter->cpu_file);
5629 if (!info->spare)
5630 return -ENOMEM;
5631
5632 /* Do we have previous read data to read? */
5633 if (info->read < PAGE_SIZE)
5634 goto read;
5635
5636 again:
5637 trace_access_lock(iter->cpu_file);
5638 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
5639 &info->spare,
5640 count,
5641 iter->cpu_file, 0);
5642 trace_access_unlock(iter->cpu_file);
5643
5644 if (ret < 0) {
5645 if (trace_empty(iter)) {
5646 if ((filp->f_flags & O_NONBLOCK))
5647 return -EAGAIN;
5648
5649 ret = wait_on_pipe(iter, false);
5650 if (ret)
5651 return ret;
5652
5653 goto again;
5654 }
5655 return 0;
5656 }
5657
5658 info->read = 0;
5659 read:
5660 size = PAGE_SIZE - info->read;
5661 if (size > count)
5662 size = count;
5663
5664 ret = copy_to_user(ubuf, info->spare + info->read, size);
5665 if (ret == size)
5666 return -EFAULT;
5667
5668 size -= ret;
5669
5670 *ppos += size;
5671 info->read += size;
5672
5673 return size;
5674}
5675
5676static int tracing_buffers_release(struct inode *inode, struct file *file)
5677{
5678 struct ftrace_buffer_info *info = file->private_data;
5679 struct trace_iterator *iter = &info->iter;
5680
5681 mutex_lock(&trace_types_lock);
5682
5683 iter->tr->current_trace->ref--;
5684
5685 __trace_array_put(iter->tr);
5686
5687 if (info->spare)
5688 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
5689 kfree(info);
5690
5691 mutex_unlock(&trace_types_lock);
5692
5693 return 0;
5694}
5695
5696struct buffer_ref {
5697 struct ring_buffer *buffer;
5698 void *page;
5699 int ref;
5700};
5701
5702static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5703 struct pipe_buffer *buf)
5704{
5705 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5706
5707 if (--ref->ref)
5708 return;
5709
5710 ring_buffer_free_read_page(ref->buffer, ref->page);
5711 kfree(ref);
5712 buf->private = 0;
5713}
5714
5715static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5716 struct pipe_buffer *buf)
5717{
5718 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5719
5720 ref->ref++;
5721}
5722
5723/* Pipe buffer operations for a buffer. */
5724static const struct pipe_buf_operations buffer_pipe_buf_ops = {
5725 .can_merge = 0,
5726 .confirm = generic_pipe_buf_confirm,
5727 .release = buffer_pipe_buf_release,
5728 .steal = generic_pipe_buf_steal,
5729 .get = buffer_pipe_buf_get,
5730};
5731
5732/*
5733 * Callback from splice_to_pipe(), if we need to release some pages
5734 * at the end of the spd in case we errored out in filling the pipe.
5735 */
5736static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5737{
5738 struct buffer_ref *ref =
5739 (struct buffer_ref *)spd->partial[i].private;
5740
5741 if (--ref->ref)
5742 return;
5743
5744 ring_buffer_free_read_page(ref->buffer, ref->page);
5745 kfree(ref);
5746 spd->partial[i].private = 0;
5747}
5748
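/*
 * Splice implementation for the per-cpu trace_pipe_raw files: whole ring
 * buffer pages are handed to the pipe without copying. Each page carries a
 * refcounted buffer_ref so it is returned to the ring buffer only once the
 * pipe side is done with it.
 */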
5749static ssize_t
5750tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5751 struct pipe_inode_info *pipe, size_t len,
5752 unsigned int flags)
5753{
5754 struct ftrace_buffer_info *info = file->private_data;
5755 struct trace_iterator *iter = &info->iter;
5756 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5757 struct page *pages_def[PIPE_DEF_BUFFERS];
5758 struct splice_pipe_desc spd = {
5759 .pages = pages_def,
5760 .partial = partial_def,
5761 .nr_pages_max = PIPE_DEF_BUFFERS,
5762 .flags = flags,
5763 .ops = &buffer_pipe_buf_ops,
5764 .spd_release = buffer_spd_release,
5765 };
5766 struct buffer_ref *ref;
5767 int entries, size, i;
5768 ssize_t ret = 0;
5769
5770#ifdef CONFIG_TRACER_MAX_TRACE
5771 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5772 return -EBUSY;
5773#endif
5774
5775	if (*ppos & (PAGE_SIZE - 1))
5776		return -EINVAL;
5777
5778	if (len & (PAGE_SIZE - 1)) {
5779		if (len < PAGE_SIZE)
5780			return -EINVAL;
5781		len &= PAGE_MASK;
5782	}
5783
5784	if (splice_grow_spd(pipe, &spd))
5785		return -ENOMEM;
5786
5787 again:
5788 trace_access_lock(iter->cpu_file);
5789 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5790
5791 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
5792 struct page *page;
5793 int r;
5794
5795 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5796 if (!ref) {
5797 ret = -ENOMEM;
5798 break;
5799 }
5800
5801 ref->ref = 1;
5802 ref->buffer = iter->trace_buffer->buffer;
5803 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5804 if (!ref->page) {
5805 ret = -ENOMEM;
5806 kfree(ref);
5807 break;
5808 }
5809
5810 r = ring_buffer_read_page(ref->buffer, &ref->page,
5811 len, iter->cpu_file, 1);
5812 if (r < 0) {
5813 ring_buffer_free_read_page(ref->buffer, ref->page);
5814 kfree(ref);
5815 break;
5816 }
5817
5818 /*
5819		 * Zero out any leftover data; this page is going to
5820		 * user land.
5821 */
5822 size = ring_buffer_page_len(ref->page);
5823 if (size < PAGE_SIZE)
5824 memset(ref->page + size, 0, PAGE_SIZE - size);
5825
5826 page = virt_to_page(ref->page);
5827
5828 spd.pages[i] = page;
5829 spd.partial[i].len = PAGE_SIZE;
5830 spd.partial[i].offset = 0;
5831 spd.partial[i].private = (unsigned long)ref;
5832 spd.nr_pages++;
5833 *ppos += PAGE_SIZE;
5834
5835 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
5836 }
5837
5838 trace_access_unlock(iter->cpu_file);
5839 spd.nr_pages = i;
5840
5841 /* did we read anything? */
5842 if (!spd.nr_pages) {
5843 if (ret)
5844 return ret;
5845
5846 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5847 return -EAGAIN;
5848
5849 ret = wait_on_pipe(iter, true);
5850 if (ret)
5851 return ret;
5852
5853 goto again;
5854 }
5855
5856 ret = splice_to_pipe(pipe, &spd);
5857 splice_shrink_spd(&spd);
5858
5859 return ret;
5860}
5861
5862static const struct file_operations tracing_buffers_fops = {
5863 .open = tracing_buffers_open,
5864 .read = tracing_buffers_read,
5865 .poll = tracing_buffers_poll,
5866 .release = tracing_buffers_release,
5867 .splice_read = tracing_buffers_splice_read,
5868 .llseek = no_llseek,
5869};
5870
5871static ssize_t
5872tracing_stats_read(struct file *filp, char __user *ubuf,
5873 size_t count, loff_t *ppos)
5874{
5875 struct inode *inode = file_inode(filp);
5876 struct trace_array *tr = inode->i_private;
5877 struct trace_buffer *trace_buf = &tr->trace_buffer;
5878 int cpu = tracing_get_cpu(inode);
5879 struct trace_seq *s;
5880 unsigned long cnt;
5881 unsigned long long t;
5882 unsigned long usec_rem;
5883
5884 s = kmalloc(sizeof(*s), GFP_KERNEL);
5885 if (!s)
5886 return -ENOMEM;
5887
5888 trace_seq_init(s);
5889
5890 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
5891 trace_seq_printf(s, "entries: %ld\n", cnt);
5892
5893 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
5894 trace_seq_printf(s, "overrun: %ld\n", cnt);
5895
5896 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
5897 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5898
5899 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
5900 trace_seq_printf(s, "bytes: %ld\n", cnt);
5901
5902 if (trace_clocks[tr->clock_id].in_ns) {
5903 /* local or global for trace_clock */
5904 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5905 usec_rem = do_div(t, USEC_PER_SEC);
5906 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5907 t, usec_rem);
5908
5909 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
5910 usec_rem = do_div(t, USEC_PER_SEC);
5911 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5912 } else {
5913 /* counter or tsc mode for trace_clock */
5914 trace_seq_printf(s, "oldest event ts: %llu\n",
5915 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
5916
5917 trace_seq_printf(s, "now ts: %llu\n",
5918 ring_buffer_time_stamp(trace_buf->buffer, cpu));
5919 }
5920
5921 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
5922 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5923
5924 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
5925 trace_seq_printf(s, "read events: %ld\n", cnt);
5926
5927 count = simple_read_from_buffer(ubuf, count, ppos,
5928 s->buffer, trace_seq_used(s));
5929
5930 kfree(s);
5931
5932 return count;
5933}
5934
5935static const struct file_operations tracing_stats_fops = {
5936 .open = tracing_open_generic_tr,
5937 .read = tracing_stats_read,
5938 .llseek = generic_file_llseek,
5939 .release = tracing_release_generic_tr,
5940};
5941
5942#ifdef CONFIG_DYNAMIC_FTRACE
5943
5944int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5945{
5946 return 0;
5947}
5948
5949static ssize_t
5950tracing_read_dyn_info(struct file *filp, char __user *ubuf,
5951 size_t cnt, loff_t *ppos)
5952{
5953 static char ftrace_dyn_info_buffer[1024];
5954 static DEFINE_MUTEX(dyn_info_mutex);
5955 unsigned long *p = filp->private_data;
5956 char *buf = ftrace_dyn_info_buffer;
5957 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
5958 int r;
5959
5960 mutex_lock(&dyn_info_mutex);
5961 r = sprintf(buf, "%ld ", *p);
5962
5963 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
5964 buf[r++] = '\n';
5965
5966 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5967
5968 mutex_unlock(&dyn_info_mutex);
5969
5970 return r;
5971}
5972
5973static const struct file_operations tracing_dyn_info_fops = {
5974 .open = tracing_open_generic,
5975 .read = tracing_read_dyn_info,
5976 .llseek = generic_file_llseek,
5977};
5978#endif /* CONFIG_DYNAMIC_FTRACE */
5979
5980#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5981static void
5982ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5983{
5984 tracing_snapshot();
5985}
5986
5987static void
5988ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5989{
5990 unsigned long *count = (long *)data;
5991
5992 if (!*count)
5993 return;
5994
5995 if (*count != -1)
5996 (*count)--;
5997
5998 tracing_snapshot();
5999}
6000
6001static int
6002ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6003 struct ftrace_probe_ops *ops, void *data)
6004{
6005 long count = (long)data;
6006
6007 seq_printf(m, "%ps:", (void *)ip);
6008
6009 seq_puts(m, "snapshot");
6010
6011 if (count == -1)
6012 seq_puts(m, ":unlimited\n");
6013 else
6014 seq_printf(m, ":count=%ld\n", count);
6015
6016 return 0;
6017}
6018
6019static struct ftrace_probe_ops snapshot_probe_ops = {
6020 .func = ftrace_snapshot,
6021 .print = ftrace_snapshot_print,
6022};
6023
6024static struct ftrace_probe_ops snapshot_count_probe_ops = {
6025 .func = ftrace_count_snapshot,
6026 .print = ftrace_snapshot_print,
6027};
6028
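/*
 * Handler for the "snapshot" command in set_ftrace_filter, e.g.
 * "<func>:snapshot" or "<func>:snapshot:<count>". A leading '!' unregisters
 * the probe; the optional count limits how many snapshots are taken.
 */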
6029static int
6030ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6031 char *glob, char *cmd, char *param, int enable)
6032{
6033 struct ftrace_probe_ops *ops;
6034 void *count = (void *)-1;
6035 char *number;
6036 int ret;
6037
6038 /* hash funcs only work with set_ftrace_filter */
6039 if (!enable)
6040 return -EINVAL;
6041
6042 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6043
6044 if (glob[0] == '!') {
6045 unregister_ftrace_function_probe_func(glob+1, ops);
6046 return 0;
6047 }
6048
6049 if (!param)
6050 goto out_reg;
6051
6052	number = strsep(&param, ":");
6053
6054 if (!strlen(number))
6055 goto out_reg;
6056
6057 /*
6058 * We use the callback data field (which is a pointer)
6059 * as our counter.
6060 */
6061 ret = kstrtoul(number, 0, (unsigned long *)&count);
6062 if (ret)
6063 return ret;
6064
6065 out_reg:
6066 ret = register_ftrace_function_probe(glob, ops, count);
6067
6068 if (ret >= 0)
6069 alloc_snapshot(&global_trace);
6070
6071 return ret < 0 ? ret : 0;
6072}
6073
6074static struct ftrace_func_command ftrace_snapshot_cmd = {
6075 .name = "snapshot",
6076 .func = ftrace_trace_snapshot_callback,
6077};
6078
6079static __init int register_snapshot_cmd(void)
6080{
6081 return register_ftrace_command(&ftrace_snapshot_cmd);
6082}
6083#else
6084static inline __init int register_snapshot_cmd(void) { return 0; }
6085#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
6086
6087static struct dentry *tracing_get_dentry(struct trace_array *tr)
6088{
6089 if (WARN_ON(!tr->dir))
6090 return ERR_PTR(-ENODEV);
6091
6092 /* Top directory uses NULL as the parent */
6093 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6094 return NULL;
6095
6096 /* All sub buffers have a descriptor */
6097 return tr->dir;
6098}
6099
6100static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6101{
6102 struct dentry *d_tracer;
6103
6104 if (tr->percpu_dir)
6105 return tr->percpu_dir;
6106
6107 d_tracer = tracing_get_dentry(tr);
6108 if (IS_ERR(d_tracer))
6109 return NULL;
6110
6111 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
6112
6113 WARN_ONCE(!tr->percpu_dir,
6114 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
6115
6116 return tr->percpu_dir;
6117}
6118
6119static struct dentry *
6120trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6121 void *data, long cpu, const struct file_operations *fops)
6122{
6123 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6124
6125 if (ret) /* See tracing_get_cpu() */
6126 d_inode(ret)->i_cdev = (void *)(cpu + 1);
6127 return ret;
6128}
6129
6130static void
6131tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
6132{
6133 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
6134 struct dentry *d_cpu;
6135 char cpu_dir[30]; /* 30 characters should be more than enough */
6136
6137 if (!d_percpu)
6138 return;
6139
6140 snprintf(cpu_dir, 30, "cpu%ld", cpu);
6141 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
6142 if (!d_cpu) {
6143 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
6144 return;
6145 }
6146
6147 /* per cpu trace_pipe */
6148 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
6149 tr, cpu, &tracing_pipe_fops);
6150
6151 /* per cpu trace */
6152 trace_create_cpu_file("trace", 0644, d_cpu,
6153 tr, cpu, &tracing_fops);
6154
6155 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
6156 tr, cpu, &tracing_buffers_fops);
6157
6158 trace_create_cpu_file("stats", 0444, d_cpu,
6159 tr, cpu, &tracing_stats_fops);
6160
6161 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
6162 tr, cpu, &tracing_entries_fops);
6163
6164#ifdef CONFIG_TRACER_SNAPSHOT
6165 trace_create_cpu_file("snapshot", 0644, d_cpu,
6166 tr, cpu, &snapshot_fops);
6167
6168 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
6169 tr, cpu, &snapshot_raw_fops);
6170#endif
6171}
6172
6173#ifdef CONFIG_FTRACE_SELFTEST
6174/* Let selftest have access to static functions in this file */
6175#include "trace_selftest.c"
6176#endif
6177
6178static ssize_t
6179trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6180 loff_t *ppos)
6181{
6182 struct trace_option_dentry *topt = filp->private_data;
6183 char *buf;
6184
6185 if (topt->flags->val & topt->opt->bit)
6186 buf = "1\n";
6187 else
6188 buf = "0\n";
6189
6190 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6191}
6192
6193static ssize_t
6194trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6195 loff_t *ppos)
6196{
6197 struct trace_option_dentry *topt = filp->private_data;
6198 unsigned long val;
6199 int ret;
6200
6201 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6202 if (ret)
6203 return ret;
6204
6205 if (val != 0 && val != 1)
6206 return -EINVAL;
6207
6208 if (!!(topt->flags->val & topt->opt->bit) != val) {
6209 mutex_lock(&trace_types_lock);
6210 ret = __set_tracer_option(topt->tr, topt->flags,
6211 topt->opt, !val);
6212 mutex_unlock(&trace_types_lock);
6213 if (ret)
6214 return ret;
6215 }
6216
6217 *ppos += cnt;
6218
6219 return cnt;
6220}
6221
6222
6223static const struct file_operations trace_options_fops = {
6224 .open = tracing_open_generic,
6225 .read = trace_options_read,
6226 .write = trace_options_write,
6227 .llseek = generic_file_llseek,
6228};
6229
6230/*
6231 * In order to pass in both the trace_array descriptor as well as the index
6232 * to the flag that the trace option file represents, the trace_array
6233 * has a character array of trace_flags_index[], which holds the index
6234 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
6235 * The address of this character array is passed to the flag option file
6236 * read/write callbacks.
6237 *
6238 * In order to extract both the index and the trace_array descriptor,
6239 * get_tr_index() uses the following algorithm.
6240 *
6241 * idx = *ptr;
6242 *
6243 * As the pointer points at the index value itself (remember,
6244 * index[1] == 1), dereferencing it yields the index.
6245 *
6246 * Then, to get the trace_array descriptor, subtract that index
6247 * from the pointer, which lands on the start of the index array.
6248 *
6249 * ptr - idx == &index[0]
6250 *
6251 * Then a simple container_of() from that pointer gets us to the
6252 * trace_array descriptor.
6253 */
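/*
 * Example: if data points at &tr->trace_flags_index[3], then *data == 3
 * and data - 3 == tr->trace_flags_index, so container_of() recovers tr.
 */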
6254static void get_tr_index(void *data, struct trace_array **ptr,
6255 unsigned int *pindex)
6256{
6257 *pindex = *(unsigned char *)data;
6258
6259 *ptr = container_of(data - *pindex, struct trace_array,
6260 trace_flags_index);
6261}
6262
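/*
 * Read/write callbacks for the general trace option files (trace_flags
 * bits that are not tracer specific). The file's private data points
 * into tr->trace_flags_index[]; get_tr_index() recovers both the bit
 * index and the owning trace_array.
 */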
6263static ssize_t
6264trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6265 loff_t *ppos)
6266{
6267 void *tr_index = filp->private_data;
6268 struct trace_array *tr;
6269 unsigned int index;
6270 char *buf;
6271
6272 get_tr_index(tr_index, &tr, &index);
6273
6274 if (tr->trace_flags & (1 << index))
6275 buf = "1\n";
6276 else
6277 buf = "0\n";
6278
6279 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6280}
6281
6282static ssize_t
6283trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6284 loff_t *ppos)
6285{
6286 void *tr_index = filp->private_data;
6287 struct trace_array *tr;
6288 unsigned int index;
6289 unsigned long val;
6290 int ret;
6291
6292 get_tr_index(tr_index, &tr, &index);
6293
6294 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6295 if (ret)
6296 return ret;
6297
6298 if (val != 0 && val != 1)
6299 return -EINVAL;
6300
6301 mutex_lock(&trace_types_lock);
6302 ret = set_tracer_flag(tr, 1 << index, val);
6303 mutex_unlock(&trace_types_lock);
6304
6305 if (ret < 0)
6306 return ret;
6307
6308 *ppos += cnt;
6309
6310 return cnt;
6311}
6312
6313static const struct file_operations trace_options_core_fops = {
6314 .open = tracing_open_generic,
6315 .read = trace_options_core_read,
6316 .write = trace_options_core_write,
6317 .llseek = generic_file_llseek,
6318};
6319
6320struct dentry *trace_create_file(const char *name,
6321 umode_t mode,
6322 struct dentry *parent,
6323 void *data,
6324 const struct file_operations *fops)
6325{
6326 struct dentry *ret;
6327
6328 ret = tracefs_create_file(name, mode, parent, data, fops);
6329 if (!ret)
6330 pr_warn("Could not create tracefs '%s' entry\n", name);
6331
6332 return ret;
6333}
6334
6335
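/*
 * Return the "options" tracefs directory of a trace array, creating it
 * the first time it is needed.
 */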
6336static struct dentry *trace_options_init_dentry(struct trace_array *tr)
6337{
6338 struct dentry *d_tracer;
6339
6340 if (tr->options)
6341 return tr->options;
6342
6343 d_tracer = tracing_get_dentry(tr);
6344 if (IS_ERR(d_tracer))
6345 return NULL;
6346
6347 tr->options = tracefs_create_dir("options", d_tracer);
6348 if (!tr->options) {
6349 pr_warn("Could not create tracefs directory 'options'\n");
6350 return NULL;
6351 }
6352
6353 return tr->options;
6354}
6355
6356static void
6357create_trace_option_file(struct trace_array *tr,
6358 struct trace_option_dentry *topt,
6359 struct tracer_flags *flags,
6360 struct tracer_opt *opt)
6361{
6362 struct dentry *t_options;
6363
6364 t_options = trace_options_init_dentry(tr);
6365 if (!t_options)
6366 return;
6367
6368 topt->flags = flags;
6369 topt->opt = opt;
6370 topt->tr = tr;
6371
6372 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
6373 &trace_options_fops);
6374
6375}
6376
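/*
 * Create one file under options/ for every flag the given tracer
 * defines. The created entries are recorded in tr->topts so that they
 * can be freed again when the instance is removed.
 */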
6377static void
6378create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
6379{
6380 struct trace_option_dentry *topts;
6381 struct trace_options *tr_topts;
6382 struct tracer_flags *flags;
6383 struct tracer_opt *opts;
6384 int cnt;
6385 int i;
6386
6387 if (!tracer)
6388 return;
6389
6390 flags = tracer->flags;
6391
6392 if (!flags || !flags->opts)
6393 return;
6394
6395 /*
6396 * If this is an instance, only create flags for tracers
6397 * the instance may have.
6398 */
6399 if (!trace_ok_for_array(tracer, tr))
6400 return;
6401
6402 for (i = 0; i < tr->nr_topts; i++) {
6403		/* Make sure there are no duplicate flags. */
6404 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
6405 return;
6406 }
6407
6408 opts = flags->opts;
6409
6410 for (cnt = 0; opts[cnt].name; cnt++)
6411 ;
6412
6413 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
6414 if (!topts)
6415 return;
6416
6417 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
6418 GFP_KERNEL);
6419 if (!tr_topts) {
6420 kfree(topts);
6421 return;
6422 }
6423
6424 tr->topts = tr_topts;
6425 tr->topts[tr->nr_topts].tracer = tracer;
6426 tr->topts[tr->nr_topts].topts = topts;
6427 tr->nr_topts++;
6428
6429 for (cnt = 0; opts[cnt].name; cnt++) {
6430 create_trace_option_file(tr, &topts[cnt], flags,
6431 &opts[cnt]);
6432 WARN_ONCE(topts[cnt].entry == NULL,
6433 "Failed to create trace option: %s",
6434 opts[cnt].name);
6435 }
6436}
6437
6438static struct dentry *
6439create_trace_option_core_file(struct trace_array *tr,
6440 const char *option, long index)
6441{
6442 struct dentry *t_options;
6443
6444 t_options = trace_options_init_dentry(tr);
6445 if (!t_options)
6446 return NULL;
6447
6448 return trace_create_file(option, 0644, t_options,
6449 (void *)&tr->trace_flags_index[index],
6450 &trace_options_core_fops);
6451}
6452
6453static void create_trace_options_dir(struct trace_array *tr)
6454{
6455 struct dentry *t_options;
6456 bool top_level = tr == &global_trace;
6457 int i;
6458
6459 t_options = trace_options_init_dentry(tr);
6460 if (!t_options)
6461 return;
6462
6463 for (i = 0; trace_options[i]; i++) {
6464 if (top_level ||
6465 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6466 create_trace_option_core_file(tr, trace_options[i], i);
6467 }
6468}
6469
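/*
 * Callbacks for the "tracing_on" file. Reading shows whether the ring
 * buffer is currently recording; writing 0 or 1 turns recording off or
 * on and invokes the current tracer's stop()/start() callbacks.
 */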
6470static ssize_t
6471rb_simple_read(struct file *filp, char __user *ubuf,
6472 size_t cnt, loff_t *ppos)
6473{
6474 struct trace_array *tr = filp->private_data;
6475 char buf[64];
6476 int r;
6477
6478 r = tracer_tracing_is_on(tr);
6479 r = sprintf(buf, "%d\n", r);
6480
6481 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6482}
6483
6484static ssize_t
6485rb_simple_write(struct file *filp, const char __user *ubuf,
6486 size_t cnt, loff_t *ppos)
6487{
6488 struct trace_array *tr = filp->private_data;
6489 struct ring_buffer *buffer = tr->trace_buffer.buffer;
6490 unsigned long val;
6491 int ret;
6492
6493 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6494 if (ret)
6495 return ret;
6496
6497 if (buffer) {
6498 mutex_lock(&trace_types_lock);
6499 if (val) {
6500 tracer_tracing_on(tr);
6501 if (tr->current_trace->start)
6502 tr->current_trace->start(tr);
6503 } else {
6504 tracer_tracing_off(tr);
6505 if (tr->current_trace->stop)
6506 tr->current_trace->stop(tr);
6507 }
6508 mutex_unlock(&trace_types_lock);
6509 }
6510
6511 (*ppos)++;
6512
6513 return cnt;
6514}
6515
6516static const struct file_operations rb_simple_fops = {
6517 .open = tracing_open_generic_tr,
6518 .read = rb_simple_read,
6519 .write = rb_simple_write,
6520 .release = tracing_release_generic_tr,
6521 .llseek = default_llseek,
6522};
6523
6524struct dentry *trace_instance_dir;
6525
6526static void
6527init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
6528
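/*
 * Allocate the ring buffer and the per-CPU data of one trace buffer.
 * Returns 0 on success or -ENOMEM on failure.
 */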
6529static int
6530allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
6531{
6532 enum ring_buffer_flags rb_flags;
6533
6534 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6535
6536 buf->tr = tr;
6537
6538 buf->buffer = ring_buffer_alloc(size, rb_flags);
6539 if (!buf->buffer)
6540 return -ENOMEM;
6541
6542 buf->data = alloc_percpu(struct trace_array_cpu);
6543 if (!buf->data) {
6544 ring_buffer_free(buf->buffer);
6545 return -ENOMEM;
6546 }
6547
6548 /* Allocate the first page for all buffers */
6549	set_buffer_entries(buf,
6550			   ring_buffer_size(buf->buffer, 0));
6551
6552 return 0;
6553}
6554
6555static int allocate_trace_buffers(struct trace_array *tr, int size)
6556{
6557 int ret;
6558
6559 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6560 if (ret)
6561 return ret;
6562
6563#ifdef CONFIG_TRACER_MAX_TRACE
6564 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6565 allocate_snapshot ? size : 1);
6566 if (WARN_ON(ret)) {
6567 ring_buffer_free(tr->trace_buffer.buffer);
6568 free_percpu(tr->trace_buffer.data);
6569 return -ENOMEM;
6570 }
6571 tr->allocated_snapshot = allocate_snapshot;
6572
6573 /*
6574 * Only the top level trace array gets its snapshot allocated
6575 * from the kernel command line.
6576 */
6577 allocate_snapshot = false;
6578#endif
6579 return 0;
6580}
6581
6582static void free_trace_buffer(struct trace_buffer *buf)
6583{
6584 if (buf->buffer) {
6585 ring_buffer_free(buf->buffer);
6586 buf->buffer = NULL;
6587 free_percpu(buf->data);
6588 buf->data = NULL;
6589 }
6590}
6591
6592static void free_trace_buffers(struct trace_array *tr)
6593{
6594 if (!tr)
6595 return;
6596
6597 free_trace_buffer(&tr->trace_buffer);
6598
6599#ifdef CONFIG_TRACER_MAX_TRACE
6600 free_trace_buffer(&tr->max_buffer);
6601#endif
6602}
6603
6604static void init_trace_flags_index(struct trace_array *tr)
6605{
6606 int i;
6607
6608 /* Used by the trace options files */
6609 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6610 tr->trace_flags_index[i] = i;
6611}
6612
6613static void __update_tracer_options(struct trace_array *tr)
6614{
6615 struct tracer *t;
6616
6617 for (t = trace_types; t; t = t->next)
6618 add_tracer_options(tr, t);
6619}
6620
6621static void update_tracer_options(struct trace_array *tr)
6622{
6623 mutex_lock(&trace_types_lock);
6624 __update_tracer_options(tr);
6625 mutex_unlock(&trace_types_lock);
6626}
6627
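/*
 * Called by tracefs when a new directory is created under instances/
 * (e.g. "mkdir instances/foo" in the tracefs mount). Sets up a new
 * trace_array with its own buffers, events and option files.
 */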
6628static int instance_mkdir(const char *name)
6629{
6630 struct trace_array *tr;
6631 int ret;
6632
6633 mutex_lock(&trace_types_lock);
6634
6635 ret = -EEXIST;
6636 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6637 if (tr->name && strcmp(tr->name, name) == 0)
6638 goto out_unlock;
6639 }
6640
6641 ret = -ENOMEM;
6642 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6643 if (!tr)
6644 goto out_unlock;
6645
6646 tr->name = kstrdup(name, GFP_KERNEL);
6647 if (!tr->name)
6648 goto out_free_tr;
6649
6650 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6651 goto out_free_tr;
6652
6653 tr->trace_flags = global_trace.trace_flags;
6654
6655 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6656
6657 raw_spin_lock_init(&tr->start_lock);
6658
6659 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6660
6661 tr->current_trace = &nop_trace;
6662
6663 INIT_LIST_HEAD(&tr->systems);
6664 INIT_LIST_HEAD(&tr->events);
6665
6666 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
6667 goto out_free_tr;
6668
6669 tr->dir = tracefs_create_dir(name, trace_instance_dir);
6670 if (!tr->dir)
6671 goto out_free_tr;
6672
6673 ret = event_trace_add_tracer(tr->dir, tr);
6674 if (ret) {
6675 tracefs_remove_recursive(tr->dir);
6676 goto out_free_tr;
6677 }
6678
6679 init_tracer_tracefs(tr, tr->dir);
6680 init_trace_flags_index(tr);
6681 __update_tracer_options(tr);
6682
6683 list_add(&tr->list, &ftrace_trace_arrays);
6684
6685 mutex_unlock(&trace_types_lock);
6686
6687 return 0;
6688
6689 out_free_tr:
6690 free_trace_buffers(tr);
6691 free_cpumask_var(tr->tracing_cpumask);
6692 kfree(tr->name);
6693 kfree(tr);
6694
6695 out_unlock:
6696 mutex_unlock(&trace_types_lock);
6697
6698 return ret;
6699
6700}
6701
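/*
 * Called by tracefs when a directory under instances/ is removed.
 * Tears the instance down, unless it is still referenced (open files
 * or a tracer holding a reference), in which case -EBUSY is returned.
 */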
6702static int instance_rmdir(const char *name)
6703{
6704 struct trace_array *tr;
6705 int found = 0;
6706 int ret;
6707 int i;
6708
6709 mutex_lock(&trace_types_lock);
6710
6711 ret = -ENODEV;
6712 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6713 if (tr->name && strcmp(tr->name, name) == 0) {
6714 found = 1;
6715 break;
6716 }
6717 }
6718 if (!found)
6719 goto out_unlock;
6720
6721 ret = -EBUSY;
6722 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
6723 goto out_unlock;
6724
6725 list_del(&tr->list);
6726
6727 tracing_set_nop(tr);
6728 event_trace_del_tracer(tr);
6729 ftrace_destroy_function_files(tr);
6730 tracefs_remove_recursive(tr->dir);
6731 free_trace_buffers(tr);
6732
6733 for (i = 0; i < tr->nr_topts; i++) {
6734 kfree(tr->topts[i].topts);
6735 }
6736 kfree(tr->topts);
6737
6738 kfree(tr->name);
6739 kfree(tr);
6740
6741 ret = 0;
6742
6743 out_unlock:
6744 mutex_unlock(&trace_types_lock);
6745
6746 return ret;
6747}
6748
6749static __init void create_trace_instances(struct dentry *d_tracer)
6750{
6751 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6752 instance_mkdir,
6753 instance_rmdir);
6754 if (WARN_ON(!trace_instance_dir))
6755 return;
6756}
6757
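/*
 * Populate the tracefs directory of a trace array with the standard
 * control and output files, plus the per_cpu directories.
 */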
6758static void
6759init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6760{
6761 int cpu;
6762
6763 trace_create_file("available_tracers", 0444, d_tracer,
6764 tr, &show_traces_fops);
6765
6766 trace_create_file("current_tracer", 0644, d_tracer,
6767 tr, &set_tracer_fops);
6768
6769 trace_create_file("tracing_cpumask", 0644, d_tracer,
6770 tr, &tracing_cpumask_fops);
6771
6772 trace_create_file("trace_options", 0644, d_tracer,
6773 tr, &tracing_iter_fops);
6774
6775 trace_create_file("trace", 0644, d_tracer,
6776 tr, &tracing_fops);
6777
6778 trace_create_file("trace_pipe", 0444, d_tracer,
6779 tr, &tracing_pipe_fops);
6780
6781 trace_create_file("buffer_size_kb", 0644, d_tracer,
6782 tr, &tracing_entries_fops);
6783
6784 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6785 tr, &tracing_total_entries_fops);
6786
6787 trace_create_file("free_buffer", 0200, d_tracer,
6788 tr, &tracing_free_buffer_fops);
6789
6790 trace_create_file("trace_marker", 0220, d_tracer,
6791 tr, &tracing_mark_fops);
6792
6793 trace_create_file("trace_clock", 0644, d_tracer, tr,
6794 &trace_clock_fops);
6795
6796 trace_create_file("tracing_on", 0644, d_tracer,
6797 tr, &rb_simple_fops);
6798
6799 create_trace_options_dir(tr);
6800
6801#ifdef CONFIG_TRACER_MAX_TRACE
6802 trace_create_file("tracing_max_latency", 0644, d_tracer,
6803 &tr->max_latency, &tracing_max_lat_fops);
6804#endif
6805
6806 if (ftrace_create_function_files(tr, d_tracer))
6807 WARN(1, "Could not allocate function filter files");
6808
6809#ifdef CONFIG_TRACER_SNAPSHOT
6810 trace_create_file("snapshot", 0644, d_tracer,
6811 tr, &snapshot_fops);
6812#endif
6813
6814 for_each_tracing_cpu(cpu)
6815 tracing_init_tracefs_percpu(tr, cpu);
6816
6817}
6818
6819static struct vfsmount *trace_automount(void *ignore)
6820{
6821 struct vfsmount *mnt;
6822 struct file_system_type *type;
6823
6824 /*
6825 * To maintain backward compatibility for tools that mount
6826 * debugfs to get to the tracing facility, tracefs is automatically
6827 * mounted to the debugfs/tracing directory.
6828 */
6829 type = get_fs_type("tracefs");
6830 if (!type)
6831 return NULL;
6832 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6833 put_filesystem(type);
6834 if (IS_ERR(mnt))
6835 return NULL;
6836 mntget(mnt);
6837
6838 return mnt;
6839}
6840
6841/**
6842 * tracing_init_dentry - initialize top level trace array
6843 *
6844 * This is called when creating files or directories in the tracing
6845 * directory. It is called by the boot-up code (via fs_initcall()) and
6846 * returns the dentry of the top level tracing directory.
6847 */
6848struct dentry *tracing_init_dentry(void)
6849{
6850 struct trace_array *tr = &global_trace;
6851
6852 /* The top level trace array uses NULL as parent */
6853 if (tr->dir)
6854 return NULL;
6855
6856 if (WARN_ON(!tracefs_initialized()) ||
6857 (IS_ENABLED(CONFIG_DEBUG_FS) &&
6858 WARN_ON(!debugfs_initialized())))
6859 return ERR_PTR(-ENODEV);
6860
6861 /*
6862 * As there may still be users that expect the tracing
6863 * files to exist in debugfs/tracing, we must automount
6864 * the tracefs file system there, so older tools still
6865 * work with the newer kerenl.
6866 */
6867 tr->dir = debugfs_create_automount("tracing", NULL,
6868 trace_automount, NULL);
6869 if (!tr->dir) {
6870 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6871 return ERR_PTR(-ENOMEM);
6872 }
6873
6874 return NULL;
6875}
6876
6877extern struct trace_enum_map *__start_ftrace_enum_maps[];
6878extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6879
6880static void __init trace_enum_init(void)
6881{
6882 int len;
6883
6884 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
6885 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
6886}
6887
6888#ifdef CONFIG_MODULES
6889static void trace_module_add_enums(struct module *mod)
6890{
6891 if (!mod->num_trace_enums)
6892 return;
6893
6894 /*
6895 * Modules with bad taint do not have events created, do
6896 * not bother with enums either.
6897 */
6898 if (trace_module_has_bad_taint(mod))
6899 return;
6900
6901 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
6902}
6903
6904#ifdef CONFIG_TRACE_ENUM_MAP_FILE
6905static void trace_module_remove_enums(struct module *mod)
6906{
6907 union trace_enum_map_item *map;
6908 union trace_enum_map_item **last = &trace_enum_maps;
6909
6910 if (!mod->num_trace_enums)
6911 return;
6912
6913 mutex_lock(&trace_enum_mutex);
6914
6915 map = trace_enum_maps;
6916
6917 while (map) {
6918 if (map->head.mod == mod)
6919 break;
6920 map = trace_enum_jmp_to_tail(map);
6921 last = &map->tail.next;
6922 map = map->tail.next;
6923 }
6924 if (!map)
6925 goto out;
6926
6927 *last = trace_enum_jmp_to_tail(map)->tail.next;
6928 kfree(map);
6929 out:
6930 mutex_unlock(&trace_enum_mutex);
6931}
6932#else
6933static inline void trace_module_remove_enums(struct module *mod) { }
6934#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6935
6936static int trace_module_notify(struct notifier_block *self,
6937 unsigned long val, void *data)
6938{
6939 struct module *mod = data;
6940
6941 switch (val) {
6942 case MODULE_STATE_COMING:
6943 trace_module_add_enums(mod);
6944 break;
6945 case MODULE_STATE_GOING:
6946 trace_module_remove_enums(mod);
6947 break;
6948 }
6949
6950 return 0;
6951}
6952
6953static struct notifier_block trace_module_nb = {
6954 .notifier_call = trace_module_notify,
6955 .priority = 0,
6956};
6957#endif /* CONFIG_MODULES */
6958
6959static __init int tracer_init_tracefs(void)
6960{
6961 struct dentry *d_tracer;
6962
6963 trace_access_lock_init();
6964
6965 d_tracer = tracing_init_dentry();
6966 if (IS_ERR(d_tracer))
6967 return 0;
6968
6969 init_tracer_tracefs(&global_trace, d_tracer);
6970
6971 trace_create_file("tracing_thresh", 0644, d_tracer,
6972 &global_trace, &tracing_thresh_fops);
6973
6974 trace_create_file("README", 0444, d_tracer,
6975 NULL, &tracing_readme_fops);
6976
6977 trace_create_file("saved_cmdlines", 0444, d_tracer,
6978 NULL, &tracing_saved_cmdlines_fops);
6979
6980 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6981 NULL, &tracing_saved_cmdlines_size_fops);
6982
6983 trace_enum_init();
6984
6985 trace_create_enum_file(d_tracer);
6986
6987#ifdef CONFIG_MODULES
6988 register_module_notifier(&trace_module_nb);
6989#endif
6990
6991#ifdef CONFIG_DYNAMIC_FTRACE
6992 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6993 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
6994#endif
6995
6996 create_trace_instances(d_tracer);
6997
6998 update_tracer_options(&global_trace);
6999
7000 return 0;
7001}
7002
7003static int trace_panic_handler(struct notifier_block *this,
7004 unsigned long event, void *unused)
7005{
7006 if (ftrace_dump_on_oops)
7007 ftrace_dump(ftrace_dump_on_oops);
7008 return NOTIFY_OK;
7009}
7010
7011static struct notifier_block trace_panic_notifier = {
7012 .notifier_call = trace_panic_handler,
7013 .next = NULL,
7014 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7015};
7016
7017static int trace_die_handler(struct notifier_block *self,
7018 unsigned long val,
7019 void *data)
7020{
7021 switch (val) {
7022 case DIE_OOPS:
7023 if (ftrace_dump_on_oops)
7024 ftrace_dump(ftrace_dump_on_oops);
7025 break;
7026 default:
7027 break;
7028 }
7029 return NOTIFY_OK;
7030}
7031
7032static struct notifier_block trace_die_notifier = {
7033 .notifier_call = trace_die_handler,
7034 .priority = 200
7035};
7036
7037/*
7038 * printk is set to max of 1024, we really don't need it that big.
7039 * Nothing should be printing 1000 characters anyway.
7040 */
7041#define TRACE_MAX_PRINT 1000
7042
7043/*
7044 * Define here KERN_TRACE so that we have one place to modify
7045 * it if we decide to change what log level the ftrace dump
7046 * should be at.
7047 */
7048#define KERN_TRACE KERN_EMERG
7049
7050void
7051trace_printk_seq(struct trace_seq *s)
7052{
7053 /* Probably should print a warning here. */
7054 if (s->seq.len >= TRACE_MAX_PRINT)
7055 s->seq.len = TRACE_MAX_PRINT;
7056
7057 /*
7058 * More paranoid code. Although the buffer size is set to
7059 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7060 * an extra layer of protection.
7061 */
7062 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7063 s->seq.len = s->seq.size - 1;
7064
7065	/* Should already be nul terminated, but we are paranoid. */
7066 s->buffer[s->seq.len] = 0;
7067
7068 printk(KERN_TRACE "%s", s->buffer);
7069
7070 trace_seq_init(s);
7071}
7072
7073void trace_init_global_iter(struct trace_iterator *iter)
7074{
7075 iter->tr = &global_trace;
7076 iter->trace = iter->tr->current_trace;
7077 iter->cpu_file = RING_BUFFER_ALL_CPUS;
7078 iter->trace_buffer = &global_trace.trace_buffer;
7079
7080 if (iter->trace && iter->trace->open)
7081 iter->trace->open(iter);
7082
7083 /* Annotate start of buffers if we had overruns */
7084 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7085 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7086
7087 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7088 if (trace_clocks[iter->tr->clock_id].in_ns)
7089 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
7090}
7091
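/**
 * ftrace_dump - dump the ring buffer contents to the console
 * @oops_dump_mode: DUMP_ALL dumps the buffers of all CPUs, DUMP_ORIG
 *	only the CPU that triggered the oops, DUMP_NONE does nothing.
 *
 * Tracing is turned off and the buffers are printed via printk at the
 * KERN_TRACE (KERN_EMERG) level. Only one dump may run at a time.
 */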
7092void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
7093{
7094 /* use static because iter can be a bit big for the stack */
7095 static struct trace_iterator iter;
7096 static atomic_t dump_running;
7097 struct trace_array *tr = &global_trace;
7098 unsigned int old_userobj;
7099 unsigned long flags;
7100 int cnt = 0, cpu;
7101
7102 /* Only allow one dump user at a time. */
7103 if (atomic_inc_return(&dump_running) != 1) {
7104 atomic_dec(&dump_running);
7105 return;
7106 }
7107
7108 /*
7109 * Always turn off tracing when we dump.
7110 * We don't need to show trace output of what happens
7111 * between multiple crashes.
7112 *
7113 * If the user does a sysrq-z, then they can re-enable
7114 * tracing with echo 1 > tracing_on.
7115 */
7116 tracing_off();
7117
7118 local_irq_save(flags);
7119
7120 /* Simulate the iterator */
7121 trace_init_global_iter(&iter);
7122
7123 for_each_tracing_cpu(cpu) {
7124 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7125 }
7126
7127 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
7128
7129 /* don't look at user memory in panic mode */
7130 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
7131
7132 switch (oops_dump_mode) {
7133 case DUMP_ALL:
7134 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7135 break;
7136 case DUMP_ORIG:
7137 iter.cpu_file = raw_smp_processor_id();
7138 break;
7139 case DUMP_NONE:
7140 goto out_enable;
7141 default:
7142 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
7143 iter.cpu_file = RING_BUFFER_ALL_CPUS;
7144 }
7145
7146 printk(KERN_TRACE "Dumping ftrace buffer:\n");
7147
7148 /* Did function tracer already get disabled? */
7149 if (ftrace_is_dead()) {
7150 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7151 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7152 }
7153
7154 /*
7155	 * We need to stop all tracing on all CPUs to read
7156	 * the next buffer. This is a bit expensive, but is
7157	 * not done often. We read everything we can,
7158	 * and then release the locks again.
7159 */
7160
7161 while (!trace_empty(&iter)) {
7162
7163 if (!cnt)
7164 printk(KERN_TRACE "---------------------------------\n");
7165
7166 cnt++;
7167
7168 /* reset all but tr, trace, and overruns */
7169 memset(&iter.seq, 0,
7170 sizeof(struct trace_iterator) -
7171 offsetof(struct trace_iterator, seq));
7172 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7173 iter.pos = -1;
7174
7175 if (trace_find_next_entry_inc(&iter) != NULL) {
7176 int ret;
7177
7178 ret = print_trace_line(&iter);
7179 if (ret != TRACE_TYPE_NO_CONSUME)
7180 trace_consume(&iter);
7181 }
7182 touch_nmi_watchdog();
7183
7184 trace_printk_seq(&iter.seq);
7185 }
7186
7187 if (!cnt)
7188 printk(KERN_TRACE " (ftrace buffer empty)\n");
7189 else
7190 printk(KERN_TRACE "---------------------------------\n");
7191
7192 out_enable:
7193 tr->trace_flags |= old_userobj;
7194
7195 for_each_tracing_cpu(cpu) {
7196 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
7197 }
7198 atomic_dec(&dump_running);
7199 local_irq_restore(flags);
7200}
7201EXPORT_SYMBOL_GPL(ftrace_dump);
7202
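/*
 * One-time allocation of the global trace buffers, the cpumasks, the
 * saved cmdlines and the temporary buffer used by event triggers.
 * Called from trace_init() during early boot.
 */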
7203__init static int tracer_alloc_buffers(void)
7204{
7205 int ring_buf_size;
7206 int ret = -ENOMEM;
7207
7208 /*
7209	 * Make sure we don't accidentally add more trace options
7210 * than we have bits for.
7211 */
7212 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
7213
7214 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7215 goto out;
7216
7217 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
7218 goto out_free_buffer_mask;
7219
7220 /* Only allocate trace_printk buffers if a trace_printk exists */
7221 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
7222 /* Must be called before global_trace.buffer is allocated */
7223 trace_printk_init_buffers();
7224
7225 /* To save memory, keep the ring buffer size to its minimum */
7226 if (ring_buffer_expanded)
7227 ring_buf_size = trace_buf_size;
7228 else
7229 ring_buf_size = 1;
7230
7231 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
7232 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
7233
7234 raw_spin_lock_init(&global_trace.start_lock);
7235
7236 /* Used for event triggers */
7237 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7238 if (!temp_buffer)
7239 goto out_free_cpumask;
7240
7241 if (trace_create_savedcmd() < 0)
7242 goto out_free_temp_buffer;
7243
7244 /* TODO: make the number of buffers hot pluggable with CPUS */
7245 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
7246 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7247 WARN_ON(1);
7248 goto out_free_savedcmd;
7249 }
7250
7251 if (global_trace.buffer_disabled)
7252 tracing_off();
7253
7254 if (trace_boot_clock) {
7255 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7256 if (ret < 0)
7257 pr_warn("Trace clock %s not defined, going back to default\n",
7258 trace_boot_clock);
7259 }
7260
7261 /*
7262 * register_tracer() might reference current_trace, so it
7263 * needs to be set before we register anything. This is
7264 * just a bootstrap of current_trace anyway.
7265 */
7266 global_trace.current_trace = &nop_trace;
7267
7268 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7269
7270 ftrace_init_global_array_ops(&global_trace);
7271
7272 init_trace_flags_index(&global_trace);
7273
7274 register_tracer(&nop_trace);
7275
7276 /* All seems OK, enable tracing */
7277 tracing_disabled = 0;
7278
7279 atomic_notifier_chain_register(&panic_notifier_list,
7280 &trace_panic_notifier);
7281
7282 register_die_notifier(&trace_die_notifier);
7283
7284 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7285
7286 INIT_LIST_HEAD(&global_trace.systems);
7287 INIT_LIST_HEAD(&global_trace.events);
7288 list_add(&global_trace.list, &ftrace_trace_arrays);
7289
7290 apply_trace_boot_options();
7291
7292 register_snapshot_cmd();
7293
7294 return 0;
7295
7296out_free_savedcmd:
7297 free_saved_cmdlines_buffer(savedcmd);
7298out_free_temp_buffer:
7299 ring_buffer_free(temp_buffer);
7300out_free_cpumask:
7301 free_cpumask_var(global_trace.tracing_cpumask);
7302out_free_buffer_mask:
7303 free_cpumask_var(tracing_buffer_mask);
7304out:
7305 return ret;
7306}
7307
7308void __init trace_init(void)
7309{
7310 if (tracepoint_printk) {
7311 tracepoint_print_iter =
7312 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7313 if (WARN_ON(!tracepoint_print_iter))
7314 tracepoint_printk = 0;
7315 }
7316 tracer_alloc_buffers();
7317 trace_event_init();
7318}
7319
7320__init static int clear_boot_tracer(void)
7321{
7322 /*
7323	 * The default bootup tracer name lives in an init section.
7324	 * This function is called at late_initcall time. If the boot
7325	 * tracer was never found, clear the pointer out, to prevent
7326	 * later registration from accessing the buffer that is
7327	 * about to be freed.
7328 */
7329 if (!default_bootup_tracer)
7330 return 0;
7331
7332 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7333 default_bootup_tracer);
7334 default_bootup_tracer = NULL;
7335
7336 return 0;
7337}
7338
7339fs_initcall(tracer_init_tracefs);
7340late_initcall(clear_boot_tracer);
1/*
2 * ring buffer based function tracer
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 William Lee Irwin III
13 */
14#include <linux/ring_buffer.h>
15#include <generated/utsrelease.h>
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
20#include <linux/notifier.h>
21#include <linux/irqflags.h>
22#include <linux/debugfs.h>
23#include <linux/pagemap.h>
24#include <linux/hardirq.h>
25#include <linux/linkage.h>
26#include <linux/uaccess.h>
27#include <linux/kprobes.h>
28#include <linux/ftrace.h>
29#include <linux/module.h>
30#include <linux/percpu.h>
31#include <linux/splice.h>
32#include <linux/kdebug.h>
33#include <linux/string.h>
34#include <linux/rwsem.h>
35#include <linux/slab.h>
36#include <linux/ctype.h>
37#include <linux/init.h>
38#include <linux/poll.h>
39#include <linux/nmi.h>
40#include <linux/fs.h>
41
42#include "trace.h"
43#include "trace_output.h"
44
45/*
46 * On boot up, the ring buffer is set to the minimum size, so that
47 * we do not waste memory on systems that are not using tracing.
48 */
49int ring_buffer_expanded;
50
51/*
52 * We need to change this state when a selftest is running.
53 * A selftest will lurk into the ring-buffer to count the
54 * entries inserted during the selftest although some concurrent
55 * insertions into the ring-buffer such as trace_printk could occurred
56 * at the same time, giving false positive or negative results.
57 */
58static bool __read_mostly tracing_selftest_running;
59
60/*
61 * If a tracer is running, we do not want to run SELFTEST.
62 */
63bool __read_mostly tracing_selftest_disabled;
64
65/* For tracers that don't implement custom flags */
66static struct tracer_opt dummy_tracer_opt[] = {
67 { }
68};
69
70static struct tracer_flags dummy_tracer_flags = {
71 .val = 0,
72 .opts = dummy_tracer_opt
73};
74
75static int dummy_set_flag(u32 old_flags, u32 bit, int set)
76{
77 return 0;
78}
79
80/*
81 * Kill all tracing for good (never come back).
82 * It is initialized to 1 but will turn to zero if the initialization
83 * of the tracer is successful. But that is the only place that sets
84 * this back to zero.
85 */
86static int tracing_disabled = 1;
87
88DEFINE_PER_CPU(int, ftrace_cpu_disabled);
89
90cpumask_var_t __read_mostly tracing_buffer_mask;
91
92/*
93 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
94 *
95 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
96 * is set, then ftrace_dump is called. This will output the contents
97 * of the ftrace buffers to the console. This is very useful for
98 * capturing traces that lead to crashes and outputing it to a
99 * serial console.
100 *
101 * It is default off, but you can enable it with either specifying
102 * "ftrace_dump_on_oops" in the kernel command line, or setting
103 * /proc/sys/kernel/ftrace_dump_on_oops
104 * Set 1 if you want to dump buffers of all CPUs
105 * Set 2 if you want to dump the buffer of the CPU that triggered oops
106 */
107
108enum ftrace_dump_mode ftrace_dump_on_oops;
109
110static int tracing_set_tracer(const char *buf);
111
112#define MAX_TRACER_SIZE 100
113static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
114static char *default_bootup_tracer;
115
116static int __init set_cmdline_ftrace(char *str)
117{
118 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
119 default_bootup_tracer = bootup_tracer_buf;
120 /* We are using ftrace early, expand it */
121 ring_buffer_expanded = 1;
122 return 1;
123}
124__setup("ftrace=", set_cmdline_ftrace);
125
126static int __init set_ftrace_dump_on_oops(char *str)
127{
128 if (*str++ != '=' || !*str) {
129 ftrace_dump_on_oops = DUMP_ALL;
130 return 1;
131 }
132
133 if (!strcmp("orig_cpu", str)) {
134 ftrace_dump_on_oops = DUMP_ORIG;
135 return 1;
136 }
137
138 return 0;
139}
140__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
141
142unsigned long long ns2usecs(cycle_t nsec)
143{
144 nsec += 500;
145 do_div(nsec, 1000);
146 return nsec;
147}
148
149/*
150 * The global_trace is the descriptor that holds the tracing
151 * buffers for the live tracing. For each CPU, it contains
152 * a link list of pages that will store trace entries. The
153 * page descriptor of the pages in the memory is used to hold
154 * the link list by linking the lru item in the page descriptor
155 * to each of the pages in the buffer per CPU.
156 *
157 * For each active CPU there is a data field that holds the
158 * pages for the buffer for that CPU. Each CPU has the same number
159 * of pages allocated for its buffer.
160 */
161static struct trace_array global_trace;
162
163static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
164
165int filter_current_check_discard(struct ring_buffer *buffer,
166 struct ftrace_event_call *call, void *rec,
167 struct ring_buffer_event *event)
168{
169 return filter_check_discard(call, rec, buffer, event);
170}
171EXPORT_SYMBOL_GPL(filter_current_check_discard);
172
173cycle_t ftrace_now(int cpu)
174{
175 u64 ts;
176
177 /* Early boot up does not have a buffer yet */
178 if (!global_trace.buffer)
179 return trace_clock_local();
180
181 ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
182 ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
183
184 return ts;
185}
186
187/*
188 * The max_tr is used to snapshot the global_trace when a maximum
189 * latency is reached. Some tracers will use this to store a maximum
190 * trace while it continues examining live traces.
191 *
192 * The buffers for the max_tr are set up the same as the global_trace.
193 * When a snapshot is taken, the link list of the max_tr is swapped
194 * with the link list of the global_trace and the buffers are reset for
195 * the global_trace so the tracing can continue.
196 */
197static struct trace_array max_tr;
198
199static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
200
201/* tracer_enabled is used to toggle activation of a tracer */
202static int tracer_enabled = 1;
203
204/**
205 * tracing_is_enabled - return tracer_enabled status
206 *
207 * This function is used by other tracers to know the status
208 * of the tracer_enabled flag. Tracers may use this function
209 * to know if it should enable their features when starting
210 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
211 */
212int tracing_is_enabled(void)
213{
214 return tracer_enabled;
215}
216
217/*
218 * trace_buf_size is the size in bytes that is allocated
219 * for a buffer. Note, the number of bytes is always rounded
220 * to page size.
221 *
222 * This number is purposely set to a low number of 16384.
223 * If the dump on oops happens, it will be much appreciated
224 * to not have to wait for all that output. Anyway this can be
225 * boot time and run time configurable.
226 */
227#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
228
229static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
230
231/* trace_types holds a link list of available tracers. */
232static struct tracer *trace_types __read_mostly;
233
234/* current_trace points to the tracer that is currently active */
235static struct tracer *current_trace __read_mostly;
236
237/*
238 * trace_types_lock is used to protect the trace_types list.
239 */
240static DEFINE_MUTEX(trace_types_lock);
241
242/*
243 * serialize the access of the ring buffer
244 *
245 * ring buffer serializes readers, but it is low level protection.
246 * The validity of the events (which returns by ring_buffer_peek() ..etc)
247 * are not protected by ring buffer.
248 *
249 * The content of events may become garbage if we allow other process consumes
250 * these events concurrently:
251 * A) the page of the consumed events may become a normal page
252 * (not reader page) in ring buffer, and this page will be rewrited
253 * by events producer.
254 * B) The page of the consumed events may become a page for splice_read,
255 * and this page will be returned to system.
256 *
257 * These primitives allow multi process access to different cpu ring buffer
258 * concurrently.
259 *
260 * These primitives don't distinguish read-only and read-consume access.
261 * Multi read-only access are also serialized.
262 */
263
264#ifdef CONFIG_SMP
265static DECLARE_RWSEM(all_cpu_access_lock);
266static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
267
268static inline void trace_access_lock(int cpu)
269{
270 if (cpu == TRACE_PIPE_ALL_CPU) {
271 /* gain it for accessing the whole ring buffer. */
272 down_write(&all_cpu_access_lock);
273 } else {
274 /* gain it for accessing a cpu ring buffer. */
275
276 /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
277 down_read(&all_cpu_access_lock);
278
279 /* Secondly block other access to this @cpu ring buffer. */
280 mutex_lock(&per_cpu(cpu_access_lock, cpu));
281 }
282}
283
284static inline void trace_access_unlock(int cpu)
285{
286 if (cpu == TRACE_PIPE_ALL_CPU) {
287 up_write(&all_cpu_access_lock);
288 } else {
289 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
290 up_read(&all_cpu_access_lock);
291 }
292}
293
294static inline void trace_access_lock_init(void)
295{
296 int cpu;
297
298 for_each_possible_cpu(cpu)
299 mutex_init(&per_cpu(cpu_access_lock, cpu));
300}
301
302#else
303
304static DEFINE_MUTEX(access_lock);
305
306static inline void trace_access_lock(int cpu)
307{
308 (void)cpu;
309 mutex_lock(&access_lock);
310}
311
312static inline void trace_access_unlock(int cpu)
313{
314 (void)cpu;
315 mutex_unlock(&access_lock);
316}
317
318static inline void trace_access_lock_init(void)
319{
320}
321
322#endif
323
324/* trace_wait is a waitqueue for tasks blocked on trace_poll */
325static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
326
327/* trace_flags holds trace_options default values */
328unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
329 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
330 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
331 TRACE_ITER_IRQ_INFO;
332
333static int trace_stop_count;
334static DEFINE_RAW_SPINLOCK(tracing_start_lock);
335
336static void wakeup_work_handler(struct work_struct *work)
337{
338 wake_up(&trace_wait);
339}
340
341static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
342
343/**
344 * tracing_on - enable tracing buffers
345 *
346 * This function enables tracing buffers that may have been
347 * disabled with tracing_off.
348 */
349void tracing_on(void)
350{
351 if (global_trace.buffer)
352 ring_buffer_record_on(global_trace.buffer);
353 /*
354 * This flag is only looked at when buffers haven't been
355 * allocated yet. We don't really care about the race
356 * between setting this flag and actually turning
357 * on the buffer.
358 */
359 global_trace.buffer_disabled = 0;
360}
361EXPORT_SYMBOL_GPL(tracing_on);
362
363/**
364 * tracing_off - turn off tracing buffers
365 *
366 * This function stops the tracing buffers from recording data.
367 * It does not disable any overhead the tracers themselves may
368 * be causing. This function simply causes all recording to
369 * the ring buffers to fail.
370 */
371void tracing_off(void)
372{
373 if (global_trace.buffer)
374 ring_buffer_record_off(global_trace.buffer);
375 /*
376 * This flag is only looked at when buffers haven't been
377 * allocated yet. We don't really care about the race
378 * between setting this flag and actually turning
379 * on the buffer.
380 */
381 global_trace.buffer_disabled = 1;
382}
383EXPORT_SYMBOL_GPL(tracing_off);
384
385/**
386 * tracing_is_on - show state of ring buffers enabled
387 */
388int tracing_is_on(void)
389{
390 if (global_trace.buffer)
391 return ring_buffer_record_is_on(global_trace.buffer);
392 return !global_trace.buffer_disabled;
393}
394EXPORT_SYMBOL_GPL(tracing_is_on);
395
396/**
397 * trace_wake_up - wake up tasks waiting for trace input
398 *
399 * Schedules a delayed work to wake up any task that is blocked on the
400 * trace_wait queue. These is used with trace_poll for tasks polling the
401 * trace.
402 */
403void trace_wake_up(void)
404{
405 const unsigned long delay = msecs_to_jiffies(2);
406
407 if (trace_flags & TRACE_ITER_BLOCK)
408 return;
409 schedule_delayed_work(&wakeup_work, delay);
410}
411
412static int __init set_buf_size(char *str)
413{
414 unsigned long buf_size;
415
416 if (!str)
417 return 0;
418 buf_size = memparse(str, &str);
419 /* nr_entries can not be zero */
420 if (buf_size == 0)
421 return 0;
422 trace_buf_size = buf_size;
423 return 1;
424}
425__setup("trace_buf_size=", set_buf_size);
426
427static int __init set_tracing_thresh(char *str)
428{
429 unsigned long threshhold;
430 int ret;
431
432 if (!str)
433 return 0;
434 ret = strict_strtoul(str, 0, &threshhold);
435 if (ret < 0)
436 return 0;
437 tracing_thresh = threshhold * 1000;
438 return 1;
439}
440__setup("tracing_thresh=", set_tracing_thresh);
441
442unsigned long nsecs_to_usecs(unsigned long nsecs)
443{
444 return nsecs / 1000;
445}
446
447/* These must match the bit postions in trace_iterator_flags */
448static const char *trace_options[] = {
449 "print-parent",
450 "sym-offset",
451 "sym-addr",
452 "verbose",
453 "raw",
454 "hex",
455 "bin",
456 "block",
457 "stacktrace",
458 "trace_printk",
459 "ftrace_preempt",
460 "branch",
461 "annotate",
462 "userstacktrace",
463 "sym-userobj",
464 "printk-msg-only",
465 "context-info",
466 "latency-format",
467 "sleep-time",
468 "graph-time",
469 "record-cmd",
470 "overwrite",
471 "disable_on_free",
472 "irq-info",
473 NULL
474};
475
476static struct {
477 u64 (*func)(void);
478 const char *name;
479} trace_clocks[] = {
480 { trace_clock_local, "local" },
481 { trace_clock_global, "global" },
482 { trace_clock_counter, "counter" },
483};
484
485int trace_clock_id;
486
487/*
488 * trace_parser_get_init - gets the buffer for trace parser
489 */
490int trace_parser_get_init(struct trace_parser *parser, int size)
491{
492 memset(parser, 0, sizeof(*parser));
493
494 parser->buffer = kmalloc(size, GFP_KERNEL);
495 if (!parser->buffer)
496 return 1;
497
498 parser->size = size;
499 return 0;
500}
501
502/*
503 * trace_parser_put - frees the buffer for trace parser
504 */
505void trace_parser_put(struct trace_parser *parser)
506{
507 kfree(parser->buffer);
508}
509
510/*
511 * trace_get_user - reads the user input string separated by space
512 * (matched by isspace(ch))
513 *
514 * For each string found the 'struct trace_parser' is updated,
515 * and the function returns.
516 *
517 * Returns number of bytes read.
518 *
519 * See kernel/trace/trace.h for 'struct trace_parser' details.
520 */
521int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
522 size_t cnt, loff_t *ppos)
523{
524 char ch;
525 size_t read = 0;
526 ssize_t ret;
527
528 if (!*ppos)
529 trace_parser_clear(parser);
530
531 ret = get_user(ch, ubuf++);
532 if (ret)
533 goto out;
534
535 read++;
536 cnt--;
537
538 /*
539 * The parser is not finished with the last write,
540 * continue reading the user input without skipping spaces.
541 */
542 if (!parser->cont) {
543 /* skip white space */
544 while (cnt && isspace(ch)) {
545 ret = get_user(ch, ubuf++);
546 if (ret)
547 goto out;
548 read++;
549 cnt--;
550 }
551
552 /* only spaces were written */
553 if (isspace(ch)) {
554 *ppos += read;
555 ret = read;
556 goto out;
557 }
558
559 parser->idx = 0;
560 }
561
562 /* read the non-space input */
563 while (cnt && !isspace(ch)) {
564 if (parser->idx < parser->size - 1)
565 parser->buffer[parser->idx++] = ch;
566 else {
567 ret = -EINVAL;
568 goto out;
569 }
570 ret = get_user(ch, ubuf++);
571 if (ret)
572 goto out;
573 read++;
574 cnt--;
575 }
576
577 /* We either got finished input or we have to wait for another call. */
578 if (isspace(ch)) {
579 parser->buffer[parser->idx] = 0;
580 parser->cont = false;
581 } else {
582 parser->cont = true;
583 parser->buffer[parser->idx++] = ch;
584 }
585
586 *ppos += read;
587 ret = read;
588
589out:
590 return ret;
591}
592
593ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
594{
595 int len;
596 int ret;
597
598 if (!cnt)
599 return 0;
600
601 if (s->len <= s->readpos)
602 return -EBUSY;
603
604 len = s->len - s->readpos;
605 if (cnt > len)
606 cnt = len;
607 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
608 if (ret == cnt)
609 return -EFAULT;
610
611 cnt -= ret;
612
613 s->readpos += cnt;
614 return cnt;
615}
616
617static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
618{
619 int len;
620
621 if (s->len <= s->readpos)
622 return -EBUSY;
623
624 len = s->len - s->readpos;
625 if (cnt > len)
626 cnt = len;
627 memcpy(buf, s->buffer + s->readpos, cnt);
628
629 s->readpos += cnt;
630 return cnt;
631}
632
633/*
634 * ftrace_max_lock is used to protect the swapping of buffers
635 * when taking a max snapshot. The buffers themselves are
636 * protected by per_cpu spinlocks. But the action of the swap
637 * needs its own lock.
638 *
639 * This is defined as a arch_spinlock_t in order to help
640 * with performance when lockdep debugging is enabled.
641 *
642 * It is also used in other places outside the update_max_tr
643 * so it needs to be defined outside of the
644 * CONFIG_TRACER_MAX_TRACE.
645 */
646static arch_spinlock_t ftrace_max_lock =
647 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
648
649unsigned long __read_mostly tracing_thresh;
650
651#ifdef CONFIG_TRACER_MAX_TRACE
652unsigned long __read_mostly tracing_max_latency;
653
654/*
655 * Copy the new maximum trace into the separate maximum-trace
656 * structure. (this way the maximum trace is permanently saved,
657 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
658 */
659static void
660__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
661{
662 struct trace_array_cpu *data = tr->data[cpu];
663 struct trace_array_cpu *max_data;
664
665 max_tr.cpu = cpu;
666 max_tr.time_start = data->preempt_timestamp;
667
668 max_data = max_tr.data[cpu];
669 max_data->saved_latency = tracing_max_latency;
670 max_data->critical_start = data->critical_start;
671 max_data->critical_end = data->critical_end;
672
673 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
674 max_data->pid = tsk->pid;
675 max_data->uid = task_uid(tsk);
676 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
677 max_data->policy = tsk->policy;
678 max_data->rt_priority = tsk->rt_priority;
679
680 /* record this tasks comm */
681 tracing_record_cmdline(tsk);
682}
683
684/**
685 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
686 * @tr: tracer
687 * @tsk: the task with the latency
688 * @cpu: The cpu that initiated the trace.
689 *
690 * Flip the buffers between the @tr and the max_tr and record information
691 * about which task was the cause of this latency.
692 */
693void
694update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
695{
696 struct ring_buffer *buf = tr->buffer;
697
698 if (trace_stop_count)
699 return;
700
701 WARN_ON_ONCE(!irqs_disabled());
702 if (!current_trace->use_max_tr) {
703 WARN_ON_ONCE(1);
704 return;
705 }
706 arch_spin_lock(&ftrace_max_lock);
707
708 tr->buffer = max_tr.buffer;
709 max_tr.buffer = buf;
710
711 __update_max_tr(tr, tsk, cpu);
712 arch_spin_unlock(&ftrace_max_lock);
713}
714
715/**
716 * update_max_tr_single - only copy one trace over, and reset the rest
717 * @tr - tracer
718 * @tsk - task with the latency
719 * @cpu - the cpu of the buffer to copy.
720 *
721 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
722 */
723void
724update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
725{
726 int ret;
727
728 if (trace_stop_count)
729 return;
730
731 WARN_ON_ONCE(!irqs_disabled());
732 if (!current_trace->use_max_tr) {
733 WARN_ON_ONCE(1);
734 return;
735 }
736
737 arch_spin_lock(&ftrace_max_lock);
738
739 ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
740
741 if (ret == -EBUSY) {
742 /*
743 * We failed to swap the buffer due to a commit taking
744 * place on this CPU. We fail to record, but we reset
745 * the max trace buffer (no one writes directly to it)
746 * and flag that it failed.
747 */
748 trace_array_printk(&max_tr, _THIS_IP_,
749 "Failed to swap buffers due to commit in progress\n");
750 }
751
752 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
753
754 __update_max_tr(tr, tsk, cpu);
755 arch_spin_unlock(&ftrace_max_lock);
756}
757#endif /* CONFIG_TRACER_MAX_TRACE */
758
759/**
760 * register_tracer - register a tracer with the ftrace system.
761 * @type - the plugin for the tracer
762 *
763 * Register a new plugin tracer.
764 */
765int register_tracer(struct tracer *type)
766{
767 struct tracer *t;
768 int ret = 0;
769
770 if (!type->name) {
771 pr_info("Tracer must have a name\n");
772 return -1;
773 }
774
775 if (strlen(type->name) >= MAX_TRACER_SIZE) {
776 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
777 return -1;
778 }
779
780 mutex_lock(&trace_types_lock);
781
782 tracing_selftest_running = true;
783
784 for (t = trace_types; t; t = t->next) {
785 if (strcmp(type->name, t->name) == 0) {
786 /* already found */
787 pr_info("Tracer %s already registered\n",
788 type->name);
789 ret = -1;
790 goto out;
791 }
792 }
793
794 if (!type->set_flag)
795 type->set_flag = &dummy_set_flag;
796 if (!type->flags)
797 type->flags = &dummy_tracer_flags;
798 else
799 if (!type->flags->opts)
800 type->flags->opts = dummy_tracer_opt;
801 if (!type->wait_pipe)
802 type->wait_pipe = default_wait_pipe;
803
804
805#ifdef CONFIG_FTRACE_STARTUP_TEST
806 if (type->selftest && !tracing_selftest_disabled) {
807 struct tracer *saved_tracer = current_trace;
808 struct trace_array *tr = &global_trace;
809
810 /*
811 * Run a selftest on this tracer.
812 * Here we reset the trace buffer, and set the current
813 * tracer to be this tracer. The tracer can then run some
814 * internal tracing to verify that everything is in order.
815 * If we fail, we do not register this tracer.
816 */
817 tracing_reset_online_cpus(tr);
818
819 current_trace = type;
820
821 /* If we expanded the buffers, make sure the max is expanded too */
822 if (ring_buffer_expanded && type->use_max_tr)
823 ring_buffer_resize(max_tr.buffer, trace_buf_size,
824 RING_BUFFER_ALL_CPUS);
825
826 /* the test is responsible for initializing and enabling */
827 pr_info("Testing tracer %s: ", type->name);
828 ret = type->selftest(type, tr);
829 /* the test is responsible for resetting too */
830 current_trace = saved_tracer;
831 if (ret) {
832 printk(KERN_CONT "FAILED!\n");
833 goto out;
834 }
835 /* Only reset on passing, to avoid touching corrupted buffers */
836 tracing_reset_online_cpus(tr);
837
838 /* Shrink the max buffer again */
839 if (ring_buffer_expanded && type->use_max_tr)
840 ring_buffer_resize(max_tr.buffer, 1,
841 RING_BUFFER_ALL_CPUS);
842
843 printk(KERN_CONT "PASSED\n");
844 }
845#endif
846
847 type->next = trace_types;
848 trace_types = type;
849
850 out:
851 tracing_selftest_running = false;
852 mutex_unlock(&trace_types_lock);
853
854 if (ret || !default_bootup_tracer)
855 goto out_unlock;
856
857 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
858 goto out_unlock;
859
860 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
861 /* Do we want this tracer to start on bootup? */
862 tracing_set_tracer(type->name);
863 default_bootup_tracer = NULL;
864 /* disable other selftests, since this will break it. */
865 tracing_selftest_disabled = 1;
866#ifdef CONFIG_FTRACE_STARTUP_TEST
867 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
868 type->name);
869#endif
870
871 out_unlock:
872 return ret;
873}
874
875void unregister_tracer(struct tracer *type)
876{
877 struct tracer **t;
878
879 mutex_lock(&trace_types_lock);
880 for (t = &trace_types; *t; t = &(*t)->next) {
881 if (*t == type)
882 goto found;
883 }
884 pr_info("Tracer %s not registered\n", type->name);
885 goto out;
886
887 found:
888 *t = (*t)->next;
889
890 if (type == current_trace && tracer_enabled) {
891 tracer_enabled = 0;
892 tracing_stop();
893 if (current_trace->stop)
894 current_trace->stop(&global_trace);
895 current_trace = &nop_trace;
896 }
897out:
898 mutex_unlock(&trace_types_lock);
899}
900
901void tracing_reset(struct trace_array *tr, int cpu)
902{
903 struct ring_buffer *buffer = tr->buffer;
904
905 ring_buffer_record_disable(buffer);
906
907 /* Make sure all commits have finished */
908 synchronize_sched();
909 ring_buffer_reset_cpu(buffer, cpu);
910
911 ring_buffer_record_enable(buffer);
912}
913
914void tracing_reset_online_cpus(struct trace_array *tr)
915{
916 struct ring_buffer *buffer = tr->buffer;
917 int cpu;
918
919 ring_buffer_record_disable(buffer);
920
921 /* Make sure all commits have finished */
922 synchronize_sched();
923
924 tr->time_start = ftrace_now(tr->cpu);
925
926 for_each_online_cpu(cpu)
927 ring_buffer_reset_cpu(buffer, cpu);
928
929 ring_buffer_record_enable(buffer);
930}
931
932void tracing_reset_current(int cpu)
933{
934 tracing_reset(&global_trace, cpu);
935}
936
937void tracing_reset_current_online_cpus(void)
938{
939 tracing_reset_online_cpus(&global_trace);
940}
941
942#define SAVED_CMDLINES 128
943#define NO_CMDLINE_MAP UINT_MAX
944static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
945static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
946static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
947static int cmdline_idx;
948static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
949
950/* temporary disable recording */
951static atomic_t trace_record_cmdline_disabled __read_mostly;
952
953static void trace_init_cmdlines(void)
954{
955 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
956 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
957 cmdline_idx = 0;
958}
959
960int is_tracing_stopped(void)
961{
962 return trace_stop_count;
963}
964
965/**
966 * ftrace_off_permanent - disable all ftrace code permanently
967 *
968 * This should only be called when a serious anomally has
969 * been detected. This will turn off the function tracing,
970 * ring buffers, and other tracing utilites. It takes no
971 * locks and can be called from any context.
972 */
973void ftrace_off_permanent(void)
974{
975 tracing_disabled = 1;
976 ftrace_stop();
977 tracing_off_permanent();
978}
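
/*
 * Hypothetical illustration (the example_* name is made up): as the
 * comment above says, ftrace_off_permanent() is a last-resort kill
 * switch that is safe to call from any context, with no way back.
 */
#ifdef UNUSED
static void example_handle_fatal_anomaly(void)
{
	/* All function tracing and ring buffer recording stays off from here */
	ftrace_off_permanent();
}
#endif /* UNUSED */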
979
980/**
981 * tracing_start - quick start of the tracer
982 *
983 * If tracing is enabled but was stopped by tracing_stop,
984 * this will start the tracer back up.
985 */
986void tracing_start(void)
987{
988 struct ring_buffer *buffer;
989 unsigned long flags;
990
991 if (tracing_disabled)
992 return;
993
994 raw_spin_lock_irqsave(&tracing_start_lock, flags);
995 if (--trace_stop_count) {
996 if (trace_stop_count < 0) {
997 /* Someone screwed up their debugging */
998 WARN_ON_ONCE(1);
999 trace_stop_count = 0;
1000 }
1001 goto out;
1002 }
1003
1004 /* Prevent the buffers from switching */
1005 arch_spin_lock(&ftrace_max_lock);
1006
1007 buffer = global_trace.buffer;
1008 if (buffer)
1009 ring_buffer_record_enable(buffer);
1010
1011 buffer = max_tr.buffer;
1012 if (buffer)
1013 ring_buffer_record_enable(buffer);
1014
1015 arch_spin_unlock(&ftrace_max_lock);
1016
1017 ftrace_start();
1018 out:
1019 raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
1020}
1021
1022/**
1023 * tracing_stop - quick stop of the tracer
1024 *
1025 * Light weight way to stop tracing. Use in conjunction with
1026 * tracing_start.
1027 */
1028void tracing_stop(void)
1029{
1030 struct ring_buffer *buffer;
1031 unsigned long flags;
1032
1033 ftrace_stop();
1034 raw_spin_lock_irqsave(&tracing_start_lock, flags);
1035 if (trace_stop_count++)
1036 goto out;
1037
1038 /* Prevent the buffers from switching */
1039 arch_spin_lock(&ftrace_max_lock);
1040
1041 buffer = global_trace.buffer;
1042 if (buffer)
1043 ring_buffer_record_disable(buffer);
1044
1045 buffer = max_tr.buffer;
1046 if (buffer)
1047 ring_buffer_record_disable(buffer);
1048
1049 arch_spin_unlock(&ftrace_max_lock);
1050
1051 out:
1052 raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
1053}
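
/*
 * Hypothetical sketch (the example_* name is made up) of how the
 * tracing_stop()/tracing_start() pair above is meant to be used:
 * keep events out of the ring buffer around a section of code.
 * The calls nest, since trace_stop_count is a counter.
 */
#ifdef UNUSED
static void example_run_without_tracing(void (*section)(void))
{
	tracing_stop();
	section();		/* events here are not recorded */
	tracing_start();
}
#endif /* UNUSED */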
1054
1055void trace_stop_cmdline_recording(void);
1056
1057static void trace_save_cmdline(struct task_struct *tsk)
1058{
1059 unsigned pid, idx;
1060
1061 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1062 return;
1063
1064 /*
1065 * It's not the end of the world if we don't get
1066 * the lock, but we also don't want to spin
1067 * nor do we want to disable interrupts,
1068 * so if we miss here, then better luck next time.
1069 */
1070 if (!arch_spin_trylock(&trace_cmdline_lock))
1071 return;
1072
1073 idx = map_pid_to_cmdline[tsk->pid];
1074 if (idx == NO_CMDLINE_MAP) {
1075 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1076
1077 /*
1078 * Check whether the cmdline buffer at idx has a pid
1079 * mapped. We are going to overwrite that entry so we
1080 * need to clear the map_pid_to_cmdline. Otherwise we
1081 * would read the new comm for the old pid.
1082 */
1083 pid = map_cmdline_to_pid[idx];
1084 if (pid != NO_CMDLINE_MAP)
1085 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1086
1087 map_cmdline_to_pid[idx] = tsk->pid;
1088 map_pid_to_cmdline[tsk->pid] = idx;
1089
1090 cmdline_idx = idx;
1091 }
1092
1093 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1094
1095 arch_spin_unlock(&trace_cmdline_lock);
1096}
1097
1098void trace_find_cmdline(int pid, char comm[])
1099{
1100 unsigned map;
1101
1102 if (!pid) {
1103 strcpy(comm, "<idle>");
1104 return;
1105 }
1106
1107 if (WARN_ON_ONCE(pid < 0)) {
1108 strcpy(comm, "<XXX>");
1109 return;
1110 }
1111
1112 if (pid > PID_MAX_DEFAULT) {
1113 strcpy(comm, "<...>");
1114 return;
1115 }
1116
1117 preempt_disable();
1118 arch_spin_lock(&trace_cmdline_lock);
1119 map = map_pid_to_cmdline[pid];
1120 if (map != NO_CMDLINE_MAP)
1121 strcpy(comm, saved_cmdlines[map]);
1122 else
1123 strcpy(comm, "<...>");
1124
1125 arch_spin_unlock(&trace_cmdline_lock);
1126 preempt_enable();
1127}
1128
1129void tracing_record_cmdline(struct task_struct *tsk)
1130{
1131 if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
1132 !tracing_is_on())
1133 return;
1134
1135 trace_save_cmdline(tsk);
1136}
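
/*
 * Hypothetical sketch (the example_* name is made up) of the comm
 * cache round trip: save the current task's comm, then resolve a pid
 * back to a comm string.  If the entry was never saved or has been
 * evicted, trace_find_cmdline() hands back "<...>".
 */
#ifdef UNUSED
static void example_comm_cache_roundtrip(void)
{
	char comm[TASK_COMM_LEN];

	tracing_record_cmdline(current);
	trace_find_cmdline(current->pid, comm);
	trace_printk("pid %d is running %s\n", current->pid, comm);
}
#endif /* UNUSED */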
1137
1138void
1139tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1140 int pc)
1141{
1142 struct task_struct *tsk = current;
1143
1144 entry->preempt_count = pc & 0xff;
1145 entry->pid = (tsk) ? tsk->pid : 0;
1146 entry->padding = 0;
1147 entry->flags =
1148#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1149 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1150#else
1151 TRACE_FLAG_IRQS_NOSUPPORT |
1152#endif
1153 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1154 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1155 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
1156}
1157EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1158
1159struct ring_buffer_event *
1160trace_buffer_lock_reserve(struct ring_buffer *buffer,
1161 int type,
1162 unsigned long len,
1163 unsigned long flags, int pc)
1164{
1165 struct ring_buffer_event *event;
1166
1167 event = ring_buffer_lock_reserve(buffer, len);
1168 if (event != NULL) {
1169 struct trace_entry *ent = ring_buffer_event_data(event);
1170
1171 tracing_generic_entry_update(ent, flags, pc);
1172 ent->type = type;
1173 }
1174
1175 return event;
1176}
1177
1178static inline void
1179__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1180 struct ring_buffer_event *event,
1181 unsigned long flags, int pc,
1182 int wake)
1183{
1184 ring_buffer_unlock_commit(buffer, event);
1185
1186 ftrace_trace_stack(buffer, flags, 6, pc);
1187 ftrace_trace_userstack(buffer, flags, pc);
1188
1189 if (wake)
1190 trace_wake_up();
1191}
1192
1193void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1194 struct ring_buffer_event *event,
1195 unsigned long flags, int pc)
1196{
1197 __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1198}
1199
1200struct ring_buffer_event *
1201trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1202 int type, unsigned long len,
1203 unsigned long flags, int pc)
1204{
1205 *current_rb = global_trace.buffer;
1206 return trace_buffer_lock_reserve(*current_rb,
1207 type, len, flags, pc);
1208}
1209EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1210
1211void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1212 struct ring_buffer_event *event,
1213 unsigned long flags, int pc)
1214{
1215 __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1216}
1217EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1218
1219void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
1220 struct ring_buffer_event *event,
1221 unsigned long flags, int pc)
1222{
1223 __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
1224}
1225EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
1226
1227void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1228 struct ring_buffer_event *event,
1229 unsigned long flags, int pc,
1230 struct pt_regs *regs)
1231{
1232 ring_buffer_unlock_commit(buffer, event);
1233
1234 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1235 ftrace_trace_userstack(buffer, flags, pc);
1236}
1237EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
1238
1239void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1240 struct ring_buffer_event *event)
1241{
1242 ring_buffer_discard_commit(buffer, event);
1243}
1244EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1245
1246void
1247trace_function(struct trace_array *tr,
1248 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1249 int pc)
1250{
1251 struct ftrace_event_call *call = &event_function;
1252 struct ring_buffer *buffer = tr->buffer;
1253 struct ring_buffer_event *event;
1254 struct ftrace_entry *entry;
1255
1256 /* If we are reading the ring buffer, don't trace */
1257 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1258 return;
1259
1260 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1261 flags, pc);
1262 if (!event)
1263 return;
1264 entry = ring_buffer_event_data(event);
1265 entry->ip = ip;
1266 entry->parent_ip = parent_ip;
1267
1268 if (!filter_check_discard(call, entry, buffer, event))
1269 ring_buffer_unlock_commit(buffer, event);
1270}
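
/*
 * Hypothetical sketch (the example_* name is made up) of the same
 * reserve/fill/commit protocol that trace_function() above follows,
 * here using the *_current_buffer_* helpers and showing the discard
 * path for an event the caller decides not to keep.  The field layout
 * of struct ftrace_entry is the one used by trace_function().
 */
#ifdef UNUSED
static void example_record_or_discard(unsigned long ip,
				      unsigned long parent_ip, int keep)
{
	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	int pc = preempt_count();

	local_save_flags(flags);
	event = trace_current_buffer_lock_reserve(&buffer, TRACE_FN,
						  sizeof(*entry), flags, pc);
	if (!event)
		return;		/* buffer full or recording disabled */

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (keep)
		trace_current_buffer_unlock_commit(buffer, event, flags, pc);
	else
		trace_current_buffer_discard_commit(buffer, event);
}
#endif /* UNUSED */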
1271
1272void
1273ftrace(struct trace_array *tr, struct trace_array_cpu *data,
1274 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1275 int pc)
1276{
1277 if (likely(!atomic_read(&data->disabled)))
1278 trace_function(tr, ip, parent_ip, flags, pc);
1279}
1280
1281#ifdef CONFIG_STACKTRACE
1282
1283#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1284struct ftrace_stack {
1285 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1286};
1287
1288static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1289static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1290
1291static void __ftrace_trace_stack(struct ring_buffer *buffer,
1292 unsigned long flags,
1293 int skip, int pc, struct pt_regs *regs)
1294{
1295 struct ftrace_event_call *call = &event_kernel_stack;
1296 struct ring_buffer_event *event;
1297 struct stack_entry *entry;
1298 struct stack_trace trace;
1299 int use_stack;
1300 int size = FTRACE_STACK_ENTRIES;
1301
1302 trace.nr_entries = 0;
1303 trace.skip = skip;
1304
1305 /*
1306 * Since events can happen in NMIs, there's no safe way to
1307 * use the per-cpu ftrace_stack. We reserve it, and if an interrupt
1308 * or NMI comes in, it will just have to use the default
1309 * FTRACE_STACK_ENTRIES-sized stack that is part of the entry itself.
1310 */
1311 preempt_disable_notrace();
1312
1313 use_stack = ++__get_cpu_var(ftrace_stack_reserve);
1314 /*
1315 * We don't need any atomic variables, just a barrier.
1316 * If an interrupt comes in, we don't care, because it would
1317 * have exited and put the counter back to what we want.
1318 * We just need a barrier to keep gcc from moving things
1319 * around.
1320 */
1321 barrier();
1322 if (use_stack == 1) {
1323 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1324 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1325
1326 if (regs)
1327 save_stack_trace_regs(regs, &trace);
1328 else
1329 save_stack_trace(&trace);
1330
1331 if (trace.nr_entries > size)
1332 size = trace.nr_entries;
1333 } else
1334 /* From now on, use_stack is a boolean */
1335 use_stack = 0;
1336
1337 size *= sizeof(unsigned long);
1338
1339 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1340 sizeof(*entry) + size, flags, pc);
1341 if (!event)
1342 goto out;
1343 entry = ring_buffer_event_data(event);
1344
1345 memset(&entry->caller, 0, size);
1346
1347 if (use_stack)
1348 memcpy(&entry->caller, trace.entries,
1349 trace.nr_entries * sizeof(unsigned long));
1350 else {
1351 trace.max_entries = FTRACE_STACK_ENTRIES;
1352 trace.entries = entry->caller;
1353 if (regs)
1354 save_stack_trace_regs(regs, &trace);
1355 else
1356 save_stack_trace(&trace);
1357 }
1358
1359 entry->size = trace.nr_entries;
1360
1361 if (!filter_check_discard(call, entry, buffer, event))
1362 ring_buffer_unlock_commit(buffer, event);
1363
1364 out:
1365 /* Again, don't let gcc optimize things here */
1366 barrier();
1367 __get_cpu_var(ftrace_stack_reserve)--;
1368 preempt_enable_notrace();
1369
1370}
1371
1372void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1373 int skip, int pc, struct pt_regs *regs)
1374{
1375 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1376 return;
1377
1378 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1379}
1380
1381void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1382 int skip, int pc)
1383{
1384 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1385 return;
1386
1387 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1388}
1389
1390void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1391 int pc)
1392{
1393 __ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
1394}
1395
1396/**
1397 * trace_dump_stack - record a stack back trace in the trace buffer
1398 */
1399void trace_dump_stack(void)
1400{
1401 unsigned long flags;
1402
1403 if (tracing_disabled || tracing_selftest_running)
1404 return;
1405
1406 local_save_flags(flags);
1407
1408 /* Skipping 3 frames seems to get us to the caller of this function */
1409 __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
1410}
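
/*
 * Hypothetical sketch (the example_* name is made up): trace_dump_stack()
 * above can be dropped into any suspicious code path to record the
 * current kernel backtrace in the ring buffer, as long as tracing is
 * not disabled and no selftest is running.
 */
#ifdef UNUSED
static void example_flag_suspicious_path(int looks_wrong)
{
	if (looks_wrong)
		trace_dump_stack();
}
#endif /* UNUSED */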
1411
1412static DEFINE_PER_CPU(int, user_stack_count);
1413
1414void
1415ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1416{
1417 struct ftrace_event_call *call = &event_user_stack;
1418 struct ring_buffer_event *event;
1419 struct userstack_entry *entry;
1420 struct stack_trace trace;
1421
1422 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1423 return;
1424
1425 /*
1426 * NMIs can not handle page faults, even with fix ups.
1427 * The save user stack can (and often does) fault.
1428 */
1429 if (unlikely(in_nmi()))
1430 return;
1431
1432 /*
1433 * prevent recursion, since the user stack tracing may
1434 * trigger other kernel events.
1435 */
1436 preempt_disable();
1437 if (__this_cpu_read(user_stack_count))
1438 goto out;
1439
1440 __this_cpu_inc(user_stack_count);
1441
1442 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1443 sizeof(*entry), flags, pc);
1444 if (!event)
1445 goto out_drop_count;
1446 entry = ring_buffer_event_data(event);
1447
1448 entry->tgid = current->tgid;
1449 memset(&entry->caller, 0, sizeof(entry->caller));
1450
1451 trace.nr_entries = 0;
1452 trace.max_entries = FTRACE_STACK_ENTRIES;
1453 trace.skip = 0;
1454 trace.entries = entry->caller;
1455
1456 save_stack_trace_user(&trace);
1457 if (!filter_check_discard(call, entry, buffer, event))
1458 ring_buffer_unlock_commit(buffer, event);
1459
1460 out_drop_count:
1461 __this_cpu_dec(user_stack_count);
1462 out:
1463 preempt_enable();
1464}
1465
1466#ifdef UNUSED
1467static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1468{
1469 ftrace_trace_userstack(tr, flags, preempt_count());
1470}
1471#endif /* UNUSED */
1472
1473#endif /* CONFIG_STACKTRACE */
1474
1475/* created for use with alloc_percpu */
1476struct trace_buffer_struct {
1477 char buffer[TRACE_BUF_SIZE];
1478};
1479
1480static struct trace_buffer_struct *trace_percpu_buffer;
1481static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1482static struct trace_buffer_struct *trace_percpu_irq_buffer;
1483static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1484
1485/*
1486 * The buffer used depends on the context. There is a per-cpu
1487 * buffer for normal context, softirq context, hard irq context and
1488 * NMI context. This allows for lockless recording.
1489 *
1490 * Note, if the buffers fail to be allocated, then this returns NULL.
1491 */
1492static char *get_trace_buf(void)
1493{
1494 struct trace_buffer_struct *percpu_buffer;
1495 struct trace_buffer_struct *buffer;
1496
1497 /*
1498 * If we have allocated per cpu buffers, then we do not
1499 * need to do any locking.
1500 */
1501 if (in_nmi())
1502 percpu_buffer = trace_percpu_nmi_buffer;
1503 else if (in_irq())
1504 percpu_buffer = trace_percpu_irq_buffer;
1505 else if (in_softirq())
1506 percpu_buffer = trace_percpu_sirq_buffer;
1507 else
1508 percpu_buffer = trace_percpu_buffer;
1509
1510 if (!percpu_buffer)
1511 return NULL;
1512
1513 buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
1514
1515 return buffer->buffer;
1516}
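
/*
 * Hypothetical usage sketch (the example_* name is made up): the
 * buffer returned by get_trace_buf() above is per cpu and per
 * context, so a caller must keep preemption disabled while it uses
 * the pointer, exactly as trace_vbprintk() and trace_array_vprintk()
 * below do.
 */
#ifdef UNUSED
static int example_use_trace_buf(const char *msg)
{
	char *tbuffer;
	int len = 0;

	preempt_disable_notrace();
	tbuffer = get_trace_buf();
	if (tbuffer)
		len = snprintf(tbuffer, TRACE_BUF_SIZE, "%s", msg);
	preempt_enable_notrace();

	return len;
}
#endif /* UNUSED */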
1517
1518static int alloc_percpu_trace_buffer(void)
1519{
1520 struct trace_buffer_struct *buffers;
1521 struct trace_buffer_struct *sirq_buffers;
1522 struct trace_buffer_struct *irq_buffers;
1523 struct trace_buffer_struct *nmi_buffers;
1524
1525 buffers = alloc_percpu(struct trace_buffer_struct);
1526 if (!buffers)
1527 goto err_warn;
1528
1529 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1530 if (!sirq_buffers)
1531 goto err_sirq;
1532
1533 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1534 if (!irq_buffers)
1535 goto err_irq;
1536
1537 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1538 if (!nmi_buffers)
1539 goto err_nmi;
1540
1541 trace_percpu_buffer = buffers;
1542 trace_percpu_sirq_buffer = sirq_buffers;
1543 trace_percpu_irq_buffer = irq_buffers;
1544 trace_percpu_nmi_buffer = nmi_buffers;
1545
1546 return 0;
1547
1548 err_nmi:
1549 free_percpu(irq_buffers);
1550 err_irq:
1551 free_percpu(sirq_buffers);
1552 err_sirq:
1553 free_percpu(buffers);
1554 err_warn:
1555 WARN(1, "Could not allocate percpu trace_printk buffer");
1556 return -ENOMEM;
1557}
1558
1559void trace_printk_init_buffers(void)
1560{
1561 static int buffers_allocated;
1562
1563 if (buffers_allocated)
1564 return;
1565
1566 if (alloc_percpu_trace_buffer())
1567 return;
1568
1569 pr_info("ftrace: Allocated trace_printk buffers\n");
1570
1571 buffers_allocated = 1;
1572}
1573
1574/**
1575 * trace_vbprintk - write binary msg to tracing buffer
1576 *
1577 */
1578int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1579{
1580 struct ftrace_event_call *call = &event_bprint;
1581 struct ring_buffer_event *event;
1582 struct ring_buffer *buffer;
1583 struct trace_array *tr = &global_trace;
1584 struct bprint_entry *entry;
1585 unsigned long flags;
1586 char *tbuffer;
1587 int len = 0, size, pc;
1588
1589 if (unlikely(tracing_selftest_running || tracing_disabled))
1590 return 0;
1591
1592 /* Don't pollute graph traces with trace_vprintk internals */
1593 pause_graph_tracing();
1594
1595 pc = preempt_count();
1596 preempt_disable_notrace();
1597
1598 tbuffer = get_trace_buf();
1599 if (!tbuffer) {
1600 len = 0;
1601 goto out;
1602 }
1603
1604 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
1605
1606 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
1607 goto out;
1608
1609 local_save_flags(flags);
1610 size = sizeof(*entry) + sizeof(u32) * len;
1611 buffer = tr->buffer;
1612 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1613 flags, pc);
1614 if (!event)
1615 goto out;
1616 entry = ring_buffer_event_data(event);
1617 entry->ip = ip;
1618 entry->fmt = fmt;
1619
1620 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
1621 if (!filter_check_discard(call, entry, buffer, event)) {
1622 ring_buffer_unlock_commit(buffer, event);
1623 ftrace_trace_stack(buffer, flags, 6, pc);
1624 }
1625
1626out:
1627 preempt_enable_notrace();
1628 unpause_graph_tracing();
1629
1630 return len;
1631}
1632EXPORT_SYMBOL_GPL(trace_vbprintk);
1633
1634int trace_array_printk(struct trace_array *tr,
1635 unsigned long ip, const char *fmt, ...)
1636{
1637 int ret;
1638 va_list ap;
1639
1640 if (!(trace_flags & TRACE_ITER_PRINTK))
1641 return 0;
1642
1643 va_start(ap, fmt);
1644 ret = trace_array_vprintk(tr, ip, fmt, ap);
1645 va_end(ap);
1646 return ret;
1647}
1648
1649int trace_array_vprintk(struct trace_array *tr,
1650 unsigned long ip, const char *fmt, va_list args)
1651{
1652 struct ftrace_event_call *call = &event_print;
1653 struct ring_buffer_event *event;
1654 struct ring_buffer *buffer;
1655 int len = 0, size, pc;
1656 struct print_entry *entry;
1657 unsigned long flags;
1658 char *tbuffer;
1659
1660 if (tracing_disabled || tracing_selftest_running)
1661 return 0;
1662
1663 /* Don't pollute graph traces with trace_vprintk internals */
1664 pause_graph_tracing();
1665
1666 pc = preempt_count();
1667 preempt_disable_notrace();
1668
1669
1670 tbuffer = get_trace_buf();
1671 if (!tbuffer) {
1672 len = 0;
1673 goto out;
1674 }
1675
1676 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
1677 if (len > TRACE_BUF_SIZE)
1678 goto out;
1679
1680 local_save_flags(flags);
1681 size = sizeof(*entry) + len + 1;
1682 buffer = tr->buffer;
1683 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
1684 flags, pc);
1685 if (!event)
1686 goto out;
1687 entry = ring_buffer_event_data(event);
1688 entry->ip = ip;
1689
1690 memcpy(&entry->buf, tbuffer, len);
1691 entry->buf[len] = '\0';
1692 if (!filter_check_discard(call, entry, buffer, event)) {
1693 ring_buffer_unlock_commit(buffer, event);
1694 ftrace_trace_stack(buffer, flags, 6, pc);
1695 }
1696 out:
1697 preempt_enable_notrace();
1698 unpause_graph_tracing();
1699
1700 return len;
1701}
1702
1703int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1704{
1705 return trace_array_vprintk(&global_trace, ip, fmt, args);
1706}
1707EXPORT_SYMBOL_GPL(trace_vprintk);
1708
1709static void trace_iterator_increment(struct trace_iterator *iter)
1710{
1711 iter->idx++;
1712 if (iter->buffer_iter[iter->cpu])
1713 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1714}
1715
1716static struct trace_entry *
1717peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
1718 unsigned long *lost_events)
1719{
1720 struct ring_buffer_event *event;
1721 struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
1722
1723 if (buf_iter)
1724 event = ring_buffer_iter_peek(buf_iter, ts);
1725 else
1726 event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
1727 lost_events);
1728
1729 if (event) {
1730 iter->ent_size = ring_buffer_event_length(event);
1731 return ring_buffer_event_data(event);
1732 }
1733 iter->ent_size = 0;
1734 return NULL;
1735}
1736
1737static struct trace_entry *
1738__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1739 unsigned long *missing_events, u64 *ent_ts)
1740{
1741 struct ring_buffer *buffer = iter->tr->buffer;
1742 struct trace_entry *ent, *next = NULL;
1743 unsigned long lost_events = 0, next_lost = 0;
1744 int cpu_file = iter->cpu_file;
1745 u64 next_ts = 0, ts;
1746 int next_cpu = -1;
1747 int next_size = 0;
1748 int cpu;
1749
1750 /*
1751 * If we are in a per_cpu trace file, don't bother iterating over
1752 * all CPUs; peek at that CPU directly.
1753 */
1754 if (cpu_file > TRACE_PIPE_ALL_CPU) {
1755 if (ring_buffer_empty_cpu(buffer, cpu_file))
1756 return NULL;
1757 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
1758 if (ent_cpu)
1759 *ent_cpu = cpu_file;
1760
1761 return ent;
1762 }
1763
1764 for_each_tracing_cpu(cpu) {
1765
1766 if (ring_buffer_empty_cpu(buffer, cpu))
1767 continue;
1768
1769 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
1770
1771 /*
1772 * Pick the entry with the smallest timestamp:
1773 */
1774 if (ent && (!next || ts < next_ts)) {
1775 next = ent;
1776 next_cpu = cpu;
1777 next_ts = ts;
1778 next_lost = lost_events;
1779 next_size = iter->ent_size;
1780 }
1781 }
1782
1783 iter->ent_size = next_size;
1784
1785 if (ent_cpu)
1786 *ent_cpu = next_cpu;
1787
1788 if (ent_ts)
1789 *ent_ts = next_ts;
1790
1791 if (missing_events)
1792 *missing_events = next_lost;
1793
1794 return next;
1795}
1796
1797/* Find the next real entry, without updating the iterator itself */
1798struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
1799 int *ent_cpu, u64 *ent_ts)
1800{
1801 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
1802}
1803
1804/* Find the next real entry, and increment the iterator to the next entry */
1805void *trace_find_next_entry_inc(struct trace_iterator *iter)
1806{
1807 iter->ent = __find_next_entry(iter, &iter->cpu,
1808 &iter->lost_events, &iter->ts);
1809
1810 if (iter->ent)
1811 trace_iterator_increment(iter);
1812
1813 return iter->ent ? iter : NULL;
1814}
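
/*
 * Hypothetical sketch (the example_* name is made up) of how a reader
 * drains the iterator: trace_find_next_entry_inc() above returns the
 * iterator while entries remain, merged across CPUs in timestamp
 * order by __find_next_entry(), and NULL once the buffers are empty.
 */
#ifdef UNUSED
static int example_count_remaining_entries(struct trace_iterator *iter)
{
	int count = 0;

	while (trace_find_next_entry_inc(iter))
		count++;

	return count;
}
#endif /* UNUSED */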
1815
1816static void trace_consume(struct trace_iterator *iter)
1817{
1818 ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
1819 &iter->lost_events);
1820}
1821
1822static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1823{
1824 struct trace_iterator *iter = m->private;
1825 int i = (int)*pos;
1826 void *ent;
1827
1828 WARN_ON_ONCE(iter->leftover);
1829
1830 (*pos)++;
1831
1832 /* can't go backwards */
1833 if (iter->idx > i)
1834 return NULL;
1835
1836 if (iter->idx < 0)
1837 ent = trace_find_next_entry_inc(iter);
1838 else
1839 ent = iter;
1840
1841 while (ent && iter->idx < i)
1842 ent = trace_find_next_entry_inc(iter);
1843
1844 iter->pos = *pos;
1845
1846 return ent;
1847}
1848
1849void tracing_iter_reset(struct trace_iterator *iter, int cpu)
1850{
1851 struct trace_array *tr = iter->tr;
1852 struct ring_buffer_event *event;
1853 struct ring_buffer_iter *buf_iter;
1854 unsigned long entries = 0;
1855 u64 ts;
1856
1857 tr->data[cpu]->skipped_entries = 0;
1858
1859 if (!iter->buffer_iter[cpu])
1860 return;
1861
1862 buf_iter = iter->buffer_iter[cpu];
1863 ring_buffer_iter_reset(buf_iter);
1864
1865 /*
1866 * With the max latency tracers, we can have the case that
1867 * a reset never took place on a cpu. This is evident from
1868 * the timestamp being before the start of the buffer.
1869 */
1870 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
1871 if (ts >= iter->tr->time_start)
1872 break;
1873 entries++;
1874 ring_buffer_read(buf_iter, NULL);
1875 }
1876
1877 tr->data[cpu]->skipped_entries = entries;
1878}
1879
1880/*
1881 * The current tracer is copied to avoid taking a global lock
1882 * all around.
1883 */
1884static void *s_start(struct seq_file *m, loff_t *pos)
1885{
1886 struct trace_iterator *iter = m->private;
1887 static struct tracer *old_tracer;
1888 int cpu_file = iter->cpu_file;
1889 void *p = NULL;
1890 loff_t l = 0;
1891 int cpu;
1892
1893 /* copy the tracer to avoid using a global lock all around */
1894 mutex_lock(&trace_types_lock);
1895 if (unlikely(old_tracer != current_trace && current_trace)) {
1896 old_tracer = current_trace;
1897 *iter->trace = *current_trace;
1898 }
1899 mutex_unlock(&trace_types_lock);
1900
1901 atomic_inc(&trace_record_cmdline_disabled);
1902
1903 if (*pos != iter->pos) {
1904 iter->ent = NULL;
1905 iter->cpu = 0;
1906 iter->idx = -1;
1907
1908 if (cpu_file == TRACE_PIPE_ALL_CPU) {
1909 for_each_tracing_cpu(cpu)
1910 tracing_iter_reset(iter, cpu);
1911 } else
1912 tracing_iter_reset(iter, cpu_file);
1913
1914 iter->leftover = 0;
1915 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1916 ;
1917
1918 } else {
1919 /*
1920 * If we overflowed the seq_file before, then we want
1921 * to just reuse the trace_seq buffer again.
1922 */
1923 if (iter->leftover)
1924 p = iter;
1925 else {
1926 l = *pos - 1;
1927 p = s_next(m, p, &l);
1928 }
1929 }
1930
1931 trace_event_read_lock();
1932 trace_access_lock(cpu_file);
1933 return p;
1934}
1935
1936static void s_stop(struct seq_file *m, void *p)
1937{
1938 struct trace_iterator *iter = m->private;
1939
1940 atomic_dec(&trace_record_cmdline_disabled);
1941 trace_access_unlock(iter->cpu_file);
1942 trace_event_read_unlock();
1943}
1944
1945static void
1946get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
1947{
1948 unsigned long count;
1949 int cpu;
1950
1951 *total = 0;
1952 *entries = 0;
1953
1954 for_each_tracing_cpu(cpu) {
1955 count = ring_buffer_entries_cpu(tr->buffer, cpu);
1956 /*
1957 * If this buffer has skipped entries, then we hold all
1958 * entries for the trace and we need to ignore the
1959 * ones before the time stamp.
1960 */
1961 if (tr->data[cpu]->skipped_entries) {
1962 count -= tr->data[cpu]->skipped_entries;
1963 /* total is the same as the entries */
1964 *total += count;
1965 } else
1966 *total += count +
1967 ring_buffer_overrun_cpu(tr->buffer, cpu);
1968 *entries += count;
1969 }
1970}
1971
1972static void print_lat_help_header(struct seq_file *m)
1973{
1974 seq_puts(m, "# _------=> CPU# \n");
1975 seq_puts(m, "# / _-----=> irqs-off \n");
1976 seq_puts(m, "# | / _----=> need-resched \n");
1977 seq_puts(m, "# || / _---=> hardirq/softirq \n");
1978 seq_puts(m, "# ||| / _--=> preempt-depth \n");
1979 seq_puts(m, "# |||| / delay \n");
1980 seq_puts(m, "# cmd pid ||||| time | caller \n");
1981 seq_puts(m, "# \\ / ||||| \\ | / \n");
1982}
1983
1984static void print_event_info(struct trace_array *tr, struct seq_file *m)
1985{
1986 unsigned long total;
1987 unsigned long entries;
1988
1989 get_total_entries(tr, &total, &entries);
1990 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
1991 entries, total, num_online_cpus());
1992 seq_puts(m, "#\n");
1993}
1994
1995static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
1996{
1997 print_event_info(tr, m);
1998 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
1999 seq_puts(m, "# | | | | |\n");
2000}
2001
2002static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
2003{
2004 print_event_info(tr, m);
2005 seq_puts(m, "# _-----=> irqs-off\n");
2006 seq_puts(m, "# / _----=> need-resched\n");
2007 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2008 seq_puts(m, "# || / _--=> preempt-depth\n");
2009 seq_puts(m, "# ||| / delay\n");
2010 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2011 seq_puts(m, "# | | | |||| | |\n");
2012}
2013
2014void
2015print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2016{
2017 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2018 struct trace_array *tr = iter->tr;
2019 struct trace_array_cpu *data = tr->data[tr->cpu];
2020 struct tracer *type = current_trace;
2021 unsigned long entries;
2022 unsigned long total;
2023 const char *name = "preemption";
2024
2025 if (type)
2026 name = type->name;
2027
2028 get_total_entries(tr, &total, &entries);
2029
2030 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2031 name, UTS_RELEASE);
2032 seq_puts(m, "# -----------------------------------"
2033 "---------------------------------\n");
2034 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2035 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2036 nsecs_to_usecs(data->saved_latency),
2037 entries,
2038 total,
2039 tr->cpu,
2040#if defined(CONFIG_PREEMPT_NONE)
2041 "server",
2042#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2043 "desktop",
2044#elif defined(CONFIG_PREEMPT)
2045 "preempt",
2046#else
2047 "unknown",
2048#endif
2049 /* These are reserved for later use */
2050 0, 0, 0, 0);
2051#ifdef CONFIG_SMP
2052 seq_printf(m, " #P:%d)\n", num_online_cpus());
2053#else
2054 seq_puts(m, ")\n");
2055#endif
2056 seq_puts(m, "# -----------------\n");
2057 seq_printf(m, "# | task: %.16s-%d "
2058 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2059 data->comm, data->pid, data->uid, data->nice,
2060 data->policy, data->rt_priority);
2061 seq_puts(m, "# -----------------\n");
2062
2063 if (data->critical_start) {
2064 seq_puts(m, "# => started at: ");
2065 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2066 trace_print_seq(m, &iter->seq);
2067 seq_puts(m, "\n# => ended at: ");
2068 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2069 trace_print_seq(m, &iter->seq);
2070 seq_puts(m, "\n#\n");
2071 }
2072
2073 seq_puts(m, "#\n");
2074}
2075
2076static void test_cpu_buff_start(struct trace_iterator *iter)
2077{
2078 struct trace_seq *s = &iter->seq;
2079
2080 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2081 return;
2082
2083 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2084 return;
2085
2086 if (cpumask_test_cpu(iter->cpu, iter->started))
2087 return;
2088
2089 if (iter->tr->data[iter->cpu]->skipped_entries)
2090 return;
2091
2092 cpumask_set_cpu(iter->cpu, iter->started);
2093
2094 /* Don't print started cpu buffer for the first entry of the trace */
2095 if (iter->idx > 1)
2096 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2097 iter->cpu);
2098}
2099
2100static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2101{
2102 struct trace_seq *s = &iter->seq;
2103 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2104 struct trace_entry *entry;
2105 struct trace_event *event;
2106
2107 entry = iter->ent;
2108
2109 test_cpu_buff_start(iter);
2110
2111 event = ftrace_find_event(entry->type);
2112
2113 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2114 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2115 if (!trace_print_lat_context(iter))
2116 goto partial;
2117 } else {
2118 if (!trace_print_context(iter))
2119 goto partial;
2120 }
2121 }
2122
2123 if (event)
2124 return event->funcs->trace(iter, sym_flags, event);
2125
2126 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2127 goto partial;
2128
2129 return TRACE_TYPE_HANDLED;
2130partial:
2131 return TRACE_TYPE_PARTIAL_LINE;
2132}
2133
2134static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2135{
2136 struct trace_seq *s = &iter->seq;
2137 struct trace_entry *entry;
2138 struct trace_event *event;
2139
2140 entry = iter->ent;
2141
2142 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2143 if (!trace_seq_printf(s, "%d %d %llu ",
2144 entry->pid, iter->cpu, iter->ts))
2145 goto partial;
2146 }
2147
2148 event = ftrace_find_event(entry->type);
2149 if (event)
2150 return event->funcs->raw(iter, 0, event);
2151
2152 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2153 goto partial;
2154
2155 return TRACE_TYPE_HANDLED;
2156partial:
2157 return TRACE_TYPE_PARTIAL_LINE;
2158}
2159
2160static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2161{
2162 struct trace_seq *s = &iter->seq;
2163 unsigned char newline = '\n';
2164 struct trace_entry *entry;
2165 struct trace_event *event;
2166
2167 entry = iter->ent;
2168
2169 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2170 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2171 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2172 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2173 }
2174
2175 event = ftrace_find_event(entry->type);
2176 if (event) {
2177 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2178 if (ret != TRACE_TYPE_HANDLED)
2179 return ret;
2180 }
2181
2182 SEQ_PUT_FIELD_RET(s, newline);
2183
2184 return TRACE_TYPE_HANDLED;
2185}
2186
2187static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2188{
2189 struct trace_seq *s = &iter->seq;
2190 struct trace_entry *entry;
2191 struct trace_event *event;
2192
2193 entry = iter->ent;
2194
2195 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2196 SEQ_PUT_FIELD_RET(s, entry->pid);
2197 SEQ_PUT_FIELD_RET(s, iter->cpu);
2198 SEQ_PUT_FIELD_RET(s, iter->ts);
2199 }
2200
2201 event = ftrace_find_event(entry->type);
2202 return event ? event->funcs->binary(iter, 0, event) :
2203 TRACE_TYPE_HANDLED;
2204}
2205
2206int trace_empty(struct trace_iterator *iter)
2207{
2208 int cpu;
2209
2210 /* If we are looking at one CPU buffer, only check that one */
2211 if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
2212 cpu = iter->cpu_file;
2213 if (iter->buffer_iter[cpu]) {
2214 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
2215 return 0;
2216 } else {
2217 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2218 return 0;
2219 }
2220 return 1;
2221 }
2222
2223 for_each_tracing_cpu(cpu) {
2224 if (iter->buffer_iter[cpu]) {
2225 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
2226 return 0;
2227 } else {
2228 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2229 return 0;
2230 }
2231 }
2232
2233 return 1;
2234}
2235
2236/* Called with trace_event_read_lock() held. */
2237enum print_line_t print_trace_line(struct trace_iterator *iter)
2238{
2239 enum print_line_t ret;
2240
2241 if (iter->lost_events &&
2242 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2243 iter->cpu, iter->lost_events))
2244 return TRACE_TYPE_PARTIAL_LINE;
2245
2246 if (iter->trace && iter->trace->print_line) {
2247 ret = iter->trace->print_line(iter);
2248 if (ret != TRACE_TYPE_UNHANDLED)
2249 return ret;
2250 }
2251
2252 if (iter->ent->type == TRACE_BPRINT &&
2253 trace_flags & TRACE_ITER_PRINTK &&
2254 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2255 return trace_print_bprintk_msg_only(iter);
2256
2257 if (iter->ent->type == TRACE_PRINT &&
2258 trace_flags & TRACE_ITER_PRINTK &&
2259 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2260 return trace_print_printk_msg_only(iter);
2261
2262 if (trace_flags & TRACE_ITER_BIN)
2263 return print_bin_fmt(iter);
2264
2265 if (trace_flags & TRACE_ITER_HEX)
2266 return print_hex_fmt(iter);
2267
2268 if (trace_flags & TRACE_ITER_RAW)
2269 return print_raw_fmt(iter);
2270
2271 return print_trace_fmt(iter);
2272}
2273
2274void trace_latency_header(struct seq_file *m)
2275{
2276 struct trace_iterator *iter = m->private;
2277
2278 /* print nothing if the buffers are empty */
2279 if (trace_empty(iter))
2280 return;
2281
2282 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2283 print_trace_header(m, iter);
2284
2285 if (!(trace_flags & TRACE_ITER_VERBOSE))
2286 print_lat_help_header(m);
2287}
2288
2289void trace_default_header(struct seq_file *m)
2290{
2291 struct trace_iterator *iter = m->private;
2292
2293 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2294 return;
2295
2296 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2297 /* print nothing if the buffers are empty */
2298 if (trace_empty(iter))
2299 return;
2300 print_trace_header(m, iter);
2301 if (!(trace_flags & TRACE_ITER_VERBOSE))
2302 print_lat_help_header(m);
2303 } else {
2304 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2305 if (trace_flags & TRACE_ITER_IRQ_INFO)
2306 print_func_help_header_irq(iter->tr, m);
2307 else
2308 print_func_help_header(iter->tr, m);
2309 }
2310 }
2311}
2312
2313static void test_ftrace_alive(struct seq_file *m)
2314{
2315 if (!ftrace_is_dead())
2316 return;
2317 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2318 seq_puts(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2319}
2320
2321static int s_show(struct seq_file *m, void *v)
2322{
2323 struct trace_iterator *iter = v;
2324 int ret;
2325
2326 if (iter->ent == NULL) {
2327 if (iter->tr) {
2328 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2329 seq_puts(m, "#\n");
2330 test_ftrace_alive(m);
2331 }
2332 if (iter->trace && iter->trace->print_header)
2333 iter->trace->print_header(m);
2334 else
2335 trace_default_header(m);
2336
2337 } else if (iter->leftover) {
2338 /*
2339 * If we filled the seq_file buffer earlier, we
2340 * want to just show it now.
2341 */
2342 ret = trace_print_seq(m, &iter->seq);
2343
2344 /* ret should this time be zero, but you never know */
2345 iter->leftover = ret;
2346
2347 } else {
2348 print_trace_line(iter);
2349 ret = trace_print_seq(m, &iter->seq);
2350 /*
2351 * If we overflow the seq_file buffer, then it will
2352 * ask us for this data again at startup.
2353 * Use that instead.
2354 * ret is 0 if the seq_file write succeeded,
2355 * -1 otherwise.
2356 */
2357 iter->leftover = ret;
2358 }
2359
2360 return 0;
2361}
2362
2363static const struct seq_operations tracer_seq_ops = {
2364 .start = s_start,
2365 .next = s_next,
2366 .stop = s_stop,
2367 .show = s_show,
2368};
2369
2370static struct trace_iterator *
2371__tracing_open(struct inode *inode, struct file *file)
2372{
2373 long cpu_file = (long) inode->i_private;
2374 struct trace_iterator *iter;
2375 int cpu;
2376
2377 if (tracing_disabled)
2378 return ERR_PTR(-ENODEV);
2379
2380 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2381 if (!iter)
2382 return ERR_PTR(-ENOMEM);
2383
2384 /*
2385 * We make a copy of the current tracer to avoid concurrent
2386 * changes on it while we are reading.
2387 */
2388 mutex_lock(&trace_types_lock);
2389 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2390 if (!iter->trace)
2391 goto fail;
2392
2393 if (current_trace)
2394 *iter->trace = *current_trace;
2395
2396 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2397 goto fail;
2398
2399 if (current_trace && current_trace->print_max)
2400 iter->tr = &max_tr;
2401 else
2402 iter->tr = &global_trace;
2403 iter->pos = -1;
2404 mutex_init(&iter->mutex);
2405 iter->cpu_file = cpu_file;
2406
2407 /* Notify the tracer early, before we stop tracing. */
2408 if (iter->trace && iter->trace->open)
2409 iter->trace->open(iter);
2410
2411 /* Annotate start of buffers if we had overruns */
2412 if (ring_buffer_overruns(iter->tr->buffer))
2413 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2414
2415 /* stop the trace while dumping */
2416 tracing_stop();
2417
2418 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
2419 for_each_tracing_cpu(cpu) {
2420 iter->buffer_iter[cpu] =
2421 ring_buffer_read_prepare(iter->tr->buffer, cpu);
2422 }
2423 ring_buffer_read_prepare_sync();
2424 for_each_tracing_cpu(cpu) {
2425 ring_buffer_read_start(iter->buffer_iter[cpu]);
2426 tracing_iter_reset(iter, cpu);
2427 }
2428 } else {
2429 cpu = iter->cpu_file;
2430 iter->buffer_iter[cpu] =
2431 ring_buffer_read_prepare(iter->tr->buffer, cpu);
2432 ring_buffer_read_prepare_sync();
2433 ring_buffer_read_start(iter->buffer_iter[cpu]);
2434 tracing_iter_reset(iter, cpu);
2435 }
2436
2437 mutex_unlock(&trace_types_lock);
2438
2439 return iter;
2440
2441 fail:
2442 mutex_unlock(&trace_types_lock);
2443 kfree(iter->trace);
2444 seq_release_private(inode, file);
2445 return ERR_PTR(-ENOMEM);
2446}
2447
2448int tracing_open_generic(struct inode *inode, struct file *filp)
2449{
2450 if (tracing_disabled)
2451 return -ENODEV;
2452
2453 filp->private_data = inode->i_private;
2454 return 0;
2455}
2456
2457static int tracing_release(struct inode *inode, struct file *file)
2458{
2459 struct seq_file *m = file->private_data;
2460 struct trace_iterator *iter;
2461 int cpu;
2462
2463 if (!(file->f_mode & FMODE_READ))
2464 return 0;
2465
2466 iter = m->private;
2467
2468 mutex_lock(&trace_types_lock);
2469 for_each_tracing_cpu(cpu) {
2470 if (iter->buffer_iter[cpu])
2471 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2472 }
2473
2474 if (iter->trace && iter->trace->close)
2475 iter->trace->close(iter);
2476
2477 /* reenable tracing if it was previously enabled */
2478 tracing_start();
2479 mutex_unlock(&trace_types_lock);
2480
2481 mutex_destroy(&iter->mutex);
2482 free_cpumask_var(iter->started);
2483 kfree(iter->trace);
2484 seq_release_private(inode, file);
2485 return 0;
2486}
2487
2488static int tracing_open(struct inode *inode, struct file *file)
2489{
2490 struct trace_iterator *iter;
2491 int ret = 0;
2492
2493 /* If this file was open for write, then erase contents */
2494 if ((file->f_mode & FMODE_WRITE) &&
2495 (file->f_flags & O_TRUNC)) {
2496 long cpu = (long) inode->i_private;
2497
2498 if (cpu == TRACE_PIPE_ALL_CPU)
2499 tracing_reset_online_cpus(&global_trace);
2500 else
2501 tracing_reset(&global_trace, cpu);
2502 }
2503
2504 if (file->f_mode & FMODE_READ) {
2505 iter = __tracing_open(inode, file);
2506 if (IS_ERR(iter))
2507 ret = PTR_ERR(iter);
2508 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2509 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2510 }
2511 return ret;
2512}
2513
2514static void *
2515t_next(struct seq_file *m, void *v, loff_t *pos)
2516{
2517 struct tracer *t = v;
2518
2519 (*pos)++;
2520
2521 if (t)
2522 t = t->next;
2523
2524 return t;
2525}
2526
2527static void *t_start(struct seq_file *m, loff_t *pos)
2528{
2529 struct tracer *t;
2530 loff_t l = 0;
2531
2532 mutex_lock(&trace_types_lock);
2533 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
2534 ;
2535
2536 return t;
2537}
2538
2539static void t_stop(struct seq_file *m, void *p)
2540{
2541 mutex_unlock(&trace_types_lock);
2542}
2543
2544static int t_show(struct seq_file *m, void *v)
2545{
2546 struct tracer *t = v;
2547
2548 if (!t)
2549 return 0;
2550
2551 seq_printf(m, "%s", t->name);
2552 if (t->next)
2553 seq_putc(m, ' ');
2554 else
2555 seq_putc(m, '\n');
2556
2557 return 0;
2558}
2559
2560static const struct seq_operations show_traces_seq_ops = {
2561 .start = t_start,
2562 .next = t_next,
2563 .stop = t_stop,
2564 .show = t_show,
2565};
2566
2567static int show_traces_open(struct inode *inode, struct file *file)
2568{
2569 if (tracing_disabled)
2570 return -ENODEV;
2571
2572 return seq_open(file, &show_traces_seq_ops);
2573}
2574
2575static ssize_t
2576tracing_write_stub(struct file *filp, const char __user *ubuf,
2577 size_t count, loff_t *ppos)
2578{
2579 return count;
2580}
2581
2582static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
2583{
2584 if (file->f_mode & FMODE_READ)
2585 return seq_lseek(file, offset, origin);
2586 else
2587 return 0;
2588}
2589
2590static const struct file_operations tracing_fops = {
2591 .open = tracing_open,
2592 .read = seq_read,
2593 .write = tracing_write_stub,
2594 .llseek = tracing_seek,
2595 .release = tracing_release,
2596};
2597
2598static const struct file_operations show_traces_fops = {
2599 .open = show_traces_open,
2600 .read = seq_read,
2601 .release = seq_release,
2602 .llseek = seq_lseek,
2603};
2604
2605/*
2606 * Only trace on a CPU if the bitmask is set:
2607 */
2608static cpumask_var_t tracing_cpumask;
2609
2610/*
2611 * The tracer itself will not take this lock, but still we want
2612 * to provide a consistent cpumask to user-space:
2613 */
2614static DEFINE_MUTEX(tracing_cpumask_update_lock);
2615
2616/*
2617 * Temporary storage for the character representation of the
2618 * CPU bitmask (and one more byte for the newline):
2619 */
2620static char mask_str[NR_CPUS + 1];
2621
2622static ssize_t
2623tracing_cpumask_read(struct file *filp, char __user *ubuf,
2624 size_t count, loff_t *ppos)
2625{
2626 int len;
2627
2628 mutex_lock(&tracing_cpumask_update_lock);
2629
2630 len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2631 if (count - len < 2) {
2632 count = -EINVAL;
2633 goto out_err;
2634 }
2635 len += sprintf(mask_str + len, "\n");
2636 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2637
2638out_err:
2639 mutex_unlock(&tracing_cpumask_update_lock);
2640
2641 return count;
2642}
2643
2644static ssize_t
2645tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2646 size_t count, loff_t *ppos)
2647{
2648 int err, cpu;
2649 cpumask_var_t tracing_cpumask_new;
2650
2651 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2652 return -ENOMEM;
2653
2654 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2655 if (err)
2656 goto err_unlock;
2657
2658 mutex_lock(&tracing_cpumask_update_lock);
2659
2660 local_irq_disable();
2661 arch_spin_lock(&ftrace_max_lock);
2662 for_each_tracing_cpu(cpu) {
2663 /*
2664 * Increase/decrease the disabled counter if we are
2665 * about to flip a bit in the cpumask:
2666 */
2667 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2668 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2669 atomic_inc(&global_trace.data[cpu]->disabled);
2670 ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
2671 }
2672 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2673 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2674 atomic_dec(&global_trace.data[cpu]->disabled);
2675 ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
2676 }
2677 }
2678 arch_spin_unlock(&ftrace_max_lock);
2679 local_irq_enable();
2680
2681 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
2682
2683 mutex_unlock(&tracing_cpumask_update_lock);
2684 free_cpumask_var(tracing_cpumask_new);
2685
2686 return count;
2687
2688err_unlock:
2689 free_cpumask_var(tracing_cpumask_new);
2690
2691 return err;
2692}
2693
2694static const struct file_operations tracing_cpumask_fops = {
2695 .open = tracing_open_generic,
2696 .read = tracing_cpumask_read,
2697 .write = tracing_cpumask_write,
2698 .llseek = generic_file_llseek,
2699};
2700
2701static int tracing_trace_options_show(struct seq_file *m, void *v)
2702{
2703 struct tracer_opt *trace_opts;
2704 u32 tracer_flags;
2705 int i;
2706
2707 mutex_lock(&trace_types_lock);
2708 tracer_flags = current_trace->flags->val;
2709 trace_opts = current_trace->flags->opts;
2710
2711 for (i = 0; trace_options[i]; i++) {
2712 if (trace_flags & (1 << i))
2713 seq_printf(m, "%s\n", trace_options[i]);
2714 else
2715 seq_printf(m, "no%s\n", trace_options[i]);
2716 }
2717
2718 for (i = 0; trace_opts[i].name; i++) {
2719 if (tracer_flags & trace_opts[i].bit)
2720 seq_printf(m, "%s\n", trace_opts[i].name);
2721 else
2722 seq_printf(m, "no%s\n", trace_opts[i].name);
2723 }
2724 mutex_unlock(&trace_types_lock);
2725
2726 return 0;
2727}
2728
2729static int __set_tracer_option(struct tracer *trace,
2730 struct tracer_flags *tracer_flags,
2731 struct tracer_opt *opts, int neg)
2732{
2733 int ret;
2734
2735 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
2736 if (ret)
2737 return ret;
2738
2739 if (neg)
2740 tracer_flags->val &= ~opts->bit;
2741 else
2742 tracer_flags->val |= opts->bit;
2743 return 0;
2744}
2745
2746/* Try to assign a tracer specific option */
2747static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2748{
2749 struct tracer_flags *tracer_flags = trace->flags;
2750 struct tracer_opt *opts = NULL;
2751 int i;
2752
2753 for (i = 0; tracer_flags->opts[i].name; i++) {
2754 opts = &tracer_flags->opts[i];
2755
2756 if (strcmp(cmp, opts->name) == 0)
2757 return __set_tracer_option(trace, trace->flags,
2758 opts, neg);
2759 }
2760
2761 return -EINVAL;
2762}
2763
2764static void set_tracer_flags(unsigned int mask, int enabled)
2765{
2766 /* do nothing if flag is already set */
2767 if (!!(trace_flags & mask) == !!enabled)
2768 return;
2769
2770 if (enabled)
2771 trace_flags |= mask;
2772 else
2773 trace_flags &= ~mask;
2774
2775 if (mask == TRACE_ITER_RECORD_CMD)
2776 trace_event_enable_cmd_record(enabled);
2777
2778 if (mask == TRACE_ITER_OVERWRITE)
2779 ring_buffer_change_overwrite(global_trace.buffer, enabled);
2780}
2781
2782static ssize_t
2783tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2784 size_t cnt, loff_t *ppos)
2785{
2786 char buf[64];
2787 char *cmp;
2788 int neg = 0;
2789 int ret;
2790 int i;
2791
2792 if (cnt >= sizeof(buf))
2793 return -EINVAL;
2794
2795 if (copy_from_user(&buf, ubuf, cnt))
2796 return -EFAULT;
2797
2798 buf[cnt] = 0;
2799 cmp = strstrip(buf);
2800
2801 if (strncmp(cmp, "no", 2) == 0) {
2802 neg = 1;
2803 cmp += 2;
2804 }
2805
2806 for (i = 0; trace_options[i]; i++) {
2807 if (strcmp(cmp, trace_options[i]) == 0) {
2808 set_tracer_flags(1 << i, !neg);
2809 break;
2810 }
2811 }
2812
2813 /* If no option could be set, test the specific tracer options */
2814 if (!trace_options[i]) {
2815 mutex_lock(&trace_types_lock);
2816 ret = set_tracer_option(current_trace, cmp, neg);
2817 mutex_unlock(&trace_types_lock);
2818 if (ret)
2819 return ret;
2820 }
2821
2822 *ppos += cnt;
2823
2824 return cnt;
2825}
2826
2827static int tracing_trace_options_open(struct inode *inode, struct file *file)
2828{
2829 if (tracing_disabled)
2830 return -ENODEV;
2831 return single_open(file, tracing_trace_options_show, NULL);
2832}
2833
2834static const struct file_operations tracing_iter_fops = {
2835 .open = tracing_trace_options_open,
2836 .read = seq_read,
2837 .llseek = seq_lseek,
2838 .release = single_release,
2839 .write = tracing_trace_options_write,
2840};
2841
2842static const char readme_msg[] =
2843 "tracing mini-HOWTO:\n\n"
2844 "# mount -t debugfs nodev /sys/kernel/debug\n\n"
2845 "# cat /sys/kernel/debug/tracing/available_tracers\n"
2846 "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
2847 "# cat /sys/kernel/debug/tracing/current_tracer\n"
2848 "nop\n"
2849 "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
2850 "# cat /sys/kernel/debug/tracing/current_tracer\n"
2851 "wakeup\n"
2852 "# cat /sys/kernel/debug/tracing/trace_options\n"
2853 "noprint-parent nosym-offset nosym-addr noverbose\n"
2854 "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
2855 "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
2856 "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
2857 "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
2858;
2859
2860static ssize_t
2861tracing_readme_read(struct file *filp, char __user *ubuf,
2862 size_t cnt, loff_t *ppos)
2863{
2864 return simple_read_from_buffer(ubuf, cnt, ppos,
2865 readme_msg, strlen(readme_msg));
2866}
2867
2868static const struct file_operations tracing_readme_fops = {
2869 .open = tracing_open_generic,
2870 .read = tracing_readme_read,
2871 .llseek = generic_file_llseek,
2872};
2873
2874static ssize_t
2875tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
2876 size_t cnt, loff_t *ppos)
2877{
2878 char *buf_comm;
2879 char *file_buf;
2880 char *buf;
2881 int len = 0;
2882 int pid;
2883 int i;
2884
2885 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
2886 if (!file_buf)
2887 return -ENOMEM;
2888
2889 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
2890 if (!buf_comm) {
2891 kfree(file_buf);
2892 return -ENOMEM;
2893 }
2894
2895 buf = file_buf;
2896
2897 for (i = 0; i < SAVED_CMDLINES; i++) {
2898 int r;
2899
2900 pid = map_cmdline_to_pid[i];
2901 if (pid == -1 || pid == NO_CMDLINE_MAP)
2902 continue;
2903
2904 trace_find_cmdline(pid, buf_comm);
2905 r = sprintf(buf, "%d %s\n", pid, buf_comm);
2906 buf += r;
2907 len += r;
2908 }
2909
2910 len = simple_read_from_buffer(ubuf, cnt, ppos,
2911 file_buf, len);
2912
2913 kfree(file_buf);
2914 kfree(buf_comm);
2915
2916 return len;
2917}
2918
2919static const struct file_operations tracing_saved_cmdlines_fops = {
2920 .open = tracing_open_generic,
2921 .read = tracing_saved_cmdlines_read,
2922 .llseek = generic_file_llseek,
2923};
2924
2925static ssize_t
2926tracing_ctrl_read(struct file *filp, char __user *ubuf,
2927 size_t cnt, loff_t *ppos)
2928{
2929 char buf[64];
2930 int r;
2931
2932 r = sprintf(buf, "%u\n", tracer_enabled);
2933 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2934}
2935
2936static ssize_t
2937tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2938 size_t cnt, loff_t *ppos)
2939{
2940 struct trace_array *tr = filp->private_data;
2941 unsigned long val;
2942 int ret;
2943
2944 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
2945 if (ret)
2946 return ret;
2947
2948 val = !!val;
2949
2950 mutex_lock(&trace_types_lock);
2951 if (tracer_enabled ^ val) {
2952
2953 /* Only need to warn if this is used to change the state */
2954 WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
2955
2956 if (val) {
2957 tracer_enabled = 1;
2958 if (current_trace->start)
2959 current_trace->start(tr);
2960 tracing_start();
2961 } else {
2962 tracer_enabled = 0;
2963 tracing_stop();
2964 if (current_trace->stop)
2965 current_trace->stop(tr);
2966 }
2967 }
2968 mutex_unlock(&trace_types_lock);
2969
2970 *ppos += cnt;
2971
2972 return cnt;
2973}
2974
2975static ssize_t
2976tracing_set_trace_read(struct file *filp, char __user *ubuf,
2977 size_t cnt, loff_t *ppos)
2978{
2979 char buf[MAX_TRACER_SIZE+2];
2980 int r;
2981
2982 mutex_lock(&trace_types_lock);
2983 if (current_trace)
2984 r = sprintf(buf, "%s\n", current_trace->name);
2985 else
2986 r = sprintf(buf, "\n");
2987 mutex_unlock(&trace_types_lock);
2988
2989 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2990}
2991
2992int tracer_init(struct tracer *t, struct trace_array *tr)
2993{
2994 tracing_reset_online_cpus(tr);
2995 return t->init(tr);
2996}
2997
2998static void set_buffer_entries(struct trace_array *tr, unsigned long val)
2999{
3000 int cpu;
3001 for_each_tracing_cpu(cpu)
3002 tr->data[cpu]->entries = val;
3003}
3004
3005static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
3006{
3007 int ret;
3008
3009 /*
3010 * If the kernel or the user changes the size of the ring buffer,
3011 * we use the size that was given and we can forget about
3012 * expanding it later.
3013 */
3014 ring_buffer_expanded = 1;
3015
3016 ret = ring_buffer_resize(global_trace.buffer, size, cpu);
3017 if (ret < 0)
3018 return ret;
3019
3020 if (!current_trace->use_max_tr)
3021 goto out;
3022
3023 ret = ring_buffer_resize(max_tr.buffer, size, cpu);
3024 if (ret < 0) {
3025 int r = 0;
3026
3027 if (cpu == RING_BUFFER_ALL_CPUS) {
3028 int i;
3029 for_each_tracing_cpu(i) {
3030 r = ring_buffer_resize(global_trace.buffer,
3031 global_trace.data[i]->entries,
3032 i);
3033 if (r < 0)
3034 break;
3035 }
3036 } else {
3037 r = ring_buffer_resize(global_trace.buffer,
3038 global_trace.data[cpu]->entries,
3039 cpu);
3040 }
3041
3042 if (r < 0) {
3043 /*
3044 * AARGH! We are left with a different
3045 * sized max buffer!!!!
3046 * The max buffer is our "snapshot" buffer.
3047 * When a tracer needs a snapshot (one of the
3048 * latency tracers), it swaps the max buffer
3049 * with the saved snapshot. We succeeded in
3050 * updating the size of the main buffer, but failed to
3051 * update the size of the max buffer. But when we tried
3052 * to reset the main buffer to the original size, we
3053 * failed there too. This is very unlikely to
3054 * happen, but if it does, warn and kill all
3055 * tracing.
3056 */
3057 WARN_ON(1);
3058 tracing_disabled = 1;
3059 }
3060 return ret;
3061 }
3062
3063 if (cpu == RING_BUFFER_ALL_CPUS)
3064 set_buffer_entries(&max_tr, size);
3065 else
3066 max_tr.data[cpu]->entries = size;
3067
3068 out:
3069 if (cpu == RING_BUFFER_ALL_CPUS)
3070 set_buffer_entries(&global_trace, size);
3071 else
3072 global_trace.data[cpu]->entries = size;
3073
3074 return ret;
3075}
3076
3077static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
3078{
3079 int ret = size;
3080
3081 mutex_lock(&trace_types_lock);
3082
3083 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3084 /* make sure this cpu is enabled in the mask */
3085 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3086 ret = -EINVAL;
3087 goto out;
3088 }
3089 }
3090
3091 ret = __tracing_resize_ring_buffer(size, cpu_id);
3092 if (ret < 0)
3093 ret = -ENOMEM;
3094
3095out:
3096 mutex_unlock(&trace_types_lock);
3097
3098 return ret;
3099}
3100
3101
3102/**
3103 * tracing_update_buffers - used by tracing facility to expand ring buffers
3104 *
3105 * To save memory on systems where tracing is configured in but never
3106 * used, the ring buffers are set to a minimum size. Once a user starts
3107 * to use the tracing facility, they need to grow to their default
3108 * size.
3109 *
3110 * This function is to be called when a tracer is about to be used.
3111 */
3112int tracing_update_buffers(void)
3113{
3114 int ret = 0;
3115
3116 mutex_lock(&trace_types_lock);
3117 if (!ring_buffer_expanded)
3118 ret = __tracing_resize_ring_buffer(trace_buf_size,
3119 RING_BUFFER_ALL_CPUS);
3120 mutex_unlock(&trace_types_lock);
3121
3122 return ret;
3123}
3124
3125struct trace_option_dentry;
3126
3127static struct trace_option_dentry *
3128create_trace_option_files(struct tracer *tracer);
3129
3130static void
3131destroy_trace_option_files(struct trace_option_dentry *topts);
3132
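/*
 * tracing_set_tracer - switch the current tracer to the one named @buf.
 *
 * This backs writes to the "current_tracer" debugfs file (see
 * tracing_set_trace_write() below). The ring buffer is expanded to its
 * default size if needed, the old tracer is reset, and the max
 * (snapshot) buffer is resized when the new tracer needs it.
 *
 * Illustrative usage from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug (the "nop" tracer is registered unconditionally in
 * this file; other tracer names depend on the kernel configuration):
 *
 *	# echo nop > /sys/kernel/debug/tracing/current_tracer
 *	# cat /sys/kernel/debug/tracing/current_tracer
 */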
3133static int tracing_set_tracer(const char *buf)
3134{
3135 static struct trace_option_dentry *topts;
3136 struct trace_array *tr = &global_trace;
3137 struct tracer *t;
3138 int ret = 0;
3139
3140 mutex_lock(&trace_types_lock);
3141
3142 if (!ring_buffer_expanded) {
3143 ret = __tracing_resize_ring_buffer(trace_buf_size,
3144 RING_BUFFER_ALL_CPUS);
3145 if (ret < 0)
3146 goto out;
3147 ret = 0;
3148 }
3149
3150 for (t = trace_types; t; t = t->next) {
3151 if (strcmp(t->name, buf) == 0)
3152 break;
3153 }
3154 if (!t) {
3155 ret = -EINVAL;
3156 goto out;
3157 }
3158 if (t == current_trace)
3159 goto out;
3160
3161 trace_branch_disable();
3162 if (current_trace && current_trace->reset)
3163 current_trace->reset(tr);
3164 if (current_trace && current_trace->use_max_tr) {
3165 /*
3166		 * We don't free the ring buffer. Instead, we resize it because
3167		 * the max_tr ring buffer has some state (e.g. ring->clock) and
3168		 * we want to preserve it.
3169 */
3170 ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
3171 set_buffer_entries(&max_tr, 1);
3172 }
3173 destroy_trace_option_files(topts);
3174
3175 current_trace = t;
3176
3177 topts = create_trace_option_files(current_trace);
3178 if (current_trace->use_max_tr) {
3179 int cpu;
3180 /* we need to make per cpu buffer sizes equivalent */
3181 for_each_tracing_cpu(cpu) {
3182 ret = ring_buffer_resize(max_tr.buffer,
3183 global_trace.data[cpu]->entries,
3184 cpu);
3185 if (ret < 0)
3186 goto out;
3187 max_tr.data[cpu]->entries =
3188 global_trace.data[cpu]->entries;
3189 }
3190 }
3191
3192 if (t->init) {
3193 ret = tracer_init(t, tr);
3194 if (ret)
3195 goto out;
3196 }
3197
3198 trace_branch_enable(tr);
3199 out:
3200 mutex_unlock(&trace_types_lock);
3201
3202 return ret;
3203}
3204
3205static ssize_t
3206tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3207 size_t cnt, loff_t *ppos)
3208{
3209 char buf[MAX_TRACER_SIZE+1];
3210 int i;
3211 size_t ret;
3212 int err;
3213
3214 ret = cnt;
3215
3216 if (cnt > MAX_TRACER_SIZE)
3217 cnt = MAX_TRACER_SIZE;
3218
3219 if (copy_from_user(&buf, ubuf, cnt))
3220 return -EFAULT;
3221
3222 buf[cnt] = 0;
3223
3224	/* strip trailing whitespace. */
3225 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3226 buf[i] = 0;
3227
3228 err = tracing_set_tracer(buf);
3229 if (err)
3230 return err;
3231
3232 *ppos += ret;
3233
3234 return ret;
3235}
3236
3237static ssize_t
3238tracing_max_lat_read(struct file *filp, char __user *ubuf,
3239 size_t cnt, loff_t *ppos)
3240{
3241 unsigned long *ptr = filp->private_data;
3242 char buf[64];
3243 int r;
3244
3245 r = snprintf(buf, sizeof(buf), "%ld\n",
3246 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
3247 if (r > sizeof(buf))
3248 r = sizeof(buf);
3249 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3250}
3251
3252static ssize_t
3253tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3254 size_t cnt, loff_t *ppos)
3255{
3256 unsigned long *ptr = filp->private_data;
3257 unsigned long val;
3258 int ret;
3259
3260 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3261 if (ret)
3262 return ret;
3263
3264 *ptr = val * 1000;
3265
3266 return cnt;
3267}
3268
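/*
 * The "trace_pipe" file is a consuming, blocking reader of the ring
 * buffer: entries read through it are removed from the buffer. Each
 * open gets its own iterator, along with a copy of the current tracer
 * so a concurrent tracer switch does not affect the reader.
 *
 * A minimal illustrative usage, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *	# cat /sys/kernel/debug/tracing/trace_pipe
 *
 * The read blocks (unless the file was opened O_NONBLOCK) until trace
 * data is available.
 */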
3269static int tracing_open_pipe(struct inode *inode, struct file *filp)
3270{
3271 long cpu_file = (long) inode->i_private;
3272 struct trace_iterator *iter;
3273 int ret = 0;
3274
3275 if (tracing_disabled)
3276 return -ENODEV;
3277
3278 mutex_lock(&trace_types_lock);
3279
3280 /* create a buffer to store the information to pass to userspace */
3281 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3282 if (!iter) {
3283 ret = -ENOMEM;
3284 goto out;
3285 }
3286
3287 /*
3288 * We make a copy of the current tracer to avoid concurrent
3289	 * changes to it while we are reading.
3290 */
3291 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3292 if (!iter->trace) {
3293 ret = -ENOMEM;
3294 goto fail;
3295 }
3296 if (current_trace)
3297 *iter->trace = *current_trace;
3298
3299 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3300 ret = -ENOMEM;
3301 goto fail;
3302 }
3303
3304 /* trace pipe does not show start of buffer */
3305 cpumask_setall(iter->started);
3306
3307 if (trace_flags & TRACE_ITER_LATENCY_FMT)
3308 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3309
3310 iter->cpu_file = cpu_file;
3311 iter->tr = &global_trace;
3312 mutex_init(&iter->mutex);
3313 filp->private_data = iter;
3314
3315 if (iter->trace->pipe_open)
3316 iter->trace->pipe_open(iter);
3317
3318 nonseekable_open(inode, filp);
3319out:
3320 mutex_unlock(&trace_types_lock);
3321 return ret;
3322
3323fail:
3324 kfree(iter->trace);
3325 kfree(iter);
3326 mutex_unlock(&trace_types_lock);
3327 return ret;
3328}
3329
3330static int tracing_release_pipe(struct inode *inode, struct file *file)
3331{
3332 struct trace_iterator *iter = file->private_data;
3333
3334 mutex_lock(&trace_types_lock);
3335
3336 if (iter->trace->pipe_close)
3337 iter->trace->pipe_close(iter);
3338
3339 mutex_unlock(&trace_types_lock);
3340
3341 free_cpumask_var(iter->started);
3342 mutex_destroy(&iter->mutex);
3343 kfree(iter->trace);
3344 kfree(iter);
3345
3346 return 0;
3347}
3348
3349static unsigned int
3350tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3351{
3352 struct trace_iterator *iter = filp->private_data;
3353
3354 if (trace_flags & TRACE_ITER_BLOCK) {
3355 /*
3356 * Always select as readable when in blocking mode
3357 */
3358 return POLLIN | POLLRDNORM;
3359 } else {
3360 if (!trace_empty(iter))
3361 return POLLIN | POLLRDNORM;
3362 poll_wait(filp, &trace_wait, poll_table);
3363 if (!trace_empty(iter))
3364 return POLLIN | POLLRDNORM;
3365
3366 return 0;
3367 }
3368}
3369
3370
3371void default_wait_pipe(struct trace_iterator *iter)
3372{
3373 DEFINE_WAIT(wait);
3374
3375 prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
3376
3377 if (trace_empty(iter))
3378 schedule();
3379
3380 finish_wait(&trace_wait, &wait);
3381}
3382
3383/*
3384 * This is a makeshift waitqueue.
3385 * A tracer might use this callback in some rare cases:
3386 *
3387 *  1) the current tracer might hold the runqueue lock when it wakes up
3388 *     a reader, hence a deadlock (sched, function, and function graph tracers)
3389 *  2) the function tracers trace all functions, and we don't want
3390 *     the overhead of calling wake_up and friends
3391 *     (and tracing them too)
3392 *
3393 * Anyway, this is a really primitive wakeup.
3394 */
3395void poll_wait_pipe(struct trace_iterator *iter)
3396{
3397 set_current_state(TASK_INTERRUPTIBLE);
3398 /* sleep for 100 msecs, and try again. */
3399 schedule_timeout(HZ / 10);
3400}
3401
3402/* Must be called with iter->mutex held. */
3403static int tracing_wait_pipe(struct file *filp)
3404{
3405 struct trace_iterator *iter = filp->private_data;
3406
3407 while (trace_empty(iter)) {
3408
3409 if ((filp->f_flags & O_NONBLOCK)) {
3410 return -EAGAIN;
3411 }
3412
3413 mutex_unlock(&iter->mutex);
3414
3415 iter->trace->wait_pipe(iter);
3416
3417 mutex_lock(&iter->mutex);
3418
3419 if (signal_pending(current))
3420 return -EINTR;
3421
3422 /*
3423 * We block until we read something and tracing is disabled.
3424 * We still block if tracing is disabled, but we have never
3425 * read anything. This allows a user to cat this file, and
3426 * then enable tracing. But after we have read something,
3427 * we give an EOF when tracing is again disabled.
3428 *
3429 * iter->pos will be 0 if we haven't read anything.
3430 */
3431 if (!tracer_enabled && iter->pos)
3432 break;
3433 }
3434
3435 return 1;
3436}
3437
3438/*
3439 * Consumer reader.
3440 */
3441static ssize_t
3442tracing_read_pipe(struct file *filp, char __user *ubuf,
3443 size_t cnt, loff_t *ppos)
3444{
3445 struct trace_iterator *iter = filp->private_data;
3446 static struct tracer *old_tracer;
3447 ssize_t sret;
3448
3449 /* return any leftover data */
3450 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3451 if (sret != -EBUSY)
3452 return sret;
3453
3454 trace_seq_init(&iter->seq);
3455
3456 /* copy the tracer to avoid using a global lock all around */
3457 mutex_lock(&trace_types_lock);
3458 if (unlikely(old_tracer != current_trace && current_trace)) {
3459 old_tracer = current_trace;
3460 *iter->trace = *current_trace;
3461 }
3462 mutex_unlock(&trace_types_lock);
3463
3464 /*
3465	 * Avoid more than one consumer on a single file descriptor.
3466	 * This is just a matter of trace coherency; the ring buffer itself
3467	 * is protected.
3468 */
3469 mutex_lock(&iter->mutex);
3470 if (iter->trace->read) {
3471 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
3472 if (sret)
3473 goto out;
3474 }
3475
3476waitagain:
3477 sret = tracing_wait_pipe(filp);
3478 if (sret <= 0)
3479 goto out;
3480
3481 /* stop when tracing is finished */
3482 if (trace_empty(iter)) {
3483 sret = 0;
3484 goto out;
3485 }
3486
3487 if (cnt >= PAGE_SIZE)
3488 cnt = PAGE_SIZE - 1;
3489
3490 /* reset all but tr, trace, and overruns */
3491 memset(&iter->seq, 0,
3492 sizeof(struct trace_iterator) -
3493 offsetof(struct trace_iterator, seq));
3494 iter->pos = -1;
3495
3496 trace_event_read_lock();
3497 trace_access_lock(iter->cpu_file);
3498 while (trace_find_next_entry_inc(iter) != NULL) {
3499 enum print_line_t ret;
3500 int len = iter->seq.len;
3501
3502 ret = print_trace_line(iter);
3503 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3504 /* don't print partial lines */
3505 iter->seq.len = len;
3506 break;
3507 }
3508 if (ret != TRACE_TYPE_NO_CONSUME)
3509 trace_consume(iter);
3510
3511 if (iter->seq.len >= cnt)
3512 break;
3513
3514 /*
3515		 * Setting the full flag means we reached the trace_seq buffer
3516		 * size and should have left via the partial-output condition above.
3517		 * One of the trace_seq_* functions is not being used properly.
3518 */
3519 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
3520 iter->ent->type);
3521 }
3522 trace_access_unlock(iter->cpu_file);
3523 trace_event_read_unlock();
3524
3525 /* Now copy what we have to the user */
3526 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3527 if (iter->seq.readpos >= iter->seq.len)
3528 trace_seq_init(&iter->seq);
3529
3530 /*
3531 * If there was nothing to send to user, in spite of consuming trace
3532 * entries, go back to wait for more entries.
3533 */
3534 if (sret == -EBUSY)
3535 goto waitagain;
3536
3537out:
3538 mutex_unlock(&iter->mutex);
3539
3540 return sret;
3541}
3542
3543static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
3544 struct pipe_buffer *buf)
3545{
3546 __free_page(buf->page);
3547}
3548
3549static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3550 unsigned int idx)
3551{
3552 __free_page(spd->pages[idx]);
3553}
3554
3555static const struct pipe_buf_operations tracing_pipe_buf_ops = {
3556 .can_merge = 0,
3557 .map = generic_pipe_buf_map,
3558 .unmap = generic_pipe_buf_unmap,
3559 .confirm = generic_pipe_buf_confirm,
3560 .release = tracing_pipe_buf_release,
3561 .steal = generic_pipe_buf_steal,
3562 .get = generic_pipe_buf_get,
3563};
3564
3565static size_t
3566tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
3567{
3568 size_t count;
3569 int ret;
3570
3571 /* Seq buffer is page-sized, exactly what we need. */
3572 for (;;) {
3573 count = iter->seq.len;
3574 ret = print_trace_line(iter);
3575 count = iter->seq.len - count;
3576 if (rem < count) {
3577 rem = 0;
3578 iter->seq.len -= count;
3579 break;
3580 }
3581 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3582 iter->seq.len -= count;
3583 break;
3584 }
3585
3586 if (ret != TRACE_TYPE_NO_CONSUME)
3587 trace_consume(iter);
3588 rem -= count;
3589 if (!trace_find_next_entry_inc(iter)) {
3590 rem = 0;
3591 iter->ent = NULL;
3592 break;
3593 }
3594 }
3595
3596 return rem;
3597}
3598
3599static ssize_t tracing_splice_read_pipe(struct file *filp,
3600 loff_t *ppos,
3601 struct pipe_inode_info *pipe,
3602 size_t len,
3603 unsigned int flags)
3604{
3605 struct page *pages_def[PIPE_DEF_BUFFERS];
3606 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3607 struct trace_iterator *iter = filp->private_data;
3608 struct splice_pipe_desc spd = {
3609 .pages = pages_def,
3610 .partial = partial_def,
3611 .nr_pages = 0, /* This gets updated below. */
3612 .nr_pages_max = PIPE_DEF_BUFFERS,
3613 .flags = flags,
3614 .ops = &tracing_pipe_buf_ops,
3615 .spd_release = tracing_spd_release_pipe,
3616 };
3617 static struct tracer *old_tracer;
3618 ssize_t ret;
3619 size_t rem;
3620 unsigned int i;
3621
3622 if (splice_grow_spd(pipe, &spd))
3623 return -ENOMEM;
3624
3625 /* copy the tracer to avoid using a global lock all around */
3626 mutex_lock(&trace_types_lock);
3627 if (unlikely(old_tracer != current_trace && current_trace)) {
3628 old_tracer = current_trace;
3629 *iter->trace = *current_trace;
3630 }
3631 mutex_unlock(&trace_types_lock);
3632
3633 mutex_lock(&iter->mutex);
3634
3635 if (iter->trace->splice_read) {
3636 ret = iter->trace->splice_read(iter, filp,
3637 ppos, pipe, len, flags);
3638 if (ret)
3639 goto out_err;
3640 }
3641
3642 ret = tracing_wait_pipe(filp);
3643 if (ret <= 0)
3644 goto out_err;
3645
3646 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3647 ret = -EFAULT;
3648 goto out_err;
3649 }
3650
3651 trace_event_read_lock();
3652 trace_access_lock(iter->cpu_file);
3653
3654 /* Fill as many pages as possible. */
3655 for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
3656 spd.pages[i] = alloc_page(GFP_KERNEL);
3657 if (!spd.pages[i])
3658 break;
3659
3660 rem = tracing_fill_pipe_page(rem, iter);
3661
3662 /* Copy the data into the page, so we can start over. */
3663 ret = trace_seq_to_buffer(&iter->seq,
3664 page_address(spd.pages[i]),
3665 iter->seq.len);
3666 if (ret < 0) {
3667 __free_page(spd.pages[i]);
3668 break;
3669 }
3670 spd.partial[i].offset = 0;
3671 spd.partial[i].len = iter->seq.len;
3672
3673 trace_seq_init(&iter->seq);
3674 }
3675
3676 trace_access_unlock(iter->cpu_file);
3677 trace_event_read_unlock();
3678 mutex_unlock(&iter->mutex);
3679
3680 spd.nr_pages = i;
3681
3682 ret = splice_to_pipe(pipe, &spd);
3683out:
3684 splice_shrink_spd(&spd);
3685 return ret;
3686
3687out_err:
3688 mutex_unlock(&iter->mutex);
3689 goto out;
3690}
3691
3692struct ftrace_entries_info {
3693 struct trace_array *tr;
3694 int cpu;
3695};
3696
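/*
 * The "buffer_size_kb" files (one per cpu, plus a top-level one that
 * covers all cpus) expose the ring buffer size in kilobytes. Writing a
 * value resizes the buffer through tracing_resize_ring_buffer().
 *
 * Illustrative usage, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	# echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 *	# cat /sys/kernel/debug/tracing/buffer_size_kb
 */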
3697static int tracing_entries_open(struct inode *inode, struct file *filp)
3698{
3699 struct ftrace_entries_info *info;
3700
3701 if (tracing_disabled)
3702 return -ENODEV;
3703
3704 info = kzalloc(sizeof(*info), GFP_KERNEL);
3705 if (!info)
3706 return -ENOMEM;
3707
3708 info->tr = &global_trace;
3709 info->cpu = (unsigned long)inode->i_private;
3710
3711 filp->private_data = info;
3712
3713 return 0;
3714}
3715
3716static ssize_t
3717tracing_entries_read(struct file *filp, char __user *ubuf,
3718 size_t cnt, loff_t *ppos)
3719{
3720 struct ftrace_entries_info *info = filp->private_data;
3721 struct trace_array *tr = info->tr;
3722 char buf[64];
3723 int r = 0;
3724 ssize_t ret;
3725
3726 mutex_lock(&trace_types_lock);
3727
3728 if (info->cpu == RING_BUFFER_ALL_CPUS) {
3729 int cpu, buf_size_same;
3730 unsigned long size;
3731
3732 size = 0;
3733 buf_size_same = 1;
3734		/* check if all cpu buffer sizes are the same */
3735 for_each_tracing_cpu(cpu) {
3736 /* fill in the size from first enabled cpu */
3737 if (size == 0)
3738 size = tr->data[cpu]->entries;
3739 if (size != tr->data[cpu]->entries) {
3740 buf_size_same = 0;
3741 break;
3742 }
3743 }
3744
3745 if (buf_size_same) {
3746 if (!ring_buffer_expanded)
3747 r = sprintf(buf, "%lu (expanded: %lu)\n",
3748 size >> 10,
3749 trace_buf_size >> 10);
3750 else
3751 r = sprintf(buf, "%lu\n", size >> 10);
3752 } else
3753 r = sprintf(buf, "X\n");
3754 } else
3755 r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
3756
3757 mutex_unlock(&trace_types_lock);
3758
3759 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3760 return ret;
3761}
3762
3763static ssize_t
3764tracing_entries_write(struct file *filp, const char __user *ubuf,
3765 size_t cnt, loff_t *ppos)
3766{
3767 struct ftrace_entries_info *info = filp->private_data;
3768 unsigned long val;
3769 int ret;
3770
3771 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3772 if (ret)
3773 return ret;
3774
3775 /* must have at least 1 entry */
3776 if (!val)
3777 return -EINVAL;
3778
3779 /* value is in KB */
3780 val <<= 10;
3781
3782 ret = tracing_resize_ring_buffer(val, info->cpu);
3783 if (ret < 0)
3784 return ret;
3785
3786 *ppos += cnt;
3787
3788 return cnt;
3789}
3790
3791static int
3792tracing_entries_release(struct inode *inode, struct file *filp)
3793{
3794 struct ftrace_entries_info *info = filp->private_data;
3795
3796 kfree(info);
3797
3798 return 0;
3799}
3800
3801static ssize_t
3802tracing_total_entries_read(struct file *filp, char __user *ubuf,
3803 size_t cnt, loff_t *ppos)
3804{
3805 struct trace_array *tr = filp->private_data;
3806 char buf[64];
3807 int r, cpu;
3808 unsigned long size = 0, expanded_size = 0;
3809
3810 mutex_lock(&trace_types_lock);
3811 for_each_tracing_cpu(cpu) {
3812 size += tr->data[cpu]->entries >> 10;
3813 if (!ring_buffer_expanded)
3814 expanded_size += trace_buf_size >> 10;
3815 }
3816 if (ring_buffer_expanded)
3817 r = sprintf(buf, "%lu\n", size);
3818 else
3819 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
3820 mutex_unlock(&trace_types_lock);
3821
3822 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3823}
3824
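/*
 * The "free_buffer" file resizes the ring buffer to 0 when it is
 * closed; if the TRACE_ITER_STOP_ON_FREE flag is set, tracing is
 * turned off first. Writes are accepted but ignored, so that shell
 * redirection such as "echo > free_buffer" works without errors.
 */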
3825static ssize_t
3826tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
3827 size_t cnt, loff_t *ppos)
3828{
3829 /*
3830	 * There is no need to read what the user has written; this function
3831	 * is just there to make sure that there is no error when "echo" is used.
3832 */
3833
3834 *ppos += cnt;
3835
3836 return cnt;
3837}
3838
3839static int
3840tracing_free_buffer_release(struct inode *inode, struct file *filp)
3841{
3842 /* disable tracing ? */
3843 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
3844 tracing_off();
3845 /* resize the ring buffer to 0 */
3846 tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
3847
3848 return 0;
3849}
3850
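/*
 * tracing_mark_write() backs the "trace_marker" file: anything written
 * to it is injected into the ring buffer as a TRACE_PRINT entry, which
 * lets userspace annotate a trace with its own markers.
 *
 * Illustrative usage, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	# echo "hello from user space" > /sys/kernel/debug/tracing/trace_marker
 */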
3851static ssize_t
3852tracing_mark_write(struct file *filp, const char __user *ubuf,
3853 size_t cnt, loff_t *fpos)
3854{
3855 unsigned long addr = (unsigned long)ubuf;
3856 struct ring_buffer_event *event;
3857 struct ring_buffer *buffer;
3858 struct print_entry *entry;
3859 unsigned long irq_flags;
3860 struct page *pages[2];
3861 void *map_page[2];
3862 int nr_pages = 1;
3863 ssize_t written;
3864 int offset;
3865 int size;
3866 int len;
3867 int ret;
3868 int i;
3869
3870 if (tracing_disabled)
3871 return -EINVAL;
3872
3873 if (cnt > TRACE_BUF_SIZE)
3874 cnt = TRACE_BUF_SIZE;
3875
3876 /*
3877	 * Userspace is injecting traces into the kernel trace buffer.
3878	 * We want to be as non-intrusive as possible.
3879	 * To do so, we do not want to allocate any special buffers
3880	 * or take any locks, but instead write the userspace data
3881	 * straight into the ring buffer.
3882	 *
3883	 * First we need to pin the userspace buffer into memory, which
3884	 * it most likely already is, because userspace just referenced it.
3885	 * But there's no guarantee of that. By using get_user_pages_fast()
3886	 * and kmap_atomic/kunmap_atomic() we can get access to the
3887	 * pages directly. We then write the data directly into the
3888	 * ring buffer.
3889 */
3890 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
3891
3892 /* check if we cross pages */
3893 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
3894 nr_pages = 2;
3895
3896 offset = addr & (PAGE_SIZE - 1);
3897 addr &= PAGE_MASK;
3898
3899 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
3900 if (ret < nr_pages) {
3901 while (--ret >= 0)
3902 put_page(pages[ret]);
3903 written = -EFAULT;
3904 goto out;
3905 }
3906
3907 for (i = 0; i < nr_pages; i++)
3908 map_page[i] = kmap_atomic(pages[i]);
3909
3910 local_save_flags(irq_flags);
3911 size = sizeof(*entry) + cnt + 2; /* possible \n added */
3912 buffer = global_trace.buffer;
3913 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3914 irq_flags, preempt_count());
3915 if (!event) {
3916 /* Ring buffer disabled, return as if not open for write */
3917 written = -EBADF;
3918 goto out_unlock;
3919 }
3920
3921 entry = ring_buffer_event_data(event);
3922 entry->ip = _THIS_IP_;
3923
3924 if (nr_pages == 2) {
3925 len = PAGE_SIZE - offset;
3926 memcpy(&entry->buf, map_page[0] + offset, len);
3927 memcpy(&entry->buf[len], map_page[1], cnt - len);
3928 } else
3929 memcpy(&entry->buf, map_page[0] + offset, cnt);
3930
3931 if (entry->buf[cnt - 1] != '\n') {
3932 entry->buf[cnt] = '\n';
3933 entry->buf[cnt + 1] = '\0';
3934 } else
3935 entry->buf[cnt] = '\0';
3936
3937 ring_buffer_unlock_commit(buffer, event);
3938
3939 written = cnt;
3940
3941 *fpos += written;
3942
3943 out_unlock:
3944 for (i = 0; i < nr_pages; i++){
3945 kunmap_atomic(map_page[i]);
3946 put_page(pages[i]);
3947 }
3948 out:
3949 return written;
3950}
3951
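/*
 * The "trace_clock" file selects which clock the ring buffer uses for
 * timestamps. Reading it lists the available clocks with the current
 * one in brackets; writing one of the listed names switches both the
 * main and the max buffer to that clock.
 *
 * Illustrative usage, assuming debugfs is mounted at /sys/kernel/debug
 * (the set of valid names comes from the trace_clocks[] table):
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	# echo <clock-name> > /sys/kernel/debug/tracing/trace_clock
 */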
3952static int tracing_clock_show(struct seq_file *m, void *v)
3953{
3954 int i;
3955
3956 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3957 seq_printf(m,
3958 "%s%s%s%s", i ? " " : "",
3959 i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3960 i == trace_clock_id ? "]" : "");
3961 seq_putc(m, '\n');
3962
3963 return 0;
3964}
3965
3966static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3967 size_t cnt, loff_t *fpos)
3968{
3969 char buf[64];
3970 const char *clockstr;
3971 int i;
3972
3973 if (cnt >= sizeof(buf))
3974 return -EINVAL;
3975
3976 if (copy_from_user(&buf, ubuf, cnt))
3977 return -EFAULT;
3978
3979 buf[cnt] = 0;
3980
3981 clockstr = strstrip(buf);
3982
3983 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
3984 if (strcmp(trace_clocks[i].name, clockstr) == 0)
3985 break;
3986 }
3987 if (i == ARRAY_SIZE(trace_clocks))
3988 return -EINVAL;
3989
3990 trace_clock_id = i;
3991
3992 mutex_lock(&trace_types_lock);
3993
3994 ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
3995 if (max_tr.buffer)
3996 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
3997
3998 mutex_unlock(&trace_types_lock);
3999
4000 *fpos += cnt;
4001
4002 return cnt;
4003}
4004
4005static int tracing_clock_open(struct inode *inode, struct file *file)
4006{
4007 if (tracing_disabled)
4008 return -ENODEV;
4009 return single_open(file, tracing_clock_show, NULL);
4010}
4011
4012static const struct file_operations tracing_max_lat_fops = {
4013 .open = tracing_open_generic,
4014 .read = tracing_max_lat_read,
4015 .write = tracing_max_lat_write,
4016 .llseek = generic_file_llseek,
4017};
4018
4019static const struct file_operations tracing_ctrl_fops = {
4020 .open = tracing_open_generic,
4021 .read = tracing_ctrl_read,
4022 .write = tracing_ctrl_write,
4023 .llseek = generic_file_llseek,
4024};
4025
4026static const struct file_operations set_tracer_fops = {
4027 .open = tracing_open_generic,
4028 .read = tracing_set_trace_read,
4029 .write = tracing_set_trace_write,
4030 .llseek = generic_file_llseek,
4031};
4032
4033static const struct file_operations tracing_pipe_fops = {
4034 .open = tracing_open_pipe,
4035 .poll = tracing_poll_pipe,
4036 .read = tracing_read_pipe,
4037 .splice_read = tracing_splice_read_pipe,
4038 .release = tracing_release_pipe,
4039 .llseek = no_llseek,
4040};
4041
4042static const struct file_operations tracing_entries_fops = {
4043 .open = tracing_entries_open,
4044 .read = tracing_entries_read,
4045 .write = tracing_entries_write,
4046 .release = tracing_entries_release,
4047 .llseek = generic_file_llseek,
4048};
4049
4050static const struct file_operations tracing_total_entries_fops = {
4051 .open = tracing_open_generic,
4052 .read = tracing_total_entries_read,
4053 .llseek = generic_file_llseek,
4054};
4055
4056static const struct file_operations tracing_free_buffer_fops = {
4057 .write = tracing_free_buffer_write,
4058 .release = tracing_free_buffer_release,
4059};
4060
4061static const struct file_operations tracing_mark_fops = {
4062 .open = tracing_open_generic,
4063 .write = tracing_mark_write,
4064 .llseek = generic_file_llseek,
4065};
4066
4067static const struct file_operations trace_clock_fops = {
4068 .open = tracing_clock_open,
4069 .read = seq_read,
4070 .llseek = seq_lseek,
4071 .release = single_release,
4072 .write = tracing_clock_write,
4073};
4074
4075struct ftrace_buffer_info {
4076 struct trace_array *tr;
4077 void *spare;
4078 int cpu;
4079 unsigned int read;
4080};
4081
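/*
 * The per-cpu "trace_pipe_raw" files expose the ring buffer pages in
 * their raw binary format. read() copies data out one page at a time
 * through a spare page, while splice_read() hands complete ring buffer
 * pages to a pipe, which allows fast extraction of trace data.
 */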
4082static int tracing_buffers_open(struct inode *inode, struct file *filp)
4083{
4084 int cpu = (int)(long)inode->i_private;
4085 struct ftrace_buffer_info *info;
4086
4087 if (tracing_disabled)
4088 return -ENODEV;
4089
4090 info = kzalloc(sizeof(*info), GFP_KERNEL);
4091 if (!info)
4092 return -ENOMEM;
4093
4094 info->tr = &global_trace;
4095 info->cpu = cpu;
4096 info->spare = NULL;
4097 /* Force reading ring buffer for first read */
4098 info->read = (unsigned int)-1;
4099
4100 filp->private_data = info;
4101
4102 return nonseekable_open(inode, filp);
4103}
4104
4105static ssize_t
4106tracing_buffers_read(struct file *filp, char __user *ubuf,
4107 size_t count, loff_t *ppos)
4108{
4109 struct ftrace_buffer_info *info = filp->private_data;
4110 ssize_t ret;
4111 size_t size;
4112
4113 if (!count)
4114 return 0;
4115
4116 if (!info->spare)
4117 info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
4118 if (!info->spare)
4119 return -ENOMEM;
4120
4121 /* Do we have previous read data to read? */
4122 if (info->read < PAGE_SIZE)
4123 goto read;
4124
4125 trace_access_lock(info->cpu);
4126 ret = ring_buffer_read_page(info->tr->buffer,
4127 &info->spare,
4128 count,
4129 info->cpu, 0);
4130 trace_access_unlock(info->cpu);
4131 if (ret < 0)
4132 return 0;
4133
4134 info->read = 0;
4135
4136read:
4137 size = PAGE_SIZE - info->read;
4138 if (size > count)
4139 size = count;
4140
4141 ret = copy_to_user(ubuf, info->spare + info->read, size);
4142 if (ret == size)
4143 return -EFAULT;
4144 size -= ret;
4145
4146 *ppos += size;
4147 info->read += size;
4148
4149 return size;
4150}
4151
4152static int tracing_buffers_release(struct inode *inode, struct file *file)
4153{
4154 struct ftrace_buffer_info *info = file->private_data;
4155
4156 if (info->spare)
4157 ring_buffer_free_read_page(info->tr->buffer, info->spare);
4158 kfree(info);
4159
4160 return 0;
4161}
4162
4163struct buffer_ref {
4164 struct ring_buffer *buffer;
4165 void *page;
4166 int ref;
4167};
4168
4169static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
4170 struct pipe_buffer *buf)
4171{
4172 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4173
4174 if (--ref->ref)
4175 return;
4176
4177 ring_buffer_free_read_page(ref->buffer, ref->page);
4178 kfree(ref);
4179 buf->private = 0;
4180}
4181
4182static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
4183 struct pipe_buffer *buf)
4184{
4185 return 1;
4186}
4187
4188static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
4189 struct pipe_buffer *buf)
4190{
4191 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4192
4193 ref->ref++;
4194}
4195
4196/* Pipe buffer operations for a buffer. */
4197static const struct pipe_buf_operations buffer_pipe_buf_ops = {
4198 .can_merge = 0,
4199 .map = generic_pipe_buf_map,
4200 .unmap = generic_pipe_buf_unmap,
4201 .confirm = generic_pipe_buf_confirm,
4202 .release = buffer_pipe_buf_release,
4203 .steal = buffer_pipe_buf_steal,
4204 .get = buffer_pipe_buf_get,
4205};
4206
4207/*
4208 * Callback from splice_to_pipe(); used to release some pages
4209 * at the end of the spd in case we errored out while filling the pipe.
4210 */
4211static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
4212{
4213 struct buffer_ref *ref =
4214 (struct buffer_ref *)spd->partial[i].private;
4215
4216 if (--ref->ref)
4217 return;
4218
4219 ring_buffer_free_read_page(ref->buffer, ref->page);
4220 kfree(ref);
4221 spd->partial[i].private = 0;
4222}
4223
4224static ssize_t
4225tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4226 struct pipe_inode_info *pipe, size_t len,
4227 unsigned int flags)
4228{
4229 struct ftrace_buffer_info *info = file->private_data;
4230 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4231 struct page *pages_def[PIPE_DEF_BUFFERS];
4232 struct splice_pipe_desc spd = {
4233 .pages = pages_def,
4234 .partial = partial_def,
4235 .nr_pages_max = PIPE_DEF_BUFFERS,
4236 .flags = flags,
4237 .ops = &buffer_pipe_buf_ops,
4238 .spd_release = buffer_spd_release,
4239 };
4240 struct buffer_ref *ref;
4241 int entries, size, i;
4242 size_t ret;
4243
4244 if (splice_grow_spd(pipe, &spd))
4245 return -ENOMEM;
4246
4247 if (*ppos & (PAGE_SIZE - 1)) {
4248 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
4249 ret = -EINVAL;
4250 goto out;
4251 }
4252
4253 if (len & (PAGE_SIZE - 1)) {
4254 WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
4255 if (len < PAGE_SIZE) {
4256 ret = -EINVAL;
4257 goto out;
4258 }
4259 len &= PAGE_MASK;
4260 }
4261
4262 trace_access_lock(info->cpu);
4263 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4264
4265 for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
4266 struct page *page;
4267 int r;
4268
4269 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
4270 if (!ref)
4271 break;
4272
4273 ref->ref = 1;
4274 ref->buffer = info->tr->buffer;
4275 ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
4276 if (!ref->page) {
4277 kfree(ref);
4278 break;
4279 }
4280
4281 r = ring_buffer_read_page(ref->buffer, &ref->page,
4282 len, info->cpu, 1);
4283 if (r < 0) {
4284 ring_buffer_free_read_page(ref->buffer, ref->page);
4285 kfree(ref);
4286 break;
4287 }
4288
4289 /*
4290		 * Zero out any leftover data; this page is going to
4291		 * user land.
4292 */
4293 size = ring_buffer_page_len(ref->page);
4294 if (size < PAGE_SIZE)
4295 memset(ref->page + size, 0, PAGE_SIZE - size);
4296
4297 page = virt_to_page(ref->page);
4298
4299 spd.pages[i] = page;
4300 spd.partial[i].len = PAGE_SIZE;
4301 spd.partial[i].offset = 0;
4302 spd.partial[i].private = (unsigned long)ref;
4303 spd.nr_pages++;
4304 *ppos += PAGE_SIZE;
4305
4306 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4307 }
4308
4309 trace_access_unlock(info->cpu);
4310 spd.nr_pages = i;
4311
4312 /* did we read anything? */
4313 if (!spd.nr_pages) {
4314 if (flags & SPLICE_F_NONBLOCK)
4315 ret = -EAGAIN;
4316 else
4317 ret = 0;
4318 /* TODO: block */
4319 goto out;
4320 }
4321
4322 ret = splice_to_pipe(pipe, &spd);
4323 splice_shrink_spd(&spd);
4324out:
4325 return ret;
4326}
4327
4328static const struct file_operations tracing_buffers_fops = {
4329 .open = tracing_buffers_open,
4330 .read = tracing_buffers_read,
4331 .release = tracing_buffers_release,
4332 .splice_read = tracing_buffers_splice_read,
4333 .llseek = no_llseek,
4334};
4335
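/*
 * The per-cpu "stats" file reports ring buffer statistics for one cpu:
 * the number of entries, overruns, commit overruns, bytes, the oldest
 * event timestamp and the current timestamp.
 */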
4336static ssize_t
4337tracing_stats_read(struct file *filp, char __user *ubuf,
4338 size_t count, loff_t *ppos)
4339{
4340 unsigned long cpu = (unsigned long)filp->private_data;
4341 struct trace_array *tr = &global_trace;
4342 struct trace_seq *s;
4343 unsigned long cnt;
4344 unsigned long long t;
4345 unsigned long usec_rem;
4346
4347 s = kmalloc(sizeof(*s), GFP_KERNEL);
4348 if (!s)
4349 return -ENOMEM;
4350
4351 trace_seq_init(s);
4352
4353 cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
4354 trace_seq_printf(s, "entries: %ld\n", cnt);
4355
4356 cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
4357 trace_seq_printf(s, "overrun: %ld\n", cnt);
4358
4359 cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
4360 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
4361
4362 cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
4363 trace_seq_printf(s, "bytes: %ld\n", cnt);
4364
4365 t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
4366 usec_rem = do_div(t, USEC_PER_SEC);
4367 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
4368
4369 t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
4370 usec_rem = do_div(t, USEC_PER_SEC);
4371 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
4372
4373 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
4374
4375 kfree(s);
4376
4377 return count;
4378}
4379
4380static const struct file_operations tracing_stats_fops = {
4381 .open = tracing_open_generic,
4382 .read = tracing_stats_read,
4383 .llseek = generic_file_llseek,
4384};
4385
4386#ifdef CONFIG_DYNAMIC_FTRACE
4387
4388int __weak ftrace_arch_read_dyn_info(char *buf, int size)
4389{
4390 return 0;
4391}
4392
4393static ssize_t
4394tracing_read_dyn_info(struct file *filp, char __user *ubuf,
4395 size_t cnt, loff_t *ppos)
4396{
4397 static char ftrace_dyn_info_buffer[1024];
4398 static DEFINE_MUTEX(dyn_info_mutex);
4399 unsigned long *p = filp->private_data;
4400 char *buf = ftrace_dyn_info_buffer;
4401 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
4402 int r;
4403
4404 mutex_lock(&dyn_info_mutex);
4405 r = sprintf(buf, "%ld ", *p);
4406
4407 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
4408 buf[r++] = '\n';
4409
4410 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4411
4412 mutex_unlock(&dyn_info_mutex);
4413
4414 return r;
4415}
4416
4417static const struct file_operations tracing_dyn_info_fops = {
4418 .open = tracing_open_generic,
4419 .read = tracing_read_dyn_info,
4420 .llseek = generic_file_llseek,
4421};
4422#endif
4423
4424static struct dentry *d_tracer;
4425
4426struct dentry *tracing_init_dentry(void)
4427{
4428 static int once;
4429
4430 if (d_tracer)
4431 return d_tracer;
4432
4433 if (!debugfs_initialized())
4434 return NULL;
4435
4436 d_tracer = debugfs_create_dir("tracing", NULL);
4437
4438 if (!d_tracer && !once) {
4439 once = 1;
4440 pr_warning("Could not create debugfs directory 'tracing'\n");
4441 return NULL;
4442 }
4443
4444 return d_tracer;
4445}
4446
4447static struct dentry *d_percpu;
4448
4449struct dentry *tracing_dentry_percpu(void)
4450{
4451 static int once;
4452 struct dentry *d_tracer;
4453
4454 if (d_percpu)
4455 return d_percpu;
4456
4457 d_tracer = tracing_init_dentry();
4458
4459 if (!d_tracer)
4460 return NULL;
4461
4462 d_percpu = debugfs_create_dir("per_cpu", d_tracer);
4463
4464 if (!d_percpu && !once) {
4465 once = 1;
4466 pr_warning("Could not create debugfs directory 'per_cpu'\n");
4467 return NULL;
4468 }
4469
4470 return d_percpu;
4471}
4472
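/*
 * Create the per_cpu/cpuN debugfs directory for one cpu and populate it
 * with the per-cpu views: trace_pipe, trace, trace_pipe_raw, stats and
 * buffer_size_kb.
 */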
4473static void tracing_init_debugfs_percpu(long cpu)
4474{
4475 struct dentry *d_percpu = tracing_dentry_percpu();
4476 struct dentry *d_cpu;
4477 char cpu_dir[30]; /* 30 characters should be more than enough */
4478
4479 if (!d_percpu)
4480 return;
4481
4482 snprintf(cpu_dir, 30, "cpu%ld", cpu);
4483 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
4484 if (!d_cpu) {
4485 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
4486 return;
4487 }
4488
4489 /* per cpu trace_pipe */
4490 trace_create_file("trace_pipe", 0444, d_cpu,
4491 (void *) cpu, &tracing_pipe_fops);
4492
4493 /* per cpu trace */
4494 trace_create_file("trace", 0644, d_cpu,
4495 (void *) cpu, &tracing_fops);
4496
4497 trace_create_file("trace_pipe_raw", 0444, d_cpu,
4498 (void *) cpu, &tracing_buffers_fops);
4499
4500 trace_create_file("stats", 0444, d_cpu,
4501 (void *) cpu, &tracing_stats_fops);
4502
4503 trace_create_file("buffer_size_kb", 0444, d_cpu,
4504 (void *) cpu, &tracing_entries_fops);
4505}
4506
4507#ifdef CONFIG_FTRACE_SELFTEST
4508/* Let selftest have access to static functions in this file */
4509#include "trace_selftest.c"
4510#endif
4511
4512struct trace_option_dentry {
4513 struct tracer_opt *opt;
4514 struct tracer_flags *flags;
4515 struct dentry *entry;
4516};
4517
4518static ssize_t
4519trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
4520 loff_t *ppos)
4521{
4522 struct trace_option_dentry *topt = filp->private_data;
4523 char *buf;
4524
4525 if (topt->flags->val & topt->opt->bit)
4526 buf = "1\n";
4527 else
4528 buf = "0\n";
4529
4530 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4531}
4532
4533static ssize_t
4534trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
4535 loff_t *ppos)
4536{
4537 struct trace_option_dentry *topt = filp->private_data;
4538 unsigned long val;
4539 int ret;
4540
4541 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4542 if (ret)
4543 return ret;
4544
4545 if (val != 0 && val != 1)
4546 return -EINVAL;
4547
4548 if (!!(topt->flags->val & topt->opt->bit) != val) {
4549 mutex_lock(&trace_types_lock);
4550 ret = __set_tracer_option(current_trace, topt->flags,
4551 topt->opt, !val);
4552 mutex_unlock(&trace_types_lock);
4553 if (ret)
4554 return ret;
4555 }
4556
4557 *ppos += cnt;
4558
4559 return cnt;
4560}
4561
4562
4563static const struct file_operations trace_options_fops = {
4564 .open = tracing_open_generic,
4565 .read = trace_options_read,
4566 .write = trace_options_write,
4567 .llseek = generic_file_llseek,
4568};
4569
4570static ssize_t
4571trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
4572 loff_t *ppos)
4573{
4574 long index = (long)filp->private_data;
4575 char *buf;
4576
4577 if (trace_flags & (1 << index))
4578 buf = "1\n";
4579 else
4580 buf = "0\n";
4581
4582 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4583}
4584
4585static ssize_t
4586trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
4587 loff_t *ppos)
4588{
4589 long index = (long)filp->private_data;
4590 unsigned long val;
4591 int ret;
4592
4593 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4594 if (ret)
4595 return ret;
4596
4597 if (val != 0 && val != 1)
4598 return -EINVAL;
4599 set_tracer_flags(1 << index, val);
4600
4601 *ppos += cnt;
4602
4603 return cnt;
4604}
4605
4606static const struct file_operations trace_options_core_fops = {
4607 .open = tracing_open_generic,
4608 .read = trace_options_core_read,
4609 .write = trace_options_core_write,
4610 .llseek = generic_file_llseek,
4611};
4612
4613struct dentry *trace_create_file(const char *name,
4614 umode_t mode,
4615 struct dentry *parent,
4616 void *data,
4617 const struct file_operations *fops)
4618{
4619 struct dentry *ret;
4620
4621 ret = debugfs_create_file(name, mode, parent, data, fops);
4622 if (!ret)
4623 pr_warning("Could not create debugfs '%s' entry\n", name);
4624
4625 return ret;
4626}
4627
4628
4629static struct dentry *trace_options_init_dentry(void)
4630{
4631 struct dentry *d_tracer;
4632 static struct dentry *t_options;
4633
4634 if (t_options)
4635 return t_options;
4636
4637 d_tracer = tracing_init_dentry();
4638 if (!d_tracer)
4639 return NULL;
4640
4641 t_options = debugfs_create_dir("options", d_tracer);
4642 if (!t_options) {
4643 pr_warning("Could not create debugfs directory 'options'\n");
4644 return NULL;
4645 }
4646
4647 return t_options;
4648}
4649
4650static void
4651create_trace_option_file(struct trace_option_dentry *topt,
4652 struct tracer_flags *flags,
4653 struct tracer_opt *opt)
4654{
4655 struct dentry *t_options;
4656
4657 t_options = trace_options_init_dentry();
4658 if (!t_options)
4659 return;
4660
4661 topt->flags = flags;
4662 topt->opt = opt;
4663
4664 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
4665 &trace_options_fops);
4666
4667}
4668
4669static struct trace_option_dentry *
4670create_trace_option_files(struct tracer *tracer)
4671{
4672 struct trace_option_dentry *topts;
4673 struct tracer_flags *flags;
4674 struct tracer_opt *opts;
4675 int cnt;
4676
4677 if (!tracer)
4678 return NULL;
4679
4680 flags = tracer->flags;
4681
4682 if (!flags || !flags->opts)
4683 return NULL;
4684
4685 opts = flags->opts;
4686
4687 for (cnt = 0; opts[cnt].name; cnt++)
4688 ;
4689
4690 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
4691 if (!topts)
4692 return NULL;
4693
4694 for (cnt = 0; opts[cnt].name; cnt++)
4695 create_trace_option_file(&topts[cnt], flags,
4696 &opts[cnt]);
4697
4698 return topts;
4699}
4700
4701static void
4702destroy_trace_option_files(struct trace_option_dentry *topts)
4703{
4704 int cnt;
4705
4706 if (!topts)
4707 return;
4708
4709 for (cnt = 0; topts[cnt].opt; cnt++) {
4710 if (topts[cnt].entry)
4711 debugfs_remove(topts[cnt].entry);
4712 }
4713
4714 kfree(topts);
4715}
4716
4717static struct dentry *
4718create_trace_option_core_file(const char *option, long index)
4719{
4720 struct dentry *t_options;
4721
4722 t_options = trace_options_init_dentry();
4723 if (!t_options)
4724 return NULL;
4725
4726 return trace_create_file(option, 0644, t_options, (void *)index,
4727 &trace_options_core_fops);
4728}
4729
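/*
 * Create the "options" directory with one file per core trace option.
 * Each file reads back "0" or "1" and can be written to flip the
 * corresponding bit in trace_flags, e.g. (assuming debugfs is mounted
 * at /sys/kernel/debug):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/options/<option-name>
 */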
4730static __init void create_trace_options_dir(void)
4731{
4732 struct dentry *t_options;
4733 int i;
4734
4735 t_options = trace_options_init_dentry();
4736 if (!t_options)
4737 return;
4738
4739 for (i = 0; trace_options[i]; i++)
4740 create_trace_option_core_file(trace_options[i], i);
4741}
4742
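/*
 * The "tracing_on" file turns recording into the ring buffer on or off
 * without tearing the buffer down: writing 0 calls
 * ring_buffer_record_off(), writing a non-zero value calls
 * ring_buffer_record_on(), and reading reports the current state.
 */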
4743static ssize_t
4744rb_simple_read(struct file *filp, char __user *ubuf,
4745 size_t cnt, loff_t *ppos)
4746{
4747 struct trace_array *tr = filp->private_data;
4748 struct ring_buffer *buffer = tr->buffer;
4749 char buf[64];
4750 int r;
4751
4752 if (buffer)
4753 r = ring_buffer_record_is_on(buffer);
4754 else
4755 r = 0;
4756
4757 r = sprintf(buf, "%d\n", r);
4758
4759 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4760}
4761
4762static ssize_t
4763rb_simple_write(struct file *filp, const char __user *ubuf,
4764 size_t cnt, loff_t *ppos)
4765{
4766 struct trace_array *tr = filp->private_data;
4767 struct ring_buffer *buffer = tr->buffer;
4768 unsigned long val;
4769 int ret;
4770
4771 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4772 if (ret)
4773 return ret;
4774
4775 if (buffer) {
4776 if (val)
4777 ring_buffer_record_on(buffer);
4778 else
4779 ring_buffer_record_off(buffer);
4780 }
4781
4782 (*ppos)++;
4783
4784 return cnt;
4785}
4786
4787static const struct file_operations rb_simple_fops = {
4788 .open = tracing_open_generic,
4789 .read = rb_simple_read,
4790 .write = rb_simple_write,
4791 .llseek = default_llseek,
4792};
4793
4794static __init int tracer_init_debugfs(void)
4795{
4796 struct dentry *d_tracer;
4797 int cpu;
4798
4799 trace_access_lock_init();
4800
4801 d_tracer = tracing_init_dentry();
4802
4803 trace_create_file("tracing_enabled", 0644, d_tracer,
4804 &global_trace, &tracing_ctrl_fops);
4805
4806 trace_create_file("trace_options", 0644, d_tracer,
4807 NULL, &tracing_iter_fops);
4808
4809 trace_create_file("tracing_cpumask", 0644, d_tracer,
4810 NULL, &tracing_cpumask_fops);
4811
4812 trace_create_file("trace", 0644, d_tracer,
4813 (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
4814
4815 trace_create_file("available_tracers", 0444, d_tracer,
4816 &global_trace, &show_traces_fops);
4817
4818 trace_create_file("current_tracer", 0644, d_tracer,
4819 &global_trace, &set_tracer_fops);
4820
4821#ifdef CONFIG_TRACER_MAX_TRACE
4822 trace_create_file("tracing_max_latency", 0644, d_tracer,
4823 &tracing_max_latency, &tracing_max_lat_fops);
4824#endif
4825
4826 trace_create_file("tracing_thresh", 0644, d_tracer,
4827 &tracing_thresh, &tracing_max_lat_fops);
4828
4829 trace_create_file("README", 0444, d_tracer,
4830 NULL, &tracing_readme_fops);
4831
4832 trace_create_file("trace_pipe", 0444, d_tracer,
4833 (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
4834
4835 trace_create_file("buffer_size_kb", 0644, d_tracer,
4836 (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
4837
4838 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
4839 &global_trace, &tracing_total_entries_fops);
4840
4841 trace_create_file("free_buffer", 0644, d_tracer,
4842 &global_trace, &tracing_free_buffer_fops);
4843
4844 trace_create_file("trace_marker", 0220, d_tracer,
4845 NULL, &tracing_mark_fops);
4846
4847 trace_create_file("saved_cmdlines", 0444, d_tracer,
4848 NULL, &tracing_saved_cmdlines_fops);
4849
4850 trace_create_file("trace_clock", 0644, d_tracer, NULL,
4851 &trace_clock_fops);
4852
4853 trace_create_file("tracing_on", 0644, d_tracer,
4854 &global_trace, &rb_simple_fops);
4855
4856#ifdef CONFIG_DYNAMIC_FTRACE
4857 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4858 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
4859#endif
4860
4861 create_trace_options_dir();
4862
4863 for_each_tracing_cpu(cpu)
4864 tracing_init_debugfs_percpu(cpu);
4865
4866 return 0;
4867}
4868
4869static int trace_panic_handler(struct notifier_block *this,
4870 unsigned long event, void *unused)
4871{
4872 if (ftrace_dump_on_oops)
4873 ftrace_dump(ftrace_dump_on_oops);
4874 return NOTIFY_OK;
4875}
4876
4877static struct notifier_block trace_panic_notifier = {
4878 .notifier_call = trace_panic_handler,
4879 .next = NULL,
4880 .priority = 150 /* priority: INT_MAX >= x >= 0 */
4881};
4882
4883static int trace_die_handler(struct notifier_block *self,
4884 unsigned long val,
4885 void *data)
4886{
4887 switch (val) {
4888 case DIE_OOPS:
4889 if (ftrace_dump_on_oops)
4890 ftrace_dump(ftrace_dump_on_oops);
4891 break;
4892 default:
4893 break;
4894 }
4895 return NOTIFY_OK;
4896}
4897
4898static struct notifier_block trace_die_notifier = {
4899 .notifier_call = trace_die_handler,
4900 .priority = 200
4901};
4902
4903/*
4904 * printk is set to a max of 1024; we really don't need it that big.
4905 * Nothing should be printing 1000 characters anyway.
4906 */
4907#define TRACE_MAX_PRINT 1000
4908
4909/*
4910 * Define here KERN_TRACE so that we have one place to modify
4911 * it if we decide to change what log level the ftrace dump
4912 * should be at.
4913 */
4914#define KERN_TRACE KERN_EMERG
4915
4916void
4917trace_printk_seq(struct trace_seq *s)
4918{
4919 /* Probably should print a warning here. */
4920 if (s->len >= 1000)
4921 s->len = 1000;
4922
4923	/* should already be zero-terminated, but we are paranoid. */
4924 s->buffer[s->len] = 0;
4925
4926 printk(KERN_TRACE "%s", s->buffer);
4927
4928 trace_seq_init(s);
4929}
4930
4931void trace_init_global_iter(struct trace_iterator *iter)
4932{
4933 iter->tr = &global_trace;
4934 iter->trace = current_trace;
4935 iter->cpu_file = TRACE_PIPE_ALL_CPU;
4936}
4937
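/*
 * __ftrace_dump - dump the ring buffer contents to the console.
 *
 * Called from the panic and die notifiers above (via ftrace_dump()), so
 * it disables interrupts and uses a raw arch spinlock to make sure only
 * one dump ever runs. Depending on @oops_dump_mode it dumps either every
 * cpu's buffer or only the buffer of the cpu doing the dump, printing
 * each line through trace_printk_seq() at the KERN_TRACE level.
 */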
4938static void
4939__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
4940{
4941 static arch_spinlock_t ftrace_dump_lock =
4942 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
4943 /* use static because iter can be a bit big for the stack */
4944 static struct trace_iterator iter;
4945 unsigned int old_userobj;
4946 static int dump_ran;
4947 unsigned long flags;
4948 int cnt = 0, cpu;
4949
4950 /* only one dump */
4951 local_irq_save(flags);
4952 arch_spin_lock(&ftrace_dump_lock);
4953 if (dump_ran)
4954 goto out;
4955
4956 dump_ran = 1;
4957
4958 tracing_off();
4959
4960 /* Did function tracer already get disabled? */
4961 if (ftrace_is_dead()) {
4962 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
4963 printk("# MAY BE MISSING FUNCTION EVENTS\n");
4964 }
4965
4966 if (disable_tracing)
4967 ftrace_kill();
4968
4969 trace_init_global_iter(&iter);
4970
4971 for_each_tracing_cpu(cpu) {
4972 atomic_inc(&iter.tr->data[cpu]->disabled);
4973 }
4974
4975 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
4976
4977 /* don't look at user memory in panic mode */
4978 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
4979
4980 /* Simulate the iterator */
4981 iter.tr = &global_trace;
4982 iter.trace = current_trace;
4983
4984 switch (oops_dump_mode) {
4985 case DUMP_ALL:
4986 iter.cpu_file = TRACE_PIPE_ALL_CPU;
4987 break;
4988 case DUMP_ORIG:
4989 iter.cpu_file = raw_smp_processor_id();
4990 break;
4991 case DUMP_NONE:
4992 goto out_enable;
4993 default:
4994 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
4995 iter.cpu_file = TRACE_PIPE_ALL_CPU;
4996 }
4997
4998 printk(KERN_TRACE "Dumping ftrace buffer:\n");
4999
5000 /*
5001	 * We need to stop all tracing on all CPUs to read
5002	 * the next buffer. This is a bit expensive, but it is
5003	 * not done often. We fill in all that we can read,
5004	 * and then release the locks again.
5005 */
5006
5007 while (!trace_empty(&iter)) {
5008
5009 if (!cnt)
5010 printk(KERN_TRACE "---------------------------------\n");
5011
5012 cnt++;
5013
5014 /* reset all but tr, trace, and overruns */
5015 memset(&iter.seq, 0,
5016 sizeof(struct trace_iterator) -
5017 offsetof(struct trace_iterator, seq));
5018 iter.iter_flags |= TRACE_FILE_LAT_FMT;
5019 iter.pos = -1;
5020
5021 if (trace_find_next_entry_inc(&iter) != NULL) {
5022 int ret;
5023
5024 ret = print_trace_line(&iter);
5025 if (ret != TRACE_TYPE_NO_CONSUME)
5026 trace_consume(&iter);
5027 }
5028 touch_nmi_watchdog();
5029
5030 trace_printk_seq(&iter.seq);
5031 }
5032
5033 if (!cnt)
5034 printk(KERN_TRACE " (ftrace buffer empty)\n");
5035 else
5036 printk(KERN_TRACE "---------------------------------\n");
5037
5038 out_enable:
5039 /* Re-enable tracing if requested */
5040 if (!disable_tracing) {
5041 trace_flags |= old_userobj;
5042
5043 for_each_tracing_cpu(cpu) {
5044 atomic_dec(&iter.tr->data[cpu]->disabled);
5045 }
5046 tracing_on();
5047 }
5048
5049 out:
5050 arch_spin_unlock(&ftrace_dump_lock);
5051 local_irq_restore(flags);
5052}
5053
5054/* By default: disable tracing after the dump */
5055void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
5056{
5057 __ftrace_dump(true, oops_dump_mode);
5058}
5059EXPORT_SYMBOL_GPL(ftrace_dump);
5060
5061__init static int tracer_alloc_buffers(void)
5062{
5063 int ring_buf_size;
5064 enum ring_buffer_flags rb_flags;
5065 int i;
5066 int ret = -ENOMEM;
5067
5068
5069 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
5070 goto out;
5071
5072 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
5073 goto out_free_buffer_mask;
5074
5075 /* Only allocate trace_printk buffers if a trace_printk exists */
5076 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
5077 trace_printk_init_buffers();
5078
5079 /* To save memory, keep the ring buffer size to its minimum */
5080 if (ring_buffer_expanded)
5081 ring_buf_size = trace_buf_size;
5082 else
5083 ring_buf_size = 1;
5084
5085 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5086
5087 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
5088 cpumask_copy(tracing_cpumask, cpu_all_mask);
5089
5090 /* TODO: make the number of buffers hot pluggable with CPUS */
5091 global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
5092 if (!global_trace.buffer) {
5093 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
5094 WARN_ON(1);
5095 goto out_free_cpumask;
5096 }
5097 if (global_trace.buffer_disabled)
5098 tracing_off();
5099
5100
5101#ifdef CONFIG_TRACER_MAX_TRACE
5102 max_tr.buffer = ring_buffer_alloc(1, rb_flags);
5103 if (!max_tr.buffer) {
5104 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
5105 WARN_ON(1);
5106 ring_buffer_free(global_trace.buffer);
5107 goto out_free_cpumask;
5108 }
5109#endif
5110
5111 /* Allocate the first page for all buffers */
5112 for_each_tracing_cpu(i) {
5113 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
5114 max_tr.data[i] = &per_cpu(max_tr_data, i);
5115 }
5116
5117 set_buffer_entries(&global_trace,
5118 ring_buffer_size(global_trace.buffer, 0));
5119#ifdef CONFIG_TRACER_MAX_TRACE
5120 set_buffer_entries(&max_tr, 1);
5121#endif
5122
5123 trace_init_cmdlines();
5124
5125 register_tracer(&nop_trace);
5126 current_trace = &nop_trace;
5127 /* All seems OK, enable tracing */
5128 tracing_disabled = 0;
5129
5130 atomic_notifier_chain_register(&panic_notifier_list,
5131 &trace_panic_notifier);
5132
5133 register_die_notifier(&trace_die_notifier);
5134
5135 return 0;
5136
5137out_free_cpumask:
5138 free_cpumask_var(tracing_cpumask);
5139out_free_buffer_mask:
5140 free_cpumask_var(tracing_buffer_mask);
5141out:
5142 return ret;
5143}
5144
5145__init static int clear_boot_tracer(void)
5146{
5147 /*
5148	 * The default boot-up tracer string lives in an init section.
5149	 * This function is called at late_initcall time. If we did not
5150	 * find the boot tracer by then, clear it out to prevent
5151	 * a later registration from accessing the buffer that is
5152	 * about to be freed.
5153 */
5154 if (!default_bootup_tracer)
5155 return 0;
5156
5157 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
5158 default_bootup_tracer);
5159 default_bootup_tracer = NULL;
5160
5161 return 0;
5162}
5163
5164early_initcall(tracer_alloc_buffers);
5165fs_initcall(tracer_init_debugfs);
5166late_initcall(clear_boot_tracer);