// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 * Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will peek into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could
 * occur at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurs.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It defaults to off, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
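
/*
 * Usage example (illustrative, following the description above): to
 * dump every CPU's buffer when an oops happens, boot with:
 *
 *	ftrace_dump_on_oops
 *
 * or, to dump only the buffer of the CPU that triggered the oops:
 *
 *	ftrace_dump_on_oops=orig_cpu
 *
 * The same switch can be flipped at run time:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */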

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module *mod;
	unsigned long length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL, as it must be different
	 * from "mod" or "eval_string"
	 */
	union trace_eval_map_item *next;
	const char *end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map map;
	struct trace_eval_map_head head;
	struct trace_eval_map_tail tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct ring_buffer *buffer,
		unsigned long flags, int pc);

#define MAX_TRACER_SIZE 100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS \
	(FUNCTION_DEFAULT_FLAGS | \
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
	TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
		struct ring_buffer *buffer,
		struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

void trace_free_pid_list(struct trace_pid_list *pid_list)
{
	vfree(pid_list->pids);
	kfree(pid_list);
}

/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	/*
	 * If pid_max changed after filtered_pids was created, we
	 * by default ignore all pids greater than the previous pid_max.
	 */
	if (search_pid >= filtered_pids->pid_max)
		return false;

	return test_bit(search_pid, filtered_pids->pids);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}

/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
		struct task_struct *self,
		struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* Sorry, but we don't support pid_max changing after setting */
	if (task->pid >= pid_list->pid_max)
		return;

	/* "self" is set for forks, and NULL for exits */
	if (self)
		set_bit(task->pid, pid_list->pids);
	else
		clear_bit(task->pid, pid_list->pids);
}
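
/*
 * Illustrative sketch of a caller pair (the handler names and the
 * pid_list source here are hypothetical; the event pid filter hooks
 * equivalent callbacks to the sched_process_fork/exit tracepoints):
 *
 *	static struct trace_pid_list *my_pid_list;
 *
 *	static void my_fork_handler(void *data, struct task_struct *self,
 *				    struct task_struct *task)
 *	{
 *		// the child is added only if the parent is listed
 *		trace_filter_add_remove_task(my_pid_list, self, task);
 *	}
 *
 *	static void my_exit_handler(void *data, struct task_struct *task)
 *	{
 *		// @self == NULL: drop the exiting task from the list
 *		trace_filter_add_remove_task(my_pid_list, NULL, task);
 *	}
 */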

/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	unsigned long pid = (unsigned long)v;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);

	/* Return pid + 1 to allow zero to be represented */
	if (pid < pid_list->pid_max)
		return (void *)(pid + 1);

	return NULL;
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	loff_t l = 0;

	pid = find_first_bit(pid_list->pids, pid_list->pid_max);
	if (pid >= pid_list->pid_max)
		return NULL;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
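
/*
 * Illustrative sketch of wiring the three helpers above into a
 * seq_file interface (the wrappers and my_pid_list are hypothetical;
 * real users also take locking/RCU protection around the list):
 *
 *	static void *p_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(my_pid_list, pos);
 *	}
 *
 *	static void *p_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(my_pid_list, v, pos);
 *	}
 *
 *	static void p_stop(struct seq_file *m, void *v)
 *	{
 *	}
 *
 *	static const struct seq_operations pid_seq_ops = {
 *		.start	= p_start,
 *		.next	= p_next,
 *		.stop	= p_stop,
 *		.show	= trace_pid_show,
 *	};
 */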

/* 128 should be much more than enough */
#define PID_BUF_SIZE 127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		struct trace_pid_list **new_pid_list,
		const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret = 0;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always create a new array. The write is an all-or-nothing
	 * operation: when the user adds new pids, build a fresh array,
	 * and if the operation fails, leave the current list unmodified.
	 */
	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	pid_list->pid_max = READ_ONCE(pid_max);

	/* Only truncating will shrink pid_max */
	if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
		pid_list->pid_max = filtered_pids->pid_max;

	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
	if (!pid_list->pids) {
		trace_parser_put(&parser);
		kfree(pid_list);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		for_each_set_bit(pid, filtered_pids->pids,
				 filtered_pids->pid_max) {
			set_bit(pid, pid_list->pids);
			nr_pids++;
		}
	}

	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0 || !trace_parser_loaded(&parser))
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;
		if (val >= pid_list->pid_max)
			break;

		pid = (pid_t)val;

		set_bit(pid, pid_list->pids);
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_free_pid_list(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_free_pid_list(pid_list);
		read = ret;
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
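
/*
 * Illustrative sketch of a write handler built on trace_pid_write()
 * (the file op and my_pid_list are hypothetical; real callers publish
 * the new list with rcu_assign_pointer() and synchronize before
 * freeing the old one):
 *
 *	static ssize_t my_pid_write(struct file *filp, const char __user *ubuf,
 *				    size_t cnt, loff_t *ppos)
 *	{
 *		struct trace_pid_list *new_list = NULL;
 *		ssize_t ret;
 *
 *		ret = trace_pid_write(my_pid_list, &new_list, ubuf, cnt);
 *		if (ret < 0)
 *			return ret;
 *
 *		rcu_assign_pointer(my_pid_list, new_list);
 *		return ret;
 *	}
 */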

static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * not to have to wait for all that output. Anyway, this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
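
/*
 * Typical calling pattern for the primitives above when consuming
 * events from a single CPU buffer (sketch; the consumer body is
 * hypothetical):
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	if (event)
 *		handle_event(event);
 *	trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS instead of a cpu number takes the
 * access lock exclusively, blocking out all per-cpu readers.
 */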

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
		unsigned long flags,
		int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
		struct ring_buffer *buffer,
		unsigned long flags,
		int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
		unsigned long flags,
		int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
		struct ring_buffer *buffer,
		unsigned long flags,
		int skip, int pc, struct pt_regs *regs)
{
}

#endif

static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		int type, unsigned long flags, int pc)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, flags, pc);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct ring_buffer *buffer,
		int type,
		unsigned long len,
		unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, flags, pc);

	return event;
}

void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);


static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
	} else
		ring_buffer_unlock_commit(buffer, event);
}

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
			irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
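
/*
 * Example (sketch): callers normally use the trace_puts() macro, which
 * routes string literals to __trace_bputs() and everything else here:
 *
 *	trace_puts("reached the slow path\n");
 */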

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
			irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, the snapshot cannot be used while the tracer itself uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}

/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
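
/*
 * Example (sketch): snapshot the live trace when a rare condition is
 * seen, while tracing continues (the condition is hypothetical; the
 * snapshot buffer must already be allocated, see above):
 *
 *	if (unlikely(retries > max_retries))
 *		tracing_snapshot();
 *
 * The snapshotted data can then be read from the "snapshot" file in
 * the tracefs directory.
 */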

/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr: The tracing instance to snapshot
 * @cond_data: The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);

/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr: The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
		struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				&tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);

/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr: The tracing instance
 * @cond_data: User data to associate with the snapshot
 * @update: Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
		cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
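
/*
 * Illustrative sketch of enabling a conditional snapshot (the update
 * callback and the threshold are hypothetical). The return value of
 * update() is what decides, inside update_max_tr(), whether the
 * buffers are actually swapped:
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		unsigned long *threshold = cond_data;
 *
 *		return measured_latency() > *threshold;
 *	}
 *
 *	static unsigned long my_threshold = 100;
 *
 *	ret = tracing_snapshot_cond_enable(tr, &my_threshold, my_update);
 */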

/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr: The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#endif /* CONFIG_TRACER_SNAPSHOT */

void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;	/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local, "local", 1 },
	{ trace_clock_global, "global", 1 },
	{ trace_clock_counter, "counter", 0 },
	{ trace_clock_jiffies, "uptime", 0 },
	{ trace_clock, "perf", 1 },
	{ ktime_get_mono_fast_ns, "mono", 1 },
	{ ktime_get_raw_fast_ns, "mono_raw", 1 },
	{ ktime_get_boot_fast_ns, "boot", 1 },
	ARCH_TRACE_CLOCKS
};
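
/*
 * Example (sketch): the clock used by an instance is chosen from the
 * table above via the "trace_clock" file in tracefs, or at boot with
 * the trace_clock= parameter handled earlier in this file:
 *
 *	echo mono > /sys/kernel/debug/tracing/trace_clock
 */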

bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
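
/*
 * Typical calling pattern (sketch): loop until the user buffer is
 * drained, handling one parsed token per iteration; compare
 * trace_pid_write() above for a real user:
 *
 *	while (cnt > 0) {
 *		pos = 0;
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (ret < 0 || !trace_parser_loaded(&parser))
 *			break;
 *		ubuf += ret;
 *		cnt -= ret;
 *		my_handle_token(parser.buffer);	// hypothetical consumer
 *		trace_parser_clear(&parser);
 *	}
 */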

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
		void *cond_data)
{
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from trace_buffer */
	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
		goto out_unlock;
#endif
	swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);

 out_unlock:
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, int full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
			full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head list;
	struct tracer *type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);
	return 0;
}

static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}

static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		/*
		 * This loop can take minutes when sanitizers are enabled, so
		 * let's make sure we allow RCU processing.
		 */
		cond_resched();
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* Disable other selftests; running this tracer would break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
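
/*
 * Illustrative sketch of a minimal registration (all names here are
 * hypothetical; real tracers usually also fill in start/stop hooks,
 * a selftest, and output callbacks). register_tracer() is __init, so
 * it must be called from boot-time initialization:
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int my_tracer_register(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(my_tracer_register);
 */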

static void tracing_reset_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->clear_trace)
			continue;
		tr->clear_trace = false;
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

static int *tgid_map;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_taskinfo_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
		struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc_array(val,
			sizeof(*s->map_cmdline_to_pid),
			GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}

static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* treat recording of idle task as a success */
	if (!tsk->pid)
		return 1;

	if (unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
2145 * the lock, but we also don't want to spin
2146 * nor do we want to disable interrupts,
2147 * so if we miss here, then better luck next time.
2148 */
2149 if (!arch_spin_trylock(&trace_cmdline_lock))
2150 return 0;
2151
2152 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2153 if (idx == NO_CMDLINE_MAP) {
2154 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2155
2156 /*
2157 * Check whether the cmdline buffer at idx has a pid
2158 * mapped. We are going to overwrite that entry so we
2159 * need to clear the map_pid_to_cmdline. Otherwise we
2160 * would read the new comm for the old pid.
2161 */
2162 pid = savedcmd->map_cmdline_to_pid[idx];
2163 if (pid != NO_CMDLINE_MAP)
2164 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2165
2166 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2167 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2168
2169 savedcmd->cmdline_idx = idx;
2170 }
2171
2172 set_cmdline(idx, tsk->comm);
2173
2174 arch_spin_unlock(&trace_cmdline_lock);
2175
2176 return 1;
2177}
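
/*
 * Worked example of the eviction above: once a cmdline slot (say one
 * holding pid 1000's comm) is recycled for a new pid, the stale
 * map_pid_to_cmdline[1000] entry is reset to NO_CMDLINE_MAP first, so
 * a later lookup of pid 1000 falls back to "<...>" instead of showing
 * the new task's comm.
 */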
2178
2179static void __trace_find_cmdline(int pid, char comm[])
2180{
2181 unsigned map;
2182
2183 if (!pid) {
2184 strcpy(comm, "<idle>");
2185 return;
2186 }
2187
2188 if (WARN_ON_ONCE(pid < 0)) {
2189 strcpy(comm, "<XXX>");
2190 return;
2191 }
2192
2193 if (pid > PID_MAX_DEFAULT) {
2194 strcpy(comm, "<...>");
2195 return;
2196 }
2197
2198 map = savedcmd->map_pid_to_cmdline[pid];
2199 if (map != NO_CMDLINE_MAP)
2200 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2201 else
2202 strcpy(comm, "<...>");
2203}
2204
2205void trace_find_cmdline(int pid, char comm[])
2206{
2207 preempt_disable();
2208 arch_spin_lock(&trace_cmdline_lock);
2209
2210 __trace_find_cmdline(pid, comm);
2211
2212 arch_spin_unlock(&trace_cmdline_lock);
2213 preempt_enable();
2214}
2215
2216int trace_find_tgid(int pid)
2217{
2218 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2219 return 0;
2220
2221 return tgid_map[pid];
2222}
2223
2224static int trace_save_tgid(struct task_struct *tsk)
2225{
2226 /* treat recording of idle task as a success */
2227 if (!tsk->pid)
2228 return 1;
2229
2230 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2231 return 0;
2232
2233 tgid_map[tsk->pid] = tsk->tgid;
2234 return 1;
2235}
2236
2237static bool tracing_record_taskinfo_skip(int flags)
2238{
2239 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2240 return true;
2241 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2242 return true;
2243 if (!__this_cpu_read(trace_taskinfo_save))
2244 return true;
2245 return false;
2246}
2247
2248/**
2249 * tracing_record_taskinfo - record the task info of a task
2250 *
2251 * @task: task to record
2252 * @flags: TRACE_RECORD_CMDLINE for recording comm
2253 * TRACE_RECORD_TGID for recording tgid
2254 */
2255void tracing_record_taskinfo(struct task_struct *task, int flags)
2256{
2257 bool done;
2258
2259 if (tracing_record_taskinfo_skip(flags))
2260 return;
2261
2262 /*
2263 * Record as much task information as possible. If some fail, continue
2264 * to try to record the others.
2265 */
2266 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2267 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2268
	/* If recording any of the information failed, retry soon. */
2270 if (!done)
2271 return;
2272
2273 __this_cpu_write(trace_taskinfo_save, false);
2274}
2275
2276/**
2277 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2278 *
2279 * @prev: previous task during sched_switch
2280 * @next: next task during sched_switch
2281 * @flags: TRACE_RECORD_CMDLINE for recording comm
2282 * TRACE_RECORD_TGID for recording tgid
2283 */
2284void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2285 struct task_struct *next, int flags)
2286{
2287 bool done;
2288
2289 if (tracing_record_taskinfo_skip(flags))
2290 return;
2291
2292 /*
2293 * Record as much task information as possible. If some fail, continue
2294 * to try to record the others.
2295 */
2296 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2297 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2298 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2299 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2300
	/* If recording any of the information failed, retry soon. */
2302 if (!done)
2303 return;
2304
2305 __this_cpu_write(trace_taskinfo_save, false);
2306}
2307
2308/* Helpers to record a specific task information */
2309void tracing_record_cmdline(struct task_struct *task)
2310{
2311 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2312}
2313
2314void tracing_record_tgid(struct task_struct *task)
2315{
2316 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2317}
2318
2319/*
2320 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2321 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2322 * simplifies those functions and keeps them in sync.
2323 */
2324enum print_line_t trace_handle_return(struct trace_seq *s)
2325{
2326 return trace_seq_has_overflowed(s) ?
2327 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2328}
2329EXPORT_SYMBOL_GPL(trace_handle_return);
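
/*
 * Illustrative sketch (not part of this file): a typical trace_event
 * output callback builds its line in the trace_seq and lets
 * trace_handle_return() turn any overflow into the right return value:
 *
 *	static enum print_line_t
 *	my_event_trace(struct trace_iterator *iter, int flags,
 *		       struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "my event fired\n");
 *		return trace_handle_return(&iter->seq);
 *	}
 */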
2330
2331void
2332tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2333 unsigned long flags, int pc)
2334{
2335 struct task_struct *tsk = current;
2336
2337 entry->preempt_count = pc & 0xff;
2338 entry->pid = (tsk) ? tsk->pid : 0;
2339 entry->type = type;
2340 entry->flags =
2341#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2342 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2343#else
2344 TRACE_FLAG_IRQS_NOSUPPORT |
2345#endif
2346 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2347 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2348 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2349 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2350 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2351}
2352EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
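
/*
 * Worked example of the flag encoding above: an event fired from hard
 * interrupt context with interrupts disabled has a HARDIRQ_MASK bit set
 * in pc and irqs_disabled_flags(flags) true, so entry->flags ends up
 * with TRACE_FLAG_IRQS_OFF | TRACE_FLAG_HARDIRQ, which the latency
 * output shows in its irqs-off and hardirq/softirq columns.
 */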
2353
2354struct ring_buffer_event *
2355trace_buffer_lock_reserve(struct ring_buffer *buffer,
2356 int type,
2357 unsigned long len,
2358 unsigned long flags, int pc)
2359{
2360 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2361}
2362
2363DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2364DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2365static int trace_buffered_event_ref;
2366
2367/**
2368 * trace_buffered_event_enable - enable buffering events
2369 *
 * When events are being filtered, it is quicker to write the event
 * data into a temporary per-CPU buffer when there is a good chance
 * the event will not be committed. Discarding an event from the ring
 * buffer is slower than committing one, and much slower than copying
 * the data and committing only the events that pass the filter.
 *
 * When an event is to be filtered, allocate per-CPU buffers to write
 * the event data into. If the event is then filtered and discarded,
 * it is simply dropped; otherwise, the entire data is committed in
 * one shot.
2380 */
2381void trace_buffered_event_enable(void)
2382{
2383 struct ring_buffer_event *event;
2384 struct page *page;
2385 int cpu;
2386
2387 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2388
2389 if (trace_buffered_event_ref++)
2390 return;
2391
2392 for_each_tracing_cpu(cpu) {
2393 page = alloc_pages_node(cpu_to_node(cpu),
2394 GFP_KERNEL | __GFP_NORETRY, 0);
2395 if (!page)
2396 goto failed;
2397
2398 event = page_address(page);
2399 memset(event, 0, sizeof(*event));
2400
2401 per_cpu(trace_buffered_event, cpu) = event;
2402
2403 preempt_disable();
2404 if (cpu == smp_processor_id() &&
2405 this_cpu_read(trace_buffered_event) !=
2406 per_cpu(trace_buffered_event, cpu))
2407 WARN_ON_ONCE(1);
2408 preempt_enable();
2409 }
2410
2411 return;
2412 failed:
2413 trace_buffered_event_disable();
2414}
2415
2416static void enable_trace_buffered_event(void *data)
2417{
	/*
	 * Probably not needed, but this pairs with the smp_wmb() in
	 * trace_buffered_event_disable() so the NULLed buffer pointers
	 * are seen before the count is decremented.
	 */
2419 smp_rmb();
2420 this_cpu_dec(trace_buffered_event_cnt);
2421}
2422
2423static void disable_trace_buffered_event(void *data)
2424{
2425 this_cpu_inc(trace_buffered_event_cnt);
2426}
2427
2428/**
2429 * trace_buffered_event_disable - disable buffering events
2430 *
2431 * When a filter is removed, it is faster to not use the buffered
2432 * events, and to commit directly into the ring buffer. Free up
2433 * the temp buffers when there are no more users. This requires
2434 * special synchronization with current events.
2435 */
2436void trace_buffered_event_disable(void)
2437{
2438 int cpu;
2439
2440 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2441
2442 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2443 return;
2444
2445 if (--trace_buffered_event_ref)
2446 return;
2447
2448 preempt_disable();
	/* For each CPU, mark the buffer as busy so new events skip it. */
2450 smp_call_function_many(tracing_buffer_mask,
2451 disable_trace_buffered_event, NULL, 1);
2452 preempt_enable();
2453
2454 /* Wait for all current users to finish */
2455 synchronize_rcu();
2456
2457 for_each_tracing_cpu(cpu) {
2458 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2459 per_cpu(trace_buffered_event, cpu) = NULL;
2460 }
2461 /*
2462 * Make sure trace_buffered_event is NULL before clearing
2463 * trace_buffered_event_cnt.
2464 */
2465 smp_wmb();
2466
2467 preempt_disable();
	/* Drop the earlier increment on each CPU now that the buffers are gone */
2469 smp_call_function_many(tracing_buffer_mask,
2470 enable_trace_buffered_event, NULL, 1);
2471 preempt_enable();
2472}
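
/*
 * A minimal usage sketch (hypothetical caller): both helpers above are
 * reference counted and require event_mutex, so enable/disable calls
 * must pair up:
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	...	(install or remove an event filter here)
 *	trace_buffered_event_disable();
 *	mutex_unlock(&event_mutex);
 */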
2473
2474static struct ring_buffer *temp_buffer;
2475
2476struct ring_buffer_event *
2477trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2478 struct trace_event_file *trace_file,
2479 int type, unsigned long len,
2480 unsigned long flags, int pc)
2481{
2482 struct ring_buffer_event *entry;
2483 int val;
2484
2485 *current_rb = trace_file->tr->trace_buffer.buffer;
2486
2487 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2488 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2489 (entry = this_cpu_read(trace_buffered_event))) {
2490 /* Try to use the per cpu buffer first */
2491 val = this_cpu_inc_return(trace_buffered_event_cnt);
2492 if (val == 1) {
2493 trace_event_setup(entry, type, flags, pc);
2494 entry->array[0] = len;
2495 return entry;
2496 }
2497 this_cpu_dec(trace_buffered_event_cnt);
2498 }
2499
2500 entry = __trace_buffer_lock_reserve(*current_rb,
2501 type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
2508 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2509 *current_rb = temp_buffer;
2510 entry = __trace_buffer_lock_reserve(*current_rb,
2511 type, len, flags, pc);
2512 }
2513 return entry;
2514}
2515EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
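
/*
 * Worked example of the fast path above: the first event on a CPU sees
 * trace_buffered_event_cnt go 0 -> 1 and writes into the per-CPU page;
 * an event nesting inside it (from an interrupt, say) sees 1 -> 2,
 * backs off, and reserves space in the real ring buffer instead.
 */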
2516
2517static DEFINE_SPINLOCK(tracepoint_iter_lock);
2518static DEFINE_MUTEX(tracepoint_printk_mutex);
2519
2520static void output_printk(struct trace_event_buffer *fbuffer)
2521{
2522 struct trace_event_call *event_call;
2523 struct trace_event *event;
2524 unsigned long flags;
2525 struct trace_iterator *iter = tracepoint_print_iter;
2526
2527 /* We should never get here if iter is NULL */
2528 if (WARN_ON_ONCE(!iter))
2529 return;
2530
2531 event_call = fbuffer->trace_file->event_call;
2532 if (!event_call || !event_call->event.funcs ||
2533 !event_call->event.funcs->trace)
2534 return;
2535
2536 event = &fbuffer->trace_file->event_call->event;
2537
2538 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2539 trace_seq_init(&iter->seq);
2540 iter->ent = fbuffer->entry;
2541 event_call->event.funcs->trace(iter, 0, event);
2542 trace_seq_putc(&iter->seq, 0);
2543 printk("%s", iter->seq.buffer);
2544
2545 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2546}
2547
2548int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2549 void __user *buffer, size_t *lenp,
2550 loff_t *ppos)
2551{
2552 int save_tracepoint_printk;
2553 int ret;
2554
2555 mutex_lock(&tracepoint_printk_mutex);
2556 save_tracepoint_printk = tracepoint_printk;
2557
2558 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2559
	/*
	 * This will force exiting early, as tracepoint_printk
	 * is always zero when tracepoint_print_iter is not allocated.
	 */
2564 if (!tracepoint_print_iter)
2565 tracepoint_printk = 0;
2566
2567 if (save_tracepoint_printk == tracepoint_printk)
2568 goto out;
2569
2570 if (tracepoint_printk)
2571 static_key_enable(&tracepoint_printk_key.key);
2572 else
2573 static_key_disable(&tracepoint_printk_key.key);
2574
2575 out:
2576 mutex_unlock(&tracepoint_printk_mutex);
2577
2578 return ret;
2579}
2580
2581void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2582{
2583 if (static_key_false(&tracepoint_printk_key.key))
2584 output_printk(fbuffer);
2585
2586 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2587 fbuffer->event, fbuffer->entry,
2588 fbuffer->flags, fbuffer->pc);
2589}
2590EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2591
2592/*
2593 * Skip 3:
2594 *
2595 * trace_buffer_unlock_commit_regs()
2596 * trace_event_buffer_commit()
2597 * trace_event_raw_event_xxx()
2598 */
2599# define STACK_SKIP 3
2600
2601void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2602 struct ring_buffer *buffer,
2603 struct ring_buffer_event *event,
2604 unsigned long flags, int pc,
2605 struct pt_regs *regs)
2606{
2607 __buffer_unlock_commit(buffer, event);
2608
	/*
	 * If regs is not set, then skip the STACK_SKIP functions listed
	 * above. Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
2615 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2616 ftrace_trace_userstack(buffer, flags, pc);
2617}
2618
2619/*
2620 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2621 */
2622void
2623trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2624 struct ring_buffer_event *event)
2625{
2626 __buffer_unlock_commit(buffer, event);
2627}
2628
2629static void
2630trace_process_export(struct trace_export *export,
2631 struct ring_buffer_event *event)
2632{
2633 struct trace_entry *entry;
2634 unsigned int size = 0;
2635
2636 entry = ring_buffer_event_data(event);
2637 size = ring_buffer_event_length(event);
2638 export->write(export, entry, size);
2639}
2640
2641static DEFINE_MUTEX(ftrace_export_lock);
2642
2643static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2644
2645static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2646
2647static inline void ftrace_exports_enable(void)
2648{
2649 static_branch_enable(&ftrace_exports_enabled);
2650}
2651
2652static inline void ftrace_exports_disable(void)
2653{
2654 static_branch_disable(&ftrace_exports_enabled);
2655}
2656
2657static void ftrace_exports(struct ring_buffer_event *event)
2658{
2659 struct trace_export *export;
2660
2661 preempt_disable_notrace();
2662
2663 export = rcu_dereference_raw_check(ftrace_exports_list);
2664 while (export) {
2665 trace_process_export(export, event);
2666 export = rcu_dereference_raw_check(export->next);
2667 }
2668
2669 preempt_enable_notrace();
2670}
2671
2672static inline void
2673add_trace_export(struct trace_export **list, struct trace_export *export)
2674{
2675 rcu_assign_pointer(export->next, *list);
	/*
	 * We are inserting export into the list, but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included in the list.
	 */
2682 rcu_assign_pointer(*list, export);
2683}
2684
2685static inline int
2686rm_trace_export(struct trace_export **list, struct trace_export *export)
2687{
2688 struct trace_export **p;
2689
2690 for (p = list; *p != NULL; p = &(*p)->next)
2691 if (*p == export)
2692 break;
2693
2694 if (*p != export)
2695 return -1;
2696
2697 rcu_assign_pointer(*p, (*p)->next);
2698
2699 return 0;
2700}
2701
2702static inline void
2703add_ftrace_export(struct trace_export **list, struct trace_export *export)
2704{
2705 if (*list == NULL)
2706 ftrace_exports_enable();
2707
2708 add_trace_export(list, export);
2709}
2710
2711static inline int
2712rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2713{
2714 int ret;
2715
2716 ret = rm_trace_export(list, export);
2717 if (*list == NULL)
2718 ftrace_exports_disable();
2719
2720 return ret;
2721}
2722
2723int register_ftrace_export(struct trace_export *export)
2724{
2725 if (WARN_ON_ONCE(!export->write))
2726 return -1;
2727
2728 mutex_lock(&ftrace_export_lock);
2729
2730 add_ftrace_export(&ftrace_exports_list, export);
2731
2732 mutex_unlock(&ftrace_export_lock);
2733
2734 return 0;
2735}
2736EXPORT_SYMBOL_GPL(register_ftrace_export);
2737
2738int unregister_ftrace_export(struct trace_export *export)
2739{
2740 int ret;
2741
2742 mutex_lock(&ftrace_export_lock);
2743
2744 ret = rm_ftrace_export(&ftrace_exports_list, export);
2745
2746 mutex_unlock(&ftrace_export_lock);
2747
2748 return ret;
2749}
2750EXPORT_SYMBOL_GPL(unregister_ftrace_export);
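
/*
 * Sketch of a hypothetical consumer (names invented here): a module
 * supplies a struct trace_export with a ->write() callback and
 * registers it to receive every function trace event as it is
 * committed:
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		(ship the raw trace entry bytes off to some sink)
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write = my_export_write,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */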
2751
2752void
2753trace_function(struct trace_array *tr,
2754 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2755 int pc)
2756{
2757 struct trace_event_call *call = &event_function;
2758 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2759 struct ring_buffer_event *event;
2760 struct ftrace_entry *entry;
2761
2762 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2763 flags, pc);
2764 if (!event)
2765 return;
2766 entry = ring_buffer_event_data(event);
2767 entry->ip = ip;
2768 entry->parent_ip = parent_ip;
2769
2770 if (!call_filter_check_discard(call, entry, buffer, event)) {
2771 if (static_branch_unlikely(&ftrace_exports_enabled))
2772 ftrace_exports(event);
2773 __buffer_unlock_commit(buffer, event);
2774 }
2775}
2776
2777#ifdef CONFIG_STACKTRACE
2778
2779/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2780#define FTRACE_KSTACK_NESTING 4
2781
2782#define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2783
2784struct ftrace_stack {
2785 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2786};
2787
2789struct ftrace_stacks {
2790 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2791};
2792
2793static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2794static DEFINE_PER_CPU(int, ftrace_stack_reserve);
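
/*
 * Worked example of the reservation scheme below: the first stack trace
 * recorded on a CPU takes stacks[0]; an interrupt that records a stack
 * trace while that slot is reserved takes stacks[1]; an NMI on top of
 * that takes stacks[2]. Each context releases its slot on the way out,
 * so the four nesting levels never stomp on each other.
 */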
2795
2796static void __ftrace_trace_stack(struct ring_buffer *buffer,
2797 unsigned long flags,
2798 int skip, int pc, struct pt_regs *regs)
2799{
2800 struct trace_event_call *call = &event_kernel_stack;
2801 struct ring_buffer_event *event;
2802 unsigned int size, nr_entries;
2803 struct ftrace_stack *fstack;
2804 struct stack_entry *entry;
2805 int stackidx;
2806
	/*
	 * Add one, for this function and the call to stack_trace_save().
	 * If regs is set, then these functions will not be in the way.
	 */
2811#ifndef CONFIG_UNWINDER_ORC
2812 if (!regs)
2813 skip++;
2814#endif
2815
	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve a slot and if an
	 * interrupt or NMI comes in, it will just have to use the next
	 * slot of the FTRACE_KSTACK_NESTING levels.
	 */
2822 preempt_disable_notrace();
2823
2824 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2825
2826 /* This should never happen. If it does, yell once and skip */
	if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2828 goto out;
2829
2830 /*
2831 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2832 * interrupt will either see the value pre increment or post
2833 * increment. If the interrupt happens pre increment it will have
2834 * restored the counter when it returns. We just need a barrier to
2835 * keep gcc from moving things around.
2836 */
2837 barrier();
2838
2839 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2840 size = ARRAY_SIZE(fstack->calls);
2841
2842 if (regs) {
2843 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2844 size, skip);
2845 } else {
2846 nr_entries = stack_trace_save(fstack->calls, size, skip);
2847 }
2848
2849 size = nr_entries * sizeof(unsigned long);
2850 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2851 sizeof(*entry) + size, flags, pc);
2852 if (!event)
2853 goto out;
2854 entry = ring_buffer_event_data(event);
2855
2856 memcpy(&entry->caller, fstack->calls, size);
2857 entry->size = nr_entries;
2858
2859 if (!call_filter_check_discard(call, entry, buffer, event))
2860 __buffer_unlock_commit(buffer, event);
2861
2862 out:
2863 /* Again, don't let gcc optimize things here */
2864 barrier();
2865 __this_cpu_dec(ftrace_stack_reserve);
2866 preempt_enable_notrace();
2867
2869
2870static inline void ftrace_trace_stack(struct trace_array *tr,
2871 struct ring_buffer *buffer,
2872 unsigned long flags,
2873 int skip, int pc, struct pt_regs *regs)
2874{
2875 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2876 return;
2877
2878 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2879}
2880
2881void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2882 int pc)
2883{
2884 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2885
2886 if (rcu_is_watching()) {
2887 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2888 return;
2889 }
2890
2891 /*
2892 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2893 * but if the above rcu_is_watching() failed, then the NMI
2894 * triggered someplace critical, and rcu_irq_enter() should
2895 * not be called from NMI.
2896 */
2897 if (unlikely(in_nmi()))
2898 return;
2899
2900 rcu_irq_enter_irqson();
2901 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2902 rcu_irq_exit_irqson();
2903}
2904
2905/**
2906 * trace_dump_stack - record a stack back trace in the trace buffer
2907 * @skip: Number of functions to skip (helper handlers)
2908 */
2909void trace_dump_stack(int skip)
2910{
2911 unsigned long flags;
2912
2913 if (tracing_disabled || tracing_selftest_running)
2914 return;
2915
2916 local_save_flags(flags);
2917
2918#ifndef CONFIG_UNWINDER_ORC
2919 /* Skip 1 to skip this function. */
2920 skip++;
2921#endif
2922 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2923 flags, skip, preempt_count(), NULL);
2924}
2925EXPORT_SYMBOL_GPL(trace_dump_stack);
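
/*
 * Usage sketch: a developer chasing how some path is reached can drop
 *
 *	trace_dump_stack(0);
 *
 * into the code in question; the kernel stack back trace is then
 * recorded into the trace buffer rather than printed to the console.
 */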
2926
2927#ifdef CONFIG_USER_STACKTRACE_SUPPORT
2928static DEFINE_PER_CPU(int, user_stack_count);
2929
2930static void
2931ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2932{
2933 struct trace_event_call *call = &event_user_stack;
2934 struct ring_buffer_event *event;
2935 struct userstack_entry *entry;
2936
2937 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2938 return;
2939
	/*
	 * NMIs can not handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
	 */
2944 if (unlikely(in_nmi()))
2945 return;
2946
2947 /*
2948 * prevent recursion, since the user stack tracing may
2949 * trigger other kernel events.
2950 */
2951 preempt_disable();
2952 if (__this_cpu_read(user_stack_count))
2953 goto out;
2954
2955 __this_cpu_inc(user_stack_count);
2956
2957 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2958 sizeof(*entry), flags, pc);
2959 if (!event)
2960 goto out_drop_count;
2961 entry = ring_buffer_event_data(event);
2962
2963 entry->tgid = current->tgid;
2964 memset(&entry->caller, 0, sizeof(entry->caller));
2965
2966 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
2967 if (!call_filter_check_discard(call, entry, buffer, event))
2968 __buffer_unlock_commit(buffer, event);
2969
2970 out_drop_count:
2971 __this_cpu_dec(user_stack_count);
2972 out:
2973 preempt_enable();
2974}
2975#else /* CONFIG_USER_STACKTRACE_SUPPORT */
2976static void ftrace_trace_userstack(struct ring_buffer *buffer,
2977 unsigned long flags, int pc)
2978{
2979}
2980#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
2981
2982#endif /* CONFIG_STACKTRACE */
2983
2984/* created for use with alloc_percpu */
2985struct trace_buffer_struct {
2986 int nesting;
2987 char buffer[4][TRACE_BUF_SIZE];
2988};
2989
2990static struct trace_buffer_struct *trace_percpu_buffer;
2991
/*
 * This allows for lockless recording. If we're nested too deeply, then
 * this returns NULL.
 */
2996static char *get_trace_buf(void)
2997{
2998 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2999
3000 if (!buffer || buffer->nesting >= 4)
3001 return NULL;
3002
3003 buffer->nesting++;
3004
3005 /* Interrupts must see nesting incremented before we use the buffer */
3006 barrier();
3007 return &buffer->buffer[buffer->nesting][0];
3008}
3009
3010static void put_trace_buf(void)
3011{
3012 /* Don't let the decrement of nesting leak before this */
3013 barrier();
3014 this_cpu_dec(trace_percpu_buffer->nesting);
3015}
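
/*
 * Sketch of the expected get/put pairing (as used by the vprintk
 * helpers below); the nesting count is only dropped when a buffer was
 * actually handed out:
 *
 *	tbuffer = get_trace_buf();
 *	if (!tbuffer)
 *		goto out_nobuffer;
 *	...	(format into tbuffer, up to TRACE_BUF_SIZE bytes)
 *	put_trace_buf();
 */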
3016
3017static int alloc_percpu_trace_buffer(void)
3018{
3019 struct trace_buffer_struct *buffers;
3020
3021 buffers = alloc_percpu(struct trace_buffer_struct);
3022 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
3023 return -ENOMEM;
3024
3025 trace_percpu_buffer = buffers;
3026 return 0;
3027}
3028
3029static int buffers_allocated;
3030
3031void trace_printk_init_buffers(void)
3032{
3033 if (buffers_allocated)
3034 return;
3035
3036 if (alloc_percpu_trace_buffer())
3037 return;
3038
3039 /* trace_printk() is for debug use only. Don't use it in production. */
3040
3041 pr_warn("\n");
3042 pr_warn("**********************************************************\n");
3043 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3044 pr_warn("** **\n");
3045 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3046 pr_warn("** **\n");
3047 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3048 pr_warn("** unsafe for production use. **\n");
3049 pr_warn("** **\n");
3050 pr_warn("** If you see this message and you are not debugging **\n");
3051 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3052 pr_warn("** **\n");
3053 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3054 pr_warn("**********************************************************\n");
3055
3056 /* Expand the buffers to set size */
3057 tracing_update_buffers();
3058
3059 buffers_allocated = 1;
3060
	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.trace_buffer.buffer is
	 * already allocated here, then this was called by module code.
	 */
3067 if (global_trace.trace_buffer.buffer)
3068 tracing_start_cmdline_record();
3069}
3070EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3071
3072void trace_printk_start_comm(void)
3073{
3074 /* Start tracing comms if trace printk is set */
3075 if (!buffers_allocated)
3076 return;
3077 tracing_start_cmdline_record();
3078}
3079
3080static void trace_printk_start_stop_comm(int enabled)
3081{
3082 if (!buffers_allocated)
3083 return;
3084
3085 if (enabled)
3086 tracing_start_cmdline_record();
3087 else
3088 tracing_stop_cmdline_record();
3089}
3090
3091/**
3092 * trace_vbprintk - write binary msg to tracing buffer
3093 * @ip: The address of the caller
3094 * @fmt: The string format to write to the buffer
3095 * @args: Arguments for @fmt
3096 */
3097int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3098{
3099 struct trace_event_call *call = &event_bprint;
3100 struct ring_buffer_event *event;
3101 struct ring_buffer *buffer;
3102 struct trace_array *tr = &global_trace;
3103 struct bprint_entry *entry;
3104 unsigned long flags;
3105 char *tbuffer;
3106 int len = 0, size, pc;
3107
3108 if (unlikely(tracing_selftest_running || tracing_disabled))
3109 return 0;
3110
3111 /* Don't pollute graph traces with trace_vprintk internals */
3112 pause_graph_tracing();
3113
3114 pc = preempt_count();
3115 preempt_disable_notrace();
3116
3117 tbuffer = get_trace_buf();
3118 if (!tbuffer) {
3119 len = 0;
3120 goto out_nobuffer;
3121 }
3122
3123 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3124
3125 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3126 goto out;
3127
3128 local_save_flags(flags);
3129 size = sizeof(*entry) + sizeof(u32) * len;
3130 buffer = tr->trace_buffer.buffer;
3131 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3132 flags, pc);
3133 if (!event)
3134 goto out;
3135 entry = ring_buffer_event_data(event);
3136 entry->ip = ip;
3137 entry->fmt = fmt;
3138
3139 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3140 if (!call_filter_check_discard(call, entry, buffer, event)) {
3141 __buffer_unlock_commit(buffer, event);
3142 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3143 }
3144
3145out:
3146 put_trace_buf();
3147
3148out_nobuffer:
3149 preempt_enable_notrace();
3150 unpause_graph_tracing();
3151
3152 return len;
3153}
3154EXPORT_SYMBOL_GPL(trace_vbprintk);
3155
3156__printf(3, 0)
3157static int
3158__trace_array_vprintk(struct ring_buffer *buffer,
3159 unsigned long ip, const char *fmt, va_list args)
3160{
3161 struct trace_event_call *call = &event_print;
3162 struct ring_buffer_event *event;
3163 int len = 0, size, pc;
3164 struct print_entry *entry;
3165 unsigned long flags;
3166 char *tbuffer;
3167
3168 if (tracing_disabled || tracing_selftest_running)
3169 return 0;
3170
3171 /* Don't pollute graph traces with trace_vprintk internals */
3172 pause_graph_tracing();
3173
3174 pc = preempt_count();
3175 preempt_disable_notrace();
3176
3178 tbuffer = get_trace_buf();
3179 if (!tbuffer) {
3180 len = 0;
3181 goto out_nobuffer;
3182 }
3183
3184 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3185
3186 local_save_flags(flags);
3187 size = sizeof(*entry) + len + 1;
3188 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3189 flags, pc);
3190 if (!event)
3191 goto out;
3192 entry = ring_buffer_event_data(event);
3193 entry->ip = ip;
3194
3195 memcpy(&entry->buf, tbuffer, len + 1);
3196 if (!call_filter_check_discard(call, entry, buffer, event)) {
3197 __buffer_unlock_commit(buffer, event);
3198 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3199 }
3200
3201out:
3202 put_trace_buf();
3203
3204out_nobuffer:
3205 preempt_enable_notrace();
3206 unpause_graph_tracing();
3207
3208 return len;
3209}
3210
3211__printf(3, 0)
3212int trace_array_vprintk(struct trace_array *tr,
3213 unsigned long ip, const char *fmt, va_list args)
3214{
3215 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3216}
3217
3218__printf(3, 0)
3219int trace_array_printk(struct trace_array *tr,
3220 unsigned long ip, const char *fmt, ...)
3221{
3222 int ret;
3223 va_list ap;
3224
3225 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3226 return 0;
3227
3228 va_start(ap, fmt);
3229 ret = trace_array_vprintk(tr, ip, fmt, ap);
3230 va_end(ap);
3231 return ret;
3232}
3233EXPORT_SYMBOL_GPL(trace_array_printk);
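
/*
 * Usage sketch (assuming the caller already has the trace_array of the
 * instance it wants to write to):
 *
 *	trace_array_printk(tr, _THIS_IP_, "value=%d\n", value);
 *
 * where _THIS_IP_ records the caller's address as the print site.
 */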
3234
3235__printf(3, 4)
3236int trace_array_printk_buf(struct ring_buffer *buffer,
3237 unsigned long ip, const char *fmt, ...)
3238{
3239 int ret;
3240 va_list ap;
3241
3242 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3243 return 0;
3244
3245 va_start(ap, fmt);
3246 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3247 va_end(ap);
3248 return ret;
3249}
3250
3251__printf(2, 0)
3252int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3253{
3254 return trace_array_vprintk(&global_trace, ip, fmt, args);
3255}
3256EXPORT_SYMBOL_GPL(trace_vprintk);
3257
3258static void trace_iterator_increment(struct trace_iterator *iter)
3259{
3260 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3261
3262 iter->idx++;
3263 if (buf_iter)
3264 ring_buffer_read(buf_iter, NULL);
3265}
3266
3267static struct trace_entry *
3268peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3269 unsigned long *lost_events)
3270{
3271 struct ring_buffer_event *event;
3272 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3273
3274 if (buf_iter)
3275 event = ring_buffer_iter_peek(buf_iter, ts);
3276 else
3277 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3278 lost_events);
3279
3280 if (event) {
3281 iter->ent_size = ring_buffer_event_length(event);
3282 return ring_buffer_event_data(event);
3283 }
3284 iter->ent_size = 0;
3285 return NULL;
3286}
3287
3288static struct trace_entry *
3289__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3290 unsigned long *missing_events, u64 *ent_ts)
3291{
3292 struct ring_buffer *buffer = iter->trace_buffer->buffer;
3293 struct trace_entry *ent, *next = NULL;
3294 unsigned long lost_events = 0, next_lost = 0;
3295 int cpu_file = iter->cpu_file;
3296 u64 next_ts = 0, ts;
3297 int next_cpu = -1;
3298 int next_size = 0;
3299 int cpu;
3300
	/*
	 * If we are in a per_cpu trace file, don't bother iterating over
	 * all CPUs; peek at that one CPU directly.
	 */
3305 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3306 if (ring_buffer_empty_cpu(buffer, cpu_file))
3307 return NULL;
3308 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3309 if (ent_cpu)
3310 *ent_cpu = cpu_file;
3311
3312 return ent;
3313 }
3314
3315 for_each_tracing_cpu(cpu) {
3316
3317 if (ring_buffer_empty_cpu(buffer, cpu))
3318 continue;
3319
3320 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3321
3322 /*
3323 * Pick the entry with the smallest timestamp:
3324 */
3325 if (ent && (!next || ts < next_ts)) {
3326 next = ent;
3327 next_cpu = cpu;
3328 next_ts = ts;
3329 next_lost = lost_events;
3330 next_size = iter->ent_size;
3331 }
3332 }
3333
3334 iter->ent_size = next_size;
3335
3336 if (ent_cpu)
3337 *ent_cpu = next_cpu;
3338
3339 if (ent_ts)
3340 *ent_ts = next_ts;
3341
3342 if (missing_events)
3343 *missing_events = next_lost;
3344
3345 return next;
3346}
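
/*
 * Worked example of the merge above: with cpu_file == RING_BUFFER_ALL_CPUS,
 * if CPU0's next event has a timestamp of 2000 and CPU1's of 1500, the
 * CPU1 entry is returned (with *ent_cpu == 1), keeping the merged
 * stream in timestamp order.
 */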
3347
3348/* Find the next real entry, without updating the iterator itself */
3349struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3350 int *ent_cpu, u64 *ent_ts)
3351{
3352 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3353}
3354
3355/* Find the next real entry, and increment the iterator to the next entry */
3356void *trace_find_next_entry_inc(struct trace_iterator *iter)
3357{
3358 iter->ent = __find_next_entry(iter, &iter->cpu,
3359 &iter->lost_events, &iter->ts);
3360
3361 if (iter->ent)
3362 trace_iterator_increment(iter);
3363
3364 return iter->ent ? iter : NULL;
3365}
3366
3367static void trace_consume(struct trace_iterator *iter)
3368{
3369 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3370 &iter->lost_events);
3371}
3372
3373static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3374{
3375 struct trace_iterator *iter = m->private;
3376 int i = (int)*pos;
3377 void *ent;
3378
3379 WARN_ON_ONCE(iter->leftover);
3380
3381 (*pos)++;
3382
3383 /* can't go backwards */
3384 if (iter->idx > i)
3385 return NULL;
3386
3387 if (iter->idx < 0)
3388 ent = trace_find_next_entry_inc(iter);
3389 else
3390 ent = iter;
3391
3392 while (ent && iter->idx < i)
3393 ent = trace_find_next_entry_inc(iter);
3394
3395 iter->pos = *pos;
3396
3397 return ent;
3398}
3399
3400void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3401{
3402 struct ring_buffer_event *event;
3403 struct ring_buffer_iter *buf_iter;
3404 unsigned long entries = 0;
3405 u64 ts;
3406
3407 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3408
3409 buf_iter = trace_buffer_iter(iter, cpu);
3410 if (!buf_iter)
3411 return;
3412
3413 ring_buffer_iter_reset(buf_iter);
3414
	/*
	 * With the max latency tracers, a reset may never have taken
	 * place on a cpu. This is evident from the timestamp being
	 * before the start of the buffer.
	 */
3420 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3421 if (ts >= iter->trace_buffer->time_start)
3422 break;
3423 entries++;
3424 ring_buffer_read(buf_iter, NULL);
3425 }
3426
3427 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3428}
3429
/*
 * The current tracer is copied to avoid taking a global lock
 * all around.
 */
3434static void *s_start(struct seq_file *m, loff_t *pos)
3435{
3436 struct trace_iterator *iter = m->private;
3437 struct trace_array *tr = iter->tr;
3438 int cpu_file = iter->cpu_file;
3439 void *p = NULL;
3440 loff_t l = 0;
3441 int cpu;
3442
	/*
	 * Copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace; the name pointer may
	 * be compared instead of using a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
3449 mutex_lock(&trace_types_lock);
3450 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3451 *iter->trace = *tr->current_trace;
3452 mutex_unlock(&trace_types_lock);
3453
3454#ifdef CONFIG_TRACER_MAX_TRACE
3455 if (iter->snapshot && iter->trace->use_max_tr)
3456 return ERR_PTR(-EBUSY);
3457#endif
3458
3459 if (!iter->snapshot)
3460 atomic_inc(&trace_record_taskinfo_disabled);
3461
3462 if (*pos != iter->pos) {
3463 iter->ent = NULL;
3464 iter->cpu = 0;
3465 iter->idx = -1;
3466
3467 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3468 for_each_tracing_cpu(cpu)
3469 tracing_iter_reset(iter, cpu);
3470 } else
3471 tracing_iter_reset(iter, cpu_file);
3472
3473 iter->leftover = 0;
3474 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3475 ;
3476
3477 } else {
3478 /*
3479 * If we overflowed the seq_file before, then we want
3480 * to just reuse the trace_seq buffer again.
3481 */
3482 if (iter->leftover)
3483 p = iter;
3484 else {
3485 l = *pos - 1;
3486 p = s_next(m, p, &l);
3487 }
3488 }
3489
3490 trace_event_read_lock();
3491 trace_access_lock(cpu_file);
3492 return p;
3493}
3494
3495static void s_stop(struct seq_file *m, void *p)
3496{
3497 struct trace_iterator *iter = m->private;
3498
3499#ifdef CONFIG_TRACER_MAX_TRACE
3500 if (iter->snapshot && iter->trace->use_max_tr)
3501 return;
3502#endif
3503
3504 if (!iter->snapshot)
3505 atomic_dec(&trace_record_taskinfo_disabled);
3506
3507 trace_access_unlock(iter->cpu_file);
3508 trace_event_read_unlock();
3509}
3510
3511static void
3512get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
3513 unsigned long *entries, int cpu)
3514{
3515 unsigned long count;
3516
3517 count = ring_buffer_entries_cpu(buf->buffer, cpu);
	/*
	 * If this buffer has skipped entries, then the buffer still
	 * holds all entries for the trace and we just need to ignore
	 * the ones before the time stamp.
	 */
3523 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3524 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3525 /* total is the same as the entries */
3526 *total = count;
3527 } else
3528 *total = count +
3529 ring_buffer_overrun_cpu(buf->buffer, cpu);
3530 *entries = count;
3531}
3532
3533static void
3534get_total_entries(struct trace_buffer *buf,
3535 unsigned long *total, unsigned long *entries)
3536{
3537 unsigned long t, e;
3538 int cpu;
3539
3540 *total = 0;
3541 *entries = 0;
3542
3543 for_each_tracing_cpu(cpu) {
3544 get_total_entries_cpu(buf, &t, &e, cpu);
3545 *total += t;
3546 *entries += e;
3547 }
3548}
3549
3550unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3551{
3552 unsigned long total, entries;
3553
3554 if (!tr)
3555 tr = &global_trace;
3556
3557 get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
3558
3559 return entries;
3560}
3561
3562unsigned long trace_total_entries(struct trace_array *tr)
3563{
3564 unsigned long total, entries;
3565
3566 if (!tr)
3567 tr = &global_trace;
3568
3569 get_total_entries(&tr->trace_buffer, &total, &entries);
3570
3571 return entries;
3572}
3573
3574static void print_lat_help_header(struct seq_file *m)
3575{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /         \n");
3584}
3585
3586static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3587{
3588 unsigned long total;
3589 unsigned long entries;
3590
3591 get_total_entries(buf, &total, &entries);
3592 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3593 entries, total, num_online_cpus());
3594 seq_puts(m, "#\n");
3595}
3596
3597static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3598 unsigned int flags)
3599{
3600 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3601
3602 print_event_info(buf, m);
3603
3604 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3605 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3606}
3607
3608static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3609 unsigned int flags)
3610{
3611 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3612 const char *space = " ";
3613 int prec = tgid ? 10 : 2;
3614
3615 print_event_info(buf, m);
3616
3617 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3618 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3619 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3620 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3621 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3622 seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3623 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
3624}
3625
3626void
3627print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3628{
3629 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3630 struct trace_buffer *buf = iter->trace_buffer;
3631 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3632 struct tracer *type = iter->trace;
3633 unsigned long entries;
3634 unsigned long total;
	const char *name = type->name;
3638
3639 get_total_entries(buf, &total, &entries);
3640
3641 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3642 name, UTS_RELEASE);
3643 seq_puts(m, "# -----------------------------------"
3644 "---------------------------------\n");
3645 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3646 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3647 nsecs_to_usecs(data->saved_latency),
3648 entries,
3649 total,
3650 buf->cpu,
3651#if defined(CONFIG_PREEMPT_NONE)
3652 "server",
3653#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3654 "desktop",
3655#elif defined(CONFIG_PREEMPT)
3656 "preempt",
3657#else
3658 "unknown",
3659#endif
3660 /* These are reserved for later use */
3661 0, 0, 0, 0);
3662#ifdef CONFIG_SMP
3663 seq_printf(m, " #P:%d)\n", num_online_cpus());
3664#else
3665 seq_puts(m, ")\n");
3666#endif
3667 seq_puts(m, "# -----------------\n");
3668 seq_printf(m, "# | task: %.16s-%d "
3669 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3670 data->comm, data->pid,
3671 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3672 data->policy, data->rt_priority);
3673 seq_puts(m, "# -----------------\n");
3674
3675 if (data->critical_start) {
3676 seq_puts(m, "# => started at: ");
3677 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3678 trace_print_seq(m, &iter->seq);
3679 seq_puts(m, "\n# => ended at: ");
3680 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3681 trace_print_seq(m, &iter->seq);
3682 seq_puts(m, "\n#\n");
3683 }
3684
3685 seq_puts(m, "#\n");
3686}
3687
3688static void test_cpu_buff_start(struct trace_iterator *iter)
3689{
3690 struct trace_seq *s = &iter->seq;
3691 struct trace_array *tr = iter->tr;
3692
3693 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3694 return;
3695
3696 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3697 return;
3698
3699 if (cpumask_available(iter->started) &&
3700 cpumask_test_cpu(iter->cpu, iter->started))
3701 return;
3702
3703 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3704 return;
3705
3706 if (cpumask_available(iter->started))
3707 cpumask_set_cpu(iter->cpu, iter->started);
3708
3709 /* Don't print started cpu buffer for the first entry of the trace */
3710 if (iter->idx > 1)
3711 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3712 iter->cpu);
3713}
3714
3715static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3716{
3717 struct trace_array *tr = iter->tr;
3718 struct trace_seq *s = &iter->seq;
3719 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3720 struct trace_entry *entry;
3721 struct trace_event *event;
3722
3723 entry = iter->ent;
3724
3725 test_cpu_buff_start(iter);
3726
3727 event = ftrace_find_event(entry->type);
3728
3729 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3730 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3731 trace_print_lat_context(iter);
3732 else
3733 trace_print_context(iter);
3734 }
3735
3736 if (trace_seq_has_overflowed(s))
3737 return TRACE_TYPE_PARTIAL_LINE;
3738
3739 if (event)
3740 return event->funcs->trace(iter, sym_flags, event);
3741
3742 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3743
3744 return trace_handle_return(s);
3745}
3746
3747static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3748{
3749 struct trace_array *tr = iter->tr;
3750 struct trace_seq *s = &iter->seq;
3751 struct trace_entry *entry;
3752 struct trace_event *event;
3753
3754 entry = iter->ent;
3755
3756 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3757 trace_seq_printf(s, "%d %d %llu ",
3758 entry->pid, iter->cpu, iter->ts);
3759
3760 if (trace_seq_has_overflowed(s))
3761 return TRACE_TYPE_PARTIAL_LINE;
3762
3763 event = ftrace_find_event(entry->type);
3764 if (event)
3765 return event->funcs->raw(iter, 0, event);
3766
3767 trace_seq_printf(s, "%d ?\n", entry->type);
3768
3769 return trace_handle_return(s);
3770}
3771
3772static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3773{
3774 struct trace_array *tr = iter->tr;
3775 struct trace_seq *s = &iter->seq;
3776 unsigned char newline = '\n';
3777 struct trace_entry *entry;
3778 struct trace_event *event;
3779
3780 entry = iter->ent;
3781
3782 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3783 SEQ_PUT_HEX_FIELD(s, entry->pid);
3784 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3785 SEQ_PUT_HEX_FIELD(s, iter->ts);
3786 if (trace_seq_has_overflowed(s))
3787 return TRACE_TYPE_PARTIAL_LINE;
3788 }
3789
3790 event = ftrace_find_event(entry->type);
3791 if (event) {
3792 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3793 if (ret != TRACE_TYPE_HANDLED)
3794 return ret;
3795 }
3796
3797 SEQ_PUT_FIELD(s, newline);
3798
3799 return trace_handle_return(s);
3800}
3801
3802static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3803{
3804 struct trace_array *tr = iter->tr;
3805 struct trace_seq *s = &iter->seq;
3806 struct trace_entry *entry;
3807 struct trace_event *event;
3808
3809 entry = iter->ent;
3810
3811 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3812 SEQ_PUT_FIELD(s, entry->pid);
3813 SEQ_PUT_FIELD(s, iter->cpu);
3814 SEQ_PUT_FIELD(s, iter->ts);
3815 if (trace_seq_has_overflowed(s))
3816 return TRACE_TYPE_PARTIAL_LINE;
3817 }
3818
3819 event = ftrace_find_event(entry->type);
3820 return event ? event->funcs->binary(iter, 0, event) :
3821 TRACE_TYPE_HANDLED;
3822}
3823
3824int trace_empty(struct trace_iterator *iter)
3825{
3826 struct ring_buffer_iter *buf_iter;
3827 int cpu;
3828
3829 /* If we are looking at one CPU buffer, only check that one */
3830 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3831 cpu = iter->cpu_file;
3832 buf_iter = trace_buffer_iter(iter, cpu);
3833 if (buf_iter) {
3834 if (!ring_buffer_iter_empty(buf_iter))
3835 return 0;
3836 } else {
3837 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3838 return 0;
3839 }
3840 return 1;
3841 }
3842
3843 for_each_tracing_cpu(cpu) {
3844 buf_iter = trace_buffer_iter(iter, cpu);
3845 if (buf_iter) {
3846 if (!ring_buffer_iter_empty(buf_iter))
3847 return 0;
3848 } else {
3849 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3850 return 0;
3851 }
3852 }
3853
3854 return 1;
3855}
3856
3857/* Called with trace_event_read_lock() held. */
3858enum print_line_t print_trace_line(struct trace_iterator *iter)
3859{
3860 struct trace_array *tr = iter->tr;
3861 unsigned long trace_flags = tr->trace_flags;
3862 enum print_line_t ret;
3863
3864 if (iter->lost_events) {
3865 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3866 iter->cpu, iter->lost_events);
3867 if (trace_seq_has_overflowed(&iter->seq))
3868 return TRACE_TYPE_PARTIAL_LINE;
3869 }
3870
3871 if (iter->trace && iter->trace->print_line) {
3872 ret = iter->trace->print_line(iter);
3873 if (ret != TRACE_TYPE_UNHANDLED)
3874 return ret;
3875 }
3876
3877 if (iter->ent->type == TRACE_BPUTS &&
3878 trace_flags & TRACE_ITER_PRINTK &&
3879 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3880 return trace_print_bputs_msg_only(iter);
3881
3882 if (iter->ent->type == TRACE_BPRINT &&
3883 trace_flags & TRACE_ITER_PRINTK &&
3884 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3885 return trace_print_bprintk_msg_only(iter);
3886
3887 if (iter->ent->type == TRACE_PRINT &&
3888 trace_flags & TRACE_ITER_PRINTK &&
3889 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3890 return trace_print_printk_msg_only(iter);
3891
3892 if (trace_flags & TRACE_ITER_BIN)
3893 return print_bin_fmt(iter);
3894
3895 if (trace_flags & TRACE_ITER_HEX)
3896 return print_hex_fmt(iter);
3897
3898 if (trace_flags & TRACE_ITER_RAW)
3899 return print_raw_fmt(iter);
3900
3901 return print_trace_fmt(iter);
3902}
3903
3904void trace_latency_header(struct seq_file *m)
3905{
3906 struct trace_iterator *iter = m->private;
3907 struct trace_array *tr = iter->tr;
3908
3909 /* print nothing if the buffers are empty */
3910 if (trace_empty(iter))
3911 return;
3912
3913 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3914 print_trace_header(m, iter);
3915
3916 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3917 print_lat_help_header(m);
3918}
3919
3920void trace_default_header(struct seq_file *m)
3921{
3922 struct trace_iterator *iter = m->private;
3923 struct trace_array *tr = iter->tr;
3924 unsigned long trace_flags = tr->trace_flags;
3925
3926 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3927 return;
3928
3929 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3930 /* print nothing if the buffers are empty */
3931 if (trace_empty(iter))
3932 return;
3933 print_trace_header(m, iter);
3934 if (!(trace_flags & TRACE_ITER_VERBOSE))
3935 print_lat_help_header(m);
3936 } else {
3937 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3938 if (trace_flags & TRACE_ITER_IRQ_INFO)
3939 print_func_help_header_irq(iter->trace_buffer,
3940 m, trace_flags);
3941 else
3942 print_func_help_header(iter->trace_buffer, m,
3943 trace_flags);
3944 }
3945 }
3946}
3947
3948static void test_ftrace_alive(struct seq_file *m)
3949{
3950 if (!ftrace_is_dead())
3951 return;
3952 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3953 "# MAY BE MISSING FUNCTION EVENTS\n");
3954}
3955
3956#ifdef CONFIG_TRACER_MAX_TRACE
3957static void show_snapshot_main_help(struct seq_file *m)
3958{
3959 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3960 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3961 "# Takes a snapshot of the main buffer.\n"
3962 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
3964 "# is not a '0' or '1')\n");
3965}
3966
3967static void show_snapshot_percpu_help(struct seq_file *m)
3968{
3969 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3970#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3971 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3972 "# Takes a snapshot of the main buffer for this cpu.\n");
3973#else
3974 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3975 "# Must use main snapshot file to allocate.\n");
3976#endif
3977 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
3979 "# is not a '0' or '1')\n");
3980}
3981
3982static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3983{
3984 if (iter->tr->allocated_snapshot)
3985 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3986 else
3987 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3988
3989 seq_puts(m, "# Snapshot commands:\n");
3990 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3991 show_snapshot_main_help(m);
3992 else
3993 show_snapshot_percpu_help(m);
3994}
3995#else
3996/* Should never be called */
3997static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3998#endif
3999
4000static int s_show(struct seq_file *m, void *v)
4001{
4002 struct trace_iterator *iter = v;
4003 int ret;
4004
4005 if (iter->ent == NULL) {
4006 if (iter->tr) {
4007 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4008 seq_puts(m, "#\n");
4009 test_ftrace_alive(m);
4010 }
4011 if (iter->snapshot && trace_empty(iter))
4012 print_snapshot_help(m, iter);
4013 else if (iter->trace && iter->trace->print_header)
4014 iter->trace->print_header(m);
4015 else
4016 trace_default_header(m);
4017
4018 } else if (iter->leftover) {
4019 /*
4020 * If we filled the seq_file buffer earlier, we
4021 * want to just show it now.
4022 */
4023 ret = trace_print_seq(m, &iter->seq);
4024
4025 /* ret should this time be zero, but you never know */
4026 iter->leftover = ret;
4027
4028 } else {
4029 print_trace_line(iter);
4030 ret = trace_print_seq(m, &iter->seq);
4031 /*
4032		 * If we overflow the seq_file buffer, then seq_file will
4033		 * ask us for this data again on the next read.
4034		 * The leftover flag lets us pick it up then.
4035		 * ret is 0 if the seq_file write succeeded,
4036		 * -1 otherwise.
4037 */
4038 iter->leftover = ret;
4039 }
4040
4041 return 0;
4042}
4043
4044/*
4045 * Must be called after trace_array_get(); holding trace_types_lock
4046 * ensures that i_cdev has already been initialized.
4047 */
4048static inline int tracing_get_cpu(struct inode *inode)
4049{
4050 if (inode->i_cdev) /* See trace_create_cpu_file() */
4051 return (long)inode->i_cdev - 1;
4052 return RING_BUFFER_ALL_CPUS;
4053}
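
/*
 * A sketch of the encoding relied on above (see trace_create_cpu_file()):
 * the per-cpu files store the CPU number biased by one,
 *
 *	d_inode(dentry)->i_cdev = (void *)(cpu + 1);
 *
 * so a NULL i_cdev -- every non per-cpu file -- decodes to
 * RING_BUFFER_ALL_CPUS.
 */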
4054
4055static const struct seq_operations tracer_seq_ops = {
4056 .start = s_start,
4057 .next = s_next,
4058 .stop = s_stop,
4059 .show = s_show,
4060};
4061
4062static struct trace_iterator *
4063__tracing_open(struct inode *inode, struct file *file, bool snapshot)
4064{
4065 struct trace_array *tr = inode->i_private;
4066 struct trace_iterator *iter;
4067 int cpu;
4068
4069 if (tracing_disabled)
4070 return ERR_PTR(-ENODEV);
4071
4072 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4073 if (!iter)
4074 return ERR_PTR(-ENOMEM);
4075
4076 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4077 GFP_KERNEL);
4078 if (!iter->buffer_iter)
4079 goto release;
4080
4081 /*
4082 * We make a copy of the current tracer to avoid concurrent
4083 * changes on it while we are reading.
4084 */
4085 mutex_lock(&trace_types_lock);
4086 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4087 if (!iter->trace)
4088 goto fail;
4089
4090 *iter->trace = *tr->current_trace;
4091
4092 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4093 goto fail;
4094
4095 iter->tr = tr;
4096
4097#ifdef CONFIG_TRACER_MAX_TRACE
4098 /* Currently only the top directory has a snapshot */
4099 if (tr->current_trace->print_max || snapshot)
4100 iter->trace_buffer = &tr->max_buffer;
4101 else
4102#endif
4103 iter->trace_buffer = &tr->trace_buffer;
4104 iter->snapshot = snapshot;
4105 iter->pos = -1;
4106 iter->cpu_file = tracing_get_cpu(inode);
4107 mutex_init(&iter->mutex);
4108
4109	/* Notify the tracer early, before we stop tracing. */
4110 if (iter->trace && iter->trace->open)
4111 iter->trace->open(iter);
4112
4113 /* Annotate start of buffers if we had overruns */
4114 if (ring_buffer_overruns(iter->trace_buffer->buffer))
4115 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4116
4117 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4118 if (trace_clocks[tr->clock_id].in_ns)
4119 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4120
4121 /* stop the trace while dumping if we are not opening "snapshot" */
4122 if (!iter->snapshot)
4123 tracing_stop_tr(tr);
4124
4125 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4126 for_each_tracing_cpu(cpu) {
4127 iter->buffer_iter[cpu] =
4128 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4129 cpu, GFP_KERNEL);
4130 }
4131 ring_buffer_read_prepare_sync();
4132 for_each_tracing_cpu(cpu) {
4133 ring_buffer_read_start(iter->buffer_iter[cpu]);
4134 tracing_iter_reset(iter, cpu);
4135 }
4136 } else {
4137 cpu = iter->cpu_file;
4138 iter->buffer_iter[cpu] =
4139 ring_buffer_read_prepare(iter->trace_buffer->buffer,
4140 cpu, GFP_KERNEL);
4141 ring_buffer_read_prepare_sync();
4142 ring_buffer_read_start(iter->buffer_iter[cpu]);
4143 tracing_iter_reset(iter, cpu);
4144 }
4145
4146 mutex_unlock(&trace_types_lock);
4147
4148 return iter;
4149
4150 fail:
4151 mutex_unlock(&trace_types_lock);
4152 kfree(iter->trace);
4153 kfree(iter->buffer_iter);
4154release:
4155 seq_release_private(inode, file);
4156 return ERR_PTR(-ENOMEM);
4157}
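
/*
 * Illustrative usage from user space (paths assume tracefs is mounted
 * on /sys/kernel/tracing or /sys/kernel/debug/tracing):
 *
 *	echo function > current_tracer
 *	cat trace
 *
 * Opening "trace" for read stops recording via tracing_stop_tr() above,
 * unless the snapshot file is being opened; tracing_release() restarts
 * recording when the file is closed.
 */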
4158
4159int tracing_open_generic(struct inode *inode, struct file *filp)
4160{
4161 int ret;
4162
4163 ret = tracing_check_open_get_tr(NULL);
4164 if (ret)
4165 return ret;
4166
4167 filp->private_data = inode->i_private;
4168 return 0;
4169}
4170
4171bool tracing_is_disabled(void)
4172{
4173	return tracing_disabled ? true : false;
4174}
4175
4176/*
4177 * Open and update trace_array ref count.
4178 * Must have the current trace_array passed to it.
4179 */
4180int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4181{
4182 struct trace_array *tr = inode->i_private;
4183 int ret;
4184
4185 ret = tracing_check_open_get_tr(tr);
4186 if (ret)
4187 return ret;
4188
4189 filp->private_data = inode->i_private;
4190
4191 return 0;
4192}
4193
4194static int tracing_release(struct inode *inode, struct file *file)
4195{
4196 struct trace_array *tr = inode->i_private;
4197 struct seq_file *m = file->private_data;
4198 struct trace_iterator *iter;
4199 int cpu;
4200
4201 if (!(file->f_mode & FMODE_READ)) {
4202 trace_array_put(tr);
4203 return 0;
4204 }
4205
4206 /* Writes do not use seq_file */
4207 iter = m->private;
4208 mutex_lock(&trace_types_lock);
4209
4210 for_each_tracing_cpu(cpu) {
4211 if (iter->buffer_iter[cpu])
4212 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4213 }
4214
4215 if (iter->trace && iter->trace->close)
4216 iter->trace->close(iter);
4217
4218 if (!iter->snapshot)
4219 /* reenable tracing if it was previously enabled */
4220 tracing_start_tr(tr);
4221
4222 __trace_array_put(tr);
4223
4224 mutex_unlock(&trace_types_lock);
4225
4226 mutex_destroy(&iter->mutex);
4227 free_cpumask_var(iter->started);
4228 kfree(iter->trace);
4229 kfree(iter->buffer_iter);
4230 seq_release_private(inode, file);
4231
4232 return 0;
4233}
4234
4235static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4236{
4237 struct trace_array *tr = inode->i_private;
4238
4239 trace_array_put(tr);
4240 return 0;
4241}
4242
4243static int tracing_single_release_tr(struct inode *inode, struct file *file)
4244{
4245 struct trace_array *tr = inode->i_private;
4246
4247 trace_array_put(tr);
4248
4249 return single_release(inode, file);
4250}
4251
4252static int tracing_open(struct inode *inode, struct file *file)
4253{
4254 struct trace_array *tr = inode->i_private;
4255 struct trace_iterator *iter;
4256 int ret;
4257
4258 ret = tracing_check_open_get_tr(tr);
4259 if (ret)
4260 return ret;
4261
4262 /* If this file was open for write, then erase contents */
4263 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4264 int cpu = tracing_get_cpu(inode);
4265 struct trace_buffer *trace_buf = &tr->trace_buffer;
4266
4267#ifdef CONFIG_TRACER_MAX_TRACE
4268 if (tr->current_trace->print_max)
4269 trace_buf = &tr->max_buffer;
4270#endif
4271
4272 if (cpu == RING_BUFFER_ALL_CPUS)
4273 tracing_reset_online_cpus(trace_buf);
4274 else
4275 tracing_reset_cpu(trace_buf, cpu);
4276 }
4277
4278 if (file->f_mode & FMODE_READ) {
4279 iter = __tracing_open(inode, file, false);
4280 if (IS_ERR(iter))
4281 ret = PTR_ERR(iter);
4282 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4283 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4284 }
4285
4286 if (ret < 0)
4287 trace_array_put(tr);
4288
4289 return ret;
4290}
4291
4292/*
4293 * Some tracers are not suitable for instance buffers.
4294 * A tracer is always available for the global array (toplevel)
4295 * or if it explicitly states that it is.
4296 */
4297static bool
4298trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4299{
4300 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4301}
4302
4303/* Find the next tracer that this trace array may use */
4304static struct tracer *
4305get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4306{
4307 while (t && !trace_ok_for_array(t, tr))
4308 t = t->next;
4309
4310 return t;
4311}
4312
4313static void *
4314t_next(struct seq_file *m, void *v, loff_t *pos)
4315{
4316 struct trace_array *tr = m->private;
4317 struct tracer *t = v;
4318
4319 (*pos)++;
4320
4321 if (t)
4322 t = get_tracer_for_array(tr, t->next);
4323
4324 return t;
4325}
4326
4327static void *t_start(struct seq_file *m, loff_t *pos)
4328{
4329 struct trace_array *tr = m->private;
4330 struct tracer *t;
4331 loff_t l = 0;
4332
4333 mutex_lock(&trace_types_lock);
4334
4335 t = get_tracer_for_array(tr, trace_types);
4336 for (; t && l < *pos; t = t_next(m, t, &l))
4337 ;
4338
4339 return t;
4340}
4341
4342static void t_stop(struct seq_file *m, void *p)
4343{
4344 mutex_unlock(&trace_types_lock);
4345}
4346
4347static int t_show(struct seq_file *m, void *v)
4348{
4349 struct tracer *t = v;
4350
4351 if (!t)
4352 return 0;
4353
4354 seq_puts(m, t->name);
4355 if (t->next)
4356 seq_putc(m, ' ');
4357 else
4358 seq_putc(m, '\n');
4359
4360 return 0;
4361}
4362
4363static const struct seq_operations show_traces_seq_ops = {
4364 .start = t_start,
4365 .next = t_next,
4366 .stop = t_stop,
4367 .show = t_show,
4368};
4369
4370static int show_traces_open(struct inode *inode, struct file *file)
4371{
4372 struct trace_array *tr = inode->i_private;
4373 struct seq_file *m;
4374 int ret;
4375
4376 ret = tracing_check_open_get_tr(tr);
4377 if (ret)
4378 return ret;
4379
4380 ret = seq_open(file, &show_traces_seq_ops);
4381 if (ret) {
4382 trace_array_put(tr);
4383 return ret;
4384 }
4385
4386 m = file->private_data;
4387 m->private = tr;
4388
4389 return 0;
4390}
4391
4392static int show_traces_release(struct inode *inode, struct file *file)
4393{
4394 struct trace_array *tr = inode->i_private;
4395
4396 trace_array_put(tr);
4397 return seq_release(inode, file);
4398}
4399
4400static ssize_t
4401tracing_write_stub(struct file *filp, const char __user *ubuf,
4402 size_t count, loff_t *ppos)
4403{
4404 return count;
4405}
4406
4407loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4408{
4409 int ret;
4410
4411 if (file->f_mode & FMODE_READ)
4412 ret = seq_lseek(file, offset, whence);
4413 else
4414 file->f_pos = ret = 0;
4415
4416 return ret;
4417}
4418
4419static const struct file_operations tracing_fops = {
4420 .open = tracing_open,
4421 .read = seq_read,
4422 .write = tracing_write_stub,
4423 .llseek = tracing_lseek,
4424 .release = tracing_release,
4425};
4426
4427static const struct file_operations show_traces_fops = {
4428 .open = show_traces_open,
4429 .read = seq_read,
4430 .llseek = seq_lseek,
4431 .release = show_traces_release,
4432};
4433
4434static ssize_t
4435tracing_cpumask_read(struct file *filp, char __user *ubuf,
4436 size_t count, loff_t *ppos)
4437{
4438 struct trace_array *tr = file_inode(filp)->i_private;
4439 char *mask_str;
4440 int len;
4441
4442 len = snprintf(NULL, 0, "%*pb\n",
4443 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4444 mask_str = kmalloc(len, GFP_KERNEL);
4445 if (!mask_str)
4446 return -ENOMEM;
4447
4448 len = snprintf(mask_str, len, "%*pb\n",
4449 cpumask_pr_args(tr->tracing_cpumask));
4450 if (len >= count) {
4451 count = -EINVAL;
4452 goto out_err;
4453 }
4454 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4455
4456out_err:
4457 kfree(mask_str);
4458
4459 return count;
4460}
4461
4462static ssize_t
4463tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4464 size_t count, loff_t *ppos)
4465{
4466 struct trace_array *tr = file_inode(filp)->i_private;
4467 cpumask_var_t tracing_cpumask_new;
4468 int err, cpu;
4469
4470 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4471 return -ENOMEM;
4472
4473 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4474 if (err)
4475		goto err_free;
4476
4477 local_irq_disable();
4478 arch_spin_lock(&tr->max_lock);
4479 for_each_tracing_cpu(cpu) {
4480 /*
4481 * Increase/decrease the disabled counter if we are
4482 * about to flip a bit in the cpumask:
4483 */
4484 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4485 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4486 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4487 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4488 }
4489 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4490 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4491 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4492 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4493 }
4494 }
4495 arch_spin_unlock(&tr->max_lock);
4496 local_irq_enable();
4497
4498 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4499 free_cpumask_var(tracing_cpumask_new);
4500
4501 return count;
4502
4503err_free:
4504 free_cpumask_var(tracing_cpumask_new);
4505
4506 return err;
4507}
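
/*
 * The mask is parsed by cpumask_parse_user(), so it is written as a
 * hex mask. Illustrative usage:
 *
 *	echo 3 > tracing_cpumask	# limit tracing to CPUs 0 and 1
 *	cat tracing_cpumask		# read the current mask back
 */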
4508
4509static const struct file_operations tracing_cpumask_fops = {
4510 .open = tracing_open_generic_tr,
4511 .read = tracing_cpumask_read,
4512 .write = tracing_cpumask_write,
4513 .release = tracing_release_generic_tr,
4514 .llseek = generic_file_llseek,
4515};
4516
4517static int tracing_trace_options_show(struct seq_file *m, void *v)
4518{
4519 struct tracer_opt *trace_opts;
4520 struct trace_array *tr = m->private;
4521 u32 tracer_flags;
4522 int i;
4523
4524 mutex_lock(&trace_types_lock);
4525 tracer_flags = tr->current_trace->flags->val;
4526 trace_opts = tr->current_trace->flags->opts;
4527
4528 for (i = 0; trace_options[i]; i++) {
4529 if (tr->trace_flags & (1 << i))
4530 seq_printf(m, "%s\n", trace_options[i]);
4531 else
4532 seq_printf(m, "no%s\n", trace_options[i]);
4533 }
4534
4535 for (i = 0; trace_opts[i].name; i++) {
4536 if (tracer_flags & trace_opts[i].bit)
4537 seq_printf(m, "%s\n", trace_opts[i].name);
4538 else
4539 seq_printf(m, "no%s\n", trace_opts[i].name);
4540 }
4541 mutex_unlock(&trace_types_lock);
4542
4543 return 0;
4544}
4545
4546static int __set_tracer_option(struct trace_array *tr,
4547 struct tracer_flags *tracer_flags,
4548 struct tracer_opt *opts, int neg)
4549{
4550 struct tracer *trace = tracer_flags->trace;
4551 int ret;
4552
4553 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4554 if (ret)
4555 return ret;
4556
4557 if (neg)
4558 tracer_flags->val &= ~opts->bit;
4559 else
4560 tracer_flags->val |= opts->bit;
4561 return 0;
4562}
4563
4564/* Try to assign a tracer specific option */
4565static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4566{
4567 struct tracer *trace = tr->current_trace;
4568 struct tracer_flags *tracer_flags = trace->flags;
4569 struct tracer_opt *opts = NULL;
4570 int i;
4571
4572 for (i = 0; tracer_flags->opts[i].name; i++) {
4573 opts = &tracer_flags->opts[i];
4574
4575 if (strcmp(cmp, opts->name) == 0)
4576 return __set_tracer_option(tr, trace->flags, opts, neg);
4577 }
4578
4579 return -EINVAL;
4580}
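
/*
 * Tracer specific options show up in trace_options next to the global
 * flags. An illustrative example, assuming the function_graph tracer
 * (which provides the funcgraph-irqs option) is available:
 *
 *	echo function_graph > current_tracer
 *	echo nofuncgraph-irqs > trace_options
 */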
4581
4582/* Some tracers require overwrite to stay enabled */
4583int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4584{
4585 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4586 return -1;
4587
4588 return 0;
4589}
4590
4591int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4592{
4593 /* do nothing if flag is already set */
4594 if (!!(tr->trace_flags & mask) == !!enabled)
4595 return 0;
4596
4597 /* Give the tracer a chance to approve the change */
4598	if (tr->current_trace->flag_changed &&
4599	    tr->current_trace->flag_changed(tr, mask, !!enabled))
4600		return -EINVAL;
4601
4602 if (enabled)
4603 tr->trace_flags |= mask;
4604 else
4605 tr->trace_flags &= ~mask;
4606
4607 if (mask == TRACE_ITER_RECORD_CMD)
4608 trace_event_enable_cmd_record(enabled);
4609
4610 if (mask == TRACE_ITER_RECORD_TGID) {
4611 if (!tgid_map)
4612 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4613 sizeof(*tgid_map),
4614 GFP_KERNEL);
4615 if (!tgid_map) {
4616 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4617 return -ENOMEM;
4618 }
4619
4620 trace_event_enable_tgid_record(enabled);
4621 }
4622
4623 if (mask == TRACE_ITER_EVENT_FORK)
4624 trace_event_follow_fork(tr, enabled);
4625
4626 if (mask == TRACE_ITER_FUNC_FORK)
4627 ftrace_pid_follow_fork(tr, enabled);
4628
4629 if (mask == TRACE_ITER_OVERWRITE) {
4630 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4631#ifdef CONFIG_TRACER_MAX_TRACE
4632 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4633#endif
4634 }
4635
4636 if (mask == TRACE_ITER_PRINTK) {
4637 trace_printk_start_stop_comm(enabled);
4638 trace_printk_control(enabled);
4639 }
4640
4641 return 0;
4642}
4643
4644static int trace_set_options(struct trace_array *tr, char *option)
4645{
4646 char *cmp;
4647 int neg = 0;
4648 int ret;
4649 size_t orig_len = strlen(option);
4650 int len;
4651
4652 cmp = strstrip(option);
4653
4654 len = str_has_prefix(cmp, "no");
4655 if (len)
4656 neg = 1;
4657
4658 cmp += len;
4659
4660 mutex_lock(&trace_types_lock);
4661
4662 ret = match_string(trace_options, -1, cmp);
4663	/* If it is not a global flag, try the tracer specific options */
4664 if (ret < 0)
4665 ret = set_tracer_option(tr, cmp, neg);
4666 else
4667 ret = set_tracer_flag(tr, 1 << ret, !neg);
4668
4669 mutex_unlock(&trace_types_lock);
4670
4671 /*
4672 * If the first trailing whitespace is replaced with '\0' by strstrip,
4673 * turn it back into a space.
4674 */
4675 if (orig_len > strlen(option))
4676 option[strlen(option)] = ' ';
4677
4678 return ret;
4679}
4680
4681static void __init apply_trace_boot_options(void)
4682{
4683 char *buf = trace_boot_options_buf;
4684 char *option;
4685
4686 while (true) {
4687 option = strsep(&buf, ",");
4688
4689 if (!option)
4690 break;
4691
4692 if (*option)
4693 trace_set_options(&global_trace, option);
4694
4695 /* Put back the comma to allow this to be called again */
4696 if (buf)
4697 *(buf - 1) = ',';
4698 }
4699}
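
/*
 * trace_boot_options_buf is filled from the "trace_options=" kernel
 * command line parameter. An illustrative boot line:
 *
 *	trace_options=sym-offset,stacktrace
 */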
4700
4701static ssize_t
4702tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4703 size_t cnt, loff_t *ppos)
4704{
4705 struct seq_file *m = filp->private_data;
4706 struct trace_array *tr = m->private;
4707 char buf[64];
4708 int ret;
4709
4710 if (cnt >= sizeof(buf))
4711 return -EINVAL;
4712
4713 if (copy_from_user(buf, ubuf, cnt))
4714 return -EFAULT;
4715
4716 buf[cnt] = 0;
4717
4718 ret = trace_set_options(tr, buf);
4719 if (ret < 0)
4720 return ret;
4721
4722 *ppos += cnt;
4723
4724 return cnt;
4725}
4726
4727static int tracing_trace_options_open(struct inode *inode, struct file *file)
4728{
4729 struct trace_array *tr = inode->i_private;
4730 int ret;
4731
4732 ret = tracing_check_open_get_tr(tr);
4733 if (ret)
4734 return ret;
4735
4736 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4737 if (ret < 0)
4738 trace_array_put(tr);
4739
4740 return ret;
4741}
4742
4743static const struct file_operations tracing_iter_fops = {
4744 .open = tracing_trace_options_open,
4745 .read = seq_read,
4746 .llseek = seq_lseek,
4747 .release = tracing_single_release_tr,
4748 .write = tracing_trace_options_write,
4749};
4750
4751static const char readme_msg[] =
4752 "tracing mini-HOWTO:\n\n"
4753 "# echo 0 > tracing_on : quick way to disable tracing\n"
4754 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4755 " Important files:\n"
4756 " trace\t\t\t- The static contents of the buffer\n"
4757	"\t\t\t To clear the buffer, write into this file: echo > trace\n"
4758 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4759 " current_tracer\t- function and latency tracers\n"
4760 " available_tracers\t- list of configured tracers for current_tracer\n"
4761 " error_log\t- error log for failed commands (that support it)\n"
4762 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4763 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4764	" trace_clock\t\t- change the clock used to order events\n"
4765 " local: Per cpu clock but may not be synced across CPUs\n"
4766 " global: Synced across CPUs but slows tracing down.\n"
4767 " counter: Not a clock, but just an increment\n"
4768 " uptime: Jiffy counter from time of boot\n"
4769 " perf: Same clock that perf events use\n"
4770#ifdef CONFIG_X86_64
4771 " x86-tsc: TSC cycle counter\n"
4772#endif
4773	"\n timestamp_mode\t- view the mode used to timestamp events\n"
4774 " delta: Delta difference against a buffer-wide timestamp\n"
4775 " absolute: Absolute (standalone) timestamp\n"
4776	"\n trace_marker\t\t- Writes into this file are inserted into the kernel buffer\n"
4777	"\n trace_marker_raw\t\t- Writes into this file insert binary data into the kernel buffer\n"
4778 " tracing_cpumask\t- Limit which CPUs to trace\n"
4779 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4780 "\t\t\t Remove sub-buffer with rmdir\n"
4781 " trace_options\t\t- Set format or modify how tracing happens\n"
4782 "\t\t\t Disable an option by prefixing 'no' to the\n"
4783 "\t\t\t option name\n"
4784	" saved_cmdlines_size\t- echo the number of comm-pid pairs to cache in here\n"
4785#ifdef CONFIG_DYNAMIC_FTRACE
4786 "\n available_filter_functions - list of functions that can be filtered on\n"
4787 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4788 "\t\t\t functions\n"
4789 "\t accepts: func_full_name or glob-matching-pattern\n"
4790 "\t modules: Can select a group via module\n"
4791 "\t Format: :mod:<module-name>\n"
4792 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4793 "\t triggers: a command to perform when function is hit\n"
4794 "\t Format: <function>:<trigger>[:count]\n"
4795 "\t trigger: traceon, traceoff\n"
4796 "\t\t enable_event:<system>:<event>\n"
4797 "\t\t disable_event:<system>:<event>\n"
4798#ifdef CONFIG_STACKTRACE
4799 "\t\t stacktrace\n"
4800#endif
4801#ifdef CONFIG_TRACER_SNAPSHOT
4802 "\t\t snapshot\n"
4803#endif
4804 "\t\t dump\n"
4805 "\t\t cpudump\n"
4806 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4807 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4808 "\t The first one will disable tracing every time do_fault is hit\n"
4809 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4810	"\t The first time do_trap is hit and it disables tracing, the\n"
4811	"\t counter will decrement to 2. If tracing is already disabled,\n"
4812	"\t the counter will not decrement. It only decrements when the\n"
4813	"\t trigger actually fired\n"
4814 "\t To remove trigger without count:\n"
4815	"\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
4816 "\t To remove trigger with a count:\n"
4817	"\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
4818 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4819 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4820 "\t modules: Can select a group via module command :mod:\n"
4821 "\t Does not accept triggers\n"
4822#endif /* CONFIG_DYNAMIC_FTRACE */
4823#ifdef CONFIG_FUNCTION_TRACER
4824 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4825 "\t\t (function)\n"
4826#endif
4827#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4828 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4829 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4830 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4831#endif
4832#ifdef CONFIG_TRACER_SNAPSHOT
4833 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4834 "\t\t\t snapshot buffer. Read the contents for more\n"
4835 "\t\t\t information\n"
4836#endif
4837#ifdef CONFIG_STACK_TRACER
4838 " stack_trace\t\t- Shows the max stack trace when active\n"
4839 " stack_max_size\t- Shows current max stack size that was traced\n"
4840 "\t\t\t Write into this file to reset the max size (trigger a\n"
4841 "\t\t\t new trace)\n"
4842#ifdef CONFIG_DYNAMIC_FTRACE
4843 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4844 "\t\t\t traces\n"
4845#endif
4846#endif /* CONFIG_STACK_TRACER */
4847#ifdef CONFIG_DYNAMIC_EVENTS
4848 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
4849 "\t\t\t Write into this file to define/undefine new trace events.\n"
4850#endif
4851#ifdef CONFIG_KPROBE_EVENTS
4852 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
4853 "\t\t\t Write into this file to define/undefine new trace events.\n"
4854#endif
4855#ifdef CONFIG_UPROBE_EVENTS
4856 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
4857 "\t\t\t Write into this file to define/undefine new trace events.\n"
4858#endif
4859#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4860 "\t accepts: event-definitions (one definition per line)\n"
4861 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4862 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4863#ifdef CONFIG_HIST_TRIGGERS
4864 "\t s:[synthetic/]<event> <field> [<field>]\n"
4865#endif
4866 "\t -:[<group>/]<event>\n"
4867#ifdef CONFIG_KPROBE_EVENTS
4868 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4869 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4870#endif
4871#ifdef CONFIG_UPROBE_EVENTS
4872	"\t place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
4873#endif
4874 "\t args: <name>=fetcharg[:type]\n"
4875 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4876#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4877 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
4878#else
4879 "\t $stack<index>, $stack, $retval, $comm,\n"
4880#endif
4881 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
4882 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
4883 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
4884 "\t <type>\\[<array-size>\\]\n"
4885#ifdef CONFIG_HIST_TRIGGERS
4886 "\t field: <stype> <name>;\n"
4887 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
4888 "\t [unsigned] char/int/long\n"
4889#endif
4890#endif
4891 " events/\t\t- Directory containing all trace event subsystems:\n"
4892 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4893 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4894 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4895 "\t\t\t events\n"
4896 " filter\t\t- If set, only events passing filter are traced\n"
4897 " events/<system>/<event>/\t- Directory containing control files for\n"
4898 "\t\t\t <event>:\n"
4899 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4900 " filter\t\t- If set, only events passing filter are traced\n"
4901 " trigger\t\t- If set, a command to perform when event is hit\n"
4902 "\t Format: <trigger>[:count][if <filter>]\n"
4903 "\t trigger: traceon, traceoff\n"
4904 "\t enable_event:<system>:<event>\n"
4905 "\t disable_event:<system>:<event>\n"
4906#ifdef CONFIG_HIST_TRIGGERS
4907 "\t enable_hist:<system>:<event>\n"
4908 "\t disable_hist:<system>:<event>\n"
4909#endif
4910#ifdef CONFIG_STACKTRACE
4911 "\t\t stacktrace\n"
4912#endif
4913#ifdef CONFIG_TRACER_SNAPSHOT
4914 "\t\t snapshot\n"
4915#endif
4916#ifdef CONFIG_HIST_TRIGGERS
4917 "\t\t hist (see below)\n"
4918#endif
4919 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4920 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4921 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4922 "\t events/block/block_unplug/trigger\n"
4923 "\t The first disables tracing every time block_unplug is hit.\n"
4924 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4925 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4926	"\t is hit and the 'nr_rq' event field has a value greater than 1.\n"
4927 "\t Like function triggers, the counter is only decremented if it\n"
4928 "\t enabled or disabled tracing.\n"
4929 "\t To remove a trigger without a count:\n"
4930	"\t echo '!<trigger>' > <system>/<event>/trigger\n"
4931 "\t To remove a trigger with a count:\n"
4932	"\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
4933	"\t Filters may be omitted when removing a trigger.\n"
4934#ifdef CONFIG_HIST_TRIGGERS
4935 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4936 "\t Format: hist:keys=<field1[,field2,...]>\n"
4937 "\t [:values=<field1[,field2,...]>]\n"
4938 "\t [:sort=<field1[,field2,...]>]\n"
4939 "\t [:size=#entries]\n"
4940 "\t [:pause][:continue][:clear]\n"
4941 "\t [:name=histname1]\n"
4942 "\t [:<handler>.<action>]\n"
4943 "\t [if <filter>]\n\n"
4944 "\t When a matching event is hit, an entry is added to a hash\n"
4945 "\t table using the key(s) and value(s) named, and the value of a\n"
4946 "\t sum called 'hitcount' is incremented. Keys and values\n"
4947 "\t correspond to fields in the event's format description. Keys\n"
4948 "\t can be any field, or the special string 'stacktrace'.\n"
4949 "\t Compound keys consisting of up to two fields can be specified\n"
4950 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4951 "\t fields. Sort keys consisting of up to two fields can be\n"
4952 "\t specified using the 'sort' keyword. The sort direction can\n"
4953 "\t be modified by appending '.descending' or '.ascending' to a\n"
4954 "\t sort field. The 'size' parameter can be used to specify more\n"
4955 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4956 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4957 "\t its histogram data will be shared with other triggers of the\n"
4958 "\t same name, and trigger hits will update this common data.\n\n"
4959 "\t Reading the 'hist' file for the event will dump the hash\n"
4960 "\t table in its entirety to stdout. If there are multiple hist\n"
4961 "\t triggers attached to an event, there will be a table for each\n"
4962 "\t trigger in the output. The table displayed for a named\n"
4963 "\t trigger will be the same as any other instance having the\n"
4964 "\t same name. The default format used to display a given field\n"
4965 "\t can be modified by appending any of the following modifiers\n"
4966 "\t to the field name, as applicable:\n\n"
4967 "\t .hex display a number as a hex value\n"
4968 "\t .sym display an address as a symbol\n"
4969 "\t .sym-offset display an address as a symbol and offset\n"
4970 "\t .execname display a common_pid as a program name\n"
4971 "\t .syscall display a syscall id as a syscall name\n"
4972 "\t .log2 display log2 value rather than raw number\n"
4973 "\t .usecs display a common_timestamp in microseconds\n\n"
4974 "\t The 'pause' parameter can be used to pause an existing hist\n"
4975 "\t trigger or to start a hist trigger but not log any events\n"
4976 "\t until told to do so. 'continue' can be used to start or\n"
4977 "\t restart a paused hist trigger.\n\n"
4978 "\t The 'clear' parameter will clear the contents of a running\n"
4979 "\t hist trigger and leave its current paused/active state\n"
4980 "\t unchanged.\n\n"
4981 "\t The enable_hist and disable_hist triggers can be used to\n"
4982 "\t have one event conditionally start and stop another event's\n"
4983 "\t already-attached hist trigger. The syntax is analogous to\n"
4984 "\t the enable_event and disable_event triggers.\n\n"
4985 "\t Hist trigger handlers and actions are executed whenever a\n"
4986	"\t histogram entry is added or updated. They take the form:\n\n"
4987 "\t <handler>.<action>\n\n"
4988 "\t The available handlers are:\n\n"
4989 "\t onmatch(matching.event) - invoke on addition or update\n"
4990 "\t onmax(var) - invoke if var exceeds current max\n"
4991 "\t onchange(var) - invoke action if var changes\n\n"
4992 "\t The available actions are:\n\n"
4993 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
4994 "\t save(field,...) - save current event fields\n"
4995#ifdef CONFIG_TRACER_SNAPSHOT
4996 "\t snapshot() - snapshot the trace buffer\n"
4997#endif
4998#endif
4999;
5000
5001static ssize_t
5002tracing_readme_read(struct file *filp, char __user *ubuf,
5003 size_t cnt, loff_t *ppos)
5004{
5005 return simple_read_from_buffer(ubuf, cnt, ppos,
5006 readme_msg, strlen(readme_msg));
5007}
5008
5009static const struct file_operations tracing_readme_fops = {
5010 .open = tracing_open_generic,
5011 .read = tracing_readme_read,
5012 .llseek = generic_file_llseek,
5013};
5014
5015static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5016{
5017 int *ptr = v;
5018
5019 if (*pos || m->count)
5020 ptr++;
5021
5022 (*pos)++;
5023
5024 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5025 if (trace_find_tgid(*ptr))
5026 return ptr;
5027 }
5028
5029 return NULL;
5030}
5031
5032static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5033{
5034 void *v;
5035 loff_t l = 0;
5036
5037 if (!tgid_map)
5038 return NULL;
5039
5040 v = &tgid_map[0];
5041 while (l <= *pos) {
5042 v = saved_tgids_next(m, v, &l);
5043 if (!v)
5044 return NULL;
5045 }
5046
5047 return v;
5048}
5049
5050static void saved_tgids_stop(struct seq_file *m, void *v)
5051{
5052}
5053
5054static int saved_tgids_show(struct seq_file *m, void *v)
5055{
5056 int pid = (int *)v - tgid_map;
5057
5058 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5059 return 0;
5060}
5061
5062static const struct seq_operations tracing_saved_tgids_seq_ops = {
5063 .start = saved_tgids_start,
5064 .stop = saved_tgids_stop,
5065 .next = saved_tgids_next,
5066 .show = saved_tgids_show,
5067};
5068
5069static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5070{
5071 int ret;
5072
5073 ret = tracing_check_open_get_tr(NULL);
5074 if (ret)
5075 return ret;
5076
5077 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5078}
5079
5080
5081static const struct file_operations tracing_saved_tgids_fops = {
5082 .open = tracing_saved_tgids_open,
5083 .read = seq_read,
5084 .llseek = seq_lseek,
5085 .release = seq_release,
5086};
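
/*
 * saved_tgids only has content once tgid recording is enabled.
 * Illustrative usage:
 *
 *	echo record-tgid > trace_options
 *	cat saved_tgids		# prints "<pid> <tgid>" pairs
 */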
5087
5088static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5089{
5090 unsigned int *ptr = v;
5091
5092 if (*pos || m->count)
5093 ptr++;
5094
5095 (*pos)++;
5096
5097 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5098 ptr++) {
5099 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5100 continue;
5101
5102 return ptr;
5103 }
5104
5105 return NULL;
5106}
5107
5108static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5109{
5110 void *v;
5111 loff_t l = 0;
5112
5113 preempt_disable();
5114 arch_spin_lock(&trace_cmdline_lock);
5115
5116 v = &savedcmd->map_cmdline_to_pid[0];
5117 while (l <= *pos) {
5118 v = saved_cmdlines_next(m, v, &l);
5119 if (!v)
5120 return NULL;
5121 }
5122
5123 return v;
5124}
5125
5126static void saved_cmdlines_stop(struct seq_file *m, void *v)
5127{
5128 arch_spin_unlock(&trace_cmdline_lock);
5129 preempt_enable();
5130}
5131
5132static int saved_cmdlines_show(struct seq_file *m, void *v)
5133{
5134 char buf[TASK_COMM_LEN];
5135 unsigned int *pid = v;
5136
5137 __trace_find_cmdline(*pid, buf);
5138 seq_printf(m, "%d %s\n", *pid, buf);
5139 return 0;
5140}
5141
5142static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5143 .start = saved_cmdlines_start,
5144 .next = saved_cmdlines_next,
5145 .stop = saved_cmdlines_stop,
5146 .show = saved_cmdlines_show,
5147};
5148
5149static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5150{
5151 int ret;
5152
5153 ret = tracing_check_open_get_tr(NULL);
5154 if (ret)
5155 return ret;
5156
5157 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5158}
5159
5160static const struct file_operations tracing_saved_cmdlines_fops = {
5161 .open = tracing_saved_cmdlines_open,
5162 .read = seq_read,
5163 .llseek = seq_lseek,
5164 .release = seq_release,
5165};
5166
5167static ssize_t
5168tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5169 size_t cnt, loff_t *ppos)
5170{
5171 char buf[64];
5172 int r;
5173
5174 arch_spin_lock(&trace_cmdline_lock);
5175 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5176 arch_spin_unlock(&trace_cmdline_lock);
5177
5178 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5179}
5180
5181static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5182{
5183 kfree(s->saved_cmdlines);
5184 kfree(s->map_cmdline_to_pid);
5185 kfree(s);
5186}
5187
5188static int tracing_resize_saved_cmdlines(unsigned int val)
5189{
5190 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5191
5192 s = kmalloc(sizeof(*s), GFP_KERNEL);
5193 if (!s)
5194 return -ENOMEM;
5195
5196 if (allocate_cmdlines_buffer(val, s) < 0) {
5197 kfree(s);
5198 return -ENOMEM;
5199 }
5200
5201 arch_spin_lock(&trace_cmdline_lock);
5202 savedcmd_temp = savedcmd;
5203 savedcmd = s;
5204 arch_spin_unlock(&trace_cmdline_lock);
5205 free_saved_cmdlines_buffer(savedcmd_temp);
5206
5207 return 0;
5208}
5209
5210static ssize_t
5211tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5212 size_t cnt, loff_t *ppos)
5213{
5214 unsigned long val;
5215 int ret;
5216
5217 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5218 if (ret)
5219 return ret;
5220
5221	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
5222 if (!val || val > PID_MAX_DEFAULT)
5223 return -EINVAL;
5224
5225 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5226 if (ret < 0)
5227 return ret;
5228
5229 *ppos += cnt;
5230
5231 return cnt;
5232}
5233
5234static const struct file_operations tracing_saved_cmdlines_size_fops = {
5235 .open = tracing_open_generic,
5236 .read = tracing_saved_cmdlines_size_read,
5237 .write = tracing_saved_cmdlines_size_write,
5238};
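
/*
 * Illustrative usage of the file above:
 *
 *	cat saved_cmdlines_size		# current number of cached entries
 *	echo 1024 > saved_cmdlines_size	# cache up to 1024 comm-pid pairs
 */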
5239
5240#ifdef CONFIG_TRACE_EVAL_MAP_FILE
5241static union trace_eval_map_item *
5242update_eval_map(union trace_eval_map_item *ptr)
5243{
5244 if (!ptr->map.eval_string) {
5245 if (ptr->tail.next) {
5246 ptr = ptr->tail.next;
5247 /* Set ptr to the next real item (skip head) */
5248 ptr++;
5249 } else
5250 return NULL;
5251 }
5252 return ptr;
5253}
5254
5255static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5256{
5257 union trace_eval_map_item *ptr = v;
5258
5259 /*
5260 * Paranoid! If ptr points to end, we don't want to increment past it.
5261 * This really should never happen.
5262 */
5263 ptr = update_eval_map(ptr);
5264 if (WARN_ON_ONCE(!ptr))
5265 return NULL;
5266
5267 ptr++;
5268
5269 (*pos)++;
5270
5271 ptr = update_eval_map(ptr);
5272
5273 return ptr;
5274}
5275
5276static void *eval_map_start(struct seq_file *m, loff_t *pos)
5277{
5278 union trace_eval_map_item *v;
5279 loff_t l = 0;
5280
5281 mutex_lock(&trace_eval_mutex);
5282
5283 v = trace_eval_maps;
5284 if (v)
5285 v++;
5286
5287	while (v && l < *pos)
5288		v = eval_map_next(m, v, &l);
5290
5291 return v;
5292}
5293
5294static void eval_map_stop(struct seq_file *m, void *v)
5295{
5296 mutex_unlock(&trace_eval_mutex);
5297}
5298
5299static int eval_map_show(struct seq_file *m, void *v)
5300{
5301 union trace_eval_map_item *ptr = v;
5302
5303 seq_printf(m, "%s %ld (%s)\n",
5304 ptr->map.eval_string, ptr->map.eval_value,
5305 ptr->map.system);
5306
5307 return 0;
5308}
5309
5310static const struct seq_operations tracing_eval_map_seq_ops = {
5311 .start = eval_map_start,
5312 .next = eval_map_next,
5313 .stop = eval_map_stop,
5314 .show = eval_map_show,
5315};
5316
5317static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5318{
5319 int ret;
5320
5321 ret = tracing_check_open_get_tr(NULL);
5322 if (ret)
5323 return ret;
5324
5325 return seq_open(filp, &tracing_eval_map_seq_ops);
5326}
5327
5328static const struct file_operations tracing_eval_map_fops = {
5329 .open = tracing_eval_map_open,
5330 .read = seq_read,
5331 .llseek = seq_lseek,
5332 .release = seq_release,
5333};
5334
5335static inline union trace_eval_map_item *
5336trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5337{
5338 /* Return tail of array given the head */
5339 return ptr + ptr->head.length + 1;
5340}
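
/*
 * A sketch of the layout built by trace_insert_eval_map_file() below,
 * for a module contributing "len" maps:
 *
 *	[ head | map[0] | map[1] | ... | map[len-1] | tail ]
 *
 * head.mod and head.length describe the block; tail.next chains to the
 * next module's block and is zeroed for the last one.
 */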
5341
5342static void
5343trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5344 int len)
5345{
5346 struct trace_eval_map **stop;
5347 struct trace_eval_map **map;
5348 union trace_eval_map_item *map_array;
5349 union trace_eval_map_item *ptr;
5350
5351 stop = start + len;
5352
5353 /*
5354 * The trace_eval_maps contains the map plus a head and tail item,
5355 * where the head holds the module and length of array, and the
5356 * tail holds a pointer to the next list.
5357 */
5358 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5359 if (!map_array) {
5360 pr_warn("Unable to allocate trace eval mapping\n");
5361 return;
5362 }
5363
5364 mutex_lock(&trace_eval_mutex);
5365
5366	if (!trace_eval_maps) {
5367		trace_eval_maps = map_array;
5368	} else {
5369		ptr = trace_eval_maps;
5370		for (;;) {
5371			ptr = trace_eval_jmp_to_tail(ptr);
5372			if (!ptr->tail.next)
5373				break;
5374			ptr = ptr->tail.next;
5375		}
5376		ptr->tail.next = map_array;
5377	}
5379 map_array->head.mod = mod;
5380 map_array->head.length = len;
5381 map_array++;
5382
5383 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5384 map_array->map = **map;
5385 map_array++;
5386 }
5387 memset(map_array, 0, sizeof(*map_array));
5388
5389 mutex_unlock(&trace_eval_mutex);
5390}
5391
5392static void trace_create_eval_file(struct dentry *d_tracer)
5393{
5394 trace_create_file("eval_map", 0444, d_tracer,
5395 NULL, &tracing_eval_map_fops);
5396}
5397
5398#else /* CONFIG_TRACE_EVAL_MAP_FILE */
5399static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5400static inline void trace_insert_eval_map_file(struct module *mod,
5401 struct trace_eval_map **start, int len) { }
5402#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5403
5404static void trace_insert_eval_map(struct module *mod,
5405 struct trace_eval_map **start, int len)
5406{
5407 struct trace_eval_map **map;
5408
5409 if (len <= 0)
5410 return;
5411
5412 map = start;
5413
5414 trace_event_eval_update(map, len);
5415
5416 trace_insert_eval_map_file(mod, start, len);
5417}
5418
5419static ssize_t
5420tracing_set_trace_read(struct file *filp, char __user *ubuf,
5421 size_t cnt, loff_t *ppos)
5422{
5423 struct trace_array *tr = filp->private_data;
5424 char buf[MAX_TRACER_SIZE+2];
5425 int r;
5426
5427 mutex_lock(&trace_types_lock);
5428 r = sprintf(buf, "%s\n", tr->current_trace->name);
5429 mutex_unlock(&trace_types_lock);
5430
5431 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5432}
5433
5434int tracer_init(struct tracer *t, struct trace_array *tr)
5435{
5436 tracing_reset_online_cpus(&tr->trace_buffer);
5437 return t->init(tr);
5438}
5439
5440static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5441{
5442 int cpu;
5443
5444 for_each_tracing_cpu(cpu)
5445 per_cpu_ptr(buf->data, cpu)->entries = val;
5446}
5447
5448#ifdef CONFIG_TRACER_MAX_TRACE
5449/* resize @trace_buf's per-cpu entries to match @size_buf's entries */
5450static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5451 struct trace_buffer *size_buf, int cpu_id)
5452{
5453 int cpu, ret = 0;
5454
5455 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5456 for_each_tracing_cpu(cpu) {
5457 ret = ring_buffer_resize(trace_buf->buffer,
5458 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5459 if (ret < 0)
5460 break;
5461 per_cpu_ptr(trace_buf->data, cpu)->entries =
5462 per_cpu_ptr(size_buf->data, cpu)->entries;
5463 }
5464 } else {
5465 ret = ring_buffer_resize(trace_buf->buffer,
5466 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5467 if (ret == 0)
5468 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5469 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5470 }
5471
5472 return ret;
5473}
5474#endif /* CONFIG_TRACER_MAX_TRACE */
5475
5476static int __tracing_resize_ring_buffer(struct trace_array *tr,
5477 unsigned long size, int cpu)
5478{
5479 int ret;
5480
5481 /*
5482 * If kernel or user changes the size of the ring buffer
5483 * we use the size that was given, and we can forget about
5484 * expanding it later.
5485 */
5486 ring_buffer_expanded = true;
5487
5488 /* May be called before buffers are initialized */
5489 if (!tr->trace_buffer.buffer)
5490 return 0;
5491
5492 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5493 if (ret < 0)
5494 return ret;
5495
5496#ifdef CONFIG_TRACER_MAX_TRACE
5497 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5498 !tr->current_trace->use_max_tr)
5499 goto out;
5500
5501 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5502 if (ret < 0) {
5503 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5504 &tr->trace_buffer, cpu);
5505 if (r < 0) {
5506 /*
5507			 * AARGH! We are left with a different
5508			 * sized max buffer!
5509			 * The max buffer is our "snapshot" buffer.
5510			 * When a tracer needs a snapshot (one of the
5511			 * latency tracers), it swaps the max buffer
5512			 * with the saved snapshot. We succeeded in
5513			 * updating the size of the main buffer, but failed to
5514 * update the size of the max buffer. But when we tried
5515 * to reset the main buffer to the original size, we
5516 * failed there too. This is very unlikely to
5517 * happen, but if it does, warn and kill all
5518 * tracing.
5519 */
5520 WARN_ON(1);
5521 tracing_disabled = 1;
5522 }
5523 return ret;
5524 }
5525
5526 if (cpu == RING_BUFFER_ALL_CPUS)
5527 set_buffer_entries(&tr->max_buffer, size);
5528 else
5529 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5530
5531 out:
5532#endif /* CONFIG_TRACER_MAX_TRACE */
5533
5534 if (cpu == RING_BUFFER_ALL_CPUS)
5535 set_buffer_entries(&tr->trace_buffer, size);
5536 else
5537 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5538
5539 return ret;
5540}
5541
5542static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5543 unsigned long size, int cpu_id)
5544{
5545 int ret = size;
5546
5547 mutex_lock(&trace_types_lock);
5548
5549 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5550 /* make sure, this cpu is enabled in the mask */
5551		/* make sure this cpu is enabled in the mask */
5552 ret = -EINVAL;
5553 goto out;
5554 }
5555 }
5556
5557 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5558 if (ret < 0)
5559 ret = -ENOMEM;
5560
5561out:
5562 mutex_unlock(&trace_types_lock);
5563
5564 return ret;
5565}
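
/*
 * This is reached through the buffer_size_kb files. Illustrative usage:
 *
 *	echo 4096 > buffer_size_kb		# resize every CPU's buffer
 *	echo 1024 > per_cpu/cpu0/buffer_size_kb	# resize CPU 0 only
 */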
5566
5567
5568/**
5569 * tracing_update_buffers - used by tracing facility to expand ring buffers
5570 *
5571 * To save memory when tracing is never used on a system that has it
5572 * configured in, the ring buffers start out at a minimum size. Once
5573 * a user starts to use the tracing facility, the buffers need to grow
5574 * to their default size.
5575 *
5576 * This function is to be called when a tracer is about to be used.
5577 */
5578int tracing_update_buffers(void)
5579{
5580 int ret = 0;
5581
5582 mutex_lock(&trace_types_lock);
5583 if (!ring_buffer_expanded)
5584 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5585 RING_BUFFER_ALL_CPUS);
5586 mutex_unlock(&trace_types_lock);
5587
5588 return ret;
5589}
5590
5591struct trace_option_dentry;
5592
5593static void
5594create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5595
5596/*
5597 * Used to clear out the tracer before deletion of an instance.
5598 * Must have trace_types_lock held.
5599 */
5600static void tracing_set_nop(struct trace_array *tr)
5601{
5602 if (tr->current_trace == &nop_trace)
5603 return;
5604
5605 tr->current_trace->enabled--;
5606
5607 if (tr->current_trace->reset)
5608 tr->current_trace->reset(tr);
5609
5610 tr->current_trace = &nop_trace;
5611}
5612
5613static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5614{
5615 /* Only enable if the directory has been created already. */
5616 if (!tr->dir)
5617 return;
5618
5619 create_trace_option_files(tr, t);
5620}
5621
5622static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5623{
5624 struct tracer *t;
5625#ifdef CONFIG_TRACER_MAX_TRACE
5626 bool had_max_tr;
5627#endif
5628 int ret = 0;
5629
5630 mutex_lock(&trace_types_lock);
5631
5632 if (!ring_buffer_expanded) {
5633 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5634 RING_BUFFER_ALL_CPUS);
5635 if (ret < 0)
5636 goto out;
5637 ret = 0;
5638 }
5639
5640 for (t = trace_types; t; t = t->next) {
5641 if (strcmp(t->name, buf) == 0)
5642 break;
5643 }
5644 if (!t) {
5645 ret = -EINVAL;
5646 goto out;
5647 }
5648 if (t == tr->current_trace)
5649 goto out;
5650
5651#ifdef CONFIG_TRACER_SNAPSHOT
5652 if (t->use_max_tr) {
5653 arch_spin_lock(&tr->max_lock);
5654 if (tr->cond_snapshot)
5655 ret = -EBUSY;
5656 arch_spin_unlock(&tr->max_lock);
5657 if (ret)
5658 goto out;
5659 }
5660#endif
5661 /* Some tracers won't work on kernel command line */
5662 if (system_state < SYSTEM_RUNNING && t->noboot) {
5663 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5664 t->name);
5665 goto out;
5666 }
5667
5668 /* Some tracers are only allowed for the top level buffer */
5669 if (!trace_ok_for_array(t, tr)) {
5670 ret = -EINVAL;
5671 goto out;
5672 }
5673
5674 /* If trace pipe files are being read, we can't change the tracer */
5675 if (tr->current_trace->ref) {
5676 ret = -EBUSY;
5677 goto out;
5678 }
5679
5680 trace_branch_disable();
5681
5682 tr->current_trace->enabled--;
5683
5684 if (tr->current_trace->reset)
5685 tr->current_trace->reset(tr);
5686
5687 /* Current trace needs to be nop_trace before synchronize_rcu */
5688 tr->current_trace = &nop_trace;
5689
5690#ifdef CONFIG_TRACER_MAX_TRACE
5691 had_max_tr = tr->allocated_snapshot;
5692
5693 if (had_max_tr && !t->use_max_tr) {
5694 /*
5695 * We need to make sure that the update_max_tr sees that
5696 * current_trace changed to nop_trace to keep it from
5697 * swapping the buffers after we resize it.
5698			 * update_max_tr() is called with interrupts disabled,
5699			 * so a synchronize_rcu() is sufficient.
5700 */
5701 synchronize_rcu();
5702 free_snapshot(tr);
5703 }
5704#endif
5705
5706#ifdef CONFIG_TRACER_MAX_TRACE
5707 if (t->use_max_tr && !had_max_tr) {
5708 ret = tracing_alloc_snapshot_instance(tr);
5709 if (ret < 0)
5710 goto out;
5711 }
5712#endif
5713
5714 if (t->init) {
5715 ret = tracer_init(t, tr);
5716 if (ret)
5717 goto out;
5718 }
5719
5720 tr->current_trace = t;
5721 tr->current_trace->enabled++;
5722 trace_branch_enable(tr);
5723 out:
5724 mutex_unlock(&trace_types_lock);
5725
5726 return ret;
5727}
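
/*
 * Illustrative usage of tracer selection from user space:
 *
 *	cat available_tracers		# e.g. "function_graph function nop"
 *	echo function > current_tracer
 *	echo nop > current_tracer	# drop back to no tracer
 */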
5728
5729static ssize_t
5730tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5731 size_t cnt, loff_t *ppos)
5732{
5733 struct trace_array *tr = filp->private_data;
5734 char buf[MAX_TRACER_SIZE+1];
5735 int i;
5736 size_t ret;
5737 int err;
5738
5739 ret = cnt;
5740
5741 if (cnt > MAX_TRACER_SIZE)
5742 cnt = MAX_TRACER_SIZE;
5743
5744 if (copy_from_user(buf, ubuf, cnt))
5745 return -EFAULT;
5746
5747 buf[cnt] = 0;
5748
5749 /* strip ending whitespace. */
5750 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5751 buf[i] = 0;
5752
5753 err = tracing_set_tracer(tr, buf);
5754 if (err)
5755 return err;
5756
5757 *ppos += ret;
5758
5759 return ret;
5760}
5761
5762static ssize_t
5763tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5764 size_t cnt, loff_t *ppos)
5765{
5766 char buf[64];
5767 int r;
5768
5769 r = snprintf(buf, sizeof(buf), "%ld\n",
5770 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5771 if (r > sizeof(buf))
5772 r = sizeof(buf);
5773 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5774}
5775
5776static ssize_t
5777tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5778 size_t cnt, loff_t *ppos)
5779{
5780 unsigned long val;
5781 int ret;
5782
5783 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5784 if (ret)
5785 return ret;
5786
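	/* The user writes microseconds; the value is stored in nanoseconds */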
5787 *ptr = val * 1000;
5788
5789 return cnt;
5790}
5791
5792static ssize_t
5793tracing_thresh_read(struct file *filp, char __user *ubuf,
5794 size_t cnt, loff_t *ppos)
5795{
5796 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5797}
5798
5799static ssize_t
5800tracing_thresh_write(struct file *filp, const char __user *ubuf,
5801 size_t cnt, loff_t *ppos)
5802{
5803 struct trace_array *tr = filp->private_data;
5804 int ret;
5805
5806 mutex_lock(&trace_types_lock);
5807 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5808 if (ret < 0)
5809 goto out;
5810
5811 if (tr->current_trace->update_thresh) {
5812 ret = tr->current_trace->update_thresh(tr);
5813 if (ret < 0)
5814 goto out;
5815 }
5816
5817 ret = cnt;
5818out:
5819 mutex_unlock(&trace_types_lock);
5820
5821 return ret;
5822}
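
/*
 * tracing_thresh is read and written in microseconds and is consumed
 * by the latency tracers. Illustrative usage:
 *
 *	echo 100 > tracing_thresh	# only record latencies over 100 usecs
 */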
5823
5824#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5825
5826static ssize_t
5827tracing_max_lat_read(struct file *filp, char __user *ubuf,
5828 size_t cnt, loff_t *ppos)
5829{
5830 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5831}
5832
5833static ssize_t
5834tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5835 size_t cnt, loff_t *ppos)
5836{
5837 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5838}
5839
5840#endif
5841
5842static int tracing_open_pipe(struct inode *inode, struct file *filp)
5843{
5844 struct trace_array *tr = inode->i_private;
5845 struct trace_iterator *iter;
5846 int ret;
5847
5848 ret = tracing_check_open_get_tr(tr);
5849 if (ret)
5850 return ret;
5851
5852 mutex_lock(&trace_types_lock);
5853
5854 /* create a buffer to store the information to pass to userspace */
5855 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5856 if (!iter) {
5857 ret = -ENOMEM;
5858 __trace_array_put(tr);
5859 goto out;
5860 }
5861
5862 trace_seq_init(&iter->seq);
5863 iter->trace = tr->current_trace;
5864
5865 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5866 ret = -ENOMEM;
5867 goto fail;
5868 }
5869
5870 /* trace pipe does not show start of buffer */
5871 cpumask_setall(iter->started);
5872
5873 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5874 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5875
5876 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5877 if (trace_clocks[tr->clock_id].in_ns)
5878 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5879
5880 iter->tr = tr;
5881 iter->trace_buffer = &tr->trace_buffer;
5882 iter->cpu_file = tracing_get_cpu(inode);
5883 mutex_init(&iter->mutex);
5884 filp->private_data = iter;
5885
5886 if (iter->trace->pipe_open)
5887 iter->trace->pipe_open(iter);
5888
5889 nonseekable_open(inode, filp);
5890
5891 tr->current_trace->ref++;
5892out:
5893 mutex_unlock(&trace_types_lock);
5894 return ret;
5895
5896fail:
5897 kfree(iter);
5898 __trace_array_put(tr);
5899 mutex_unlock(&trace_types_lock);
5900 return ret;
5901}
5902
5903static int tracing_release_pipe(struct inode *inode, struct file *file)
5904{
5905 struct trace_iterator *iter = file->private_data;
5906 struct trace_array *tr = inode->i_private;
5907
5908 mutex_lock(&trace_types_lock);
5909
5910 tr->current_trace->ref--;
5911
5912 if (iter->trace->pipe_close)
5913 iter->trace->pipe_close(iter);
5914
5915 mutex_unlock(&trace_types_lock);
5916
5917 free_cpumask_var(iter->started);
5918 mutex_destroy(&iter->mutex);
5919 kfree(iter);
5920
5921 trace_array_put(tr);
5922
5923 return 0;
5924}
5925
5926static __poll_t
5927trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5928{
5929 struct trace_array *tr = iter->tr;
5930
5931 /* Iterators are static, they should be filled or empty */
5932 if (trace_buffer_iter(iter, iter->cpu_file))
5933 return EPOLLIN | EPOLLRDNORM;
5934
5935 if (tr->trace_flags & TRACE_ITER_BLOCK)
5936 /*
5937 * Always select as readable when in blocking mode
5938 */
5939 return EPOLLIN | EPOLLRDNORM;
5940 else
5941 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5942 filp, poll_table);
5943}
5944
5945static __poll_t
5946tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5947{
5948 struct trace_iterator *iter = filp->private_data;
5949
5950 return trace_poll(iter, filp, poll_table);
5951}
5952
5953/* Must be called with iter->mutex held. */
5954static int tracing_wait_pipe(struct file *filp)
5955{
5956 struct trace_iterator *iter = filp->private_data;
5957 int ret;
5958
5959 while (trace_empty(iter)) {
5960
5961		if (filp->f_flags & O_NONBLOCK)
5962			return -EAGAIN;
5964
5965 /*
5966		 * We block until we have read something and tracing has been
5967		 * disabled. We still block if tracing is disabled but nothing
5968		 * has been read yet. This allows a user to cat this file, and
5969		 * then enable tracing. But after we have read something, we
5970		 * give an EOF when tracing is disabled again.
5971 *
5972 * iter->pos will be 0 if we haven't read anything.
5973 */
5974 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5975 break;
5976
5977 mutex_unlock(&iter->mutex);
5978
5979 ret = wait_on_pipe(iter, 0);
5980
5981 mutex_lock(&iter->mutex);
5982
5983 if (ret)
5984 return ret;
5985 }
5986
5987 return 1;
5988}
5989
5990/*
5991 * Consumer reader.
5992 */
5993static ssize_t
5994tracing_read_pipe(struct file *filp, char __user *ubuf,
5995 size_t cnt, loff_t *ppos)
5996{
5997 struct trace_iterator *iter = filp->private_data;
5998 ssize_t sret;
5999
6000 /*
6001	 * Avoid more than one consumer on a single file descriptor.
6002	 * This is just a matter of trace coherency: the ring buffer itself
6003	 * is protected.
6004 */
6005 mutex_lock(&iter->mutex);
6006
6007 /* return any leftover data */
6008 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6009 if (sret != -EBUSY)
6010 goto out;
6011
6012 trace_seq_init(&iter->seq);
6013
6014 if (iter->trace->read) {
6015 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6016 if (sret)
6017 goto out;
6018 }
6019
6020waitagain:
6021 sret = tracing_wait_pipe(filp);
6022 if (sret <= 0)
6023 goto out;
6024
6025 /* stop when tracing is finished */
6026 if (trace_empty(iter)) {
6027 sret = 0;
6028 goto out;
6029 }
6030
6031 if (cnt >= PAGE_SIZE)
6032 cnt = PAGE_SIZE - 1;
6033
6034 /* reset all but tr, trace, and overruns */
6035 memset(&iter->seq, 0,
6036 sizeof(struct trace_iterator) -
6037 offsetof(struct trace_iterator, seq));
6038 cpumask_clear(iter->started);
6039 trace_seq_init(&iter->seq);
6040 iter->pos = -1;
6041
6042 trace_event_read_lock();
6043 trace_access_lock(iter->cpu_file);
6044 while (trace_find_next_entry_inc(iter) != NULL) {
6045 enum print_line_t ret;
6046 int save_len = iter->seq.seq.len;
6047
6048 ret = print_trace_line(iter);
6049 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6050 /* don't print partial lines */
6051 iter->seq.seq.len = save_len;
6052 break;
6053 }
6054 if (ret != TRACE_TYPE_NO_CONSUME)
6055 trace_consume(iter);
6056
6057 if (trace_seq_used(&iter->seq) >= cnt)
6058 break;
6059
6060 /*
6061		 * The full flag being set means we reached the trace_seq buffer
6062		 * size and should have left via the partial-line condition above.
6063		 * If we get here, one of the trace_seq_* functions was misused.
6064 */
6065 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6066 iter->ent->type);
6067 }
6068 trace_access_unlock(iter->cpu_file);
6069 trace_event_read_unlock();
6070
6071 /* Now copy what we have to the user */
6072 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6073 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6074 trace_seq_init(&iter->seq);
6075
6076 /*
6077 * If there was nothing to send to user, in spite of consuming trace
6078 * entries, go back to wait for more entries.
6079 */
6080 if (sret == -EBUSY)
6081 goto waitagain;
6082
6083out:
6084 mutex_unlock(&iter->mutex);
6085
6086 return sret;
6087}
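/*
 * Note (illustrative, not kernel-doc): unlike the "trace" file, reads
 * here consume events from the ring buffer, so two readers of the same
 * stream would each see only a subset of it. The iter->mutex above is
 * what keeps multiple consumers of a single fd coherent.
 */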
6088
6089static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6090 unsigned int idx)
6091{
6092 __free_page(spd->pages[idx]);
6093}
6094
6095static const struct pipe_buf_operations tracing_pipe_buf_ops = {
6096 .confirm = generic_pipe_buf_confirm,
6097 .release = generic_pipe_buf_release,
6098 .steal = generic_pipe_buf_steal,
6099 .get = generic_pipe_buf_get,
6100};
6101
6102static size_t
6103tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6104{
6105 size_t count;
6106 int save_len;
6107 int ret;
6108
6109 /* Seq buffer is page-sized, exactly what we need. */
6110 for (;;) {
6111 save_len = iter->seq.seq.len;
6112 ret = print_trace_line(iter);
6113
6114 if (trace_seq_has_overflowed(&iter->seq)) {
6115 iter->seq.seq.len = save_len;
6116 break;
6117 }
6118
6119 /*
6120		 * This should not be hit, because a partial line should only
6121		 * be returned if the iter->seq overflowed. But check it
6122 * anyway to be safe.
6123 */
6124 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6125 iter->seq.seq.len = save_len;
6126 break;
6127 }
6128
6129 count = trace_seq_used(&iter->seq) - save_len;
6130 if (rem < count) {
6131 rem = 0;
6132 iter->seq.seq.len = save_len;
6133 break;
6134 }
6135
6136 if (ret != TRACE_TYPE_NO_CONSUME)
6137 trace_consume(iter);
6138 rem -= count;
6139 if (!trace_find_next_entry_inc(iter)) {
6140 rem = 0;
6141 iter->ent = NULL;
6142 break;
6143 }
6144 }
6145
6146 return rem;
6147}
6148
6149static ssize_t tracing_splice_read_pipe(struct file *filp,
6150 loff_t *ppos,
6151 struct pipe_inode_info *pipe,
6152 size_t len,
6153 unsigned int flags)
6154{
6155 struct page *pages_def[PIPE_DEF_BUFFERS];
6156 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6157 struct trace_iterator *iter = filp->private_data;
6158 struct splice_pipe_desc spd = {
6159 .pages = pages_def,
6160 .partial = partial_def,
6161 .nr_pages = 0, /* This gets updated below. */
6162 .nr_pages_max = PIPE_DEF_BUFFERS,
6163 .ops = &tracing_pipe_buf_ops,
6164 .spd_release = tracing_spd_release_pipe,
6165 };
6166 ssize_t ret;
6167 size_t rem;
6168 unsigned int i;
6169
6170 if (splice_grow_spd(pipe, &spd))
6171 return -ENOMEM;
6172
6173 mutex_lock(&iter->mutex);
6174
6175 if (iter->trace->splice_read) {
6176 ret = iter->trace->splice_read(iter, filp,
6177 ppos, pipe, len, flags);
6178 if (ret)
6179 goto out_err;
6180 }
6181
6182 ret = tracing_wait_pipe(filp);
6183 if (ret <= 0)
6184 goto out_err;
6185
6186 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6187 ret = -EFAULT;
6188 goto out_err;
6189 }
6190
6191 trace_event_read_lock();
6192 trace_access_lock(iter->cpu_file);
6193
6194 /* Fill as many pages as possible. */
6195 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6196 spd.pages[i] = alloc_page(GFP_KERNEL);
6197 if (!spd.pages[i])
6198 break;
6199
6200 rem = tracing_fill_pipe_page(rem, iter);
6201
6202 /* Copy the data into the page, so we can start over. */
6203 ret = trace_seq_to_buffer(&iter->seq,
6204 page_address(spd.pages[i]),
6205 trace_seq_used(&iter->seq));
6206 if (ret < 0) {
6207 __free_page(spd.pages[i]);
6208 break;
6209 }
6210 spd.partial[i].offset = 0;
6211 spd.partial[i].len = trace_seq_used(&iter->seq);
6212
6213 trace_seq_init(&iter->seq);
6214 }
6215
6216 trace_access_unlock(iter->cpu_file);
6217 trace_event_read_unlock();
6218 mutex_unlock(&iter->mutex);
6219
6220 spd.nr_pages = i;
6221
6222 if (i)
6223 ret = splice_to_pipe(pipe, &spd);
6224 else
6225 ret = 0;
6226out:
6227 splice_shrink_spd(&spd);
6228 return ret;
6229
6230out_err:
6231 mutex_unlock(&iter->mutex);
6232 goto out;
6233}
6234
6235static ssize_t
6236tracing_entries_read(struct file *filp, char __user *ubuf,
6237 size_t cnt, loff_t *ppos)
6238{
6239 struct inode *inode = file_inode(filp);
6240 struct trace_array *tr = inode->i_private;
6241 int cpu = tracing_get_cpu(inode);
6242 char buf[64];
6243 int r = 0;
6244 ssize_t ret;
6245
6246 mutex_lock(&trace_types_lock);
6247
6248 if (cpu == RING_BUFFER_ALL_CPUS) {
6249 int cpu, buf_size_same;
6250 unsigned long size;
6251
6252 size = 0;
6253 buf_size_same = 1;
6254 /* check if all cpu sizes are same */
6255		/* check if all per-cpu sizes are the same */
6256 /* fill in the size from first enabled cpu */
6257			/* fill in the size from the first enabled cpu */
6258 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
6259 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
6260 buf_size_same = 0;
6261 break;
6262 }
6263 }
6264
6265 if (buf_size_same) {
6266 if (!ring_buffer_expanded)
6267 r = sprintf(buf, "%lu (expanded: %lu)\n",
6268 size >> 10,
6269 trace_buf_size >> 10);
6270 else
6271 r = sprintf(buf, "%lu\n", size >> 10);
6272 } else
6273 r = sprintf(buf, "X\n");
6274 } else
6275 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
6276
6277 mutex_unlock(&trace_types_lock);
6278
6279 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6280 return ret;
6281}
6282
6283static ssize_t
6284tracing_entries_write(struct file *filp, const char __user *ubuf,
6285 size_t cnt, loff_t *ppos)
6286{
6287 struct inode *inode = file_inode(filp);
6288 struct trace_array *tr = inode->i_private;
6289 unsigned long val;
6290 int ret;
6291
6292 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6293 if (ret)
6294 return ret;
6295
6296 /* must have at least 1 entry */
6297 if (!val)
6298 return -EINVAL;
6299
6300 /* value is in KB */
6301 val <<= 10;
6302 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6303 if (ret < 0)
6304 return ret;
6305
6306 *ppos += cnt;
6307
6308 return cnt;
6309}
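/*
 * Example (illustrative, assuming the usual tracefs mount point):
 * buffer_size_kb takes the per-cpu buffer size in KB, so
 *
 *	echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * resizes each per-cpu buffer to 4 MB; the val <<= 10 above converts
 * KB to bytes before the resize.
 */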
6310
6311static ssize_t
6312tracing_total_entries_read(struct file *filp, char __user *ubuf,
6313 size_t cnt, loff_t *ppos)
6314{
6315 struct trace_array *tr = filp->private_data;
6316 char buf[64];
6317 int r, cpu;
6318 unsigned long size = 0, expanded_size = 0;
6319
6320 mutex_lock(&trace_types_lock);
6321 for_each_tracing_cpu(cpu) {
6322 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6323 if (!ring_buffer_expanded)
6324 expanded_size += trace_buf_size >> 10;
6325 }
6326 if (ring_buffer_expanded)
6327 r = sprintf(buf, "%lu\n", size);
6328 else
6329 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6330 mutex_unlock(&trace_types_lock);
6331
6332 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6333}
6334
6335static ssize_t
6336tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6337 size_t cnt, loff_t *ppos)
6338{
6339 /*
6340	 * There is no need to read what the user has written; this function
6341	 * exists only so that "echo" into the file does not report an error.
6342 */
6343
6344 *ppos += cnt;
6345
6346 return cnt;
6347}
6348
6349static int
6350tracing_free_buffer_release(struct inode *inode, struct file *filp)
6351{
6352 struct trace_array *tr = inode->i_private;
6353
6354	/* Disable tracing? */
6355 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6356 tracer_tracing_off(tr);
6357 /* resize the ring buffer to 0 */
6358 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6359
6360 trace_array_put(tr);
6361
6362 return 0;
6363}
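/*
 * Usage sketch (illustrative): the memory is freed on release, i.e. when
 * the fd is closed, so, assuming the usual tracefs mount point,
 *
 *	echo > /sys/kernel/tracing/free_buffer
 *
 * writes, closes, and thereby shrinks the ring buffer to zero (stopping
 * tracing first if the stop-on-free option is set).
 */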
6364
6365static ssize_t
6366tracing_mark_write(struct file *filp, const char __user *ubuf,
6367 size_t cnt, loff_t *fpos)
6368{
6369 struct trace_array *tr = filp->private_data;
6370 struct ring_buffer_event *event;
6371 enum event_trigger_type tt = ETT_NONE;
6372 struct ring_buffer *buffer;
6373 struct print_entry *entry;
6374 unsigned long irq_flags;
6375 ssize_t written;
6376 int size;
6377 int len;
6378
6379/* Used in tracing_mark_raw_write() as well */
6380#define FAULTED_STR "<faulted>"
6381#define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6382
6383 if (tracing_disabled)
6384 return -EINVAL;
6385
6386 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6387 return -EINVAL;
6388
6389 if (cnt > TRACE_BUF_SIZE)
6390 cnt = TRACE_BUF_SIZE;
6391
6392 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6393
6394 local_save_flags(irq_flags);
6395 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6396
6397	/* If shorter than "<faulted>", make sure we can still add that string */
6398 if (cnt < FAULTED_SIZE)
6399 size += FAULTED_SIZE - cnt;
6400
6401 buffer = tr->trace_buffer.buffer;
6402 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6403 irq_flags, preempt_count());
6404 if (unlikely(!event))
6405 /* Ring buffer disabled, return as if not open for write */
6406 return -EBADF;
6407
6408 entry = ring_buffer_event_data(event);
6409 entry->ip = _THIS_IP_;
6410
6411 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6412 if (len) {
6413 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6414 cnt = FAULTED_SIZE;
6415 written = -EFAULT;
6416 } else
6417 written = cnt;
6418 len = cnt;
6419
6420 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6421 /* do not add \n before testing triggers, but add \0 */
6422 entry->buf[cnt] = '\0';
6423 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6424 }
6425
6426 if (entry->buf[cnt - 1] != '\n') {
6427 entry->buf[cnt] = '\n';
6428 entry->buf[cnt + 1] = '\0';
6429 } else
6430 entry->buf[cnt] = '\0';
6431
6432 __buffer_unlock_commit(buffer, event);
6433
6434 if (tt)
6435 event_triggers_post_call(tr->trace_marker_file, tt);
6436
6437 if (written > 0)
6438 *fpos += written;
6439
6440 return written;
6441}
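/*
 * Example (illustrative, assuming the usual tracefs mount point):
 * user space can annotate a trace by writing to trace_marker:
 *
 *	echo "hello from user space" > /sys/kernel/tracing/trace_marker
 *
 * which shows up in the trace output as a print event.
 */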
6442
6443/* Limit it for now to 3K (including tag) */
6444#define RAW_DATA_MAX_SIZE (1024*3)
6445
6446static ssize_t
6447tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6448 size_t cnt, loff_t *fpos)
6449{
6450 struct trace_array *tr = filp->private_data;
6451 struct ring_buffer_event *event;
6452 struct ring_buffer *buffer;
6453 struct raw_data_entry *entry;
6454 unsigned long irq_flags;
6455 ssize_t written;
6456 int size;
6457 int len;
6458
6459#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6460
6461 if (tracing_disabled)
6462 return -EINVAL;
6463
6464 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6465 return -EINVAL;
6466
6467 /* The marker must at least have a tag id */
6468 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6469 return -EINVAL;
6470
6471 if (cnt > TRACE_BUF_SIZE)
6472 cnt = TRACE_BUF_SIZE;
6473
6474 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6475
6476 local_save_flags(irq_flags);
6477 size = sizeof(*entry) + cnt;
6478 if (cnt < FAULT_SIZE_ID)
6479 size += FAULT_SIZE_ID - cnt;
6480
6481 buffer = tr->trace_buffer.buffer;
6482 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6483 irq_flags, preempt_count());
6484 if (!event)
6485 /* Ring buffer disabled, return as if not open for write */
6486 return -EBADF;
6487
6488 entry = ring_buffer_event_data(event);
6489
6490 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6491 if (len) {
6492 entry->id = -1;
6493 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6494 written = -EFAULT;
6495 } else
6496 written = cnt;
6497
6498 __buffer_unlock_commit(buffer, event);
6499
6500 if (written > 0)
6501 *fpos += written;
6502
6503 return written;
6504}
6505
6506static int tracing_clock_show(struct seq_file *m, void *v)
6507{
6508 struct trace_array *tr = m->private;
6509 int i;
6510
6511 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6512 seq_printf(m,
6513 "%s%s%s%s", i ? " " : "",
6514 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6515 i == tr->clock_id ? "]" : "");
6516 seq_putc(m, '\n');
6517
6518 return 0;
6519}
6520
6521int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6522{
6523 int i;
6524
6525 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6526 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6527 break;
6528 }
6529 if (i == ARRAY_SIZE(trace_clocks))
6530 return -EINVAL;
6531
6532 mutex_lock(&trace_types_lock);
6533
6534 tr->clock_id = i;
6535
6536 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6537
6538 /*
6539 * New clock may not be consistent with the previous clock.
6540 * Reset the buffer so that it doesn't have incomparable timestamps.
6541 */
6542 tracing_reset_online_cpus(&tr->trace_buffer);
6543
6544#ifdef CONFIG_TRACER_MAX_TRACE
6545 if (tr->max_buffer.buffer)
6546 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6547 tracing_reset_online_cpus(&tr->max_buffer);
6548#endif
6549
6550 mutex_unlock(&trace_types_lock);
6551
6552 return 0;
6553}
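/*
 * Example (illustrative, assuming the usual tracefs mount point):
 *
 *	cat /sys/kernel/tracing/trace_clock	# e.g. "[local] global ..."
 *	echo global > /sys/kernel/tracing/trace_clock
 *
 * Note the reset above: events recorded under the old clock are dropped
 * rather than left mixed in with incomparable timestamps.
 */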
6554
6555static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6556 size_t cnt, loff_t *fpos)
6557{
6558 struct seq_file *m = filp->private_data;
6559 struct trace_array *tr = m->private;
6560 char buf[64];
6561 const char *clockstr;
6562 int ret;
6563
6564 if (cnt >= sizeof(buf))
6565 return -EINVAL;
6566
6567 if (copy_from_user(buf, ubuf, cnt))
6568 return -EFAULT;
6569
6570 buf[cnt] = 0;
6571
6572 clockstr = strstrip(buf);
6573
6574 ret = tracing_set_clock(tr, clockstr);
6575 if (ret)
6576 return ret;
6577
6578 *fpos += cnt;
6579
6580 return cnt;
6581}
6582
6583static int tracing_clock_open(struct inode *inode, struct file *file)
6584{
6585 struct trace_array *tr = inode->i_private;
6586 int ret;
6587
6588 ret = tracing_check_open_get_tr(tr);
6589 if (ret)
6590 return ret;
6591
6592 ret = single_open(file, tracing_clock_show, inode->i_private);
6593 if (ret < 0)
6594 trace_array_put(tr);
6595
6596 return ret;
6597}
6598
6599static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6600{
6601 struct trace_array *tr = m->private;
6602
6603 mutex_lock(&trace_types_lock);
6604
6605 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6606 seq_puts(m, "delta [absolute]\n");
6607 else
6608 seq_puts(m, "[delta] absolute\n");
6609
6610 mutex_unlock(&trace_types_lock);
6611
6612 return 0;
6613}
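/*
 * Illustrative output: reading timestamp_mode shows the current mode in
 * brackets, e.g. "[delta] absolute" by default. The mode is not switched
 * through this file; users such as hist triggers flip it via
 * tracing_set_time_stamp_abs() below.
 */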
6614
6615static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6616{
6617 struct trace_array *tr = inode->i_private;
6618 int ret;
6619
6620 ret = tracing_check_open_get_tr(tr);
6621 if (ret)
6622 return ret;
6623
6624 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6625 if (ret < 0)
6626 trace_array_put(tr);
6627
6628 return ret;
6629}
6630
6631int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6632{
6633 int ret = 0;
6634
6635 mutex_lock(&trace_types_lock);
6636
6637 if (abs && tr->time_stamp_abs_ref++)
6638 goto out;
6639
6640 if (!abs) {
6641 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6642 ret = -EINVAL;
6643 goto out;
6644 }
6645
6646 if (--tr->time_stamp_abs_ref)
6647 goto out;
6648 }
6649
6650 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6651
6652#ifdef CONFIG_TRACER_MAX_TRACE
6653 if (tr->max_buffer.buffer)
6654 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6655#endif
6656 out:
6657 mutex_unlock(&trace_types_lock);
6658
6659 return ret;
6660}
6661
6662struct ftrace_buffer_info {
6663 struct trace_iterator iter;
6664 void *spare;
6665 unsigned int spare_cpu;
6666 unsigned int read;
6667};
6668
6669#ifdef CONFIG_TRACER_SNAPSHOT
6670static int tracing_snapshot_open(struct inode *inode, struct file *file)
6671{
6672 struct trace_array *tr = inode->i_private;
6673 struct trace_iterator *iter;
6674 struct seq_file *m;
6675 int ret;
6676
6677 ret = tracing_check_open_get_tr(tr);
6678 if (ret)
6679 return ret;
6680
6681 if (file->f_mode & FMODE_READ) {
6682 iter = __tracing_open(inode, file, true);
6683 if (IS_ERR(iter))
6684 ret = PTR_ERR(iter);
6685 } else {
6686 /* Writes still need the seq_file to hold the private data */
6687 ret = -ENOMEM;
6688 m = kzalloc(sizeof(*m), GFP_KERNEL);
6689 if (!m)
6690 goto out;
6691 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6692 if (!iter) {
6693 kfree(m);
6694 goto out;
6695 }
6696 ret = 0;
6697
6698 iter->tr = tr;
6699 iter->trace_buffer = &tr->max_buffer;
6700 iter->cpu_file = tracing_get_cpu(inode);
6701 m->private = iter;
6702 file->private_data = m;
6703 }
6704out:
6705 if (ret < 0)
6706 trace_array_put(tr);
6707
6708 return ret;
6709}
6710
6711static ssize_t
6712tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6713 loff_t *ppos)
6714{
6715 struct seq_file *m = filp->private_data;
6716 struct trace_iterator *iter = m->private;
6717 struct trace_array *tr = iter->tr;
6718 unsigned long val;
6719 int ret;
6720
6721 ret = tracing_update_buffers();
6722 if (ret < 0)
6723 return ret;
6724
6725 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6726 if (ret)
6727 return ret;
6728
6729 mutex_lock(&trace_types_lock);
6730
6731 if (tr->current_trace->use_max_tr) {
6732 ret = -EBUSY;
6733 goto out;
6734 }
6735
6736 arch_spin_lock(&tr->max_lock);
6737 if (tr->cond_snapshot)
6738 ret = -EBUSY;
6739 arch_spin_unlock(&tr->max_lock);
6740 if (ret)
6741 goto out;
6742
6743 switch (val) {
6744 case 0:
6745 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6746 ret = -EINVAL;
6747 break;
6748 }
6749 if (tr->allocated_snapshot)
6750 free_snapshot(tr);
6751 break;
6752 case 1:
6753/* Only allow per-cpu swap if the ring buffer supports it */
6754#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6755 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6756 ret = -EINVAL;
6757 break;
6758 }
6759#endif
6760 if (tr->allocated_snapshot)
6761 ret = resize_buffer_duplicate_size(&tr->max_buffer,
6762 &tr->trace_buffer, iter->cpu_file);
6763 else
6764 ret = tracing_alloc_snapshot_instance(tr);
6765 if (ret < 0)
6766 break;
6767 local_irq_disable();
6768 /* Now, we're going to swap */
6769 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6770 update_max_tr(tr, current, smp_processor_id(), NULL);
6771 else
6772 update_max_tr_single(tr, current, iter->cpu_file);
6773 local_irq_enable();
6774 break;
6775 default:
6776 if (tr->allocated_snapshot) {
6777 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6778 tracing_reset_online_cpus(&tr->max_buffer);
6779 else
6780 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
6781 }
6782 break;
6783 }
6784
6785 if (ret >= 0) {
6786 *ppos += cnt;
6787 ret = cnt;
6788 }
6789out:
6790 mutex_unlock(&trace_types_lock);
6791 return ret;
6792}
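/*
 * Usage sketch (illustrative, assuming the usual tracefs mount point),
 * matching the case 0/1/default handling above:
 *
 *	echo 1 > /sys/kernel/tracing/snapshot	# allocate and swap in
 *	echo 0 > /sys/kernel/tracing/snapshot	# free the snapshot buffer
 *	echo 2 > /sys/kernel/tracing/snapshot	# clear it without freeing
 */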
6793
6794static int tracing_snapshot_release(struct inode *inode, struct file *file)
6795{
6796 struct seq_file *m = file->private_data;
6797 int ret;
6798
6799 ret = tracing_release(inode, file);
6800
6801 if (file->f_mode & FMODE_READ)
6802 return ret;
6803
6804 /* If write only, the seq_file is just a stub */
6805 if (m)
6806 kfree(m->private);
6807 kfree(m);
6808
6809 return 0;
6810}
6811
6812static int tracing_buffers_open(struct inode *inode, struct file *filp);
6813static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6814 size_t count, loff_t *ppos);
6815static int tracing_buffers_release(struct inode *inode, struct file *file);
6816static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6817 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6818
6819static int snapshot_raw_open(struct inode *inode, struct file *filp)
6820{
6821 struct ftrace_buffer_info *info;
6822 int ret;
6823
6824 /* The following checks for tracefs lockdown */
6825 ret = tracing_buffers_open(inode, filp);
6826 if (ret < 0)
6827 return ret;
6828
6829 info = filp->private_data;
6830
6831 if (info->iter.trace->use_max_tr) {
6832 tracing_buffers_release(inode, filp);
6833 return -EBUSY;
6834 }
6835
6836 info->iter.snapshot = true;
6837 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6838
6839 return ret;
6840}
6841
6842#endif /* CONFIG_TRACER_SNAPSHOT */
6843
6845static const struct file_operations tracing_thresh_fops = {
6846 .open = tracing_open_generic,
6847 .read = tracing_thresh_read,
6848 .write = tracing_thresh_write,
6849 .llseek = generic_file_llseek,
6850};
6851
6852#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6853static const struct file_operations tracing_max_lat_fops = {
6854 .open = tracing_open_generic,
6855 .read = tracing_max_lat_read,
6856 .write = tracing_max_lat_write,
6857 .llseek = generic_file_llseek,
6858};
6859#endif
6860
6861static const struct file_operations set_tracer_fops = {
6862 .open = tracing_open_generic,
6863 .read = tracing_set_trace_read,
6864 .write = tracing_set_trace_write,
6865 .llseek = generic_file_llseek,
6866};
6867
6868static const struct file_operations tracing_pipe_fops = {
6869 .open = tracing_open_pipe,
6870 .poll = tracing_poll_pipe,
6871 .read = tracing_read_pipe,
6872 .splice_read = tracing_splice_read_pipe,
6873 .release = tracing_release_pipe,
6874 .llseek = no_llseek,
6875};
6876
6877static const struct file_operations tracing_entries_fops = {
6878 .open = tracing_open_generic_tr,
6879 .read = tracing_entries_read,
6880 .write = tracing_entries_write,
6881 .llseek = generic_file_llseek,
6882 .release = tracing_release_generic_tr,
6883};
6884
6885static const struct file_operations tracing_total_entries_fops = {
6886 .open = tracing_open_generic_tr,
6887 .read = tracing_total_entries_read,
6888 .llseek = generic_file_llseek,
6889 .release = tracing_release_generic_tr,
6890};
6891
6892static const struct file_operations tracing_free_buffer_fops = {
6893 .open = tracing_open_generic_tr,
6894 .write = tracing_free_buffer_write,
6895 .release = tracing_free_buffer_release,
6896};
6897
6898static const struct file_operations tracing_mark_fops = {
6899 .open = tracing_open_generic_tr,
6900 .write = tracing_mark_write,
6901 .llseek = generic_file_llseek,
6902 .release = tracing_release_generic_tr,
6903};
6904
6905static const struct file_operations tracing_mark_raw_fops = {
6906 .open = tracing_open_generic_tr,
6907 .write = tracing_mark_raw_write,
6908 .llseek = generic_file_llseek,
6909 .release = tracing_release_generic_tr,
6910};
6911
6912static const struct file_operations trace_clock_fops = {
6913 .open = tracing_clock_open,
6914 .read = seq_read,
6915 .llseek = seq_lseek,
6916 .release = tracing_single_release_tr,
6917 .write = tracing_clock_write,
6918};
6919
6920static const struct file_operations trace_time_stamp_mode_fops = {
6921 .open = tracing_time_stamp_mode_open,
6922 .read = seq_read,
6923 .llseek = seq_lseek,
6924 .release = tracing_single_release_tr,
6925};
6926
6927#ifdef CONFIG_TRACER_SNAPSHOT
6928static const struct file_operations snapshot_fops = {
6929 .open = tracing_snapshot_open,
6930 .read = seq_read,
6931 .write = tracing_snapshot_write,
6932 .llseek = tracing_lseek,
6933 .release = tracing_snapshot_release,
6934};
6935
6936static const struct file_operations snapshot_raw_fops = {
6937 .open = snapshot_raw_open,
6938 .read = tracing_buffers_read,
6939 .release = tracing_buffers_release,
6940 .splice_read = tracing_buffers_splice_read,
6941 .llseek = no_llseek,
6942};
6943
6944#endif /* CONFIG_TRACER_SNAPSHOT */
6945
6946#define TRACING_LOG_ERRS_MAX 8
6947#define TRACING_LOG_LOC_MAX 128
6948
6949#define CMD_PREFIX " Command: "
6950
6951struct err_info {
6952 const char **errs; /* ptr to loc-specific array of err strings */
6953 u8 type; /* index into errs -> specific err string */
6954	u8		pos;	/* caret position; fits since MAX_FILTER_STR_VAL = 256 */
6955 u64 ts;
6956};
6957
6958struct tracing_log_err {
6959 struct list_head list;
6960 struct err_info info;
6961 char loc[TRACING_LOG_LOC_MAX]; /* err location */
6962 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
6963};
6964
6965static DEFINE_MUTEX(tracing_err_log_lock);
6966
6967static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
6968{
6969 struct tracing_log_err *err;
6970
6971 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
6972 err = kzalloc(sizeof(*err), GFP_KERNEL);
6973 if (!err)
6974 err = ERR_PTR(-ENOMEM);
6975 tr->n_err_log_entries++;
6976
6977 return err;
6978 }
6979
6980 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
6981 list_del(&err->list);
6982
6983 return err;
6984}
6985
6986/**
6987 * err_pos - find the position of a string within a command for error careting
6988 * @cmd: The tracing command that caused the error
6989 * @str: The string to position the caret at within @cmd
6990 *
6991 * Finds the position of the first occurrence of @str within @cmd. The
6992 * return value can be passed to tracing_log_err() for caret placement
6993 * within @cmd.
6994 *
6995 * Returns the index within @cmd of the first occurrence of @str or 0
6996 * if @str was not found.
6997 */
6998unsigned int err_pos(char *cmd, const char *str)
6999{
7000 char *found;
7001
7002 if (WARN_ON(!strlen(cmd)))
7003 return 0;
7004
7005 found = strstr(cmd, str);
7006 if (found)
7007 return found - cmd;
7008
7009 return 0;
7010}
7011
7012/**
7013 * tracing_log_err - write an error to the tracing error log
7014 * @tr: The associated trace array for the error (NULL for top level array)
7015 * @loc: A string describing where the error occurred
7016 * @cmd: The tracing command that caused the error
7017 * @errs: The array of loc-specific static error strings
7018 * @type: The index into errs[], which produces the specific static err string
7019 * @pos: The position the caret should be placed in the cmd
7020 *
7021 * Writes an error into tracing/error_log of the form:
7022 *
7023 * <loc>: error: <text>
7024 * Command: <cmd>
7025 * ^
7026 *
7027 * tracing/error_log is a small log file containing the last
7028 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7029 * unless there has been a tracing error, and the error log can be
7030 * cleared and have its memory freed by writing the empty string in
7031 * truncation mode to it, i.e. echo > tracing/error_log.
7032 *
7033 * NOTE: the @errs array along with the @type param are used to
7034 * produce a static error string - this string is not copied and saved
7035 * when the error is logged - only a pointer to it is saved. See
7036 * existing callers for examples of how static strings are typically
7037 * defined for use with tracing_log_err().
7038 */
7039void tracing_log_err(struct trace_array *tr,
7040 const char *loc, const char *cmd,
7041 const char **errs, u8 type, u8 pos)
7042{
7043 struct tracing_log_err *err;
7044
7045 if (!tr)
7046 tr = &global_trace;
7047
7048 mutex_lock(&tracing_err_log_lock);
7049 err = get_tracing_log_err(tr);
7050 if (PTR_ERR(err) == -ENOMEM) {
7051 mutex_unlock(&tracing_err_log_lock);
7052 return;
7053 }
7054
7055 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7056	snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7057
7058 err->info.errs = errs;
7059 err->info.type = type;
7060 err->info.pos = pos;
7061 err->info.ts = local_clock();
7062
7063 list_add_tail(&err->list, &tr->err_log);
7064 mutex_unlock(&tracing_err_log_lock);
7065}
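/*
 * A minimal caller sketch (hypothetical names, for illustration only),
 * mirroring how existing callers pass a static error-string table:
 *
 *	static const char *my_errs[] = { "Bad argument", "Out of range" };
 *	#define MY_ERR_BAD_ARG 0
 *
 *	tracing_log_err(tr, "my_subsys", cmd, my_errs, MY_ERR_BAD_ARG,
 *			err_pos(cmd, "badtoken"));
 */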
7066
7067static void clear_tracing_err_log(struct trace_array *tr)
7068{
7069 struct tracing_log_err *err, *next;
7070
7071 mutex_lock(&tracing_err_log_lock);
7072 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7073 list_del(&err->list);
7074 kfree(err);
7075 }
7076
7077 tr->n_err_log_entries = 0;
7078 mutex_unlock(&tracing_err_log_lock);
7079}
7080
7081static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7082{
7083 struct trace_array *tr = m->private;
7084
7085 mutex_lock(&tracing_err_log_lock);
7086
7087 return seq_list_start(&tr->err_log, *pos);
7088}
7089
7090static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7091{
7092 struct trace_array *tr = m->private;
7093
7094 return seq_list_next(v, &tr->err_log, pos);
7095}
7096
7097static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7098{
7099 mutex_unlock(&tracing_err_log_lock);
7100}
7101
7102static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7103{
7104 u8 i;
7105
7106 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7107 seq_putc(m, ' ');
7108 for (i = 0; i < pos; i++)
7109 seq_putc(m, ' ');
7110 seq_puts(m, "^\n");
7111}
7112
7113static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7114{
7115 struct tracing_log_err *err = v;
7116
7117 if (err) {
7118 const char *err_text = err->info.errs[err->info.type];
7119 u64 sec = err->info.ts;
7120 u32 nsec;
7121
7122 nsec = do_div(sec, NSEC_PER_SEC);
7123 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7124 err->loc, err_text);
7125 seq_printf(m, "%s", err->cmd);
7126 tracing_err_log_show_pos(m, err->info.pos);
7127 }
7128
7129 return 0;
7130}
7131
7132static const struct seq_operations tracing_err_log_seq_ops = {
7133 .start = tracing_err_log_seq_start,
7134 .next = tracing_err_log_seq_next,
7135 .stop = tracing_err_log_seq_stop,
7136 .show = tracing_err_log_seq_show
7137};
7138
7139static int tracing_err_log_open(struct inode *inode, struct file *file)
7140{
7141 struct trace_array *tr = inode->i_private;
7142 int ret = 0;
7143
7144 ret = tracing_check_open_get_tr(tr);
7145 if (ret)
7146 return ret;
7147
7148 /* If this file was opened for write, then erase contents */
7149 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7150 clear_tracing_err_log(tr);
7151
7152 if (file->f_mode & FMODE_READ) {
7153 ret = seq_open(file, &tracing_err_log_seq_ops);
7154 if (!ret) {
7155 struct seq_file *m = file->private_data;
7156 m->private = tr;
7157 } else {
7158 trace_array_put(tr);
7159 }
7160 }
7161 return ret;
7162}
7163
7164static ssize_t tracing_err_log_write(struct file *file,
7165 const char __user *buffer,
7166 size_t count, loff_t *ppos)
7167{
7168 return count;
7169}
7170
7171static int tracing_err_log_release(struct inode *inode, struct file *file)
7172{
7173 struct trace_array *tr = inode->i_private;
7174
7175 trace_array_put(tr);
7176
7177 if (file->f_mode & FMODE_READ)
7178 seq_release(inode, file);
7179
7180 return 0;
7181}
7182
7183static const struct file_operations tracing_err_log_fops = {
7184 .open = tracing_err_log_open,
7185 .write = tracing_err_log_write,
7186 .read = seq_read,
7187 .llseek = seq_lseek,
7188 .release = tracing_err_log_release,
7189};
7190
7191static int tracing_buffers_open(struct inode *inode, struct file *filp)
7192{
7193 struct trace_array *tr = inode->i_private;
7194 struct ftrace_buffer_info *info;
7195 int ret;
7196
7197 ret = tracing_check_open_get_tr(tr);
7198 if (ret)
7199 return ret;
7200
7201 info = kzalloc(sizeof(*info), GFP_KERNEL);
7202 if (!info) {
7203 trace_array_put(tr);
7204 return -ENOMEM;
7205 }
7206
7207 mutex_lock(&trace_types_lock);
7208
7209 info->iter.tr = tr;
7210 info->iter.cpu_file = tracing_get_cpu(inode);
7211 info->iter.trace = tr->current_trace;
7212 info->iter.trace_buffer = &tr->trace_buffer;
7213 info->spare = NULL;
7214 /* Force reading ring buffer for first read */
7215 info->read = (unsigned int)-1;
7216
7217 filp->private_data = info;
7218
7219 tr->current_trace->ref++;
7220
7221 mutex_unlock(&trace_types_lock);
7222
7223 ret = nonseekable_open(inode, filp);
7224 if (ret < 0)
7225 trace_array_put(tr);
7226
7227 return ret;
7228}
7229
7230static __poll_t
7231tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7232{
7233 struct ftrace_buffer_info *info = filp->private_data;
7234 struct trace_iterator *iter = &info->iter;
7235
7236 return trace_poll(iter, filp, poll_table);
7237}
7238
7239static ssize_t
7240tracing_buffers_read(struct file *filp, char __user *ubuf,
7241 size_t count, loff_t *ppos)
7242{
7243 struct ftrace_buffer_info *info = filp->private_data;
7244 struct trace_iterator *iter = &info->iter;
7245 ssize_t ret = 0;
7246 ssize_t size;
7247
7248 if (!count)
7249 return 0;
7250
7251#ifdef CONFIG_TRACER_MAX_TRACE
7252 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7253 return -EBUSY;
7254#endif
7255
7256 if (!info->spare) {
7257 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
7258 iter->cpu_file);
7259 if (IS_ERR(info->spare)) {
7260 ret = PTR_ERR(info->spare);
7261 info->spare = NULL;
7262 } else {
7263 info->spare_cpu = iter->cpu_file;
7264 }
7265 }
7266 if (!info->spare)
7267 return ret;
7268
7269	/* Do we have leftover data from a previous read? */
7270 if (info->read < PAGE_SIZE)
7271 goto read;
7272
7273 again:
7274 trace_access_lock(iter->cpu_file);
7275 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
7276 &info->spare,
7277 count,
7278 iter->cpu_file, 0);
7279 trace_access_unlock(iter->cpu_file);
7280
7281 if (ret < 0) {
7282 if (trace_empty(iter)) {
7283 if ((filp->f_flags & O_NONBLOCK))
7284 return -EAGAIN;
7285
7286 ret = wait_on_pipe(iter, 0);
7287 if (ret)
7288 return ret;
7289
7290 goto again;
7291 }
7292 return 0;
7293 }
7294
7295 info->read = 0;
7296 read:
7297 size = PAGE_SIZE - info->read;
7298 if (size > count)
7299 size = count;
7300
7301 ret = copy_to_user(ubuf, info->spare + info->read, size);
7302 if (ret == size)
7303 return -EFAULT;
7304
7305 size -= ret;
7306
7307 *ppos += size;
7308 info->read += size;
7309
7310 return size;
7311}
7312
7313static int tracing_buffers_release(struct inode *inode, struct file *file)
7314{
7315 struct ftrace_buffer_info *info = file->private_data;
7316 struct trace_iterator *iter = &info->iter;
7317
7318 mutex_lock(&trace_types_lock);
7319
7320 iter->tr->current_trace->ref--;
7321
7322 __trace_array_put(iter->tr);
7323
7324 if (info->spare)
7325 ring_buffer_free_read_page(iter->trace_buffer->buffer,
7326 info->spare_cpu, info->spare);
7327 kfree(info);
7328
7329 mutex_unlock(&trace_types_lock);
7330
7331 return 0;
7332}
7333
7334struct buffer_ref {
7335 struct ring_buffer *buffer;
7336 void *page;
7337 int cpu;
7338 refcount_t refcount;
7339};
7340
7341static void buffer_ref_release(struct buffer_ref *ref)
7342{
7343 if (!refcount_dec_and_test(&ref->refcount))
7344 return;
7345 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7346 kfree(ref);
7347}
7348
7349static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7350 struct pipe_buffer *buf)
7351{
7352 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7353
7354 buffer_ref_release(ref);
7355 buf->private = 0;
7356}
7357
7358static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7359 struct pipe_buffer *buf)
7360{
7361 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7362
7363 if (refcount_read(&ref->refcount) > INT_MAX/2)
7364 return false;
7365
7366 refcount_inc(&ref->refcount);
7367 return true;
7368}
7369
7370/* Pipe buffer operations for a buffer. */
7371static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7372 .confirm = generic_pipe_buf_confirm,
7373 .release = buffer_pipe_buf_release,
7374 .steal = generic_pipe_buf_nosteal,
7375 .get = buffer_pipe_buf_get,
7376};
7377
7378/*
7379 * Callback from splice_to_pipe(); releases pages left at the end of
7380 * the spd in case we errored out while filling the pipe.
7381 */
7382static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7383{
7384 struct buffer_ref *ref =
7385 (struct buffer_ref *)spd->partial[i].private;
7386
7387 buffer_ref_release(ref);
7388 spd->partial[i].private = 0;
7389}
7390
7391static ssize_t
7392tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7393 struct pipe_inode_info *pipe, size_t len,
7394 unsigned int flags)
7395{
7396 struct ftrace_buffer_info *info = file->private_data;
7397 struct trace_iterator *iter = &info->iter;
7398 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7399 struct page *pages_def[PIPE_DEF_BUFFERS];
7400 struct splice_pipe_desc spd = {
7401 .pages = pages_def,
7402 .partial = partial_def,
7403 .nr_pages_max = PIPE_DEF_BUFFERS,
7404 .ops = &buffer_pipe_buf_ops,
7405 .spd_release = buffer_spd_release,
7406 };
7407 struct buffer_ref *ref;
7408 int entries, i;
7409 ssize_t ret = 0;
7410
7411#ifdef CONFIG_TRACER_MAX_TRACE
7412 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7413 return -EBUSY;
7414#endif
7415
7416 if (*ppos & (PAGE_SIZE - 1))
7417 return -EINVAL;
7418
7419 if (len & (PAGE_SIZE - 1)) {
7420 if (len < PAGE_SIZE)
7421 return -EINVAL;
7422 len &= PAGE_MASK;
7423 }
7424
7425 if (splice_grow_spd(pipe, &spd))
7426 return -ENOMEM;
7427
7428 again:
7429 trace_access_lock(iter->cpu_file);
7430 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7431
7432 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7433 struct page *page;
7434 int r;
7435
7436 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7437 if (!ref) {
7438 ret = -ENOMEM;
7439 break;
7440 }
7441
7442 refcount_set(&ref->refcount, 1);
7443 ref->buffer = iter->trace_buffer->buffer;
7444 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7445 if (IS_ERR(ref->page)) {
7446 ret = PTR_ERR(ref->page);
7447 ref->page = NULL;
7448 kfree(ref);
7449 break;
7450 }
7451 ref->cpu = iter->cpu_file;
7452
7453 r = ring_buffer_read_page(ref->buffer, &ref->page,
7454 len, iter->cpu_file, 1);
7455 if (r < 0) {
7456 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7457 ref->page);
7458 kfree(ref);
7459 break;
7460 }
7461
7462 page = virt_to_page(ref->page);
7463
7464 spd.pages[i] = page;
7465 spd.partial[i].len = PAGE_SIZE;
7466 spd.partial[i].offset = 0;
7467 spd.partial[i].private = (unsigned long)ref;
7468 spd.nr_pages++;
7469 *ppos += PAGE_SIZE;
7470
7471 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
7472 }
7473
7474 trace_access_unlock(iter->cpu_file);
7475 spd.nr_pages = i;
7476
7477 /* did we read anything? */
7478 if (!spd.nr_pages) {
7479 if (ret)
7480 goto out;
7481
7482 ret = -EAGAIN;
7483 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7484 goto out;
7485
7486 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7487 if (ret)
7488 goto out;
7489
7490 goto again;
7491 }
7492
7493 ret = splice_to_pipe(pipe, &spd);
7494out:
7495 splice_shrink_spd(&spd);
7496
7497 return ret;
7498}
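/*
 * Note (illustrative): this is the zero-copy path behind the per-cpu
 * trace_pipe_raw files; *ppos must be page aligned and len is rounded
 * down to whole pages (checked above), since complete ring-buffer pages
 * are handed to the pipe by reference.
 */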
7499
7500static const struct file_operations tracing_buffers_fops = {
7501 .open = tracing_buffers_open,
7502 .read = tracing_buffers_read,
7503 .poll = tracing_buffers_poll,
7504 .release = tracing_buffers_release,
7505 .splice_read = tracing_buffers_splice_read,
7506 .llseek = no_llseek,
7507};
7508
7509static ssize_t
7510tracing_stats_read(struct file *filp, char __user *ubuf,
7511 size_t count, loff_t *ppos)
7512{
7513 struct inode *inode = file_inode(filp);
7514 struct trace_array *tr = inode->i_private;
7515 struct trace_buffer *trace_buf = &tr->trace_buffer;
7516 int cpu = tracing_get_cpu(inode);
7517 struct trace_seq *s;
7518 unsigned long cnt;
7519 unsigned long long t;
7520 unsigned long usec_rem;
7521
7522 s = kmalloc(sizeof(*s), GFP_KERNEL);
7523 if (!s)
7524 return -ENOMEM;
7525
7526 trace_seq_init(s);
7527
7528 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7529 trace_seq_printf(s, "entries: %ld\n", cnt);
7530
7531 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7532 trace_seq_printf(s, "overrun: %ld\n", cnt);
7533
7534 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7535 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7536
7537 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7538 trace_seq_printf(s, "bytes: %ld\n", cnt);
7539
7540 if (trace_clocks[tr->clock_id].in_ns) {
7541 /* local or global for trace_clock */
7542 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7543 usec_rem = do_div(t, USEC_PER_SEC);
7544 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7545 t, usec_rem);
7546
7547 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7548 usec_rem = do_div(t, USEC_PER_SEC);
7549 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7550 } else {
7551 /* counter or tsc mode for trace_clock */
7552 trace_seq_printf(s, "oldest event ts: %llu\n",
7553 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7554
7555 trace_seq_printf(s, "now ts: %llu\n",
7556 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7557 }
7558
7559 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7560 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7561
7562 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7563 trace_seq_printf(s, "read events: %ld\n", cnt);
7564
7565 count = simple_read_from_buffer(ubuf, count, ppos,
7566 s->buffer, trace_seq_used(s));
7567
7568 kfree(s);
7569
7570 return count;
7571}
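/*
 * Example output sketch (illustrative) for a per_cpu/cpuN/stats file:
 *
 *	entries: 102
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 3456
 *	...
 *
 * "overrun" counts events lost to overwrite; "dropped events" counts
 * events discarded while overwrite was disabled.
 */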
7572
7573static const struct file_operations tracing_stats_fops = {
7574 .open = tracing_open_generic_tr,
7575 .read = tracing_stats_read,
7576 .llseek = generic_file_llseek,
7577 .release = tracing_release_generic_tr,
7578};
7579
7580#ifdef CONFIG_DYNAMIC_FTRACE
7581
7582static ssize_t
7583tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7584 size_t cnt, loff_t *ppos)
7585{
7586 unsigned long *p = filp->private_data;
7587 char buf[64]; /* Not too big for a shallow stack */
7588 int r;
7589
7590 r = scnprintf(buf, 63, "%ld", *p);
7591 buf[r++] = '\n';
7592
7593 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7594}
7595
7596static const struct file_operations tracing_dyn_info_fops = {
7597 .open = tracing_open_generic,
7598 .read = tracing_read_dyn_info,
7599 .llseek = generic_file_llseek,
7600};
7601#endif /* CONFIG_DYNAMIC_FTRACE */
7602
7603#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7604static void
7605ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7606 struct trace_array *tr, struct ftrace_probe_ops *ops,
7607 void *data)
7608{
7609 tracing_snapshot_instance(tr);
7610}
7611
7612static void
7613ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7614 struct trace_array *tr, struct ftrace_probe_ops *ops,
7615 void *data)
7616{
7617 struct ftrace_func_mapper *mapper = data;
7618 long *count = NULL;
7619
7620 if (mapper)
7621 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7622
7623	if (count) {
7625 if (*count <= 0)
7626 return;
7627
7628 (*count)--;
7629 }
7630
7631 tracing_snapshot_instance(tr);
7632}
7633
7634static int
7635ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7636 struct ftrace_probe_ops *ops, void *data)
7637{
7638 struct ftrace_func_mapper *mapper = data;
7639 long *count = NULL;
7640
7641 seq_printf(m, "%ps:", (void *)ip);
7642
7643 seq_puts(m, "snapshot");
7644
7645 if (mapper)
7646 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7647
7648 if (count)
7649 seq_printf(m, ":count=%ld\n", *count);
7650 else
7651 seq_puts(m, ":unlimited\n");
7652
7653 return 0;
7654}
7655
7656static int
7657ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7658 unsigned long ip, void *init_data, void **data)
7659{
7660 struct ftrace_func_mapper *mapper = *data;
7661
7662 if (!mapper) {
7663 mapper = allocate_ftrace_func_mapper();
7664 if (!mapper)
7665 return -ENOMEM;
7666 *data = mapper;
7667 }
7668
7669 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7670}
7671
7672static void
7673ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7674 unsigned long ip, void *data)
7675{
7676 struct ftrace_func_mapper *mapper = data;
7677
7678 if (!ip) {
7679 if (!mapper)
7680 return;
7681 free_ftrace_func_mapper(mapper, NULL);
7682 return;
7683 }
7684
7685 ftrace_func_mapper_remove_ip(mapper, ip);
7686}
7687
7688static struct ftrace_probe_ops snapshot_probe_ops = {
7689 .func = ftrace_snapshot,
7690 .print = ftrace_snapshot_print,
7691};
7692
7693static struct ftrace_probe_ops snapshot_count_probe_ops = {
7694 .func = ftrace_count_snapshot,
7695 .print = ftrace_snapshot_print,
7696 .init = ftrace_snapshot_init,
7697 .free = ftrace_snapshot_free,
7698};
7699
7700static int
7701ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7702 char *glob, char *cmd, char *param, int enable)
7703{
7704 struct ftrace_probe_ops *ops;
7705 void *count = (void *)-1;
7706 char *number;
7707 int ret;
7708
7709 if (!tr)
7710 return -ENODEV;
7711
7712 /* hash funcs only work with set_ftrace_filter */
7713 if (!enable)
7714 return -EINVAL;
7715
7716 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7717
7718 if (glob[0] == '!')
7719 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7720
7721 if (!param)
7722 goto out_reg;
7723
7724	number = strsep(&param, ":");
7725
7726 if (!strlen(number))
7727 goto out_reg;
7728
7729 /*
7730 * We use the callback data field (which is a pointer)
7731 * as our counter.
7732 */
7733 ret = kstrtoul(number, 0, (unsigned long *)&count);
7734 if (ret)
7735 return ret;
7736
7737 out_reg:
7738 ret = tracing_alloc_snapshot_instance(tr);
7739 if (ret < 0)
7740 goto out;
7741
7742 ret = register_ftrace_function_probe(glob, tr, ops, count);
7743
7744 out:
7745 return ret < 0 ? ret : 0;
7746}
7747
7748static struct ftrace_func_command ftrace_snapshot_cmd = {
7749 .name = "snapshot",
7750 .func = ftrace_trace_snapshot_callback,
7751};
7752
7753static __init int register_snapshot_cmd(void)
7754{
7755 return register_ftrace_command(&ftrace_snapshot_cmd);
7756}
7757#else
7758static inline __init int register_snapshot_cmd(void) { return 0; }
7759#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7760
7761static struct dentry *tracing_get_dentry(struct trace_array *tr)
7762{
7763 if (WARN_ON(!tr->dir))
7764 return ERR_PTR(-ENODEV);
7765
7766 /* Top directory uses NULL as the parent */
7767 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7768 return NULL;
7769
7770 /* All sub buffers have a descriptor */
7771 return tr->dir;
7772}
7773
7774static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7775{
7776 struct dentry *d_tracer;
7777
7778 if (tr->percpu_dir)
7779 return tr->percpu_dir;
7780
7781 d_tracer = tracing_get_dentry(tr);
7782 if (IS_ERR(d_tracer))
7783 return NULL;
7784
7785 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7786
7787 WARN_ONCE(!tr->percpu_dir,
7788 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7789
7790 return tr->percpu_dir;
7791}
7792
7793static struct dentry *
7794trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7795 void *data, long cpu, const struct file_operations *fops)
7796{
7797 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7798
7799 if (ret) /* See tracing_get_cpu() */
7800 d_inode(ret)->i_cdev = (void *)(cpu + 1);
7801 return ret;
7802}
7803
7804static void
7805tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7806{
7807 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7808 struct dentry *d_cpu;
7809 char cpu_dir[30]; /* 30 characters should be more than enough */
7810
7811 if (!d_percpu)
7812 return;
7813
7814 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7815 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7816 if (!d_cpu) {
7817 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7818 return;
7819 }
7820
7821 /* per cpu trace_pipe */
7822 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7823 tr, cpu, &tracing_pipe_fops);
7824
7825 /* per cpu trace */
7826 trace_create_cpu_file("trace", 0644, d_cpu,
7827 tr, cpu, &tracing_fops);
7828
7829 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7830 tr, cpu, &tracing_buffers_fops);
7831
7832 trace_create_cpu_file("stats", 0444, d_cpu,
7833 tr, cpu, &tracing_stats_fops);
7834
7835 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7836 tr, cpu, &tracing_entries_fops);
7837
7838#ifdef CONFIG_TRACER_SNAPSHOT
7839 trace_create_cpu_file("snapshot", 0644, d_cpu,
7840 tr, cpu, &snapshot_fops);
7841
7842 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7843 tr, cpu, &snapshot_raw_fops);
7844#endif
7845}
7846
7847#ifdef CONFIG_FTRACE_SELFTEST
7848/* Let selftest have access to static functions in this file */
7849#include "trace_selftest.c"
7850#endif
7851
7852static ssize_t
7853trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7854 loff_t *ppos)
7855{
7856 struct trace_option_dentry *topt = filp->private_data;
7857 char *buf;
7858
7859 if (topt->flags->val & topt->opt->bit)
7860 buf = "1\n";
7861 else
7862 buf = "0\n";
7863
7864 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7865}
7866
7867static ssize_t
7868trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7869 loff_t *ppos)
7870{
7871 struct trace_option_dentry *topt = filp->private_data;
7872 unsigned long val;
7873 int ret;
7874
7875 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7876 if (ret)
7877 return ret;
7878
7879 if (val != 0 && val != 1)
7880 return -EINVAL;
7881
7882 if (!!(topt->flags->val & topt->opt->bit) != val) {
7883 mutex_lock(&trace_types_lock);
7884 ret = __set_tracer_option(topt->tr, topt->flags,
7885 topt->opt, !val);
7886 mutex_unlock(&trace_types_lock);
7887 if (ret)
7888 return ret;
7889 }
7890
7891 *ppos += cnt;
7892
7893 return cnt;
7894}
7895
7896
7897static const struct file_operations trace_options_fops = {
7898 .open = tracing_open_generic,
7899 .read = trace_options_read,
7900 .write = trace_options_write,
7901 .llseek = generic_file_llseek,
7902};
7903
7904/*
7905 * In order to pass in both the trace_array descriptor as well as the index
7906 * to the flag that the trace option file represents, the trace_array
7907 * has a character array of trace_flags_index[], which holds the index
7908 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7909 * The address of this character array is passed to the flag option file
7910 * read/write callbacks.
7911 *
7912 * In order to extract both the index and the trace_array descriptor,
7913 * get_tr_index() uses the following algorithm.
7914 *
7915 * idx = *ptr;
7916 *
7917 * Since index[i] == i, dereferencing the pointer yields the index
7918 * itself (remember index[1] == 1).
7919 *
7920 * Then, to get the trace_array descriptor, we subtract that index
7921 * from the pointer, which gets us to the start of the index array:
7922 *
7923 * ptr - idx == &index[0]
7924 *
7925 * Then a simple container_of() from that pointer gets us to the
7926 * trace_array descriptor.
7927 */
7928static void get_tr_index(void *data, struct trace_array **ptr,
7929 unsigned int *pindex)
7930{
7931 *pindex = *(unsigned char *)data;
7932
7933 *ptr = container_of(data - *pindex, struct trace_array,
7934 trace_flags_index);
7935}
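/*
 * Worked example (for illustration): if data points at
 * tr->trace_flags_index[3], then *pindex == 3, data - 3 is
 * &tr->trace_flags_index[0], and container_of() on that address
 * recovers the enclosing trace_array.
 */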
7936
7937static ssize_t
7938trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7939 loff_t *ppos)
7940{
7941 void *tr_index = filp->private_data;
7942 struct trace_array *tr;
7943 unsigned int index;
7944 char *buf;
7945
7946 get_tr_index(tr_index, &tr, &index);
7947
7948 if (tr->trace_flags & (1 << index))
7949 buf = "1\n";
7950 else
7951 buf = "0\n";
7952
7953 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7954}
7955
7956static ssize_t
7957trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7958 loff_t *ppos)
7959{
7960 void *tr_index = filp->private_data;
7961 struct trace_array *tr;
7962 unsigned int index;
7963 unsigned long val;
7964 int ret;
7965
7966 get_tr_index(tr_index, &tr, &index);
7967
7968 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7969 if (ret)
7970 return ret;
7971
7972 if (val != 0 && val != 1)
7973 return -EINVAL;
7974
7975 mutex_lock(&trace_types_lock);
7976 ret = set_tracer_flag(tr, 1 << index, val);
7977 mutex_unlock(&trace_types_lock);
7978
7979 if (ret < 0)
7980 return ret;
7981
7982 *ppos += cnt;
7983
7984 return cnt;
7985}
7986
7987static const struct file_operations trace_options_core_fops = {
7988 .open = tracing_open_generic,
7989 .read = trace_options_core_read,
7990 .write = trace_options_core_write,
7991 .llseek = generic_file_llseek,
7992};
7993
7994struct dentry *trace_create_file(const char *name,
7995 umode_t mode,
7996 struct dentry *parent,
7997 void *data,
7998 const struct file_operations *fops)
7999{
8000 struct dentry *ret;
8001
8002 ret = tracefs_create_file(name, mode, parent, data, fops);
8003 if (!ret)
8004 pr_warn("Could not create tracefs '%s' entry\n", name);
8005
8006 return ret;
8007}
8008
8009
8010static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8011{
8012 struct dentry *d_tracer;
8013
8014 if (tr->options)
8015 return tr->options;
8016
8017 d_tracer = tracing_get_dentry(tr);
8018 if (IS_ERR(d_tracer))
8019 return NULL;
8020
8021 tr->options = tracefs_create_dir("options", d_tracer);
8022 if (!tr->options) {
8023 pr_warn("Could not create tracefs directory 'options'\n");
8024 return NULL;
8025 }
8026
8027 return tr->options;
8028}
8029
8030static void
8031create_trace_option_file(struct trace_array *tr,
8032 struct trace_option_dentry *topt,
8033 struct tracer_flags *flags,
8034 struct tracer_opt *opt)
8035{
8036 struct dentry *t_options;
8037
8038 t_options = trace_options_init_dentry(tr);
8039 if (!t_options)
8040 return;
8041
8042 topt->flags = flags;
8043 topt->opt = opt;
8044 topt->tr = tr;
8045
8046 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8047 &trace_options_fops);
8048
8049}
8050
8051static void
8052create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8053{
8054 struct trace_option_dentry *topts;
8055 struct trace_options *tr_topts;
8056 struct tracer_flags *flags;
8057 struct tracer_opt *opts;
8058 int cnt;
8059 int i;
8060
8061 if (!tracer)
8062 return;
8063
8064 flags = tracer->flags;
8065
8066 if (!flags || !flags->opts)
8067 return;
8068
8069 /*
8070 * If this is an instance, only create flags for tracers
8071 * the instance may have.
8072 */
8073 if (!trace_ok_for_array(tracer, tr))
8074 return;
8075
8076 for (i = 0; i < tr->nr_topts; i++) {
8077		/* Make sure there are no duplicate flags. */
8078 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8079 return;
8080 }
8081
8082 opts = flags->opts;
8083
8084 for (cnt = 0; opts[cnt].name; cnt++)
8085 ;
8086
8087 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8088 if (!topts)
8089 return;
8090
8091 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8092 GFP_KERNEL);
8093 if (!tr_topts) {
8094 kfree(topts);
8095 return;
8096 }
8097
8098 tr->topts = tr_topts;
8099 tr->topts[tr->nr_topts].tracer = tracer;
8100 tr->topts[tr->nr_topts].topts = topts;
8101 tr->nr_topts++;
8102
8103 for (cnt = 0; opts[cnt].name; cnt++) {
8104 create_trace_option_file(tr, &topts[cnt], flags,
8105 &opts[cnt]);
8106 WARN_ONCE(topts[cnt].entry == NULL,
8107 "Failed to create trace option: %s",
8108 opts[cnt].name);
8109 }
8110}
8111
8112static struct dentry *
8113create_trace_option_core_file(struct trace_array *tr,
8114 const char *option, long index)
8115{
8116 struct dentry *t_options;
8117
8118 t_options = trace_options_init_dentry(tr);
8119 if (!t_options)
8120 return NULL;
8121
8122 return trace_create_file(option, 0644, t_options,
8123 (void *)&tr->trace_flags_index[index],
8124 &trace_options_core_fops);
8125}
8126
static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}

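/*
 * Handlers for the "tracing_on" control file. Reading reports whether
 * the ring buffer is recording; writing 0 or 1 turns recording off or
 * on and calls the current tracer's stop or start callback.
 */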
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};

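/*
 * Handlers for the "buffer_percent" file, which sets how full the
 * ring buffer needs to be before waiting readers are woken up.
 * Writing 0 is treated as the minimum of 1; values above 100 are
 * rejected with -EINVAL.
 */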
static ssize_t
buffer_percent_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tr->buffer_percent;
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
buffer_percent_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val > 100)
		return -EINVAL;

	if (!val)
		val = 1;

	tr->buffer_percent = val;

	(*ppos)++;

	return cnt;
}

static const struct file_operations buffer_percent_fops = {
	.open = tracing_open_generic_tr,
	.read = buffer_percent_read,
	.write = buffer_percent_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};

static struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

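/*
 * Allocate one ring buffer, with its per-cpu data, for a trace array.
 * Used for the main trace buffer and, with CONFIG_TRACER_MAX_TRACE,
 * for the max (snapshot) buffer as well.
 */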
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

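/**
 * trace_array_create - create a new trace array instance
 * @name: the name of the instance to create
 *
 * Allocate a trace array with its own ring buffers and tracefs
 * directory, add it to ftrace_trace_arrays, and return it.
 *
 * Returns the new trace array on success, or an ERR_PTR() on failure
 * (-EEXIST if an instance with @name already exists).
 */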
struct trace_array *trace_array_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	INIT_LIST_HEAD(&tr->hist_vars);
	INIT_LIST_HEAD(&tr->err_log);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return tr;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(trace_array_create);

static int instance_mkdir(const char *name)
{
	return PTR_ERR_OR_ZERO(trace_array_create(name));
}

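/*
 * Tear down one trace array instance. Callers must hold both
 * event_mutex and trace_types_lock. Fails with -EBUSY while the
 * instance or its current tracer is still referenced.
 */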
static int __remove_instance(struct trace_array *tr)
{
	int i;

	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		return -EBUSY;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++)
		kfree(tr->topts[i].topts);
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);
	tr = NULL;

	return 0;
}

int trace_array_destroy(struct trace_array *tr)
{
	int ret;

	if (!tr)
		return -EINVAL;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = __remove_instance(tr);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_array_destroy);

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			ret = __remove_instance(tr);
			break;
		}
	}

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}

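/*
 * Create the standard set of tracing control files (current_tracer,
 * trace, trace_pipe, buffer_size_kb, etc.) for a trace array under
 * @d_tracer. Used for both the top level directory and instances.
 */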
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	struct trace_event_file *file;
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	file = __find_event_file(tr, "ftrace", "print");
	if (file && file->dir)
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);
	tr->trace_marker_file = file;

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
			  &trace_time_stamp_mode_fops);

	tr->buffer_percent = 50;

	trace_create_file("buffer_percent", 0644, d_tracer,
			  tr, &buffer_percent_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	trace_create_file("error_log", 0644, d_tracer,
			  tr, &tracing_err_log_fops);

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}

static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
	    (IS_ENABLED(CONFIG_DEBUG_FS) &&
	     WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);

	return NULL;
}

extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

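/*
 * Register the eval maps (enum/sizeof translations used by trace event
 * print formats) that are built into the kernel image, delimited by
 * the linker section markers declared above.
 */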
static void __init trace_eval_init(void)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created; do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

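/*
 * Called at fs_initcall time to populate the top level tracefs
 * directory with the global trace files and the "instances"
 * directory.
 */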
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	event_trace_init();

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to a max of 1024; we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero terminated, but we are paranoid */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

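/*
 * Set up an iterator over the global trace buffer, covering all CPUs,
 * for use by ftrace_dump().
 */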
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);
	printk_nmi_direct_enter();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read all that we can,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	printk_nmi_direct_exit();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

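/*
 * Split @buf into whitespace separated arguments and hand them to
 * @createfn as a single command. Returns whatever @createfn returns,
 * or -ENOMEM if the argument vector could not be allocated.
 */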
int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}

#define WRITE_BUFSIZE  4096

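/*
 * Copy a user space write in WRITE_BUFSIZE chunks, split it into
 * newline terminated commands, strip '#' comments, and run each
 * command through trace_run_command().
 */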
ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = trace_run_command(buf, createfn);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}

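/*
 * Allocate the global (top level) trace buffers and bootstrap the
 * rest of the tracing infrastructure: CPU hotplug callbacks, the nop
 * tracer, and the panic/die notifiers. Called from early_trace_init().
 */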
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

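/*
 * Called early in start_kernel(), before trace_init(), so that
 * trace_printk() and boot-time tracers can be used as soon as
 * possible.
 */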
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}

void __init trace_init(void)
{
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer set at boot is held in an init section.
	 * This function is called at late_initcall time. If the boot
	 * tracer was never registered, clear the pointer out to prevent
	 * a later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
late_initcall_sync(tracing_set_default_clock);
#endif
1/*
2 * ring buffer based function tracer
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
12 * Copyright (C) 2004 William Lee Irwin III
13 */
14#include <linux/ring_buffer.h>
15#include <generated/utsrelease.h>
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
20#include <linux/notifier.h>
21#include <linux/irqflags.h>
22#include <linux/debugfs.h>
23#include <linux/pagemap.h>
24#include <linux/hardirq.h>
25#include <linux/linkage.h>
26#include <linux/uaccess.h>
27#include <linux/kprobes.h>
28#include <linux/ftrace.h>
29#include <linux/module.h>
30#include <linux/percpu.h>
31#include <linux/splice.h>
32#include <linux/kdebug.h>
33#include <linux/string.h>
34#include <linux/rwsem.h>
35#include <linux/slab.h>
36#include <linux/ctype.h>
37#include <linux/init.h>
38#include <linux/poll.h>
39#include <linux/nmi.h>
40#include <linux/fs.h>
41
42#include "trace.h"
43#include "trace_output.h"
44
45/*
46 * On boot up, the ring buffer is set to the minimum size, so that
47 * we do not waste memory on systems that are not using tracing.
48 */
49int ring_buffer_expanded;
50
51/*
52 * We need to change this state when a selftest is running.
53 * A selftest will lurk into the ring-buffer to count the
54 * entries inserted during the selftest although some concurrent
55 * insertions into the ring-buffer such as trace_printk could occurred
56 * at the same time, giving false positive or negative results.
57 */
58static bool __read_mostly tracing_selftest_running;
59
60/*
61 * If a tracer is running, we do not want to run SELFTEST.
62 */
63bool __read_mostly tracing_selftest_disabled;
64
65/* For tracers that don't implement custom flags */
66static struct tracer_opt dummy_tracer_opt[] = {
67 { }
68};
69
70static struct tracer_flags dummy_tracer_flags = {
71 .val = 0,
72 .opts = dummy_tracer_opt
73};
74
75static int dummy_set_flag(u32 old_flags, u32 bit, int set)
76{
77 return 0;
78}
79
80/*
81 * Kill all tracing for good (never come back).
82 * It is initialized to 1 but will turn to zero if the initialization
83 * of the tracer is successful. But that is the only place that sets
84 * this back to zero.
85 */
86static int tracing_disabled = 1;
87
88DEFINE_PER_CPU(int, ftrace_cpu_disabled);
89
90cpumask_var_t __read_mostly tracing_buffer_mask;
91
92/*
93 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
94 *
95 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
96 * is set, then ftrace_dump is called. This will output the contents
97 * of the ftrace buffers to the console. This is very useful for
98 * capturing traces that lead to crashes and outputing it to a
99 * serial console.
100 *
101 * It is default off, but you can enable it with either specifying
102 * "ftrace_dump_on_oops" in the kernel command line, or setting
103 * /proc/sys/kernel/ftrace_dump_on_oops
104 * Set 1 if you want to dump buffers of all CPUs
105 * Set 2 if you want to dump the buffer of the CPU that triggered oops
106 */
107
108enum ftrace_dump_mode ftrace_dump_on_oops;
109
110static int tracing_set_tracer(const char *buf);
111
112#define MAX_TRACER_SIZE 100
113static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
114static char *default_bootup_tracer;
115
116static int __init set_cmdline_ftrace(char *str)
117{
118 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
119 default_bootup_tracer = bootup_tracer_buf;
120 /* We are using ftrace early, expand it */
121 ring_buffer_expanded = 1;
122 return 1;
123}
124__setup("ftrace=", set_cmdline_ftrace);
125
126static int __init set_ftrace_dump_on_oops(char *str)
127{
128 if (*str++ != '=' || !*str) {
129 ftrace_dump_on_oops = DUMP_ALL;
130 return 1;
131 }
132
133 if (!strcmp("orig_cpu", str)) {
134 ftrace_dump_on_oops = DUMP_ORIG;
135 return 1;
136 }
137
138 return 0;
139}
140__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
141
142unsigned long long ns2usecs(cycle_t nsec)
143{
144 nsec += 500;
145 do_div(nsec, 1000);
146 return nsec;
147}
148
149/*
150 * The global_trace is the descriptor that holds the tracing
151 * buffers for the live tracing. For each CPU, it contains
152 * a link list of pages that will store trace entries. The
153 * page descriptor of the pages in the memory is used to hold
154 * the link list by linking the lru item in the page descriptor
155 * to each of the pages in the buffer per CPU.
156 *
157 * For each active CPU there is a data field that holds the
158 * pages for the buffer for that CPU. Each CPU has the same number
159 * of pages allocated for its buffer.
160 */
161static struct trace_array global_trace;
162
163static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
164
165int filter_current_check_discard(struct ring_buffer *buffer,
166 struct ftrace_event_call *call, void *rec,
167 struct ring_buffer_event *event)
168{
169 return filter_check_discard(call, rec, buffer, event);
170}
171EXPORT_SYMBOL_GPL(filter_current_check_discard);
172
173cycle_t ftrace_now(int cpu)
174{
175 u64 ts;
176
177 /* Early boot up does not have a buffer yet */
178 if (!global_trace.buffer)
179 return trace_clock_local();
180
181 ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
182 ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
183
184 return ts;
185}
186
187/*
188 * The max_tr is used to snapshot the global_trace when a maximum
189 * latency is reached. Some tracers will use this to store a maximum
190 * trace while it continues examining live traces.
191 *
192 * The buffers for the max_tr are set up the same as the global_trace.
193 * When a snapshot is taken, the link list of the max_tr is swapped
194 * with the link list of the global_trace and the buffers are reset for
195 * the global_trace so the tracing can continue.
196 */
197static struct trace_array max_tr;
198
199static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
200
201/* tracer_enabled is used to toggle activation of a tracer */
202static int tracer_enabled = 1;
203
204/**
205 * tracing_is_enabled - return tracer_enabled status
206 *
207 * This function is used by other tracers to know the status
208 * of the tracer_enabled flag. Tracers may use this function
209 * to know if it should enable their features when starting
210 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
211 */
212int tracing_is_enabled(void)
213{
214 return tracer_enabled;
215}
216
217/*
218 * trace_buf_size is the size in bytes that is allocated
219 * for a buffer. Note, the number of bytes is always rounded
220 * to page size.
221 *
222 * This number is purposely set to a low number of 16384.
223 * If the dump on oops happens, it will be much appreciated
224 * to not have to wait for all that output. Anyway this can be
225 * boot time and run time configurable.
226 */
227#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
228
229static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
230
231/* trace_types holds a link list of available tracers. */
232static struct tracer *trace_types __read_mostly;
233
234/* current_trace points to the tracer that is currently active */
235static struct tracer *current_trace __read_mostly;
236
237/*
238 * trace_types_lock is used to protect the trace_types list.
239 */
240static DEFINE_MUTEX(trace_types_lock);
241
242/*
243 * serialize the access of the ring buffer
244 *
245 * ring buffer serializes readers, but it is low level protection.
246 * The validity of the events (which returns by ring_buffer_peek() ..etc)
247 * are not protected by ring buffer.
248 *
249 * The content of events may become garbage if we allow other process consumes
250 * these events concurrently:
251 * A) the page of the consumed events may become a normal page
252 * (not reader page) in ring buffer, and this page will be rewrited
253 * by events producer.
254 * B) The page of the consumed events may become a page for splice_read,
255 * and this page will be returned to system.
256 *
257 * These primitives allow multi process access to different cpu ring buffer
258 * concurrently.
259 *
260 * These primitives don't distinguish read-only and read-consume access.
261 * Multi read-only access are also serialized.
262 */
263
264#ifdef CONFIG_SMP
265static DECLARE_RWSEM(all_cpu_access_lock);
266static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
267
268static inline void trace_access_lock(int cpu)
269{
270 if (cpu == TRACE_PIPE_ALL_CPU) {
271 /* gain it for accessing the whole ring buffer. */
272 down_write(&all_cpu_access_lock);
273 } else {
274 /* gain it for accessing a cpu ring buffer. */
275
276 /* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
277 down_read(&all_cpu_access_lock);
278
279 /* Secondly block other access to this @cpu ring buffer. */
280 mutex_lock(&per_cpu(cpu_access_lock, cpu));
281 }
282}
283
284static inline void trace_access_unlock(int cpu)
285{
286 if (cpu == TRACE_PIPE_ALL_CPU) {
287 up_write(&all_cpu_access_lock);
288 } else {
289 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
290 up_read(&all_cpu_access_lock);
291 }
292}
293
294static inline void trace_access_lock_init(void)
295{
296 int cpu;
297
298 for_each_possible_cpu(cpu)
299 mutex_init(&per_cpu(cpu_access_lock, cpu));
300}
301
302#else
303
304static DEFINE_MUTEX(access_lock);
305
306static inline void trace_access_lock(int cpu)
307{
308 (void)cpu;
309 mutex_lock(&access_lock);
310}
311
312static inline void trace_access_unlock(int cpu)
313{
314 (void)cpu;
315 mutex_unlock(&access_lock);
316}
317
318static inline void trace_access_lock_init(void)
319{
320}
321
322#endif
323
324/* trace_wait is a waitqueue for tasks blocked on trace_poll */
325static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
326
327/* trace_flags holds trace_options default values */
328unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
329 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
330 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
331 TRACE_ITER_IRQ_INFO;
332
333static int trace_stop_count;
334static DEFINE_RAW_SPINLOCK(tracing_start_lock);
335
336static void wakeup_work_handler(struct work_struct *work)
337{
338 wake_up(&trace_wait);
339}
340
341static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
342
343/**
344 * tracing_on - enable tracing buffers
345 *
346 * This function enables tracing buffers that may have been
347 * disabled with tracing_off.
348 */
349void tracing_on(void)
350{
351 if (global_trace.buffer)
352 ring_buffer_record_on(global_trace.buffer);
353 /*
354 * This flag is only looked at when buffers haven't been
355 * allocated yet. We don't really care about the race
356 * between setting this flag and actually turning
357 * on the buffer.
358 */
359 global_trace.buffer_disabled = 0;
360}
361EXPORT_SYMBOL_GPL(tracing_on);
362
363/**
364 * tracing_off - turn off tracing buffers
365 *
366 * This function stops the tracing buffers from recording data.
367 * It does not disable any overhead the tracers themselves may
368 * be causing. This function simply causes all recording to
369 * the ring buffers to fail.
370 */
371void tracing_off(void)
372{
373 if (global_trace.buffer)
374 ring_buffer_record_off(global_trace.buffer);
375 /*
376 * This flag is only looked at when buffers haven't been
377 * allocated yet. We don't really care about the race
378 * between setting this flag and actually turning
379 * on the buffer.
380 */
381 global_trace.buffer_disabled = 1;
382}
383EXPORT_SYMBOL_GPL(tracing_off);
384
385/**
386 * tracing_is_on - show state of ring buffers enabled
387 */
388int tracing_is_on(void)
389{
390 if (global_trace.buffer)
391 return ring_buffer_record_is_on(global_trace.buffer);
392 return !global_trace.buffer_disabled;
393}
394EXPORT_SYMBOL_GPL(tracing_is_on);
395
396/**
397 * trace_wake_up - wake up tasks waiting for trace input
398 *
399 * Schedules a delayed work to wake up any task that is blocked on the
400 * trace_wait queue. These is used with trace_poll for tasks polling the
401 * trace.
402 */
403void trace_wake_up(void)
404{
405 const unsigned long delay = msecs_to_jiffies(2);
406
407 if (trace_flags & TRACE_ITER_BLOCK)
408 return;
409 schedule_delayed_work(&wakeup_work, delay);
410}
411
412static int __init set_buf_size(char *str)
413{
414 unsigned long buf_size;
415
416 if (!str)
417 return 0;
418 buf_size = memparse(str, &str);
419 /* nr_entries can not be zero */
420 if (buf_size == 0)
421 return 0;
422 trace_buf_size = buf_size;
423 return 1;
424}
425__setup("trace_buf_size=", set_buf_size);
426
427static int __init set_tracing_thresh(char *str)
428{
429 unsigned long threshhold;
430 int ret;
431
432 if (!str)
433 return 0;
434 ret = strict_strtoul(str, 0, &threshhold);
435 if (ret < 0)
436 return 0;
437 tracing_thresh = threshhold * 1000;
438 return 1;
439}
440__setup("tracing_thresh=", set_tracing_thresh);
441
442unsigned long nsecs_to_usecs(unsigned long nsecs)
443{
444 return nsecs / 1000;
445}
446
447/* These must match the bit postions in trace_iterator_flags */
448static const char *trace_options[] = {
449 "print-parent",
450 "sym-offset",
451 "sym-addr",
452 "verbose",
453 "raw",
454 "hex",
455 "bin",
456 "block",
457 "stacktrace",
458 "trace_printk",
459 "ftrace_preempt",
460 "branch",
461 "annotate",
462 "userstacktrace",
463 "sym-userobj",
464 "printk-msg-only",
465 "context-info",
466 "latency-format",
467 "sleep-time",
468 "graph-time",
469 "record-cmd",
470 "overwrite",
471 "disable_on_free",
472 "irq-info",
473 NULL
474};
475
476static struct {
477 u64 (*func)(void);
478 const char *name;
479} trace_clocks[] = {
480 { trace_clock_local, "local" },
481 { trace_clock_global, "global" },
482 { trace_clock_counter, "counter" },
483};
484
485int trace_clock_id;
486
487/*
488 * trace_parser_get_init - gets the buffer for trace parser
489 */
490int trace_parser_get_init(struct trace_parser *parser, int size)
491{
492 memset(parser, 0, sizeof(*parser));
493
494 parser->buffer = kmalloc(size, GFP_KERNEL);
495 if (!parser->buffer)
496 return 1;
497
498 parser->size = size;
499 return 0;
500}
501
502/*
503 * trace_parser_put - frees the buffer for trace parser
504 */
505void trace_parser_put(struct trace_parser *parser)
506{
507 kfree(parser->buffer);
508}
509
510/*
511 * trace_get_user - reads the user input string separated by space
512 * (matched by isspace(ch))
513 *
514 * For each string found the 'struct trace_parser' is updated,
515 * and the function returns.
516 *
517 * Returns number of bytes read.
518 *
519 * See kernel/trace/trace.h for 'struct trace_parser' details.
520 */
521int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
522 size_t cnt, loff_t *ppos)
523{
524 char ch;
525 size_t read = 0;
526 ssize_t ret;
527
528 if (!*ppos)
529 trace_parser_clear(parser);
530
531 ret = get_user(ch, ubuf++);
532 if (ret)
533 goto out;
534
535 read++;
536 cnt--;
537
538 /*
539 * The parser is not finished with the last write,
540 * continue reading the user input without skipping spaces.
541 */
542 if (!parser->cont) {
543 /* skip white space */
544 while (cnt && isspace(ch)) {
545 ret = get_user(ch, ubuf++);
546 if (ret)
547 goto out;
548 read++;
549 cnt--;
550 }
551
552 /* only spaces were written */
553 if (isspace(ch)) {
554 *ppos += read;
555 ret = read;
556 goto out;
557 }
558
559 parser->idx = 0;
560 }
561
562 /* read the non-space input */
563 while (cnt && !isspace(ch)) {
564 if (parser->idx < parser->size - 1)
565 parser->buffer[parser->idx++] = ch;
566 else {
567 ret = -EINVAL;
568 goto out;
569 }
570 ret = get_user(ch, ubuf++);
571 if (ret)
572 goto out;
573 read++;
574 cnt--;
575 }
576
577 /* We either got finished input or we have to wait for another call. */
578 if (isspace(ch)) {
579 parser->buffer[parser->idx] = 0;
580 parser->cont = false;
581 } else {
582 parser->cont = true;
583 parser->buffer[parser->idx++] = ch;
584 }
585
586 *ppos += read;
587 ret = read;
588
589out:
590 return ret;
591}
592
593ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
594{
595 int len;
596 int ret;
597
598 if (!cnt)
599 return 0;
600
601 if (s->len <= s->readpos)
602 return -EBUSY;
603
604 len = s->len - s->readpos;
605 if (cnt > len)
606 cnt = len;
607 ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
608 if (ret == cnt)
609 return -EFAULT;
610
611 cnt -= ret;
612
613 s->readpos += cnt;
614 return cnt;
615}
616
617static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
618{
619 int len;
620
621 if (s->len <= s->readpos)
622 return -EBUSY;
623
624 len = s->len - s->readpos;
625 if (cnt > len)
626 cnt = len;
627 memcpy(buf, s->buffer + s->readpos, cnt);
628
629 s->readpos += cnt;
630 return cnt;
631}
632
633/*
634 * ftrace_max_lock is used to protect the swapping of buffers
635 * when taking a max snapshot. The buffers themselves are
636 * protected by per_cpu spinlocks. But the action of the swap
637 * needs its own lock.
638 *
639 * This is defined as a arch_spinlock_t in order to help
640 * with performance when lockdep debugging is enabled.
641 *
642 * It is also used in other places outside the update_max_tr
643 * so it needs to be defined outside of the
644 * CONFIG_TRACER_MAX_TRACE.
645 */
646static arch_spinlock_t ftrace_max_lock =
647 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
648
649unsigned long __read_mostly tracing_thresh;
650
651#ifdef CONFIG_TRACER_MAX_TRACE
652unsigned long __read_mostly tracing_max_latency;
653
654/*
655 * Copy the new maximum trace into the separate maximum-trace
656 * structure. (this way the maximum trace is permanently saved,
657 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
658 */
659static void
660__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
661{
662 struct trace_array_cpu *data = tr->data[cpu];
663 struct trace_array_cpu *max_data;
664
665 max_tr.cpu = cpu;
666 max_tr.time_start = data->preempt_timestamp;
667
668 max_data = max_tr.data[cpu];
669 max_data->saved_latency = tracing_max_latency;
670 max_data->critical_start = data->critical_start;
671 max_data->critical_end = data->critical_end;
672
673 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
674 max_data->pid = tsk->pid;
675 max_data->uid = task_uid(tsk);
676 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
677 max_data->policy = tsk->policy;
678 max_data->rt_priority = tsk->rt_priority;
679
680 /* record this tasks comm */
681 tracing_record_cmdline(tsk);
682}
683
684/**
685 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
686 * @tr: tracer
687 * @tsk: the task with the latency
688 * @cpu: The cpu that initiated the trace.
689 *
690 * Flip the buffers between the @tr and the max_tr and record information
691 * about which task was the cause of this latency.
692 */
693void
694update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
695{
696 struct ring_buffer *buf = tr->buffer;
697
698 if (trace_stop_count)
699 return;
700
701 WARN_ON_ONCE(!irqs_disabled());
702 if (!current_trace->use_max_tr) {
703 WARN_ON_ONCE(1);
704 return;
705 }
706 arch_spin_lock(&ftrace_max_lock);
707
708 tr->buffer = max_tr.buffer;
709 max_tr.buffer = buf;
710
711 __update_max_tr(tr, tsk, cpu);
712 arch_spin_unlock(&ftrace_max_lock);
713}
714
715/**
716 * update_max_tr_single - only copy one trace over, and reset the rest
717 * @tr - tracer
718 * @tsk - task with the latency
719 * @cpu - the cpu of the buffer to copy.
720 *
721 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
722 */
723void
724update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
725{
726 int ret;
727
728 if (trace_stop_count)
729 return;
730
731 WARN_ON_ONCE(!irqs_disabled());
732 if (!current_trace->use_max_tr) {
733 WARN_ON_ONCE(1);
734 return;
735 }
736
737 arch_spin_lock(&ftrace_max_lock);
738
739 ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
740
741 if (ret == -EBUSY) {
742 /*
743 * We failed to swap the buffer due to a commit taking
744 * place on this CPU. We fail to record, but we reset
745 * the max trace buffer (no one writes directly to it)
746 * and flag that it failed.
747 */
748 trace_array_printk(&max_tr, _THIS_IP_,
749 "Failed to swap buffers due to commit in progress\n");
750 }
751
752 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
753
754 __update_max_tr(tr, tsk, cpu);
755 arch_spin_unlock(&ftrace_max_lock);
756}
757#endif /* CONFIG_TRACER_MAX_TRACE */
758
759/**
760 * register_tracer - register a tracer with the ftrace system.
761 * @type - the plugin for the tracer
762 *
763 * Register a new plugin tracer.
764 */
765int register_tracer(struct tracer *type)
766{
767 struct tracer *t;
768 int ret = 0;
769
770 if (!type->name) {
771 pr_info("Tracer must have a name\n");
772 return -1;
773 }
774
775 if (strlen(type->name) >= MAX_TRACER_SIZE) {
776 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
777 return -1;
778 }
779
780 mutex_lock(&trace_types_lock);
781
782 tracing_selftest_running = true;
783
784 for (t = trace_types; t; t = t->next) {
785 if (strcmp(type->name, t->name) == 0) {
786 /* already found */
787 pr_info("Tracer %s already registered\n",
788 type->name);
789 ret = -1;
790 goto out;
791 }
792 }
793
794 if (!type->set_flag)
795 type->set_flag = &dummy_set_flag;
796 if (!type->flags)
797 type->flags = &dummy_tracer_flags;
798 else
799 if (!type->flags->opts)
800 type->flags->opts = dummy_tracer_opt;
801 if (!type->wait_pipe)
802 type->wait_pipe = default_wait_pipe;
803
804
805#ifdef CONFIG_FTRACE_STARTUP_TEST
806 if (type->selftest && !tracing_selftest_disabled) {
807 struct tracer *saved_tracer = current_trace;
808 struct trace_array *tr = &global_trace;
809
810 /*
811 * Run a selftest on this tracer.
812 * Here we reset the trace buffer, and set the current
813 * tracer to be this tracer. The tracer can then run some
814 * internal tracing to verify that everything is in order.
815 * If we fail, we do not register this tracer.
816 */
817 tracing_reset_online_cpus(tr);
818
819 current_trace = type;
820
821 /* If we expanded the buffers, make sure the max is expanded too */
822 if (ring_buffer_expanded && type->use_max_tr)
823 ring_buffer_resize(max_tr.buffer, trace_buf_size,
824 RING_BUFFER_ALL_CPUS);
825
826 /* the test is responsible for initializing and enabling */
827 pr_info("Testing tracer %s: ", type->name);
828 ret = type->selftest(type, tr);
829 /* the test is responsible for resetting too */
830 current_trace = saved_tracer;
831 if (ret) {
832 printk(KERN_CONT "FAILED!\n");
833 goto out;
834 }
835 /* Only reset on passing, to avoid touching corrupted buffers */
836 tracing_reset_online_cpus(tr);
837
838 /* Shrink the max buffer again */
839 if (ring_buffer_expanded && type->use_max_tr)
840 ring_buffer_resize(max_tr.buffer, 1,
841 RING_BUFFER_ALL_CPUS);
842
843 printk(KERN_CONT "PASSED\n");
844 }
845#endif
846
847 type->next = trace_types;
848 trace_types = type;
849
850 out:
851 tracing_selftest_running = false;
852 mutex_unlock(&trace_types_lock);
853
854 if (ret || !default_bootup_tracer)
855 goto out_unlock;
856
857 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
858 goto out_unlock;
859
860 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
861 /* Do we want this tracer to start on bootup? */
862 tracing_set_tracer(type->name);
863 default_bootup_tracer = NULL;
864 /* disable other selftests, since this will break it. */
865 tracing_selftest_disabled = 1;
866#ifdef CONFIG_FTRACE_STARTUP_TEST
867 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
868 type->name);
869#endif
870
871 out_unlock:
872 return ret;
873}
874
875void unregister_tracer(struct tracer *type)
876{
877 struct tracer **t;
878
879 mutex_lock(&trace_types_lock);
880 for (t = &trace_types; *t; t = &(*t)->next) {
881 if (*t == type)
882 goto found;
883 }
884 pr_info("Tracer %s not registered\n", type->name);
885 goto out;
886
887 found:
888 *t = (*t)->next;
889
890 if (type == current_trace && tracer_enabled) {
891 tracer_enabled = 0;
892 tracing_stop();
893 if (current_trace->stop)
894 current_trace->stop(&global_trace);
895 current_trace = &nop_trace;
896 }
897out:
898 mutex_unlock(&trace_types_lock);
899}
900
901void tracing_reset(struct trace_array *tr, int cpu)
902{
903 struct ring_buffer *buffer = tr->buffer;
904
905 ring_buffer_record_disable(buffer);
906
907 /* Make sure all commits have finished */
908 synchronize_sched();
909 ring_buffer_reset_cpu(buffer, cpu);
910
911 ring_buffer_record_enable(buffer);
912}
913
914void tracing_reset_online_cpus(struct trace_array *tr)
915{
916 struct ring_buffer *buffer = tr->buffer;
917 int cpu;
918
919 ring_buffer_record_disable(buffer);
920
921 /* Make sure all commits have finished */
922 synchronize_sched();
923
924 tr->time_start = ftrace_now(tr->cpu);
925
926 for_each_online_cpu(cpu)
927 ring_buffer_reset_cpu(buffer, cpu);
928
929 ring_buffer_record_enable(buffer);
930}
931
932void tracing_reset_current(int cpu)
933{
934 tracing_reset(&global_trace, cpu);
935}
936
937void tracing_reset_current_online_cpus(void)
938{
939 tracing_reset_online_cpus(&global_trace);
940}
941
942#define SAVED_CMDLINES 128
943#define NO_CMDLINE_MAP UINT_MAX
944static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
945static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
946static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
947static int cmdline_idx;
948static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
949
950/* temporary disable recording */
951static atomic_t trace_record_cmdline_disabled __read_mostly;
952
953static void trace_init_cmdlines(void)
954{
955 memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
956 memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
957 cmdline_idx = 0;
958}
959
960int is_tracing_stopped(void)
961{
962 return trace_stop_count;
963}
964
965/**
966 * ftrace_off_permanent - disable all ftrace code permanently
967 *
968 * This should only be called when a serious anomally has
969 * been detected. This will turn off the function tracing,
970 * ring buffers, and other tracing utilites. It takes no
971 * locks and can be called from any context.
972 */
973void ftrace_off_permanent(void)
974{
975 tracing_disabled = 1;
976 ftrace_stop();
977 tracing_off_permanent();
978}
979
980/**
981 * tracing_start - quick start of the tracer
982 *
983 * If tracing is enabled but was stopped by tracing_stop,
984 * this will start the tracer back up.
985 */
986void tracing_start(void)
987{
988 struct ring_buffer *buffer;
989 unsigned long flags;
990
991 if (tracing_disabled)
992 return;
993
994 raw_spin_lock_irqsave(&tracing_start_lock, flags);
995 if (--trace_stop_count) {
996 if (trace_stop_count < 0) {
997 /* Someone screwed up their debugging */
998 WARN_ON_ONCE(1);
999 trace_stop_count = 0;
1000 }
1001 goto out;
1002 }
1003
1004 /* Prevent the buffers from switching */
1005 arch_spin_lock(&ftrace_max_lock);
1006
1007 buffer = global_trace.buffer;
1008 if (buffer)
1009 ring_buffer_record_enable(buffer);
1010
1011 buffer = max_tr.buffer;
1012 if (buffer)
1013 ring_buffer_record_enable(buffer);
1014
1015 arch_spin_unlock(&ftrace_max_lock);
1016
1017 ftrace_start();
1018 out:
1019 raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
1020}
1021
1022/**
1023 * tracing_stop - quick stop of the tracer
1024 *
 * Lightweight way to stop tracing. Use in conjunction with
1026 * tracing_start.
1027 */
1028void tracing_stop(void)
1029{
1030 struct ring_buffer *buffer;
1031 unsigned long flags;
1032
1033 ftrace_stop();
1034 raw_spin_lock_irqsave(&tracing_start_lock, flags);
1035 if (trace_stop_count++)
1036 goto out;
1037
1038 /* Prevent the buffers from switching */
1039 arch_spin_lock(&ftrace_max_lock);
1040
1041 buffer = global_trace.buffer;
1042 if (buffer)
1043 ring_buffer_record_disable(buffer);
1044
1045 buffer = max_tr.buffer;
1046 if (buffer)
1047 ring_buffer_record_disable(buffer);
1048
1049 arch_spin_unlock(&ftrace_max_lock);
1050
1051 out:
1052 raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
1053}
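
/*
 * Example (illustrative sketch): tracing_stop()/tracing_start() bracket a
 * region where the buffers must stay quiet, e.g. while a debugging
 * facility reads them out. The stop count nests, so independent users
 * can pair calls safely. inspect_trace_buffers() is a hypothetical
 * reader:
 *
 *	tracing_stop();
 *	inspect_trace_buffers();
 *	tracing_start();
 */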
1054
1055void trace_stop_cmdline_recording(void);
1056
1057static void trace_save_cmdline(struct task_struct *tsk)
1058{
1059 unsigned pid, idx;
1060
1061 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
1062 return;
1063
1064 /*
1065 * It's not the end of the world if we don't get
1066 * the lock, but we also don't want to spin
1067 * nor do we want to disable interrupts,
1068 * so if we miss here, then better luck next time.
1069 */
1070 if (!arch_spin_trylock(&trace_cmdline_lock))
1071 return;
1072
1073 idx = map_pid_to_cmdline[tsk->pid];
1074 if (idx == NO_CMDLINE_MAP) {
1075 idx = (cmdline_idx + 1) % SAVED_CMDLINES;
1076
1077 /*
1078 * Check whether the cmdline buffer at idx has a pid
1079 * mapped. We are going to overwrite that entry so we
1080 * need to clear the map_pid_to_cmdline. Otherwise we
1081 * would read the new comm for the old pid.
1082 */
1083 pid = map_cmdline_to_pid[idx];
1084 if (pid != NO_CMDLINE_MAP)
1085 map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1086
1087 map_cmdline_to_pid[idx] = tsk->pid;
1088 map_pid_to_cmdline[tsk->pid] = idx;
1089
1090 cmdline_idx = idx;
1091 }
1092
1093 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
1094
1095 arch_spin_unlock(&trace_cmdline_lock);
1096}
1097
1098void trace_find_cmdline(int pid, char comm[])
1099{
1100 unsigned map;
1101
1102 if (!pid) {
1103 strcpy(comm, "<idle>");
1104 return;
1105 }
1106
1107 if (WARN_ON_ONCE(pid < 0)) {
1108 strcpy(comm, "<XXX>");
1109 return;
1110 }
1111
1112 if (pid > PID_MAX_DEFAULT) {
1113 strcpy(comm, "<...>");
1114 return;
1115 }
1116
1117 preempt_disable();
1118 arch_spin_lock(&trace_cmdline_lock);
1119 map = map_pid_to_cmdline[pid];
1120 if (map != NO_CMDLINE_MAP)
1121 strcpy(comm, saved_cmdlines[map]);
1122 else
1123 strcpy(comm, "<...>");
1124
1125 arch_spin_unlock(&trace_cmdline_lock);
1126 preempt_enable();
1127}
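
/*
 * Example (illustrative): resolving a recorded pid back to a command
 * name when rendering an entry; the context printers in trace_output.c
 * do essentially this:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
 */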
1128
1129void tracing_record_cmdline(struct task_struct *tsk)
1130{
1131 if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
1132 !tracing_is_on())
1133 return;
1134
1135 trace_save_cmdline(tsk);
1136}
1137
1138void
1139tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1140 int pc)
1141{
1142 struct task_struct *tsk = current;
1143
1144 entry->preempt_count = pc & 0xff;
1145 entry->pid = (tsk) ? tsk->pid : 0;
1146 entry->padding = 0;
1147 entry->flags =
1148#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
1149 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
1150#else
1151 TRACE_FLAG_IRQS_NOSUPPORT |
1152#endif
1153 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1154 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
1155 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
1156}
1157EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
1158
1159struct ring_buffer_event *
1160trace_buffer_lock_reserve(struct ring_buffer *buffer,
1161 int type,
1162 unsigned long len,
1163 unsigned long flags, int pc)
1164{
1165 struct ring_buffer_event *event;
1166
1167 event = ring_buffer_lock_reserve(buffer, len);
1168 if (event != NULL) {
1169 struct trace_entry *ent = ring_buffer_event_data(event);
1170
1171 tracing_generic_entry_update(ent, flags, pc);
1172 ent->type = type;
1173 }
1174
1175 return event;
1176}
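
/*
 * Example (illustrative sketch): the usual reserve/fill/commit sequence
 * for emitting an entry, as trace_function() below does for TRACE_FN
 * events. A failed reserve means the ring buffer is full or disabled:
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 *					  flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;		// fill in the type specific fields
 *	ring_buffer_unlock_commit(buffer, event);
 */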
1177
1178static inline void
1179__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1180 struct ring_buffer_event *event,
1181 unsigned long flags, int pc,
1182 int wake)
1183{
1184 ring_buffer_unlock_commit(buffer, event);
1185
1186 ftrace_trace_stack(buffer, flags, 6, pc);
1187 ftrace_trace_userstack(buffer, flags, pc);
1188
1189 if (wake)
1190 trace_wake_up();
1191}
1192
1193void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1194 struct ring_buffer_event *event,
1195 unsigned long flags, int pc)
1196{
1197 __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1198}
1199
1200struct ring_buffer_event *
1201trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1202 int type, unsigned long len,
1203 unsigned long flags, int pc)
1204{
1205 *current_rb = global_trace.buffer;
1206 return trace_buffer_lock_reserve(*current_rb,
1207 type, len, flags, pc);
1208}
1209EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
1210
1211void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1212 struct ring_buffer_event *event,
1213 unsigned long flags, int pc)
1214{
1215 __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
1216}
1217EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
1218
1219void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
1220 struct ring_buffer_event *event,
1221 unsigned long flags, int pc)
1222{
1223 __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
1224}
1225EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
1226
1227void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1228 struct ring_buffer_event *event,
1229 unsigned long flags, int pc,
1230 struct pt_regs *regs)
1231{
1232 ring_buffer_unlock_commit(buffer, event);
1233
1234 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1235 ftrace_trace_userstack(buffer, flags, pc);
1236}
1237EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
1238
1239void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1240 struct ring_buffer_event *event)
1241{
1242 ring_buffer_discard_commit(buffer, event);
1243}
1244EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
1245
1246void
1247trace_function(struct trace_array *tr,
1248 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1249 int pc)
1250{
1251 struct ftrace_event_call *call = &event_function;
1252 struct ring_buffer *buffer = tr->buffer;
1253 struct ring_buffer_event *event;
1254 struct ftrace_entry *entry;
1255
1256 /* If we are reading the ring buffer, don't trace */
1257 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
1258 return;
1259
1260 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
1261 flags, pc);
1262 if (!event)
1263 return;
1264 entry = ring_buffer_event_data(event);
1265 entry->ip = ip;
1266 entry->parent_ip = parent_ip;
1267
1268 if (!filter_check_discard(call, entry, buffer, event))
1269 ring_buffer_unlock_commit(buffer, event);
1270}
1271
1272void
1273ftrace(struct trace_array *tr, struct trace_array_cpu *data,
1274 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1275 int pc)
1276{
1277 if (likely(!atomic_read(&data->disabled)))
1278 trace_function(tr, ip, parent_ip, flags, pc);
1279}
1280
1281#ifdef CONFIG_STACKTRACE
1282
1283#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1284struct ftrace_stack {
1285 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1286};
1287
1288static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1289static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1290
1291static void __ftrace_trace_stack(struct ring_buffer *buffer,
1292 unsigned long flags,
1293 int skip, int pc, struct pt_regs *regs)
1294{
1295 struct ftrace_event_call *call = &event_kernel_stack;
1296 struct ring_buffer_event *event;
1297 struct stack_entry *entry;
1298 struct stack_trace trace;
1299 int use_stack;
1300 int size = FTRACE_STACK_ENTRIES;
1301
1302 trace.nr_entries = 0;
1303 trace.skip = skip;
1304
1305 /*
	 * Since events can happen in NMIs, there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_ENTRIES.
1310 */
1311 preempt_disable_notrace();
1312
1313 use_stack = ++__get_cpu_var(ftrace_stack_reserve);
1314 /*
1315 * We don't need any atomic variables, just a barrier.
1316 * If an interrupt comes in, we don't care, because it would
1317 * have exited and put the counter back to what we want.
1318 * We just need a barrier to keep gcc from moving things
1319 * around.
1320 */
1321 barrier();
1322 if (use_stack == 1) {
1323 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1324 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1325
1326 if (regs)
1327 save_stack_trace_regs(regs, &trace);
1328 else
1329 save_stack_trace(&trace);
1330
1331 if (trace.nr_entries > size)
1332 size = trace.nr_entries;
1333 } else
1334 /* From now on, use_stack is a boolean */
1335 use_stack = 0;
1336
1337 size *= sizeof(unsigned long);
1338
1339 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1340 sizeof(*entry) + size, flags, pc);
1341 if (!event)
1342 goto out;
1343 entry = ring_buffer_event_data(event);
1344
1345 memset(&entry->caller, 0, size);
1346
1347 if (use_stack)
1348 memcpy(&entry->caller, trace.entries,
1349 trace.nr_entries * sizeof(unsigned long));
1350 else {
1351 trace.max_entries = FTRACE_STACK_ENTRIES;
1352 trace.entries = entry->caller;
1353 if (regs)
1354 save_stack_trace_regs(regs, &trace);
1355 else
1356 save_stack_trace(&trace);
1357 }
1358
1359 entry->size = trace.nr_entries;
1360
1361 if (!filter_check_discard(call, entry, buffer, event))
1362 ring_buffer_unlock_commit(buffer, event);
1363
1364 out:
1365 /* Again, don't let gcc optimize things here */
1366 barrier();
1367 __get_cpu_var(ftrace_stack_reserve)--;
1368 preempt_enable_notrace();
1369
1370}
1371
1372void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1373 int skip, int pc, struct pt_regs *regs)
1374{
1375 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1376 return;
1377
1378 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1379}
1380
1381void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1382 int skip, int pc)
1383{
1384 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1385 return;
1386
1387 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
1388}
1389
1390void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1391 int pc)
1392{
1393 __ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
1394}
1395
1396/**
1397 * trace_dump_stack - record a stack back trace in the trace buffer
1398 */
1399void trace_dump_stack(void)
1400{
1401 unsigned long flags;
1402
1403 if (tracing_disabled || tracing_selftest_running)
1404 return;
1405
1406 local_save_flags(flags);
1407
	/* Skipping 3 frames seems to get us to the caller of this function */
1409 __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
1410}
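
/*
 * Example (illustrative): sprinkle trace_dump_stack() into a code path
 * under investigation to record who called it, without stopping the
 * machine. suspicious_condition is a hypothetical predicate:
 *
 *	if (unlikely(suspicious_condition))
 *		trace_dump_stack();
 */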
1411
1412static DEFINE_PER_CPU(int, user_stack_count);
1413
1414void
1415ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1416{
1417 struct ftrace_event_call *call = &event_user_stack;
1418 struct ring_buffer_event *event;
1419 struct userstack_entry *entry;
1420 struct stack_trace trace;
1421
1422 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1423 return;
1424
1425 /*
	 * NMIs cannot handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
1428 */
1429 if (unlikely(in_nmi()))
1430 return;
1431
1432 /*
1433 * prevent recursion, since the user stack tracing may
1434 * trigger other kernel events.
1435 */
1436 preempt_disable();
1437 if (__this_cpu_read(user_stack_count))
1438 goto out;
1439
1440 __this_cpu_inc(user_stack_count);
1441
1442 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1443 sizeof(*entry), flags, pc);
1444 if (!event)
1445 goto out_drop_count;
1446 entry = ring_buffer_event_data(event);
1447
1448 entry->tgid = current->tgid;
1449 memset(&entry->caller, 0, sizeof(entry->caller));
1450
1451 trace.nr_entries = 0;
1452 trace.max_entries = FTRACE_STACK_ENTRIES;
1453 trace.skip = 0;
1454 trace.entries = entry->caller;
1455
1456 save_stack_trace_user(&trace);
1457 if (!filter_check_discard(call, entry, buffer, event))
1458 ring_buffer_unlock_commit(buffer, event);
1459
1460 out_drop_count:
1461 __this_cpu_dec(user_stack_count);
1462 out:
1463 preempt_enable();
1464}
1465
1466#ifdef UNUSED
1467static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1468{
1469 ftrace_trace_userstack(tr, flags, preempt_count());
1470}
1471#endif /* UNUSED */
1472
1473#endif /* CONFIG_STACKTRACE */
1474
1475/* created for use with alloc_percpu */
1476struct trace_buffer_struct {
1477 char buffer[TRACE_BUF_SIZE];
1478};
1479
1480static struct trace_buffer_struct *trace_percpu_buffer;
1481static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1482static struct trace_buffer_struct *trace_percpu_irq_buffer;
1483static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1484
1485/*
 * The buffer used depends on the context. There is a per cpu
 * buffer for normal context, softirq context, hardirq context and
 * NMI context. This allows for lockless recording.
1489 *
1490 * Note, if the buffers failed to be allocated, then this returns NULL
1491 */
1492static char *get_trace_buf(void)
1493{
1494 struct trace_buffer_struct *percpu_buffer;
1495 struct trace_buffer_struct *buffer;
1496
1497 /*
1498 * If we have allocated per cpu buffers, then we do not
1499 * need to do any locking.
1500 */
1501 if (in_nmi())
1502 percpu_buffer = trace_percpu_nmi_buffer;
1503 else if (in_irq())
1504 percpu_buffer = trace_percpu_irq_buffer;
1505 else if (in_softirq())
1506 percpu_buffer = trace_percpu_sirq_buffer;
1507 else
1508 percpu_buffer = trace_percpu_buffer;
1509
1510 if (!percpu_buffer)
1511 return NULL;
1512
1513 buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
1514
1515 return buffer->buffer;
1516}
1517
1518static int alloc_percpu_trace_buffer(void)
1519{
1520 struct trace_buffer_struct *buffers;
1521 struct trace_buffer_struct *sirq_buffers;
1522 struct trace_buffer_struct *irq_buffers;
1523 struct trace_buffer_struct *nmi_buffers;
1524
1525 buffers = alloc_percpu(struct trace_buffer_struct);
1526 if (!buffers)
1527 goto err_warn;
1528
1529 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1530 if (!sirq_buffers)
1531 goto err_sirq;
1532
1533 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1534 if (!irq_buffers)
1535 goto err_irq;
1536
1537 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1538 if (!nmi_buffers)
1539 goto err_nmi;
1540
1541 trace_percpu_buffer = buffers;
1542 trace_percpu_sirq_buffer = sirq_buffers;
1543 trace_percpu_irq_buffer = irq_buffers;
1544 trace_percpu_nmi_buffer = nmi_buffers;
1545
1546 return 0;
1547
1548 err_nmi:
1549 free_percpu(irq_buffers);
1550 err_irq:
1551 free_percpu(sirq_buffers);
1552 err_sirq:
1553 free_percpu(buffers);
1554 err_warn:
1555 WARN(1, "Could not allocate percpu trace_printk buffer");
1556 return -ENOMEM;
1557}
1558
1559void trace_printk_init_buffers(void)
1560{
1561 static int buffers_allocated;
1562
1563 if (buffers_allocated)
1564 return;
1565
1566 if (alloc_percpu_trace_buffer())
1567 return;
1568
1569 pr_info("ftrace: Allocated trace_printk buffers\n");
1570
1571 buffers_allocated = 1;
1572}
1573
1574/**
 * trace_vbprintk - write a binary message to the tracing buffer
1577 */
1578int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1579{
1580 struct ftrace_event_call *call = &event_bprint;
1581 struct ring_buffer_event *event;
1582 struct ring_buffer *buffer;
1583 struct trace_array *tr = &global_trace;
1584 struct bprint_entry *entry;
1585 unsigned long flags;
1586 char *tbuffer;
1587 int len = 0, size, pc;
1588
1589 if (unlikely(tracing_selftest_running || tracing_disabled))
1590 return 0;
1591
1592 /* Don't pollute graph traces with trace_vprintk internals */
1593 pause_graph_tracing();
1594
1595 pc = preempt_count();
1596 preempt_disable_notrace();
1597
1598 tbuffer = get_trace_buf();
1599 if (!tbuffer) {
1600 len = 0;
1601 goto out;
1602 }
1603
1604 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
1605
1606 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
1607 goto out;
1608
1609 local_save_flags(flags);
1610 size = sizeof(*entry) + sizeof(u32) * len;
1611 buffer = tr->buffer;
1612 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1613 flags, pc);
1614 if (!event)
1615 goto out;
1616 entry = ring_buffer_event_data(event);
1617 entry->ip = ip;
1618 entry->fmt = fmt;
1619
1620 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
1621 if (!filter_check_discard(call, entry, buffer, event)) {
1622 ring_buffer_unlock_commit(buffer, event);
1623 ftrace_trace_stack(buffer, flags, 6, pc);
1624 }
1625
1626out:
1627 preempt_enable_notrace();
1628 unpause_graph_tracing();
1629
1630 return len;
1631}
1632EXPORT_SYMBOL_GPL(trace_vbprintk);
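
/*
 * Example (illustrative): trace_vbprintk() is the backend of the
 * trace_printk() macro, which debugging code uses to write into the
 * ring buffer instead of the console ("obj" is hypothetical):
 *
 *	trace_printk("processing %s: state=%d\n", obj->name, obj->state);
 */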
1633
1634int trace_array_printk(struct trace_array *tr,
1635 unsigned long ip, const char *fmt, ...)
1636{
1637 int ret;
1638 va_list ap;
1639
1640 if (!(trace_flags & TRACE_ITER_PRINTK))
1641 return 0;
1642
1643 va_start(ap, fmt);
1644 ret = trace_array_vprintk(tr, ip, fmt, ap);
1645 va_end(ap);
1646 return ret;
1647}
1648
1649int trace_array_vprintk(struct trace_array *tr,
1650 unsigned long ip, const char *fmt, va_list args)
1651{
1652 struct ftrace_event_call *call = &event_print;
1653 struct ring_buffer_event *event;
1654 struct ring_buffer *buffer;
1655 int len = 0, size, pc;
1656 struct print_entry *entry;
1657 unsigned long flags;
1658 char *tbuffer;
1659
1660 if (tracing_disabled || tracing_selftest_running)
1661 return 0;
1662
1663 /* Don't pollute graph traces with trace_vprintk internals */
1664 pause_graph_tracing();
1665
1666 pc = preempt_count();
1667 preempt_disable_notrace();
1668
1669
1670 tbuffer = get_trace_buf();
1671 if (!tbuffer) {
1672 len = 0;
1673 goto out;
1674 }
1675
1676 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
1677 if (len > TRACE_BUF_SIZE)
1678 goto out;
1679
1680 local_save_flags(flags);
1681 size = sizeof(*entry) + len + 1;
1682 buffer = tr->buffer;
1683 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
1684 flags, pc);
1685 if (!event)
1686 goto out;
1687 entry = ring_buffer_event_data(event);
1688 entry->ip = ip;
1689
1690 memcpy(&entry->buf, tbuffer, len);
1691 entry->buf[len] = '\0';
1692 if (!filter_check_discard(call, entry, buffer, event)) {
1693 ring_buffer_unlock_commit(buffer, event);
1694 ftrace_trace_stack(buffer, flags, 6, pc);
1695 }
1696 out:
1697 preempt_enable_notrace();
1698 unpause_graph_tracing();
1699
1700 return len;
1701}
1702
1703int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1704{
1705 return trace_array_vprintk(&global_trace, ip, fmt, args);
1706}
1707EXPORT_SYMBOL_GPL(trace_vprintk);
1708
1709static void trace_iterator_increment(struct trace_iterator *iter)
1710{
1711 iter->idx++;
1712 if (iter->buffer_iter[iter->cpu])
1713 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1714}
1715
1716static struct trace_entry *
1717peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
1718 unsigned long *lost_events)
1719{
1720 struct ring_buffer_event *event;
1721 struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
1722
1723 if (buf_iter)
1724 event = ring_buffer_iter_peek(buf_iter, ts);
1725 else
1726 event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
1727 lost_events);
1728
1729 if (event) {
1730 iter->ent_size = ring_buffer_event_length(event);
1731 return ring_buffer_event_data(event);
1732 }
1733 iter->ent_size = 0;
1734 return NULL;
1735}
1736
1737static struct trace_entry *
1738__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1739 unsigned long *missing_events, u64 *ent_ts)
1740{
1741 struct ring_buffer *buffer = iter->tr->buffer;
1742 struct trace_entry *ent, *next = NULL;
1743 unsigned long lost_events = 0, next_lost = 0;
1744 int cpu_file = iter->cpu_file;
1745 u64 next_ts = 0, ts;
1746 int next_cpu = -1;
1747 int next_size = 0;
1748 int cpu;
1749
1750 /*
	 * If we are in a per_cpu trace file, don't bother iterating over
	 * all CPUs; peek at that one directly.
1753 */
1754 if (cpu_file > TRACE_PIPE_ALL_CPU) {
1755 if (ring_buffer_empty_cpu(buffer, cpu_file))
1756 return NULL;
1757 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
1758 if (ent_cpu)
1759 *ent_cpu = cpu_file;
1760
1761 return ent;
1762 }
1763
1764 for_each_tracing_cpu(cpu) {
1765
1766 if (ring_buffer_empty_cpu(buffer, cpu))
1767 continue;
1768
1769 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
1770
1771 /*
1772 * Pick the entry with the smallest timestamp:
1773 */
1774 if (ent && (!next || ts < next_ts)) {
1775 next = ent;
1776 next_cpu = cpu;
1777 next_ts = ts;
1778 next_lost = lost_events;
1779 next_size = iter->ent_size;
1780 }
1781 }
1782
1783 iter->ent_size = next_size;
1784
1785 if (ent_cpu)
1786 *ent_cpu = next_cpu;
1787
1788 if (ent_ts)
1789 *ent_ts = next_ts;
1790
1791 if (missing_events)
1792 *missing_events = next_lost;
1793
1794 return next;
1795}
1796
1797/* Find the next real entry, without updating the iterator itself */
1798struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
1799 int *ent_cpu, u64 *ent_ts)
1800{
1801 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
1802}
1803
1804/* Find the next real entry, and increment the iterator to the next entry */
1805void *trace_find_next_entry_inc(struct trace_iterator *iter)
1806{
1807 iter->ent = __find_next_entry(iter, &iter->cpu,
1808 &iter->lost_events, &iter->ts);
1809
1810 if (iter->ent)
1811 trace_iterator_increment(iter);
1812
1813 return iter->ent ? iter : NULL;
1814}
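
/*
 * Example (illustrative sketch): a consumer drains the merged,
 * timestamp-ordered stream by calling this in a loop, roughly as the
 * trace_pipe reader does:
 *
 *	while (trace_find_next_entry_inc(iter)) {
 *		ret = print_trace_line(iter);
 *		if (ret == TRACE_TYPE_PARTIAL_LINE)
 *			break;		// seq buffer full, try again later
 *	}
 */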
1815
1816static void trace_consume(struct trace_iterator *iter)
1817{
1818 ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
1819 &iter->lost_events);
1820}
1821
1822static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1823{
1824 struct trace_iterator *iter = m->private;
1825 int i = (int)*pos;
1826 void *ent;
1827
1828 WARN_ON_ONCE(iter->leftover);
1829
1830 (*pos)++;
1831
1832 /* can't go backwards */
1833 if (iter->idx > i)
1834 return NULL;
1835
1836 if (iter->idx < 0)
1837 ent = trace_find_next_entry_inc(iter);
1838 else
1839 ent = iter;
1840
1841 while (ent && iter->idx < i)
1842 ent = trace_find_next_entry_inc(iter);
1843
1844 iter->pos = *pos;
1845
1846 return ent;
1847}
1848
1849void tracing_iter_reset(struct trace_iterator *iter, int cpu)
1850{
1851 struct trace_array *tr = iter->tr;
1852 struct ring_buffer_event *event;
1853 struct ring_buffer_iter *buf_iter;
1854 unsigned long entries = 0;
1855 u64 ts;
1856
1857 tr->data[cpu]->skipped_entries = 0;
1858
1859 if (!iter->buffer_iter[cpu])
1860 return;
1861
1862 buf_iter = iter->buffer_iter[cpu];
1863 ring_buffer_iter_reset(buf_iter);
1864
1865 /*
	 * With the max latency tracers, it is possible that a reset
	 * never took place on a cpu. This is evident when the
	 * timestamp is before the start of the buffer.
1869 */
1870 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
1871 if (ts >= iter->tr->time_start)
1872 break;
1873 entries++;
1874 ring_buffer_read(buf_iter, NULL);
1875 }
1876
1877 tr->data[cpu]->skipped_entries = entries;
1878}
1879
1880/*
 * The current tracer is copied to avoid using a global
 * lock all around.
1883 */
1884static void *s_start(struct seq_file *m, loff_t *pos)
1885{
1886 struct trace_iterator *iter = m->private;
1887 static struct tracer *old_tracer;
1888 int cpu_file = iter->cpu_file;
1889 void *p = NULL;
1890 loff_t l = 0;
1891 int cpu;
1892
1893 /* copy the tracer to avoid using a global lock all around */
1894 mutex_lock(&trace_types_lock);
1895 if (unlikely(old_tracer != current_trace && current_trace)) {
1896 old_tracer = current_trace;
1897 *iter->trace = *current_trace;
1898 }
1899 mutex_unlock(&trace_types_lock);
1900
1901 atomic_inc(&trace_record_cmdline_disabled);
1902
1903 if (*pos != iter->pos) {
1904 iter->ent = NULL;
1905 iter->cpu = 0;
1906 iter->idx = -1;
1907
1908 if (cpu_file == TRACE_PIPE_ALL_CPU) {
1909 for_each_tracing_cpu(cpu)
1910 tracing_iter_reset(iter, cpu);
1911 } else
1912 tracing_iter_reset(iter, cpu_file);
1913
1914 iter->leftover = 0;
1915 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1916 ;
1917
1918 } else {
1919 /*
1920 * If we overflowed the seq_file before, then we want
1921 * to just reuse the trace_seq buffer again.
1922 */
1923 if (iter->leftover)
1924 p = iter;
1925 else {
1926 l = *pos - 1;
1927 p = s_next(m, p, &l);
1928 }
1929 }
1930
1931 trace_event_read_lock();
1932 trace_access_lock(cpu_file);
1933 return p;
1934}
1935
1936static void s_stop(struct seq_file *m, void *p)
1937{
1938 struct trace_iterator *iter = m->private;
1939
1940 atomic_dec(&trace_record_cmdline_disabled);
1941 trace_access_unlock(iter->cpu_file);
1942 trace_event_read_unlock();
1943}
1944
1945static void
1946get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
1947{
1948 unsigned long count;
1949 int cpu;
1950
1951 *total = 0;
1952 *entries = 0;
1953
1954 for_each_tracing_cpu(cpu) {
1955 count = ring_buffer_entries_cpu(tr->buffer, cpu);
1956 /*
1957 * If this buffer has skipped entries, then we hold all
1958 * entries for the trace and we need to ignore the
		 * ones before the timestamp.
1960 */
1961 if (tr->data[cpu]->skipped_entries) {
1962 count -= tr->data[cpu]->skipped_entries;
1963 /* total is the same as the entries */
1964 *total += count;
1965 } else
1966 *total += count +
1967 ring_buffer_overrun_cpu(tr->buffer, cpu);
1968 *entries += count;
1969 }
1970}
1971
1972static void print_lat_help_header(struct seq_file *m)
1973{
1974 seq_puts(m, "# _------=> CPU# \n");
1975 seq_puts(m, "# / _-----=> irqs-off \n");
1976 seq_puts(m, "# | / _----=> need-resched \n");
1977 seq_puts(m, "# || / _---=> hardirq/softirq \n");
1978 seq_puts(m, "# ||| / _--=> preempt-depth \n");
1979 seq_puts(m, "# |||| / delay \n");
1980 seq_puts(m, "# cmd pid ||||| time | caller \n");
1981 seq_puts(m, "# \\ / ||||| \\ | / \n");
1982}
1983
1984static void print_event_info(struct trace_array *tr, struct seq_file *m)
1985{
1986 unsigned long total;
1987 unsigned long entries;
1988
1989 get_total_entries(tr, &total, &entries);
1990 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
1991 entries, total, num_online_cpus());
1992 seq_puts(m, "#\n");
1993}
1994
1995static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
1996{
1997 print_event_info(tr, m);
1998 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
1999 seq_puts(m, "# | | | | |\n");
2000}
2001
2002static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
2003{
2004 print_event_info(tr, m);
2005 seq_puts(m, "# _-----=> irqs-off\n");
2006 seq_puts(m, "# / _----=> need-resched\n");
2007 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2008 seq_puts(m, "# || / _--=> preempt-depth\n");
2009 seq_puts(m, "# ||| / delay\n");
2010 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2011 seq_puts(m, "# | | | |||| | |\n");
2012}
2013
2014void
2015print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2016{
2017 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2018 struct trace_array *tr = iter->tr;
2019 struct trace_array_cpu *data = tr->data[tr->cpu];
2020 struct tracer *type = current_trace;
2021 unsigned long entries;
2022 unsigned long total;
2023 const char *name = "preemption";
2024
2025 if (type)
2026 name = type->name;
2027
2028 get_total_entries(tr, &total, &entries);
2029
2030 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2031 name, UTS_RELEASE);
2032 seq_puts(m, "# -----------------------------------"
2033 "---------------------------------\n");
2034 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2035 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2036 nsecs_to_usecs(data->saved_latency),
2037 entries,
2038 total,
2039 tr->cpu,
2040#if defined(CONFIG_PREEMPT_NONE)
2041 "server",
2042#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2043 "desktop",
2044#elif defined(CONFIG_PREEMPT)
2045 "preempt",
2046#else
2047 "unknown",
2048#endif
2049 /* These are reserved for later use */
2050 0, 0, 0, 0);
2051#ifdef CONFIG_SMP
2052 seq_printf(m, " #P:%d)\n", num_online_cpus());
2053#else
2054 seq_puts(m, ")\n");
2055#endif
2056 seq_puts(m, "# -----------------\n");
2057 seq_printf(m, "# | task: %.16s-%d "
2058 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2059 data->comm, data->pid, data->uid, data->nice,
2060 data->policy, data->rt_priority);
2061 seq_puts(m, "# -----------------\n");
2062
2063 if (data->critical_start) {
2064 seq_puts(m, "# => started at: ");
2065 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2066 trace_print_seq(m, &iter->seq);
2067 seq_puts(m, "\n# => ended at: ");
2068 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2069 trace_print_seq(m, &iter->seq);
2070 seq_puts(m, "\n#\n");
2071 }
2072
2073 seq_puts(m, "#\n");
2074}
2075
2076static void test_cpu_buff_start(struct trace_iterator *iter)
2077{
2078 struct trace_seq *s = &iter->seq;
2079
2080 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2081 return;
2082
2083 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2084 return;
2085
2086 if (cpumask_test_cpu(iter->cpu, iter->started))
2087 return;
2088
2089 if (iter->tr->data[iter->cpu]->skipped_entries)
2090 return;
2091
2092 cpumask_set_cpu(iter->cpu, iter->started);
2093
	/* Don't print a "buffer started" line for the first entry of the trace */
2095 if (iter->idx > 1)
2096 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2097 iter->cpu);
2098}
2099
2100static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2101{
2102 struct trace_seq *s = &iter->seq;
2103 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2104 struct trace_entry *entry;
2105 struct trace_event *event;
2106
2107 entry = iter->ent;
2108
2109 test_cpu_buff_start(iter);
2110
2111 event = ftrace_find_event(entry->type);
2112
2113 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2114 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2115 if (!trace_print_lat_context(iter))
2116 goto partial;
2117 } else {
2118 if (!trace_print_context(iter))
2119 goto partial;
2120 }
2121 }
2122
2123 if (event)
2124 return event->funcs->trace(iter, sym_flags, event);
2125
2126 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2127 goto partial;
2128
2129 return TRACE_TYPE_HANDLED;
2130partial:
2131 return TRACE_TYPE_PARTIAL_LINE;
2132}
2133
2134static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2135{
2136 struct trace_seq *s = &iter->seq;
2137 struct trace_entry *entry;
2138 struct trace_event *event;
2139
2140 entry = iter->ent;
2141
2142 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2143 if (!trace_seq_printf(s, "%d %d %llu ",
2144 entry->pid, iter->cpu, iter->ts))
2145 goto partial;
2146 }
2147
2148 event = ftrace_find_event(entry->type);
2149 if (event)
2150 return event->funcs->raw(iter, 0, event);
2151
2152 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2153 goto partial;
2154
2155 return TRACE_TYPE_HANDLED;
2156partial:
2157 return TRACE_TYPE_PARTIAL_LINE;
2158}
2159
2160static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2161{
2162 struct trace_seq *s = &iter->seq;
2163 unsigned char newline = '\n';
2164 struct trace_entry *entry;
2165 struct trace_event *event;
2166
2167 entry = iter->ent;
2168
2169 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2170 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2171 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2172 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2173 }
2174
2175 event = ftrace_find_event(entry->type);
2176 if (event) {
2177 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2178 if (ret != TRACE_TYPE_HANDLED)
2179 return ret;
2180 }
2181
2182 SEQ_PUT_FIELD_RET(s, newline);
2183
2184 return TRACE_TYPE_HANDLED;
2185}
2186
2187static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2188{
2189 struct trace_seq *s = &iter->seq;
2190 struct trace_entry *entry;
2191 struct trace_event *event;
2192
2193 entry = iter->ent;
2194
2195 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2196 SEQ_PUT_FIELD_RET(s, entry->pid);
2197 SEQ_PUT_FIELD_RET(s, iter->cpu);
2198 SEQ_PUT_FIELD_RET(s, iter->ts);
2199 }
2200
2201 event = ftrace_find_event(entry->type);
2202 return event ? event->funcs->binary(iter, 0, event) :
2203 TRACE_TYPE_HANDLED;
2204}
2205
2206int trace_empty(struct trace_iterator *iter)
2207{
2208 int cpu;
2209
2210 /* If we are looking at one CPU buffer, only check that one */
2211 if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
2212 cpu = iter->cpu_file;
2213 if (iter->buffer_iter[cpu]) {
2214 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
2215 return 0;
2216 } else {
2217 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2218 return 0;
2219 }
2220 return 1;
2221 }
2222
2223 for_each_tracing_cpu(cpu) {
2224 if (iter->buffer_iter[cpu]) {
2225 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
2226 return 0;
2227 } else {
2228 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2229 return 0;
2230 }
2231 }
2232
2233 return 1;
2234}
2235
2236/* Called with trace_event_read_lock() held. */
2237enum print_line_t print_trace_line(struct trace_iterator *iter)
2238{
2239 enum print_line_t ret;
2240
2241 if (iter->lost_events &&
2242 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2243 iter->cpu, iter->lost_events))
2244 return TRACE_TYPE_PARTIAL_LINE;
2245
2246 if (iter->trace && iter->trace->print_line) {
2247 ret = iter->trace->print_line(iter);
2248 if (ret != TRACE_TYPE_UNHANDLED)
2249 return ret;
2250 }
2251
2252 if (iter->ent->type == TRACE_BPRINT &&
2253 trace_flags & TRACE_ITER_PRINTK &&
2254 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2255 return trace_print_bprintk_msg_only(iter);
2256
2257 if (iter->ent->type == TRACE_PRINT &&
2258 trace_flags & TRACE_ITER_PRINTK &&
2259 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2260 return trace_print_printk_msg_only(iter);
2261
2262 if (trace_flags & TRACE_ITER_BIN)
2263 return print_bin_fmt(iter);
2264
2265 if (trace_flags & TRACE_ITER_HEX)
2266 return print_hex_fmt(iter);
2267
2268 if (trace_flags & TRACE_ITER_RAW)
2269 return print_raw_fmt(iter);
2270
2271 return print_trace_fmt(iter);
2272}
2273
2274void trace_latency_header(struct seq_file *m)
2275{
2276 struct trace_iterator *iter = m->private;
2277
2278 /* print nothing if the buffers are empty */
2279 if (trace_empty(iter))
2280 return;
2281
2282 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2283 print_trace_header(m, iter);
2284
2285 if (!(trace_flags & TRACE_ITER_VERBOSE))
2286 print_lat_help_header(m);
2287}
2288
2289void trace_default_header(struct seq_file *m)
2290{
2291 struct trace_iterator *iter = m->private;
2292
2293 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2294 return;
2295
2296 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2297 /* print nothing if the buffers are empty */
2298 if (trace_empty(iter))
2299 return;
2300 print_trace_header(m, iter);
2301 if (!(trace_flags & TRACE_ITER_VERBOSE))
2302 print_lat_help_header(m);
2303 } else {
2304 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2305 if (trace_flags & TRACE_ITER_IRQ_INFO)
2306 print_func_help_header_irq(iter->tr, m);
2307 else
2308 print_func_help_header(iter->tr, m);
2309 }
2310 }
2311}
2312
2313static void test_ftrace_alive(struct seq_file *m)
2314{
2315 if (!ftrace_is_dead())
2316 return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
	seq_puts(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2319}
2320
2321static int s_show(struct seq_file *m, void *v)
2322{
2323 struct trace_iterator *iter = v;
2324 int ret;
2325
2326 if (iter->ent == NULL) {
2327 if (iter->tr) {
2328 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2329 seq_puts(m, "#\n");
2330 test_ftrace_alive(m);
2331 }
2332 if (iter->trace && iter->trace->print_header)
2333 iter->trace->print_header(m);
2334 else
2335 trace_default_header(m);
2336
2337 } else if (iter->leftover) {
2338 /*
2339 * If we filled the seq_file buffer earlier, we
2340 * want to just show it now.
2341 */
2342 ret = trace_print_seq(m, &iter->seq);
2343
2344 /* ret should this time be zero, but you never know */
2345 iter->leftover = ret;
2346
2347 } else {
2348 print_trace_line(iter);
2349 ret = trace_print_seq(m, &iter->seq);
2350 /*
2351 * If we overflow the seq_file buffer, then it will
2352 * ask us for this data again at start up.
2353 * Use that instead.
2354 * ret is 0 if seq_file write succeeded.
2355 * -1 otherwise.
2356 */
2357 iter->leftover = ret;
2358 }
2359
2360 return 0;
2361}
2362
2363static const struct seq_operations tracer_seq_ops = {
2364 .start = s_start,
2365 .next = s_next,
2366 .stop = s_stop,
2367 .show = s_show,
2368};
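
/*
 * Illustrative note: the seq_file core drives the operations above
 * roughly like this, which is why s_start() takes the locks that
 * s_stop() later releases:
 *
 *	p = s_start(m, &pos);
 *	while (p) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 */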
2369
2370static struct trace_iterator *
2371__tracing_open(struct inode *inode, struct file *file)
2372{
2373 long cpu_file = (long) inode->i_private;
2374 struct trace_iterator *iter;
2375 int cpu;
2376
2377 if (tracing_disabled)
2378 return ERR_PTR(-ENODEV);
2379
2380 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2381 if (!iter)
2382 return ERR_PTR(-ENOMEM);
2383
2384 /*
2385 * We make a copy of the current tracer to avoid concurrent
2386 * changes on it while we are reading.
2387 */
2388 mutex_lock(&trace_types_lock);
2389 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2390 if (!iter->trace)
2391 goto fail;
2392
2393 if (current_trace)
2394 *iter->trace = *current_trace;
2395
2396 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2397 goto fail;
2398
2399 if (current_trace && current_trace->print_max)
2400 iter->tr = &max_tr;
2401 else
2402 iter->tr = &global_trace;
2403 iter->pos = -1;
2404 mutex_init(&iter->mutex);
2405 iter->cpu_file = cpu_file;
2406
	/* Notify the tracer early, before we stop tracing. */
2408 if (iter->trace && iter->trace->open)
2409 iter->trace->open(iter);
2410
2411 /* Annotate start of buffers if we had overruns */
2412 if (ring_buffer_overruns(iter->tr->buffer))
2413 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2414
2415 /* stop the trace while dumping */
2416 tracing_stop();
2417
2418 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
2419 for_each_tracing_cpu(cpu) {
2420 iter->buffer_iter[cpu] =
2421 ring_buffer_read_prepare(iter->tr->buffer, cpu);
2422 }
2423 ring_buffer_read_prepare_sync();
2424 for_each_tracing_cpu(cpu) {
2425 ring_buffer_read_start(iter->buffer_iter[cpu]);
2426 tracing_iter_reset(iter, cpu);
2427 }
2428 } else {
2429 cpu = iter->cpu_file;
2430 iter->buffer_iter[cpu] =
2431 ring_buffer_read_prepare(iter->tr->buffer, cpu);
2432 ring_buffer_read_prepare_sync();
2433 ring_buffer_read_start(iter->buffer_iter[cpu]);
2434 tracing_iter_reset(iter, cpu);
2435 }
2436
2437 mutex_unlock(&trace_types_lock);
2438
2439 return iter;
2440
2441 fail:
2442 mutex_unlock(&trace_types_lock);
2443 kfree(iter->trace);
2444 seq_release_private(inode, file);
2445 return ERR_PTR(-ENOMEM);
2446}
2447
2448int tracing_open_generic(struct inode *inode, struct file *filp)
2449{
2450 if (tracing_disabled)
2451 return -ENODEV;
2452
2453 filp->private_data = inode->i_private;
2454 return 0;
2455}
2456
2457static int tracing_release(struct inode *inode, struct file *file)
2458{
2459 struct seq_file *m = file->private_data;
2460 struct trace_iterator *iter;
2461 int cpu;
2462
2463 if (!(file->f_mode & FMODE_READ))
2464 return 0;
2465
2466 iter = m->private;
2467
2468 mutex_lock(&trace_types_lock);
2469 for_each_tracing_cpu(cpu) {
2470 if (iter->buffer_iter[cpu])
2471 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2472 }
2473
2474 if (iter->trace && iter->trace->close)
2475 iter->trace->close(iter);
2476
2477 /* reenable tracing if it was previously enabled */
2478 tracing_start();
2479 mutex_unlock(&trace_types_lock);
2480
2481 mutex_destroy(&iter->mutex);
2482 free_cpumask_var(iter->started);
2483 kfree(iter->trace);
2484 seq_release_private(inode, file);
2485 return 0;
2486}
2487
2488static int tracing_open(struct inode *inode, struct file *file)
2489{
2490 struct trace_iterator *iter;
2491 int ret = 0;
2492
2493 /* If this file was open for write, then erase contents */
2494 if ((file->f_mode & FMODE_WRITE) &&
2495 (file->f_flags & O_TRUNC)) {
2496 long cpu = (long) inode->i_private;
2497
2498 if (cpu == TRACE_PIPE_ALL_CPU)
2499 tracing_reset_online_cpus(&global_trace);
2500 else
2501 tracing_reset(&global_trace, cpu);
2502 }
2503
2504 if (file->f_mode & FMODE_READ) {
2505 iter = __tracing_open(inode, file);
2506 if (IS_ERR(iter))
2507 ret = PTR_ERR(iter);
2508 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2509 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2510 }
2511 return ret;
2512}
2513
2514static void *
2515t_next(struct seq_file *m, void *v, loff_t *pos)
2516{
2517 struct tracer *t = v;
2518
2519 (*pos)++;
2520
2521 if (t)
2522 t = t->next;
2523
2524 return t;
2525}
2526
2527static void *t_start(struct seq_file *m, loff_t *pos)
2528{
2529 struct tracer *t;
2530 loff_t l = 0;
2531
2532 mutex_lock(&trace_types_lock);
2533 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
2534 ;
2535
2536 return t;
2537}
2538
2539static void t_stop(struct seq_file *m, void *p)
2540{
2541 mutex_unlock(&trace_types_lock);
2542}
2543
2544static int t_show(struct seq_file *m, void *v)
2545{
2546 struct tracer *t = v;
2547
2548 if (!t)
2549 return 0;
2550
2551 seq_printf(m, "%s", t->name);
2552 if (t->next)
2553 seq_putc(m, ' ');
2554 else
2555 seq_putc(m, '\n');
2556
2557 return 0;
2558}
2559
2560static const struct seq_operations show_traces_seq_ops = {
2561 .start = t_start,
2562 .next = t_next,
2563 .stop = t_stop,
2564 .show = t_show,
2565};
2566
2567static int show_traces_open(struct inode *inode, struct file *file)
2568{
2569 if (tracing_disabled)
2570 return -ENODEV;
2571
2572 return seq_open(file, &show_traces_seq_ops);
2573}
2574
2575static ssize_t
2576tracing_write_stub(struct file *filp, const char __user *ubuf,
2577 size_t count, loff_t *ppos)
2578{
2579 return count;
2580}
2581
2582static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
2583{
2584 if (file->f_mode & FMODE_READ)
2585 return seq_lseek(file, offset, origin);
2586 else
2587 return 0;
2588}
2589
2590static const struct file_operations tracing_fops = {
2591 .open = tracing_open,
2592 .read = seq_read,
2593 .write = tracing_write_stub,
2594 .llseek = tracing_seek,
2595 .release = tracing_release,
2596};
2597
2598static const struct file_operations show_traces_fops = {
2599 .open = show_traces_open,
2600 .read = seq_read,
2601 .release = seq_release,
2602 .llseek = seq_lseek,
2603};
2604
2605/*
2606 * Only trace on a CPU if the bitmask is set:
2607 */
2608static cpumask_var_t tracing_cpumask;
2609
2610/*
2611 * The tracer itself will not take this lock, but still we want
2612 * to provide a consistent cpumask to user-space:
2613 */
2614static DEFINE_MUTEX(tracing_cpumask_update_lock);
2615
2616/*
2617 * Temporary storage for the character representation of the
2618 * CPU bitmask (and one more byte for the newline):
2619 */
2620static char mask_str[NR_CPUS + 1];
2621
2622static ssize_t
2623tracing_cpumask_read(struct file *filp, char __user *ubuf,
2624 size_t count, loff_t *ppos)
2625{
2626 int len;
2627
2628 mutex_lock(&tracing_cpumask_update_lock);
2629
2630 len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2631 if (count - len < 2) {
2632 count = -EINVAL;
2633 goto out_err;
2634 }
2635 len += sprintf(mask_str + len, "\n");
2636 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2637
2638out_err:
2639 mutex_unlock(&tracing_cpumask_update_lock);
2640
2641 return count;
2642}
2643
2644static ssize_t
2645tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2646 size_t count, loff_t *ppos)
2647{
2648 int err, cpu;
2649 cpumask_var_t tracing_cpumask_new;
2650
2651 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2652 return -ENOMEM;
2653
2654 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2655 if (err)
2656 goto err_unlock;
2657
2658 mutex_lock(&tracing_cpumask_update_lock);
2659
2660 local_irq_disable();
2661 arch_spin_lock(&ftrace_max_lock);
2662 for_each_tracing_cpu(cpu) {
2663 /*
2664 * Increase/decrease the disabled counter if we are
2665 * about to flip a bit in the cpumask:
2666 */
2667 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2668 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2669 atomic_inc(&global_trace.data[cpu]->disabled);
2670 ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
2671 }
2672 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2673 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2674 atomic_dec(&global_trace.data[cpu]->disabled);
2675 ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
2676 }
2677 }
2678 arch_spin_unlock(&ftrace_max_lock);
2679 local_irq_enable();
2680
2681 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
2682
2683 mutex_unlock(&tracing_cpumask_update_lock);
2684 free_cpumask_var(tracing_cpumask_new);
2685
2686 return count;
2687
2688err_unlock:
2689 free_cpumask_var(tracing_cpumask_new);
2690
2691 return err;
2692}
2693
2694static const struct file_operations tracing_cpumask_fops = {
2695 .open = tracing_open_generic,
2696 .read = tracing_cpumask_read,
2697 .write = tracing_cpumask_write,
2698 .llseek = generic_file_llseek,
2699};
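
/*
 * Example (illustrative): from user-space the mask is manipulated in the
 * usual hex cpumask format, e.g. restricting tracing to CPUs 0 and 1 on
 * a 4-CPU box:
 *
 *	# cat /sys/kernel/debug/tracing/tracing_cpumask
 *	f
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 */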
2700
2701static int tracing_trace_options_show(struct seq_file *m, void *v)
2702{
2703 struct tracer_opt *trace_opts;
2704 u32 tracer_flags;
2705 int i;
2706
2707 mutex_lock(&trace_types_lock);
2708 tracer_flags = current_trace->flags->val;
2709 trace_opts = current_trace->flags->opts;
2710
2711 for (i = 0; trace_options[i]; i++) {
2712 if (trace_flags & (1 << i))
2713 seq_printf(m, "%s\n", trace_options[i]);
2714 else
2715 seq_printf(m, "no%s\n", trace_options[i]);
2716 }
2717
2718 for (i = 0; trace_opts[i].name; i++) {
2719 if (tracer_flags & trace_opts[i].bit)
2720 seq_printf(m, "%s\n", trace_opts[i].name);
2721 else
2722 seq_printf(m, "no%s\n", trace_opts[i].name);
2723 }
2724 mutex_unlock(&trace_types_lock);
2725
2726 return 0;
2727}
2728
2729static int __set_tracer_option(struct tracer *trace,
2730 struct tracer_flags *tracer_flags,
2731 struct tracer_opt *opts, int neg)
2732{
2733 int ret;
2734
2735 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
2736 if (ret)
2737 return ret;
2738
2739 if (neg)
2740 tracer_flags->val &= ~opts->bit;
2741 else
2742 tracer_flags->val |= opts->bit;
2743 return 0;
2744}
2745
2746/* Try to assign a tracer specific option */
2747static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2748{
2749 struct tracer_flags *tracer_flags = trace->flags;
2750 struct tracer_opt *opts = NULL;
2751 int i;
2752
2753 for (i = 0; tracer_flags->opts[i].name; i++) {
2754 opts = &tracer_flags->opts[i];
2755
2756 if (strcmp(cmp, opts->name) == 0)
2757 return __set_tracer_option(trace, trace->flags,
2758 opts, neg);
2759 }
2760
2761 return -EINVAL;
2762}
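
/*
 * Example (illustrative sketch; names are hypothetical): a tracer that
 * wants private options exposes a NULL-terminated tracer_opt array via
 * its tracer_flags, which the lookup loop above walks:
 *
 *	#define TRACE_EXAMPLE_VERBOSE	0x1
 *
 *	static struct tracer_opt example_opts[] = {
 *		{ TRACER_OPT(verbose, TRACE_EXAMPLE_VERBOSE) },
 *		{ }	// terminator
 *	};
 *
 *	static struct tracer_flags example_flags = {
 *		.val	= 0,
 *		.opts	= example_opts,
 *	};
 */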
2763
2764static void set_tracer_flags(unsigned int mask, int enabled)
2765{
2766 /* do nothing if flag is already set */
2767 if (!!(trace_flags & mask) == !!enabled)
2768 return;
2769
2770 if (enabled)
2771 trace_flags |= mask;
2772 else
2773 trace_flags &= ~mask;
2774
2775 if (mask == TRACE_ITER_RECORD_CMD)
2776 trace_event_enable_cmd_record(enabled);
2777
2778 if (mask == TRACE_ITER_OVERWRITE)
2779 ring_buffer_change_overwrite(global_trace.buffer, enabled);
2780}
2781
2782static ssize_t
2783tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2784 size_t cnt, loff_t *ppos)
2785{
2786 char buf[64];
2787 char *cmp;
2788 int neg = 0;
2789 int ret;
2790 int i;
2791
2792 if (cnt >= sizeof(buf))
2793 return -EINVAL;
2794
2795 if (copy_from_user(&buf, ubuf, cnt))
2796 return -EFAULT;
2797
2798 buf[cnt] = 0;
2799 cmp = strstrip(buf);
2800
2801 if (strncmp(cmp, "no", 2) == 0) {
2802 neg = 1;
2803 cmp += 2;
2804 }
2805
2806 for (i = 0; trace_options[i]; i++) {
2807 if (strcmp(cmp, trace_options[i]) == 0) {
2808 set_tracer_flags(1 << i, !neg);
2809 break;
2810 }
2811 }
2812
2813 /* If no option could be set, test the specific tracer options */
2814 if (!trace_options[i]) {
2815 mutex_lock(&trace_types_lock);
2816 ret = set_tracer_option(current_trace, cmp, neg);
2817 mutex_unlock(&trace_types_lock);
2818 if (ret)
2819 return ret;
2820 }
2821
2822 *ppos += cnt;
2823
2824 return cnt;
2825}
2826
2827static int tracing_trace_options_open(struct inode *inode, struct file *file)
2828{
2829 if (tracing_disabled)
2830 return -ENODEV;
2831 return single_open(file, tracing_trace_options_show, NULL);
2832}
2833
2834static const struct file_operations tracing_iter_fops = {
2835 .open = tracing_trace_options_open,
2836 .read = seq_read,
2837 .llseek = seq_lseek,
2838 .release = single_release,
2839 .write = tracing_trace_options_write,
2840};
2841
2842static const char readme_msg[] =
2843 "tracing mini-HOWTO:\n\n"
2844 "# mount -t debugfs nodev /sys/kernel/debug\n\n"
2845 "# cat /sys/kernel/debug/tracing/available_tracers\n"
2846 "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
2847 "# cat /sys/kernel/debug/tracing/current_tracer\n"
2848 "nop\n"
2849 "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
2850 "# cat /sys/kernel/debug/tracing/current_tracer\n"
2851 "wakeup\n"
2852 "# cat /sys/kernel/debug/tracing/trace_options\n"
2853 "noprint-parent nosym-offset nosym-addr noverbose\n"
2854 "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
2855 "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
2856 "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
2857 "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
2858;
2859
2860static ssize_t
2861tracing_readme_read(struct file *filp, char __user *ubuf,
2862 size_t cnt, loff_t *ppos)
2863{
2864 return simple_read_from_buffer(ubuf, cnt, ppos,
2865 readme_msg, strlen(readme_msg));
2866}
2867
2868static const struct file_operations tracing_readme_fops = {
2869 .open = tracing_open_generic,
2870 .read = tracing_readme_read,
2871 .llseek = generic_file_llseek,
2872};
2873
2874static ssize_t
2875tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
2876 size_t cnt, loff_t *ppos)
2877{
2878 char *buf_comm;
2879 char *file_buf;
2880 char *buf;
2881 int len = 0;
2882 int pid;
2883 int i;
2884
2885 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
2886 if (!file_buf)
2887 return -ENOMEM;
2888
2889 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
2890 if (!buf_comm) {
2891 kfree(file_buf);
2892 return -ENOMEM;
2893 }
2894
2895 buf = file_buf;
2896
2897 for (i = 0; i < SAVED_CMDLINES; i++) {
2898 int r;
2899
2900 pid = map_cmdline_to_pid[i];
2901 if (pid == -1 || pid == NO_CMDLINE_MAP)
2902 continue;
2903
2904 trace_find_cmdline(pid, buf_comm);
2905 r = sprintf(buf, "%d %s\n", pid, buf_comm);
2906 buf += r;
2907 len += r;
2908 }
2909
2910 len = simple_read_from_buffer(ubuf, cnt, ppos,
2911 file_buf, len);
2912
2913 kfree(file_buf);
2914 kfree(buf_comm);
2915
2916 return len;
2917}
2918
2919static const struct file_operations tracing_saved_cmdlines_fops = {
2920 .open = tracing_open_generic,
2921 .read = tracing_saved_cmdlines_read,
2922 .llseek = generic_file_llseek,
2923};
2924
2925static ssize_t
2926tracing_ctrl_read(struct file *filp, char __user *ubuf,
2927 size_t cnt, loff_t *ppos)
2928{
2929 char buf[64];
2930 int r;
2931
2932 r = sprintf(buf, "%u\n", tracer_enabled);
2933 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2934}
2935
2936static ssize_t
2937tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2938 size_t cnt, loff_t *ppos)
2939{
2940 struct trace_array *tr = filp->private_data;
2941 unsigned long val;
2942 int ret;
2943
2944 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
2945 if (ret)
2946 return ret;
2947
2948 val = !!val;
2949
2950 mutex_lock(&trace_types_lock);
2951 if (tracer_enabled ^ val) {
2952
2953 /* Only need to warn if this is used to change the state */
2954 WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
2955
2956 if (val) {
2957 tracer_enabled = 1;
2958 if (current_trace->start)
2959 current_trace->start(tr);
2960 tracing_start();
2961 } else {
2962 tracer_enabled = 0;
2963 tracing_stop();
2964 if (current_trace->stop)
2965 current_trace->stop(tr);
2966 }
2967 }
2968 mutex_unlock(&trace_types_lock);
2969
2970 *ppos += cnt;
2971
2972 return cnt;
2973}
2974
2975static ssize_t
2976tracing_set_trace_read(struct file *filp, char __user *ubuf,
2977 size_t cnt, loff_t *ppos)
2978{
2979 char buf[MAX_TRACER_SIZE+2];
2980 int r;
2981
2982 mutex_lock(&trace_types_lock);
2983 if (current_trace)
2984 r = sprintf(buf, "%s\n", current_trace->name);
2985 else
2986 r = sprintf(buf, "\n");
2987 mutex_unlock(&trace_types_lock);
2988
2989 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2990}
2991
2992int tracer_init(struct tracer *t, struct trace_array *tr)
2993{
2994 tracing_reset_online_cpus(tr);
2995 return t->init(tr);
2996}
2997
2998static void set_buffer_entries(struct trace_array *tr, unsigned long val)
2999{
3000 int cpu;
3001 for_each_tracing_cpu(cpu)
3002 tr->data[cpu]->entries = val;
3003}
3004
3005static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
3006{
3007 int ret;
3008
3009 /*
	 * If the kernel or user changes the size of the ring buffer,
	 * we use the size that was given, and we can forget about
3012 * expanding it later.
3013 */
3014 ring_buffer_expanded = 1;
3015
3016 ret = ring_buffer_resize(global_trace.buffer, size, cpu);
3017 if (ret < 0)
3018 return ret;
3019
3020 if (!current_trace->use_max_tr)
3021 goto out;
3022
3023 ret = ring_buffer_resize(max_tr.buffer, size, cpu);
3024 if (ret < 0) {
3025 int r = 0;
3026
3027 if (cpu == RING_BUFFER_ALL_CPUS) {
3028 int i;
3029 for_each_tracing_cpu(i) {
3030 r = ring_buffer_resize(global_trace.buffer,
3031 global_trace.data[i]->entries,
3032 i);
3033 if (r < 0)
3034 break;
3035 }
3036 } else {
3037 r = ring_buffer_resize(global_trace.buffer,
3038 global_trace.data[cpu]->entries,
3039 cpu);
3040 }
3041
3042 if (r < 0) {
3043 /*
3044 * AARGH! We are left with different
3045 * size max buffer!!!!
3046 * The max buffer is our "snapshot" buffer.
3047 * When a tracer needs a snapshot (one of the
3048 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed to
3051 * update the size of the max buffer. But when we tried
3052 * to reset the main buffer to the original size, we
3053 * failed there too. This is very unlikely to
3054 * happen, but if it does, warn and kill all
3055 * tracing.
3056 */
3057 WARN_ON(1);
3058 tracing_disabled = 1;
3059 }
3060 return ret;
3061 }
3062
3063 if (cpu == RING_BUFFER_ALL_CPUS)
3064 set_buffer_entries(&max_tr, size);
3065 else
3066 max_tr.data[cpu]->entries = size;
3067
3068 out:
3069 if (cpu == RING_BUFFER_ALL_CPUS)
3070 set_buffer_entries(&global_trace, size);
3071 else
3072 global_trace.data[cpu]->entries = size;
3073
3074 return ret;
3075}
3076
3077static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
3078{
3079 int ret = size;
3080
3081 mutex_lock(&trace_types_lock);
3082
3083 if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
3085 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3086 ret = -EINVAL;
3087 goto out;
3088 }
3089 }
3090
3091 ret = __tracing_resize_ring_buffer(size, cpu_id);
3092 if (ret < 0)
3093 ret = -ENOMEM;
3094
3095out:
3096 mutex_unlock(&trace_types_lock);
3097
3098 return ret;
3099}
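
/*
 * Example (illustrative): the resize is normally driven from user-space
 * through the buffer_size_kb file; the value arrives here scaled to
 * bytes, with cpu_id == RING_BUFFER_ALL_CPUS for the global file:
 *
 *	# echo 1024 > /sys/kernel/debug/tracing/buffer_size_kb
 */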
3100
3101
3102/**
3103 * tracing_update_buffers - used by tracing facility to expand ring buffers
3104 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, they need to grow
 * to their default size.
3109 *
3110 * This function is to be called when a tracer is about to be used.
3111 */
3112int tracing_update_buffers(void)
3113{
3114 int ret = 0;
3115
3116 mutex_lock(&trace_types_lock);
3117 if (!ring_buffer_expanded)
3118 ret = __tracing_resize_ring_buffer(trace_buf_size,
3119 RING_BUFFER_ALL_CPUS);
3120 mutex_unlock(&trace_types_lock);
3121
3122 return ret;
3123}
3124
3125struct trace_option_dentry;
3126
3127static struct trace_option_dentry *
3128create_trace_option_files(struct tracer *tracer);
3129
3130static void
3131destroy_trace_option_files(struct trace_option_dentry *topts);
3132
3133static int tracing_set_tracer(const char *buf)
3134{
3135 static struct trace_option_dentry *topts;
3136 struct trace_array *tr = &global_trace;
3137 struct tracer *t;
3138 int ret = 0;
3139
3140 mutex_lock(&trace_types_lock);
3141
3142 if (!ring_buffer_expanded) {
3143 ret = __tracing_resize_ring_buffer(trace_buf_size,
3144 RING_BUFFER_ALL_CPUS);
3145 if (ret < 0)
3146 goto out;
3147 ret = 0;
3148 }
3149
3150 for (t = trace_types; t; t = t->next) {
3151 if (strcmp(t->name, buf) == 0)
3152 break;
3153 }
3154 if (!t) {
3155 ret = -EINVAL;
3156 goto out;
3157 }
3158 if (t == current_trace)
3159 goto out;
3160
3161 trace_branch_disable();
3162 if (current_trace && current_trace->reset)
3163 current_trace->reset(tr);
3164 if (current_trace && current_trace->use_max_tr) {
3165 /*
3166 * We don't free the ring buffer. Instead, we resize it because
3167 * the max_tr ring buffer has some state (e.g. ring->clock) and
3168 * we want to preserve it.
3169 */
3170 ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
3171 set_buffer_entries(&max_tr, 1);
3172 }
3173 destroy_trace_option_files(topts);
3174
3175 current_trace = t;
3176
3177 topts = create_trace_option_files(current_trace);
3178 if (current_trace->use_max_tr) {
3179 int cpu;
3180 /* we need to make per cpu buffer sizes equivalent */
3181 for_each_tracing_cpu(cpu) {
3182 ret = ring_buffer_resize(max_tr.buffer,
3183 global_trace.data[cpu]->entries,
3184 cpu);
3185 if (ret < 0)
3186 goto out;
3187 max_tr.data[cpu]->entries =
3188 global_trace.data[cpu]->entries;
3189 }
3190 }
3191
3192 if (t->init) {
3193 ret = tracer_init(t, tr);
3194 if (ret)
3195 goto out;
3196 }
3197
3198 trace_branch_enable(tr);
3199 out:
3200 mutex_unlock(&trace_types_lock);
3201
3202 return ret;
3203}
3204
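/*
 * Usage sketch (userspace, illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug and that <fcntl.h>/<unistd.h> are included): writing
 * a registered tracer name to current_tracer lands in
 * tracing_set_trace_write() below, which calls tracing_set_tracer()
 * above. The "nop" tracer is always registered:
 *
 *	int fd = open("/sys/kernel/debug/tracing/current_tracer", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "nop", 3);
 *		close(fd);
 *	}
 */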
3205static ssize_t
3206tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3207 size_t cnt, loff_t *ppos)
3208{
3209 char buf[MAX_TRACER_SIZE+1];
3210 int i;
3211 size_t ret;
3212 int err;
3213
3214 ret = cnt;
3215
3216 if (cnt > MAX_TRACER_SIZE)
3217 cnt = MAX_TRACER_SIZE;
3218
3219 if (copy_from_user(&buf, ubuf, cnt))
3220 return -EFAULT;
3221
3222 buf[cnt] = 0;
3223
3224 /* strip trailing whitespace. */
3225 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3226 buf[i] = 0;
3227
3228 err = tracing_set_tracer(buf);
3229 if (err)
3230 return err;
3231
3232 *ppos += ret;
3233
3234 return ret;
3235}
3236
3237static ssize_t
3238tracing_max_lat_read(struct file *filp, char __user *ubuf,
3239 size_t cnt, loff_t *ppos)
3240{
3241 unsigned long *ptr = filp->private_data;
3242 char buf[64];
3243 int r;
3244
3245 r = snprintf(buf, sizeof(buf), "%ld\n",
3246 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
3247 if (r > sizeof(buf))
3248 r = sizeof(buf);
3249 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3250}
3251
3252static ssize_t
3253tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3254 size_t cnt, loff_t *ppos)
3255{
3256 unsigned long *ptr = filp->private_data;
3257 unsigned long val;
3258 int ret;
3259
3260 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3261 if (ret)
3262 return ret;
3263
3264 *ptr = val * 1000;
3265
3266 return cnt;
3267}
3268
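/*
 * Worked example for the units above (illustrative): this file is read
 * and written in microseconds while the kernel stores nanoseconds, so
 * writing "50" stores 50 * 1000 = 50000 ns, and tracing_max_lat_read()
 * converts the stored value back with nsecs_to_usecs() before printing.
 */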
3269static int tracing_open_pipe(struct inode *inode, struct file *filp)
3270{
3271 long cpu_file = (long) inode->i_private;
3272 struct trace_iterator *iter;
3273 int ret = 0;
3274
3275 if (tracing_disabled)
3276 return -ENODEV;
3277
3278 mutex_lock(&trace_types_lock);
3279
3280 /* create a buffer to store the information to pass to userspace */
3281 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3282 if (!iter) {
3283 ret = -ENOMEM;
3284 goto out;
3285 }
3286
3287 /*
3288 * We make a copy of the current tracer to avoid concurrent
3289 * changes to it while we are reading.
3290 */
3291 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3292 if (!iter->trace) {
3293 ret = -ENOMEM;
3294 goto fail;
3295 }
3296 if (current_trace)
3297 *iter->trace = *current_trace;
3298
3299 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3300 ret = -ENOMEM;
3301 goto fail;
3302 }
3303
3304 /* the trace pipe does not show the start of the buffer */
3305 cpumask_setall(iter->started);
3306
3307 if (trace_flags & TRACE_ITER_LATENCY_FMT)
3308 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3309
3310 iter->cpu_file = cpu_file;
3311 iter->tr = &global_trace;
3312 mutex_init(&iter->mutex);
3313 filp->private_data = iter;
3314
3315 if (iter->trace->pipe_open)
3316 iter->trace->pipe_open(iter);
3317
3318 nonseekable_open(inode, filp);
3319out:
3320 mutex_unlock(&trace_types_lock);
3321 return ret;
3322
3323fail:
3324 kfree(iter->trace);
3325 kfree(iter);
3326 mutex_unlock(&trace_types_lock);
3327 return ret;
3328}
3329
3330static int tracing_release_pipe(struct inode *inode, struct file *file)
3331{
3332 struct trace_iterator *iter = file->private_data;
3333
3334 mutex_lock(&trace_types_lock);
3335
3336 if (iter->trace->pipe_close)
3337 iter->trace->pipe_close(iter);
3338
3339 mutex_unlock(&trace_types_lock);
3340
3341 free_cpumask_var(iter->started);
3342 mutex_destroy(&iter->mutex);
3343 kfree(iter->trace);
3344 kfree(iter);
3345
3346 return 0;
3347}
3348
3349static unsigned int
3350tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3351{
3352 struct trace_iterator *iter = filp->private_data;
3353
3354 if (trace_flags & TRACE_ITER_BLOCK) {
3355 /*
3356 * Always select as readable when in blocking mode
3357 */
3358 return POLLIN | POLLRDNORM;
3359 } else {
3360 if (!trace_empty(iter))
3361 return POLLIN | POLLRDNORM;
3362 poll_wait(filp, &trace_wait, poll_table);
3363 if (!trace_empty(iter))
3364 return POLLIN | POLLRDNORM;
3365
3366 return 0;
3367 }
3368}
3369
3370
3371void default_wait_pipe(struct trace_iterator *iter)
3372{
3373 DEFINE_WAIT(wait);
3374
3375 prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
3376
3377 if (trace_empty(iter))
3378 schedule();
3379
3380 finish_wait(&trace_wait, &wait);
3381}
3382
3383/*
3384 * This is a makeshift waitqueue.
3385 * A tracer might use this callback in some rare cases:
3386 *
3387 *  1) the current tracer might hold the runqueue lock when it wakes up
3388 *     a reader, hence a deadlock (sched, function, and function graph tracers)
3389 *  2) the function tracers trace all functions; we don't want
3390 *     the overhead of calling wake_up and friends
3391 *     (and of tracing them too)
3392 *
3393 * Anyway, this is a very primitive wakeup.
3394 */
3395void poll_wait_pipe(struct trace_iterator *iter)
3396{
3397 set_current_state(TASK_INTERRUPTIBLE);
3398 /* sleep for 100 msecs, and try again. */
3399 schedule_timeout(HZ / 10);
3400}
3401
3402/* Must be called with trace_types_lock mutex held. */
3403static int tracing_wait_pipe(struct file *filp)
3404{
3405 struct trace_iterator *iter = filp->private_data;
3406
3407 while (trace_empty(iter)) {
3408
3409 if ((filp->f_flags & O_NONBLOCK)) {
3410 return -EAGAIN;
3411 }
3412
3413 mutex_unlock(&iter->mutex);
3414
3415 iter->trace->wait_pipe(iter);
3416
3417 mutex_lock(&iter->mutex);
3418
3419 if (signal_pending(current))
3420 return -EINTR;
3421
3422 /*
3423 * We block until we have read something and tracing is disabled.
3424 * We still block if tracing is disabled but we have never
3425 * read anything. This allows a user to cat this file and
3426 * then enable tracing. But after we have read something,
3427 * we give an EOF when tracing is disabled again.
3428 *
3429 * iter->pos will be 0 if we haven't read anything.
3430 */
3431 if (!tracer_enabled && iter->pos)
3432 break;
3433 }
3434
3435 return 1;
3436}
3437
3438/*
3439 * Consumer reader.
3440 */
3441static ssize_t
3442tracing_read_pipe(struct file *filp, char __user *ubuf,
3443 size_t cnt, loff_t *ppos)
3444{
3445 struct trace_iterator *iter = filp->private_data;
3446 static struct tracer *old_tracer;
3447 ssize_t sret;
3448
3449 /* return any leftover data */
3450 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3451 if (sret != -EBUSY)
3452 return sret;
3453
3454 trace_seq_init(&iter->seq);
3455
3456 /* copy the tracer to avoid using a global lock all around */
3457 mutex_lock(&trace_types_lock);
3458 if (unlikely(old_tracer != current_trace && current_trace)) {
3459 old_tracer = current_trace;
3460 *iter->trace = *current_trace;
3461 }
3462 mutex_unlock(&trace_types_lock);
3463
3464 /*
3465 * Avoid more than one consumer on a single file descriptor
3466 * This is just a matter of traces coherency, the ring buffer itself
3467 * is protected.
3468 */
3469 mutex_lock(&iter->mutex);
3470 if (iter->trace->read) {
3471 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
3472 if (sret)
3473 goto out;
3474 }
3475
3476waitagain:
3477 sret = tracing_wait_pipe(filp);
3478 if (sret <= 0)
3479 goto out;
3480
3481 /* stop when tracing is finished */
3482 if (trace_empty(iter)) {
3483 sret = 0;
3484 goto out;
3485 }
3486
3487 if (cnt >= PAGE_SIZE)
3488 cnt = PAGE_SIZE - 1;
3489
3490 /* reset all but tr, trace, and overruns */
3491 memset(&iter->seq, 0,
3492 sizeof(struct trace_iterator) -
3493 offsetof(struct trace_iterator, seq));
3494 iter->pos = -1;
3495
3496 trace_event_read_lock();
3497 trace_access_lock(iter->cpu_file);
3498 while (trace_find_next_entry_inc(iter) != NULL) {
3499 enum print_line_t ret;
3500 int len = iter->seq.len;
3501
3502 ret = print_trace_line(iter);
3503 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3504 /* don't print partial lines */
3505 iter->seq.len = len;
3506 break;
3507 }
3508 if (ret != TRACE_TYPE_NO_CONSUME)
3509 trace_consume(iter);
3510
3511 if (iter->seq.len >= cnt)
3512 break;
3513
3514 /*
3515 * Setting the full flag means we reached the trace_seq buffer
3516 * size and should have left via the partial-output condition
3517 * above. One of the trace_seq_* functions is being misused.
3518 */
3519 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
3520 iter->ent->type);
3521 }
3522 trace_access_unlock(iter->cpu_file);
3523 trace_event_read_unlock();
3524
3525 /* Now copy what we have to the user */
3526 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3527 if (iter->seq.readpos >= iter->seq.len)
3528 trace_seq_init(&iter->seq);
3529
3530 /*
3531 * If there was nothing to send to user, in spite of consuming trace
3532 * entries, go back to wait for more entries.
3533 */
3534 if (sret == -EBUSY)
3535 goto waitagain;
3536
3537out:
3538 mutex_unlock(&iter->mutex);
3539
3540 return sret;
3541}
3542
3543static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
3544 struct pipe_buffer *buf)
3545{
3546 __free_page(buf->page);
3547}
3548
3549static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3550 unsigned int idx)
3551{
3552 __free_page(spd->pages[idx]);
3553}
3554
3555static const struct pipe_buf_operations tracing_pipe_buf_ops = {
3556 .can_merge = 0,
3557 .map = generic_pipe_buf_map,
3558 .unmap = generic_pipe_buf_unmap,
3559 .confirm = generic_pipe_buf_confirm,
3560 .release = tracing_pipe_buf_release,
3561 .steal = generic_pipe_buf_steal,
3562 .get = generic_pipe_buf_get,
3563};
3564
3565static size_t
3566tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
3567{
3568 size_t count;
3569 int ret;
3570
3571 /* Seq buffer is page-sized, exactly what we need. */
3572 for (;;) {
3573 count = iter->seq.len;
3574 ret = print_trace_line(iter);
3575 count = iter->seq.len - count;
3576 if (rem < count) {
3577 rem = 0;
3578 iter->seq.len -= count;
3579 break;
3580 }
3581 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3582 iter->seq.len -= count;
3583 break;
3584 }
3585
3586 if (ret != TRACE_TYPE_NO_CONSUME)
3587 trace_consume(iter);
3588 rem -= count;
3589 if (!trace_find_next_entry_inc(iter)) {
3590 rem = 0;
3591 iter->ent = NULL;
3592 break;
3593 }
3594 }
3595
3596 return rem;
3597}
3598
3599static ssize_t tracing_splice_read_pipe(struct file *filp,
3600 loff_t *ppos,
3601 struct pipe_inode_info *pipe,
3602 size_t len,
3603 unsigned int flags)
3604{
3605 struct page *pages_def[PIPE_DEF_BUFFERS];
3606 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3607 struct trace_iterator *iter = filp->private_data;
3608 struct splice_pipe_desc spd = {
3609 .pages = pages_def,
3610 .partial = partial_def,
3611 .nr_pages = 0, /* This gets updated below. */
3612 .nr_pages_max = PIPE_DEF_BUFFERS,
3613 .flags = flags,
3614 .ops = &tracing_pipe_buf_ops,
3615 .spd_release = tracing_spd_release_pipe,
3616 };
3617 static struct tracer *old_tracer;
3618 ssize_t ret;
3619 size_t rem;
3620 unsigned int i;
3621
3622 if (splice_grow_spd(pipe, &spd))
3623 return -ENOMEM;
3624
3625 /* copy the tracer to avoid using a global lock all around */
3626 mutex_lock(&trace_types_lock);
3627 if (unlikely(old_tracer != current_trace && current_trace)) {
3628 old_tracer = current_trace;
3629 *iter->trace = *current_trace;
3630 }
3631 mutex_unlock(&trace_types_lock);
3632
3633 mutex_lock(&iter->mutex);
3634
3635 if (iter->trace->splice_read) {
3636 ret = iter->trace->splice_read(iter, filp,
3637 ppos, pipe, len, flags);
3638 if (ret)
3639 goto out_err;
3640 }
3641
3642 ret = tracing_wait_pipe(filp);
3643 if (ret <= 0)
3644 goto out_err;
3645
3646 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3647 ret = -EFAULT;
3648 goto out_err;
3649 }
3650
3651 trace_event_read_lock();
3652 trace_access_lock(iter->cpu_file);
3653
3654 /* Fill as many pages as possible. */
3655 for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
3656 spd.pages[i] = alloc_page(GFP_KERNEL);
3657 if (!spd.pages[i])
3658 break;
3659
3660 rem = tracing_fill_pipe_page(rem, iter);
3661
3662 /* Copy the data into the page, so we can start over. */
3663 ret = trace_seq_to_buffer(&iter->seq,
3664 page_address(spd.pages[i]),
3665 iter->seq.len);
3666 if (ret < 0) {
3667 __free_page(spd.pages[i]);
3668 break;
3669 }
3670 spd.partial[i].offset = 0;
3671 spd.partial[i].len = iter->seq.len;
3672
3673 trace_seq_init(&iter->seq);
3674 }
3675
3676 trace_access_unlock(iter->cpu_file);
3677 trace_event_read_unlock();
3678 mutex_unlock(&iter->mutex);
3679
3680 spd.nr_pages = i;
3681
3682 ret = splice_to_pipe(pipe, &spd);
3683out:
3684 splice_shrink_spd(&spd);
3685 return ret;
3686
3687out_err:
3688 mutex_unlock(&iter->mutex);
3689 goto out;
3690}
3691
3692struct ftrace_entries_info {
3693 struct trace_array *tr;
3694 int cpu;
3695};
3696
3697static int tracing_entries_open(struct inode *inode, struct file *filp)
3698{
3699 struct ftrace_entries_info *info;
3700
3701 if (tracing_disabled)
3702 return -ENODEV;
3703
3704 info = kzalloc(sizeof(*info), GFP_KERNEL);
3705 if (!info)
3706 return -ENOMEM;
3707
3708 info->tr = &global_trace;
3709 info->cpu = (unsigned long)inode->i_private;
3710
3711 filp->private_data = info;
3712
3713 return 0;
3714}
3715
3716static ssize_t
3717tracing_entries_read(struct file *filp, char __user *ubuf,
3718 size_t cnt, loff_t *ppos)
3719{
3720 struct ftrace_entries_info *info = filp->private_data;
3721 struct trace_array *tr = info->tr;
3722 char buf[64];
3723 int r = 0;
3724 ssize_t ret;
3725
3726 mutex_lock(&trace_types_lock);
3727
3728 if (info->cpu == RING_BUFFER_ALL_CPUS) {
3729 int cpu, buf_size_same;
3730 unsigned long size;
3731
3732 size = 0;
3733 buf_size_same = 1;
3734 /* check if all per-cpu buffer sizes are the same */
3735 for_each_tracing_cpu(cpu) {
3736 /* fill in the size from first enabled cpu */
3737 if (size == 0)
3738 size = tr->data[cpu]->entries;
3739 if (size != tr->data[cpu]->entries) {
3740 buf_size_same = 0;
3741 break;
3742 }
3743 }
3744
3745 if (buf_size_same) {
3746 if (!ring_buffer_expanded)
3747 r = sprintf(buf, "%lu (expanded: %lu)\n",
3748 size >> 10,
3749 trace_buf_size >> 10);
3750 else
3751 r = sprintf(buf, "%lu\n", size >> 10);
3752 } else
3753 r = sprintf(buf, "X\n");
3754 } else
3755 r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
3756
3757 mutex_unlock(&trace_types_lock);
3758
3759 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3760 return ret;
3761}
3762
3763static ssize_t
3764tracing_entries_write(struct file *filp, const char __user *ubuf,
3765 size_t cnt, loff_t *ppos)
3766{
3767 struct ftrace_entries_info *info = filp->private_data;
3768 unsigned long val;
3769 int ret;
3770
3771 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3772 if (ret)
3773 return ret;
3774
3775 /* must have at least 1 entry */
3776 if (!val)
3777 return -EINVAL;
3778
3779 /* value is in KB */
3780 val <<= 10;
3781
3782 ret = tracing_resize_ring_buffer(val, info->cpu);
3783 if (ret < 0)
3784 return ret;
3785
3786 *ppos += cnt;
3787
3788 return cnt;
3789}
3790
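/*
 * Usage sketch (userspace, illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug): the value is taken in KB and shifted left by 10
 * to convert it to bytes, so writing "1408" requests 1408 KB per CPU:
 *
 *	int fd = open("/sys/kernel/debug/tracing/buffer_size_kb", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1408", 4);
 *		close(fd);
 *	}
 */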
3791static int
3792tracing_entries_release(struct inode *inode, struct file *filp)
3793{
3794 struct ftrace_entries_info *info = filp->private_data;
3795
3796 kfree(info);
3797
3798 return 0;
3799}
3800
3801static ssize_t
3802tracing_total_entries_read(struct file *filp, char __user *ubuf,
3803 size_t cnt, loff_t *ppos)
3804{
3805 struct trace_array *tr = filp->private_data;
3806 char buf[64];
3807 int r, cpu;
3808 unsigned long size = 0, expanded_size = 0;
3809
3810 mutex_lock(&trace_types_lock);
3811 for_each_tracing_cpu(cpu) {
3812 size += tr->data[cpu]->entries >> 10;
3813 if (!ring_buffer_expanded)
3814 expanded_size += trace_buf_size >> 10;
3815 }
3816 if (ring_buffer_expanded)
3817 r = sprintf(buf, "%lu\n", size);
3818 else
3819 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
3820 mutex_unlock(&trace_types_lock);
3821
3822 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3823}
3824
3825static ssize_t
3826tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
3827 size_t cnt, loff_t *ppos)
3828{
3829 /*
3830 * There is no need to read what the user has written; this function
3831 * exists just to make sure that there is no error when "echo" is used.
3832 */
3833
3834 *ppos += cnt;
3835
3836 return cnt;
3837}
3838
3839static int
3840tracing_free_buffer_release(struct inode *inode, struct file *filp)
3841{
3842 /* disable tracing? */
3843 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
3844 tracing_off();
3845 /* resize the ring buffer to 0 */
3846 tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
3847
3848 return 0;
3849}
3850
3851static ssize_t
3852tracing_mark_write(struct file *filp, const char __user *ubuf,
3853 size_t cnt, loff_t *fpos)
3854{
3855 unsigned long addr = (unsigned long)ubuf;
3856 struct ring_buffer_event *event;
3857 struct ring_buffer *buffer;
3858 struct print_entry *entry;
3859 unsigned long irq_flags;
3860 struct page *pages[2];
3861 void *map_page[2];
3862 int nr_pages = 1;
3863 ssize_t written;
3864 int offset;
3865 int size;
3866 int len;
3867 int ret;
3868 int i;
3869
3870 if (tracing_disabled)
3871 return -EINVAL;
3872
3873 if (cnt > TRACE_BUF_SIZE)
3874 cnt = TRACE_BUF_SIZE;
3875
3876 /*
3877 * Userspace is injecting traces into the kernel trace buffer.
3878 * We want to be as non-intrusive as possible.
3879 * To do so, we do not want to allocate any special buffers
3880 * or take any locks, but instead write the userspace data
3881 * straight into the ring buffer.
3882 *
3883 * First we need to pin the userspace buffer into memory. It most
3884 * likely is resident already, because the task just referenced it,
3885 * but there's no guarantee that it is. By using get_user_pages_fast()
3886 * and kmap_atomic()/kunmap_atomic() we can get access to the
3887 * pages directly. We then write the data directly into the
3888 * ring buffer.
3889 */
3890 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
3891
3892 /* check if the buffer crosses a page boundary */
3893 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
3894 nr_pages = 2;
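/*
 * Worked example (illustrative numbers): with PAGE_SIZE == 4096, a
 * 100-byte write starting at user address 0x1fe0 ends at 0x2044, so
 * the start and the end land in different pages and both pages must
 * be pinned and mapped.
 */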
3895
3896 offset = addr & (PAGE_SIZE - 1);
3897 addr &= PAGE_MASK;
3898
3899 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
3900 if (ret < nr_pages) {
3901 while (--ret >= 0)
3902 put_page(pages[ret]);
3903 written = -EFAULT;
3904 goto out;
3905 }
3906
3907 for (i = 0; i < nr_pages; i++)
3908 map_page[i] = kmap_atomic(pages[i]);
3909
3910 local_save_flags(irq_flags);
3911 size = sizeof(*entry) + cnt + 2; /* possible \n added */
3912 buffer = global_trace.buffer;
3913 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3914 irq_flags, preempt_count());
3915 if (!event) {
3916 /* Ring buffer disabled, return as if not open for write */
3917 written = -EBADF;
3918 goto out_unlock;
3919 }
3920
3921 entry = ring_buffer_event_data(event);
3922 entry->ip = _THIS_IP_;
3923
3924 if (nr_pages == 2) {
3925 len = PAGE_SIZE - offset;
3926 memcpy(&entry->buf, map_page[0] + offset, len);
3927 memcpy(&entry->buf[len], map_page[1], cnt - len);
3928 } else
3929 memcpy(&entry->buf, map_page[0] + offset, cnt);
3930
3931 if (entry->buf[cnt - 1] != '\n') {
3932 entry->buf[cnt] = '\n';
3933 entry->buf[cnt + 1] = '\0';
3934 } else
3935 entry->buf[cnt] = '\0';
3936
3937 ring_buffer_unlock_commit(buffer, event);
3938
3939 written = cnt;
3940
3941 *fpos += written;
3942
3943 out_unlock:
3944 for (i = 0; i < nr_pages; i++) {
3945 kunmap_atomic(map_page[i]);
3946 put_page(pages[i]);
3947 }
3948 out:
3949 return written;
3950}
3951
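/*
 * Usage sketch (userspace, illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug): each write to trace_marker becomes one TRACE_PRINT
 * entry in the ring buffer, with a newline appended if the caller left
 * it out:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "frame start", 11);
 *		close(fd);
 *	}
 */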
3952static int tracing_clock_show(struct seq_file *m, void *v)
3953{
3954 int i;
3955
3956 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3957 seq_printf(m,
3958 "%s%s%s%s", i ? " " : "",
3959 i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3960 i == trace_clock_id ? "]" : "");
3961 seq_putc(m, '\n');
3962
3963 return 0;
3964}
3965
3966static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3967 size_t cnt, loff_t *fpos)
3968{
3969 char buf[64];
3970 const char *clockstr;
3971 int i;
3972
3973 if (cnt >= sizeof(buf))
3974 return -EINVAL;
3975
3976 if (copy_from_user(&buf, ubuf, cnt))
3977 return -EFAULT;
3978
3979 buf[cnt] = 0;
3980
3981 clockstr = strstrip(buf);
3982
3983 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
3984 if (strcmp(trace_clocks[i].name, clockstr) == 0)
3985 break;
3986 }
3987 if (i == ARRAY_SIZE(trace_clocks))
3988 return -EINVAL;
3989
3990 trace_clock_id = i;
3991
3992 mutex_lock(&trace_types_lock);
3993
3994 ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
3995 if (max_tr.buffer)
3996 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
3997
3998 mutex_unlock(&trace_types_lock);
3999
4000 *fpos += cnt;
4001
4002 return cnt;
4003}
4004
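/*
 * Usage sketch (userspace, illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug): reading trace_clock lists the available clocks
 * with the current one in brackets; writing a listed name (e.g.
 * "global") switches both the main and the max buffers to that clock:
 *
 *	int fd = open("/sys/kernel/debug/tracing/trace_clock", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "global", 6);
 *		close(fd);
 *	}
 */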
4005static int tracing_clock_open(struct inode *inode, struct file *file)
4006{
4007 if (tracing_disabled)
4008 return -ENODEV;
4009 return single_open(file, tracing_clock_show, NULL);
4010}
4011
4012static const struct file_operations tracing_max_lat_fops = {
4013 .open = tracing_open_generic,
4014 .read = tracing_max_lat_read,
4015 .write = tracing_max_lat_write,
4016 .llseek = generic_file_llseek,
4017};
4018
4019static const struct file_operations tracing_ctrl_fops = {
4020 .open = tracing_open_generic,
4021 .read = tracing_ctrl_read,
4022 .write = tracing_ctrl_write,
4023 .llseek = generic_file_llseek,
4024};
4025
4026static const struct file_operations set_tracer_fops = {
4027 .open = tracing_open_generic,
4028 .read = tracing_set_trace_read,
4029 .write = tracing_set_trace_write,
4030 .llseek = generic_file_llseek,
4031};
4032
4033static const struct file_operations tracing_pipe_fops = {
4034 .open = tracing_open_pipe,
4035 .poll = tracing_poll_pipe,
4036 .read = tracing_read_pipe,
4037 .splice_read = tracing_splice_read_pipe,
4038 .release = tracing_release_pipe,
4039 .llseek = no_llseek,
4040};
4041
4042static const struct file_operations tracing_entries_fops = {
4043 .open = tracing_entries_open,
4044 .read = tracing_entries_read,
4045 .write = tracing_entries_write,
4046 .release = tracing_entries_release,
4047 .llseek = generic_file_llseek,
4048};
4049
4050static const struct file_operations tracing_total_entries_fops = {
4051 .open = tracing_open_generic,
4052 .read = tracing_total_entries_read,
4053 .llseek = generic_file_llseek,
4054};
4055
4056static const struct file_operations tracing_free_buffer_fops = {
4057 .write = tracing_free_buffer_write,
4058 .release = tracing_free_buffer_release,
4059};
4060
4061static const struct file_operations tracing_mark_fops = {
4062 .open = tracing_open_generic,
4063 .write = tracing_mark_write,
4064 .llseek = generic_file_llseek,
4065};
4066
4067static const struct file_operations trace_clock_fops = {
4068 .open = tracing_clock_open,
4069 .read = seq_read,
4070 .llseek = seq_lseek,
4071 .release = single_release,
4072 .write = tracing_clock_write,
4073};
4074
4075struct ftrace_buffer_info {
4076 struct trace_array *tr;
4077 void *spare;
4078 int cpu;
4079 unsigned int read;
4080};
4081
4082static int tracing_buffers_open(struct inode *inode, struct file *filp)
4083{
4084 int cpu = (int)(long)inode->i_private;
4085 struct ftrace_buffer_info *info;
4086
4087 if (tracing_disabled)
4088 return -ENODEV;
4089
4090 info = kzalloc(sizeof(*info), GFP_KERNEL);
4091 if (!info)
4092 return -ENOMEM;
4093
4094 info->tr = &global_trace;
4095 info->cpu = cpu;
4096 info->spare = NULL;
4097 /* Force reading ring buffer for first read */
4098 info->read = (unsigned int)-1;
4099
4100 filp->private_data = info;
4101
4102 return nonseekable_open(inode, filp);
4103}
4104
4105static ssize_t
4106tracing_buffers_read(struct file *filp, char __user *ubuf,
4107 size_t count, loff_t *ppos)
4108{
4109 struct ftrace_buffer_info *info = filp->private_data;
4110 ssize_t ret;
4111 size_t size;
4112
4113 if (!count)
4114 return 0;
4115
4116 if (!info->spare)
4117 info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
4118 if (!info->spare)
4119 return -ENOMEM;
4120
4121 /* Do we have previous read data to read? */
4122 if (info->read < PAGE_SIZE)
4123 goto read;
4124
4125 trace_access_lock(info->cpu);
4126 ret = ring_buffer_read_page(info->tr->buffer,
4127 &info->spare,
4128 count,
4129 info->cpu, 0);
4130 trace_access_unlock(info->cpu);
4131 if (ret < 0)
4132 return 0;
4133
4134 info->read = 0;
4135
4136read:
4137 size = PAGE_SIZE - info->read;
4138 if (size > count)
4139 size = count;
4140
4141 ret = copy_to_user(ubuf, info->spare + info->read, size);
4142 if (ret == size)
4143 return -EFAULT;
4144 size -= ret;
4145
4146 *ppos += size;
4147 info->read += size;
4148
4149 return size;
4150}
4151
4152static int tracing_buffers_release(struct inode *inode, struct file *file)
4153{
4154 struct ftrace_buffer_info *info = file->private_data;
4155
4156 if (info->spare)
4157 ring_buffer_free_read_page(info->tr->buffer, info->spare);
4158 kfree(info);
4159
4160 return 0;
4161}
4162
4163struct buffer_ref {
4164 struct ring_buffer *buffer;
4165 void *page;
4166 int ref;
4167};
4168
4169static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
4170 struct pipe_buffer *buf)
4171{
4172 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4173
4174 if (--ref->ref)
4175 return;
4176
4177 ring_buffer_free_read_page(ref->buffer, ref->page);
4178 kfree(ref);
4179 buf->private = 0;
4180}
4181
4182static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
4183 struct pipe_buffer *buf)
4184{
4185 return 1;
4186}
4187
4188static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
4189 struct pipe_buffer *buf)
4190{
4191 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4192
4193 ref->ref++;
4194}
4195
4196 /* Pipe buffer operations for splicing ring buffer pages. */
4197static const struct pipe_buf_operations buffer_pipe_buf_ops = {
4198 .can_merge = 0,
4199 .map = generic_pipe_buf_map,
4200 .unmap = generic_pipe_buf_unmap,
4201 .confirm = generic_pipe_buf_confirm,
4202 .release = buffer_pipe_buf_release,
4203 .steal = buffer_pipe_buf_steal,
4204 .get = buffer_pipe_buf_get,
4205};
4206
4207/*
4208 * Callback from splice_to_pipe(): releases any remaining pages
4209 * at the end of the spd in case we errored out while filling the pipe.
4210 */
4211static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
4212{
4213 struct buffer_ref *ref =
4214 (struct buffer_ref *)spd->partial[i].private;
4215
4216 if (--ref->ref)
4217 return;
4218
4219 ring_buffer_free_read_page(ref->buffer, ref->page);
4220 kfree(ref);
4221 spd->partial[i].private = 0;
4222}
4223
4224static ssize_t
4225tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4226 struct pipe_inode_info *pipe, size_t len,
4227 unsigned int flags)
4228{
4229 struct ftrace_buffer_info *info = file->private_data;
4230 struct partial_page partial_def[PIPE_DEF_BUFFERS];
4231 struct page *pages_def[PIPE_DEF_BUFFERS];
4232 struct splice_pipe_desc spd = {
4233 .pages = pages_def,
4234 .partial = partial_def,
4235 .nr_pages_max = PIPE_DEF_BUFFERS,
4236 .flags = flags,
4237 .ops = &buffer_pipe_buf_ops,
4238 .spd_release = buffer_spd_release,
4239 };
4240 struct buffer_ref *ref;
4241 int entries, size, i;
4242 size_t ret;
4243
4244 if (splice_grow_spd(pipe, &spd))
4245 return -ENOMEM;
4246
4247 if (*ppos & (PAGE_SIZE - 1)) {
4248 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
4249 ret = -EINVAL;
4250 goto out;
4251 }
4252
4253 if (len & (PAGE_SIZE - 1)) {
4254 WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
4255 if (len < PAGE_SIZE) {
4256 ret = -EINVAL;
4257 goto out;
4258 }
4259 len &= PAGE_MASK;
4260 }
4261
4262 trace_access_lock(info->cpu);
4263 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4264
4265 for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
4266 struct page *page;
4267 int r;
4268
4269 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
4270 if (!ref)
4271 break;
4272
4273 ref->ref = 1;
4274 ref->buffer = info->tr->buffer;
4275 ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
4276 if (!ref->page) {
4277 kfree(ref);
4278 break;
4279 }
4280
4281 r = ring_buffer_read_page(ref->buffer, &ref->page,
4282 len, info->cpu, 1);
4283 if (r < 0) {
4284 ring_buffer_free_read_page(ref->buffer, ref->page);
4285 kfree(ref);
4286 break;
4287 }
4288
4289 /*
4290 * Zero out any leftover data; this page is going
4291 * to user land.
4292 */
4293 size = ring_buffer_page_len(ref->page);
4294 if (size < PAGE_SIZE)
4295 memset(ref->page + size, 0, PAGE_SIZE - size);
4296
4297 page = virt_to_page(ref->page);
4298
4299 spd.pages[i] = page;
4300 spd.partial[i].len = PAGE_SIZE;
4301 spd.partial[i].offset = 0;
4302 spd.partial[i].private = (unsigned long)ref;
4303 spd.nr_pages++;
4304 *ppos += PAGE_SIZE;
4305
4306 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4307 }
4308
4309 trace_access_unlock(info->cpu);
4310 spd.nr_pages = i;
4311
4312 /* did we read anything? */
4313 if (!spd.nr_pages) {
4314 if (flags & SPLICE_F_NONBLOCK)
4315 ret = -EAGAIN;
4316 else
4317 ret = 0;
4318 /* TODO: block */
4319 goto out;
4320 }
4321
4322 ret = splice_to_pipe(pipe, &spd);
4323 splice_shrink_spd(&spd);
4324out:
4325 return ret;
4326}
4327
4328static const struct file_operations tracing_buffers_fops = {
4329 .open = tracing_buffers_open,
4330 .read = tracing_buffers_read,
4331 .release = tracing_buffers_release,
4332 .splice_read = tracing_buffers_splice_read,
4333 .llseek = no_llseek,
4334};
4335
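/*
 * Usage sketch (userspace, illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug, 4 KB pages, and _GNU_SOURCE for splice(2)):
 * trace_pipe_raw hands out whole ring buffer pages, so transfers must
 * be page-aligned multiples of PAGE_SIZE:
 *
 *	int pfd[2];
 *	int fd = open("/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *
 *	if (fd >= 0 && pipe(pfd) == 0)
 *		splice(fd, NULL, pfd[1], NULL, 4096, SPLICE_F_NONBLOCK);
 */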
4336static ssize_t
4337tracing_stats_read(struct file *filp, char __user *ubuf,
4338 size_t count, loff_t *ppos)
4339{
4340 unsigned long cpu = (unsigned long)filp->private_data;
4341 struct trace_array *tr = &global_trace;
4342 struct trace_seq *s;
4343 unsigned long cnt;
4344 unsigned long long t;
4345 unsigned long usec_rem;
4346
4347 s = kmalloc(sizeof(*s), GFP_KERNEL);
4348 if (!s)
4349 return -ENOMEM;
4350
4351 trace_seq_init(s);
4352
4353 cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
4354 trace_seq_printf(s, "entries: %ld\n", cnt);
4355
4356 cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
4357 trace_seq_printf(s, "overrun: %ld\n", cnt);
4358
4359 cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
4360 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
4361
4362 cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
4363 trace_seq_printf(s, "bytes: %ld\n", cnt);
4364
4365 t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
4366 usec_rem = do_div(t, USEC_PER_SEC);
4367 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
4368
4369 t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
4370 usec_rem = do_div(t, USEC_PER_SEC);
4371 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
4372
4373 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
4374
4375 kfree(s);
4376
4377 return count;
4378}
4379
4380static const struct file_operations tracing_stats_fops = {
4381 .open = tracing_open_generic,
4382 .read = tracing_stats_read,
4383 .llseek = generic_file_llseek,
4384};
4385
4386#ifdef CONFIG_DYNAMIC_FTRACE
4387
4388int __weak ftrace_arch_read_dyn_info(char *buf, int size)
4389{
4390 return 0;
4391}
4392
4393static ssize_t
4394tracing_read_dyn_info(struct file *filp, char __user *ubuf,
4395 size_t cnt, loff_t *ppos)
4396{
4397 static char ftrace_dyn_info_buffer[1024];
4398 static DEFINE_MUTEX(dyn_info_mutex);
4399 unsigned long *p = filp->private_data;
4400 char *buf = ftrace_dyn_info_buffer;
4401 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
4402 int r;
4403
4404 mutex_lock(&dyn_info_mutex);
4405 r = sprintf(buf, "%ld ", *p);
4406
4407 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
4408 buf[r++] = '\n';
4409
4410 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4411
4412 mutex_unlock(&dyn_info_mutex);
4413
4414 return r;
4415}
4416
4417static const struct file_operations tracing_dyn_info_fops = {
4418 .open = tracing_open_generic,
4419 .read = tracing_read_dyn_info,
4420 .llseek = generic_file_llseek,
4421};
4422#endif
4423
4424static struct dentry *d_tracer;
4425
4426struct dentry *tracing_init_dentry(void)
4427{
4428 static int once;
4429
4430 if (d_tracer)
4431 return d_tracer;
4432
4433 if (!debugfs_initialized())
4434 return NULL;
4435
4436 d_tracer = debugfs_create_dir("tracing", NULL);
4437
4438 if (!d_tracer && !once) {
4439 once = 1;
4440 pr_warning("Could not create debugfs directory 'tracing'\n");
4441 return NULL;
4442 }
4443
4444 return d_tracer;
4445}
4446
4447static struct dentry *d_percpu;
4448
4449struct dentry *tracing_dentry_percpu(void)
4450{
4451 static int once;
4452 struct dentry *d_tracer;
4453
4454 if (d_percpu)
4455 return d_percpu;
4456
4457 d_tracer = tracing_init_dentry();
4458
4459 if (!d_tracer)
4460 return NULL;
4461
4462 d_percpu = debugfs_create_dir("per_cpu", d_tracer);
4463
4464 if (!d_percpu && !once) {
4465 once = 1;
4466 pr_warning("Could not create debugfs directory 'per_cpu'\n");
4467 return NULL;
4468 }
4469
4470 return d_percpu;
4471}
4472
4473static void tracing_init_debugfs_percpu(long cpu)
4474{
4475 struct dentry *d_percpu = tracing_dentry_percpu();
4476 struct dentry *d_cpu;
4477 char cpu_dir[30]; /* 30 characters should be more than enough */
4478
4479 if (!d_percpu)
4480 return;
4481
4482 snprintf(cpu_dir, 30, "cpu%ld", cpu);
4483 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
4484 if (!d_cpu) {
4485 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
4486 return;
4487 }
4488
4489 /* per cpu trace_pipe */
4490 trace_create_file("trace_pipe", 0444, d_cpu,
4491 (void *) cpu, &tracing_pipe_fops);
4492
4493 /* per cpu trace */
4494 trace_create_file("trace", 0644, d_cpu,
4495 (void *) cpu, &tracing_fops);
4496
4497 trace_create_file("trace_pipe_raw", 0444, d_cpu,
4498 (void *) cpu, &tracing_buffers_fops);
4499
4500 trace_create_file("stats", 0444, d_cpu,
4501 (void *) cpu, &tracing_stats_fops);
4502
4503 trace_create_file("buffer_size_kb", 0444, d_cpu,
4504 (void *) cpu, &tracing_entries_fops);
4505}
4506
4507#ifdef CONFIG_FTRACE_SELFTEST
4508/* Let selftest have access to static functions in this file */
4509#include "trace_selftest.c"
4510#endif
4511
4512struct trace_option_dentry {
4513 struct tracer_opt *opt;
4514 struct tracer_flags *flags;
4515 struct dentry *entry;
4516};
4517
4518static ssize_t
4519trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
4520 loff_t *ppos)
4521{
4522 struct trace_option_dentry *topt = filp->private_data;
4523 char *buf;
4524
4525 if (topt->flags->val & topt->opt->bit)
4526 buf = "1\n";
4527 else
4528 buf = "0\n";
4529
4530 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4531}
4532
4533static ssize_t
4534trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
4535 loff_t *ppos)
4536{
4537 struct trace_option_dentry *topt = filp->private_data;
4538 unsigned long val;
4539 int ret;
4540
4541 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4542 if (ret)
4543 return ret;
4544
4545 if (val != 0 && val != 1)
4546 return -EINVAL;
4547
4548 if (!!(topt->flags->val & topt->opt->bit) != val) {
4549 mutex_lock(&trace_types_lock);
4550 ret = __set_tracer_option(current_trace, topt->flags,
4551 topt->opt, !val);
4552 mutex_unlock(&trace_types_lock);
4553 if (ret)
4554 return ret;
4555 }
4556
4557 *ppos += cnt;
4558
4559 return cnt;
4560}
4561
4562
4563static const struct file_operations trace_options_fops = {
4564 .open = tracing_open_generic,
4565 .read = trace_options_read,
4566 .write = trace_options_write,
4567 .llseek = generic_file_llseek,
4568};
4569
4570static ssize_t
4571trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
4572 loff_t *ppos)
4573{
4574 long index = (long)filp->private_data;
4575 char *buf;
4576
4577 if (trace_flags & (1 << index))
4578 buf = "1\n";
4579 else
4580 buf = "0\n";
4581
4582 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4583}
4584
4585static ssize_t
4586trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
4587 loff_t *ppos)
4588{
4589 long index = (long)filp->private_data;
4590 unsigned long val;
4591 int ret;
4592
4593 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4594 if (ret)
4595 return ret;
4596
4597 if (val != 0 && val != 1)
4598 return -EINVAL;
4599 set_tracer_flags(1 << index, val);
4600
4601 *ppos += cnt;
4602
4603 return cnt;
4604}
4605
4606static const struct file_operations trace_options_core_fops = {
4607 .open = tracing_open_generic,
4608 .read = trace_options_core_read,
4609 .write = trace_options_core_write,
4610 .llseek = generic_file_llseek,
4611};
4612
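/*
 * Usage sketch (userspace, illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug): each core option is a boolean file under options/;
 * writing "1" or "0" flips the matching bit in trace_flags. "sym-addr"
 * is one example name from the trace_options table:
 *
 *	int fd = open("/sys/kernel/debug/tracing/options/sym-addr", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);
 *		close(fd);
 *	}
 */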
4613struct dentry *trace_create_file(const char *name,
4614 umode_t mode,
4615 struct dentry *parent,
4616 void *data,
4617 const struct file_operations *fops)
4618{
4619 struct dentry *ret;
4620
4621 ret = debugfs_create_file(name, mode, parent, data, fops);
4622 if (!ret)
4623 pr_warning("Could not create debugfs '%s' entry\n", name);
4624
4625 return ret;
4626}
4627
4628
4629static struct dentry *trace_options_init_dentry(void)
4630{
4631 struct dentry *d_tracer;
4632 static struct dentry *t_options;
4633
4634 if (t_options)
4635 return t_options;
4636
4637 d_tracer = tracing_init_dentry();
4638 if (!d_tracer)
4639 return NULL;
4640
4641 t_options = debugfs_create_dir("options", d_tracer);
4642 if (!t_options) {
4643 pr_warning("Could not create debugfs directory 'options'\n");
4644 return NULL;
4645 }
4646
4647 return t_options;
4648}
4649
4650static void
4651create_trace_option_file(struct trace_option_dentry *topt,
4652 struct tracer_flags *flags,
4653 struct tracer_opt *opt)
4654{
4655 struct dentry *t_options;
4656
4657 t_options = trace_options_init_dentry();
4658 if (!t_options)
4659 return;
4660
4661 topt->flags = flags;
4662 topt->opt = opt;
4663
4664 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
4665 &trace_options_fops);
4666
4667}
4668
4669static struct trace_option_dentry *
4670create_trace_option_files(struct tracer *tracer)
4671{
4672 struct trace_option_dentry *topts;
4673 struct tracer_flags *flags;
4674 struct tracer_opt *opts;
4675 int cnt;
4676
4677 if (!tracer)
4678 return NULL;
4679
4680 flags = tracer->flags;
4681
4682 if (!flags || !flags->opts)
4683 return NULL;
4684
4685 opts = flags->opts;
4686
4687 for (cnt = 0; opts[cnt].name; cnt++)
4688 ;
4689
4690 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
4691 if (!topts)
4692 return NULL;
4693
4694 for (cnt = 0; opts[cnt].name; cnt++)
4695 create_trace_option_file(&topts[cnt], flags,
4696 &opts[cnt]);
4697
4698 return topts;
4699}
4700
4701static void
4702destroy_trace_option_files(struct trace_option_dentry *topts)
4703{
4704 int cnt;
4705
4706 if (!topts)
4707 return;
4708
4709 for (cnt = 0; topts[cnt].opt; cnt++) {
4710 if (topts[cnt].entry)
4711 debugfs_remove(topts[cnt].entry);
4712 }
4713
4714 kfree(topts);
4715}
4716
4717static struct dentry *
4718create_trace_option_core_file(const char *option, long index)
4719{
4720 struct dentry *t_options;
4721
4722 t_options = trace_options_init_dentry();
4723 if (!t_options)
4724 return NULL;
4725
4726 return trace_create_file(option, 0644, t_options, (void *)index,
4727 &trace_options_core_fops);
4728}
4729
4730static __init void create_trace_options_dir(void)
4731{
4732 struct dentry *t_options;
4733 int i;
4734
4735 t_options = trace_options_init_dentry();
4736 if (!t_options)
4737 return;
4738
4739 for (i = 0; trace_options[i]; i++)
4740 create_trace_option_core_file(trace_options[i], i);
4741}
4742
4743static ssize_t
4744rb_simple_read(struct file *filp, char __user *ubuf,
4745 size_t cnt, loff_t *ppos)
4746{
4747 struct trace_array *tr = filp->private_data;
4748 struct ring_buffer *buffer = tr->buffer;
4749 char buf[64];
4750 int r;
4751
4752 if (buffer)
4753 r = ring_buffer_record_is_on(buffer);
4754 else
4755 r = 0;
4756
4757 r = sprintf(buf, "%d\n", r);
4758
4759 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4760}
4761
4762static ssize_t
4763rb_simple_write(struct file *filp, const char __user *ubuf,
4764 size_t cnt, loff_t *ppos)
4765{
4766 struct trace_array *tr = filp->private_data;
4767 struct ring_buffer *buffer = tr->buffer;
4768 unsigned long val;
4769 int ret;
4770
4771 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4772 if (ret)
4773 return ret;
4774
4775 if (buffer) {
4776 if (val)
4777 ring_buffer_record_on(buffer);
4778 else
4779 ring_buffer_record_off(buffer);
4780 }
4781
4782 (*ppos)++;
4783
4784 return cnt;
4785}
4786
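/*
 * Usage sketch (userspace, illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug): writing "0" to tracing_on stops the ring buffer
 * from recording and "1" turns it back on, without tearing down the
 * current tracer:
 *
 *	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "0", 1);
 *		close(fd);
 *	}
 */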
4787static const struct file_operations rb_simple_fops = {
4788 .open = tracing_open_generic,
4789 .read = rb_simple_read,
4790 .write = rb_simple_write,
4791 .llseek = default_llseek,
4792};
4793
4794static __init int tracer_init_debugfs(void)
4795{
4796 struct dentry *d_tracer;
4797 int cpu;
4798
4799 trace_access_lock_init();
4800
4801 d_tracer = tracing_init_dentry();
4802
4803 trace_create_file("tracing_enabled", 0644, d_tracer,
4804 &global_trace, &tracing_ctrl_fops);
4805
4806 trace_create_file("trace_options", 0644, d_tracer,
4807 NULL, &tracing_iter_fops);
4808
4809 trace_create_file("tracing_cpumask", 0644, d_tracer,
4810 NULL, &tracing_cpumask_fops);
4811
4812 trace_create_file("trace", 0644, d_tracer,
4813 (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
4814
4815 trace_create_file("available_tracers", 0444, d_tracer,
4816 &global_trace, &show_traces_fops);
4817
4818 trace_create_file("current_tracer", 0644, d_tracer,
4819 &global_trace, &set_tracer_fops);
4820
4821#ifdef CONFIG_TRACER_MAX_TRACE
4822 trace_create_file("tracing_max_latency", 0644, d_tracer,
4823 &tracing_max_latency, &tracing_max_lat_fops);
4824#endif
4825
4826 trace_create_file("tracing_thresh", 0644, d_tracer,
4827 &tracing_thresh, &tracing_max_lat_fops);
4828
4829 trace_create_file("README", 0444, d_tracer,
4830 NULL, &tracing_readme_fops);
4831
4832 trace_create_file("trace_pipe", 0444, d_tracer,
4833 (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
4834
4835 trace_create_file("buffer_size_kb", 0644, d_tracer,
4836 (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
4837
4838 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
4839 &global_trace, &tracing_total_entries_fops);
4840
4841 trace_create_file("free_buffer", 0644, d_tracer,
4842 &global_trace, &tracing_free_buffer_fops);
4843
4844 trace_create_file("trace_marker", 0220, d_tracer,
4845 NULL, &tracing_mark_fops);
4846
4847 trace_create_file("saved_cmdlines", 0444, d_tracer,
4848 NULL, &tracing_saved_cmdlines_fops);
4849
4850 trace_create_file("trace_clock", 0644, d_tracer, NULL,
4851 &trace_clock_fops);
4852
4853 trace_create_file("tracing_on", 0644, d_tracer,
4854 &global_trace, &rb_simple_fops);
4855
4856#ifdef CONFIG_DYNAMIC_FTRACE
4857 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4858 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
4859#endif
4860
4861 create_trace_options_dir();
4862
4863 for_each_tracing_cpu(cpu)
4864 tracing_init_debugfs_percpu(cpu);
4865
4866 return 0;
4867}
4868
4869static int trace_panic_handler(struct notifier_block *this,
4870 unsigned long event, void *unused)
4871{
4872 if (ftrace_dump_on_oops)
4873 ftrace_dump(ftrace_dump_on_oops);
4874 return NOTIFY_OK;
4875}
4876
4877static struct notifier_block trace_panic_notifier = {
4878 .notifier_call = trace_panic_handler,
4879 .next = NULL,
4880 .priority = 150 /* priority: INT_MAX >= x >= 0 */
4881};
4882
4883static int trace_die_handler(struct notifier_block *self,
4884 unsigned long val,
4885 void *data)
4886{
4887 switch (val) {
4888 case DIE_OOPS:
4889 if (ftrace_dump_on_oops)
4890 ftrace_dump(ftrace_dump_on_oops);
4891 break;
4892 default:
4893 break;
4894 }
4895 return NOTIFY_OK;
4896}
4897
4898static struct notifier_block trace_die_notifier = {
4899 .notifier_call = trace_die_handler,
4900 .priority = 200
4901};
4902
4903/*
4904 * printk is limited to a max of 1024 characters; we really don't need
4905 * it that big. Nothing should be printing 1000 characters anyway.
4906 */
4907#define TRACE_MAX_PRINT 1000
4908
4909/*
4910 * Define here KERN_TRACE so that we have one place to modify
4911 * it if we decide to change what log level the ftrace dump
4912 * should be at.
4913 */
4914#define KERN_TRACE KERN_EMERG
4915
4916void
4917trace_printk_seq(struct trace_seq *s)
4918{
4919 /* Probably should print a warning here. */
4920 if (s->len >= 1000)
4921 s->len = 1000;
4922
4923 /* should be NUL-terminated, but we are paranoid. */
4924 s->buffer[s->len] = 0;
4925
4926 printk(KERN_TRACE "%s", s->buffer);
4927
4928 trace_seq_init(s);
4929}
4930
4931void trace_init_global_iter(struct trace_iterator *iter)
4932{
4933 iter->tr = &global_trace;
4934 iter->trace = current_trace;
4935 iter->cpu_file = TRACE_PIPE_ALL_CPU;
4936}
4937
4938static void
4939__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
4940{
4941 static arch_spinlock_t ftrace_dump_lock =
4942 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
4943 /* use static because iter can be a bit big for the stack */
4944 static struct trace_iterator iter;
4945 unsigned int old_userobj;
4946 static int dump_ran;
4947 unsigned long flags;
4948 int cnt = 0, cpu;
4949
4950 /* only one dump */
4951 local_irq_save(flags);
4952 arch_spin_lock(&ftrace_dump_lock);
4953 if (dump_ran)
4954 goto out;
4955
4956 dump_ran = 1;
4957
4958 tracing_off();
4959
4960 /* Did function tracer already get disabled? */
4961 if (ftrace_is_dead()) {
4962 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
4963 printk("# MAY BE MISSING FUNCTION EVENTS\n");
4964 }
4965
4966 if (disable_tracing)
4967 ftrace_kill();
4968
4969 trace_init_global_iter(&iter);
4970
4971 for_each_tracing_cpu(cpu) {
4972 atomic_inc(&iter.tr->data[cpu]->disabled);
4973 }
4974
4975 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
4976
4977 /* don't look at user memory in panic mode */
4978 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
4979
4980 /* Simulate the iterator */
4981 iter.tr = &global_trace;
4982 iter.trace = current_trace;
4983
4984 switch (oops_dump_mode) {
4985 case DUMP_ALL:
4986 iter.cpu_file = TRACE_PIPE_ALL_CPU;
4987 break;
4988 case DUMP_ORIG:
4989 iter.cpu_file = raw_smp_processor_id();
4990 break;
4991 case DUMP_NONE:
4992 goto out_enable;
4993 default:
4994 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
4995 iter.cpu_file = TRACE_PIPE_ALL_CPU;
4996 }
4997
4998 printk(KERN_TRACE "Dumping ftrace buffer:\n");
4999
5000 /*
5001 * We need to stop all tracing on all CPUs to read
5002 * the next buffer. This is a bit expensive, but is
5003 * not done often. We fill in all that we can read,
5004 * and then release the locks again.
5005 */
5006
5007 while (!trace_empty(&iter)) {
5008
5009 if (!cnt)
5010 printk(KERN_TRACE "---------------------------------\n");
5011
5012 cnt++;
5013
5014 /* reset all but tr, trace, and overruns */
5015 memset(&iter.seq, 0,
5016 sizeof(struct trace_iterator) -
5017 offsetof(struct trace_iterator, seq));
5018 iter.iter_flags |= TRACE_FILE_LAT_FMT;
5019 iter.pos = -1;
5020
5021 if (trace_find_next_entry_inc(&iter) != NULL) {
5022 int ret;
5023
5024 ret = print_trace_line(&iter);
5025 if (ret != TRACE_TYPE_NO_CONSUME)
5026 trace_consume(&iter);
5027 }
5028 touch_nmi_watchdog();
5029
5030 trace_printk_seq(&iter.seq);
5031 }
5032
5033 if (!cnt)
5034 printk(KERN_TRACE " (ftrace buffer empty)\n");
5035 else
5036 printk(KERN_TRACE "---------------------------------\n");
5037
5038 out_enable:
5039 /* Re-enable tracing if requested */
5040 if (!disable_tracing) {
5041 trace_flags |= old_userobj;
5042
5043 for_each_tracing_cpu(cpu) {
5044 atomic_dec(&iter.tr->data[cpu]->disabled);
5045 }
5046 tracing_on();
5047 }
5048
5049 out:
5050 arch_spin_unlock(&ftrace_dump_lock);
5051 local_irq_restore(flags);
5052}
5053
5054/* By default: disable tracing after the dump */
5055void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
5056{
5057 __ftrace_dump(true, oops_dump_mode);
5058}
5059EXPORT_SYMBOL_GPL(ftrace_dump);
5060
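/*
 * Usage sketch (hypothetical in-kernel caller, for illustration only;
 * "fatal_error" is a stand-in condition): a subsystem that hits a fatal
 * state can dump the trace buffers to the console before halting:
 *
 *	if (fatal_error)
 *		ftrace_dump(DUMP_ALL);
 */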
5061__init static int tracer_alloc_buffers(void)
5062{
5063 int ring_buf_size;
5064 enum ring_buffer_flags rb_flags;
5065 int i;
5066 int ret = -ENOMEM;
5067
5068
5069 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
5070 goto out;
5071
5072 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
5073 goto out_free_buffer_mask;
5074
5075 /* Only allocate trace_printk buffers if a trace_printk exists */
5076 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
5077 trace_printk_init_buffers();
5078
5079 /* To save memory, keep the ring buffer size to its minimum */
5080 if (ring_buffer_expanded)
5081 ring_buf_size = trace_buf_size;
5082 else
5083 ring_buf_size = 1;
5084
5085 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5086
5087 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
5088 cpumask_copy(tracing_cpumask, cpu_all_mask);
5089
5090 /* TODO: make the number of buffers hot pluggable with CPUS */
5091 global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
5092 if (!global_trace.buffer) {
5093 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
5094 WARN_ON(1);
5095 goto out_free_cpumask;
5096 }
5097 if (global_trace.buffer_disabled)
5098 tracing_off();
5099
5100
5101#ifdef CONFIG_TRACER_MAX_TRACE
5102 max_tr.buffer = ring_buffer_alloc(1, rb_flags);
5103 if (!max_tr.buffer) {
5104 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
5105 WARN_ON(1);
5106 ring_buffer_free(global_trace.buffer);
5107 goto out_free_cpumask;
5108 }
5109#endif
5110
5111 /* Allocate the first page for all buffers */
5112 for_each_tracing_cpu(i) {
5113 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
5114 max_tr.data[i] = &per_cpu(max_tr_data, i);
5115 }
5116
5117 set_buffer_entries(&global_trace,
5118 ring_buffer_size(global_trace.buffer, 0));
5119#ifdef CONFIG_TRACER_MAX_TRACE
5120 set_buffer_entries(&max_tr, 1);
5121#endif
5122
5123 trace_init_cmdlines();
5124
5125 register_tracer(&nop_trace);
5126 current_trace = &nop_trace;
5127 /* All seems OK, enable tracing */
5128 tracing_disabled = 0;
5129
5130 atomic_notifier_chain_register(&panic_notifier_list,
5131 &trace_panic_notifier);
5132
5133 register_die_notifier(&trace_die_notifier);
5134
5135 return 0;
5136
5137out_free_cpumask:
5138 free_cpumask_var(tracing_cpumask);
5139out_free_buffer_mask:
5140 free_cpumask_var(tracing_buffer_mask);
5141out:
5142 return ret;
5143}
5144
5145__init static int clear_boot_tracer(void)
5146{
5147 /*
5148 * The buffer holding the default boot-up tracer name lives in
5149 * an init section. This function runs as a late initcall. If
5150 * the boot tracer was never found and registered, clear the
5151 * name out, to prevent a later registration from accessing
5152 * the init-section buffer that is about to be freed.
5153 */
5154 if (!default_bootup_tracer)
5155 return 0;
5156
5157 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
5158 default_bootup_tracer);
5159 default_bootup_tracer = NULL;
5160
5161 return 0;
5162}
5163
5164early_initcall(tracer_alloc_buffers);
5165fs_initcall(tracer_init_debugfs);
5166late_initcall(clear_boot_tracer);