// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

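/*
 * Illustrative usage sketch (not part of the original file; assumes
 * tracefs is mounted at /sys/kernel/tracing): the options above are
 * toggled at runtime through the trace_options file, with a "no" prefix
 * to clear them, e.g.
 *
 *	# echo funcgraph-proc > /sys/kernel/tracing/trace_options
 *	# echo nofuncgraph-irqs > /sys/kernel/tracing/trace_options
 */
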
static struct trace_array *graph_array;

/*
 * DURATION column is being also used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index to ftrace return stack of
	 * current task. Its value should be in [0, FTRACE_RETFUNC_
	 * DEPTH) when the function graph tracer is used. To support
	 * filtering out specific functions, it makes the index
	 * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
	 * so when it sees a negative index the ftrace will ignore
	 * the record. And the index gets recovered when returning
	 * from the filtered function by adding the FTRACE_NOTRACE_
	 * DEPTH and then it'll continue to record functions normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr() which can be
	 * set from set_graph_notrace file in tracefs by user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	*depth = current->curr_ret_stack;

	return 0;
}

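/*
 * Illustrative sketch of the index biasing described above, reduced to
 * plain arithmetic. This helper is hypothetical (it is not used anywhere);
 * it only demonstrates that push, bias, pop and recovery leave the index
 * exactly one below where the push put it.
 */
static __maybe_unused void fgraph_notrace_index_sketch(void)
{
	int curr = -1;			/* curr_ret_stack starts at -1 */
	int index = ++curr;		/* push: new entry at index 0 */

	/* the function matched set_graph_notrace: bias the index */
	curr -= FTRACE_NOTRACE_DEPTH;
	WARN_ON(curr >= -1);		/* records are ignored while negative */

	/* return path: decrement first, then recover the bias */
	curr--;
	curr += FTRACE_NOTRACE_DEPTH;
	WARN_ON(curr != index - 1);	/* back to the pre-push depth */
}
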
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover index to get an original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

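/*
 * Illustrative sketch (hypothetical, not part of this file): the arch's
 * return_to_handler trampoline, reduced to C. Real implementations are
 * assembly stubs that also preserve the return-value registers around
 * this call and then jump to the address returned here.
 */
static __maybe_unused unsigned long return_to_handler_sketch(void)
{
	/* the frame pointer argument is arch-specific; shown here as 0 */
	return ftrace_return_to_handler(0);
}
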
/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < -1)
		index += FTRACE_NOTRACE_DEPTH;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */

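/*
 * Illustrative sketch (hypothetical helper, not part of this file): how an
 * unwinder could filter a whole walk of found return addresses through
 * ftrace_graph_ret_addr(). The state variable is initialized to zero once
 * and then reused for every frame of the same walk.
 */
static __maybe_unused void
unwind_fixup_sketch(struct task_struct *task, unsigned long *addrs,
		    unsigned long **retps, int nr)
{
	int graph_idx = 0;	/* shared across the whole stack walk */
	int i;

	for (i = 0; i < nr; i++)
		addrs[i] = ftrace_graph_ret_addr(task, &graph_idx,
						 addrs[i], retps[i]);
}
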
int __trace_graph_entry(struct trace_array *tr,
			struct ftrace_graph_ent *trace,
			unsigned long flags,
			int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
			  struct ftrace_graph_ret *trace,
			  unsigned long flags,
			  int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}

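/*
 * Worked example of the centering above: pid 1755 with comm "sshd" gives
 * "sshd-1755" (9 chars), so 5 spaces pad the 14-char field, 2 before and
 * 3 after: "  sshd-1755   ".
 */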

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

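/*
 * Illustrative output (spacing approximate): when an entry is immediately
 * followed by its matching return, the pair collapses into one leaf line;
 * otherwise the entry opens a brace that a later return will close:
 *
 *	1)   0.123 us    |        kfree();
 *	1)               |        schedule() {
 */
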
static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

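/*
 * Worked examples of the formatting above (nanoseconds in, column text
 * out; the field is padded to a constant width):
 *
 *	   1234 ns  ->  "1.234 us    "
 *	1234567 ns  ->  "1234.567 us "
 */
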
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

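/*
 * Worked example of the depth_irq tracking above (funcgraph-irqs cleared,
 * interrupt arriving while at depth 2, so the irq entry lands at depth 3):
 *
 *	irq entry, depth 3     -> depth_irq = 3, hidden
 *	nested entry, depth 4  -> depth_irq >= 0, hidden
 *	nested return, depth 4 -> depth_irq (3) < 4, hidden
 *	irq return, depth 3    -> depth_irq (3) >= 3, reset to -1, hidden
 */
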
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* dont trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

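/*
 * With the default flags (CPU + DURATION) the two header lines come out
 * as in the function_graph samples in Documentation/trace/ftrace.txt:
 *
 *	# CPU  DURATION                  FUNCTION CALLS
 *	# |     |   |                     |   |   |   |
 */
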
static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

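/*
 * Usage sketch (assumes tracefs is mounted at /sys/kernel/tracing):
 *
 *	# echo 3 > /sys/kernel/tracing/max_graph_depth
 *	# cat /sys/kernel/tracing/max_graph_depth
 *	3
 *
 * Zero (the default) means no depth limit; fgraph_max_depth is consulted
 * by ftrace_graph_ignore_func() in trace.h.
 */
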
static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);
1/*
2 *
3 * Function graph tracer.
4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7 *
8 */
9#include <linux/uaccess.h>
10#include <linux/ftrace.h>
11#include <linux/interrupt.h>
12#include <linux/slab.h>
13#include <linux/fs.h>
14
15#include "trace.h"
16#include "trace_output.h"
17
18static bool kill_ftrace_graph;
19
20/**
21 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
22 *
23 * ftrace_graph_stop() is called when a severe error is detected in
24 * the function graph tracing. This function is called by the critical
25 * paths of function graph to keep those paths from doing any more harm.
26 */
27bool ftrace_graph_is_dead(void)
28{
29 return kill_ftrace_graph;
30}
31
32/**
33 * ftrace_graph_stop - set to permanently disable function graph tracincg
34 *
35 * In case of an error int function graph tracing, this is called
36 * to try to keep function graph tracing from causing any more harm.
37 * Usually this is pretty severe and this is called to try to at least
38 * get a warning out to the user.
39 */
40void ftrace_graph_stop(void)
41{
42 kill_ftrace_graph = true;
43}
44
45/* When set, irq functions will be ignored */
46static int ftrace_graph_skip_irqs;
47
48struct fgraph_cpu_data {
49 pid_t last_pid;
50 int depth;
51 int depth_irq;
52 int ignore;
53 unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
54};
55
56struct fgraph_data {
57 struct fgraph_cpu_data __percpu *cpu_data;
58
59 /* Place to preserve last processed entry. */
60 struct ftrace_graph_ent_entry ent;
61 struct ftrace_graph_ret_entry ret;
62 int failed;
63 int cpu;
64};
65
66#define TRACE_GRAPH_INDENT 2
67
68unsigned int fgraph_max_depth;
69
70static struct tracer_opt trace_opts[] = {
71 /* Display overruns? (for self-debug purpose) */
72 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
73 /* Display CPU ? */
74 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
75 /* Display Overhead ? */
76 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
77 /* Display proc name/pid */
78 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
79 /* Display duration of execution */
80 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
81 /* Display absolute time of an entry */
82 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
83 /* Display interrupts */
84 { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
85 /* Display function name after trailing } */
86 { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
87 /* Include sleep time (scheduled out) between entry and return */
88 { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
89 /* Include time within nested functions */
90 { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
91 { } /* Empty entry */
92};
93
94static struct tracer_flags tracer_flags = {
95 /* Don't display overruns, proc, or tail by default */
96 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
97 TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
98 TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
99 .opts = trace_opts
100};
101
102static struct trace_array *graph_array;
103
104/*
105 * DURATION column is being also used to display IRQ signs,
106 * following values are used by print_graph_irq and others
107 * to fill in space into DURATION column.
108 */
109enum {
110 FLAGS_FILL_FULL = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
111 FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
112 FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
113};
114
115static void
116print_graph_duration(struct trace_array *tr, unsigned long long duration,
117 struct trace_seq *s, u32 flags);
118
119/* Add a function return address to the trace stack on thread info.*/
120int
121ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
122 unsigned long frame_pointer, unsigned long *retp)
123{
124 unsigned long long calltime;
125 int index;
126
127 if (unlikely(ftrace_graph_is_dead()))
128 return -EBUSY;
129
130 if (!current->ret_stack)
131 return -EBUSY;
132
133 /*
134 * We must make sure the ret_stack is tested before we read
135 * anything else.
136 */
137 smp_rmb();
138
139 /* The return trace stack is full */
140 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
141 atomic_inc(¤t->trace_overrun);
142 return -EBUSY;
143 }
144
145 /*
146 * The curr_ret_stack is an index to ftrace return stack of
147 * current task. Its value should be in [0, FTRACE_RETFUNC_
148 * DEPTH) when the function graph tracer is used. To support
149 * filtering out specific functions, it makes the index
150 * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
151 * so when it sees a negative index the ftrace will ignore
152 * the record. And the index gets recovered when returning
153 * from the filtered function by adding the FTRACE_NOTRACE_
154 * DEPTH and then it'll continue to record functions normally.
155 *
156 * The curr_ret_stack is initialized to -1 and get increased
157 * in this function. So it can be less than -1 only if it was
158 * filtered out via ftrace_graph_notrace_addr() which can be
159 * set from set_graph_notrace file in tracefs by user.
160 */
161 if (current->curr_ret_stack < -1)
162 return -EBUSY;
163
164 calltime = trace_clock_local();
165
166 index = ++current->curr_ret_stack;
167 if (ftrace_graph_notrace_addr(func))
168 current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
169 barrier();
170 current->ret_stack[index].ret = ret;
171 current->ret_stack[index].func = func;
172 current->ret_stack[index].calltime = calltime;
173#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
174 current->ret_stack[index].fp = frame_pointer;
175#endif
176#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
177 current->ret_stack[index].retp = retp;
178#endif
179 *depth = current->curr_ret_stack;
180
181 return 0;
182}
183
184/* Retrieve a function return address to the trace stack on thread info.*/
185static void
186ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
187 unsigned long frame_pointer)
188{
189 int index;
190
191 index = current->curr_ret_stack;
192
193 /*
194 * A negative index here means that it's just returned from a
195 * notrace'd function. Recover index to get an original
196 * return address. See ftrace_push_return_trace().
197 *
198 * TODO: Need to check whether the stack gets corrupted.
199 */
200 if (index < 0)
201 index += FTRACE_NOTRACE_DEPTH;
202
203 if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
204 ftrace_graph_stop();
205 WARN_ON(1);
206 /* Might as well panic, otherwise we have no where to go */
207 *ret = (unsigned long)panic;
208 return;
209 }
210
211#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
212 /*
213 * The arch may choose to record the frame pointer used
214 * and check it here to make sure that it is what we expect it
215 * to be. If gcc does not set the place holder of the return
216 * address in the frame pointer, and does a copy instead, then
217 * the function graph trace will fail. This test detects this
218 * case.
219 *
220 * Currently, x86_32 with optimize for size (-Os) makes the latest
221 * gcc do the above.
222 *
223 * Note, -mfentry does not use frame pointers, and this test
224 * is not needed if CC_USING_FENTRY is set.
225 */
226 if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
227 ftrace_graph_stop();
228 WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
229 " from func %ps return to %lx\n",
230 current->ret_stack[index].fp,
231 frame_pointer,
232 (void *)current->ret_stack[index].func,
233 current->ret_stack[index].ret);
234 *ret = (unsigned long)panic;
235 return;
236 }
237#endif
238
239 *ret = current->ret_stack[index].ret;
240 trace->func = current->ret_stack[index].func;
241 trace->calltime = current->ret_stack[index].calltime;
242 trace->overrun = atomic_read(¤t->trace_overrun);
243 trace->depth = index;
244}
245
246/*
247 * Send the trace to the ring-buffer.
248 * @return the original return address.
249 */
250unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
251{
252 struct ftrace_graph_ret trace;
253 unsigned long ret;
254
255 ftrace_pop_return_trace(&trace, &ret, frame_pointer);
256 trace.rettime = trace_clock_local();
257 barrier();
258 current->curr_ret_stack--;
259 /*
260 * The curr_ret_stack can be less than -1 only if it was
261 * filtered out and it's about to return from the function.
262 * Recover the index and continue to trace normal functions.
263 */
264 if (current->curr_ret_stack < -1) {
265 current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
266 return ret;
267 }
268
269 /*
270 * The trace should run after decrementing the ret counter
271 * in case an interrupt were to come in. We don't want to
272 * lose the interrupt if max_depth is set.
273 */
274 ftrace_graph_return(&trace);
275
276 if (unlikely(!ret)) {
277 ftrace_graph_stop();
278 WARN_ON(1);
279 /* Might as well panic. What else to do? */
280 ret = (unsigned long)panic;
281 }
282
283 return ret;
284}
285
286/**
287 * ftrace_graph_ret_addr - convert a potentially modified stack return address
288 * to its original value
289 *
290 * This function can be called by stack unwinding code to convert a found stack
291 * return address ('ret') to its original value, in case the function graph
292 * tracer has modified it to be 'return_to_handler'. If the address hasn't
293 * been modified, the unchanged value of 'ret' is returned.
294 *
295 * 'idx' is a state variable which should be initialized by the caller to zero
296 * before the first call.
297 *
298 * 'retp' is a pointer to the return address on the stack. It's ignored if
299 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
300 */
301#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
302unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
303 unsigned long ret, unsigned long *retp)
304{
305 int index = task->curr_ret_stack;
306 int i;
307
308 if (ret != (unsigned long)return_to_handler)
309 return ret;
310
311 if (index < -1)
312 index += FTRACE_NOTRACE_DEPTH;
313
314 if (index < 0)
315 return ret;
316
317 for (i = 0; i <= index; i++)
318 if (task->ret_stack[i].retp == retp)
319 return task->ret_stack[i].ret;
320
321 return ret;
322}
323#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
324unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
325 unsigned long ret, unsigned long *retp)
326{
327 int task_idx;
328
329 if (ret != (unsigned long)return_to_handler)
330 return ret;
331
332 task_idx = task->curr_ret_stack;
333
334 if (!task->ret_stack || task_idx < *idx)
335 return ret;
336
337 task_idx -= *idx;
338 (*idx)++;
339
340 return task->ret_stack[task_idx].ret;
341}
342#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
343
344int __trace_graph_entry(struct trace_array *tr,
345 struct ftrace_graph_ent *trace,
346 unsigned long flags,
347 int pc)
348{
349 struct trace_event_call *call = &event_funcgraph_entry;
350 struct ring_buffer_event *event;
351 struct ring_buffer *buffer = tr->trace_buffer.buffer;
352 struct ftrace_graph_ent_entry *entry;
353
354 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
355 sizeof(*entry), flags, pc);
356 if (!event)
357 return 0;
358 entry = ring_buffer_event_data(event);
359 entry->graph_ent = *trace;
360 if (!call_filter_check_discard(call, entry, buffer, event))
361 trace_buffer_unlock_commit_nostack(buffer, event);
362
363 return 1;
364}
365
366static inline int ftrace_graph_ignore_irqs(void)
367{
368 if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
369 return 0;
370
371 return in_irq();
372}
373
374int trace_graph_entry(struct ftrace_graph_ent *trace)
375{
376 struct trace_array *tr = graph_array;
377 struct trace_array_cpu *data;
378 unsigned long flags;
379 long disabled;
380 int ret;
381 int cpu;
382 int pc;
383
384 if (!ftrace_trace_task(tr))
385 return 0;
386
387 if (ftrace_graph_ignore_func(trace))
388 return 0;
389
390 if (ftrace_graph_ignore_irqs())
391 return 0;
392
393 /*
394 * Do not trace a function if it's filtered by set_graph_notrace.
395 * Make the index of ret stack negative to indicate that it should
396 * ignore further functions. But it needs its own ret stack entry
397 * to recover the original index in order to continue tracing after
398 * returning from the function.
399 */
400 if (ftrace_graph_notrace_addr(trace->func))
401 return 1;
402
403 /*
404 * Stop here if tracing_threshold is set. We only write function return
405 * events to the ring buffer.
406 */
407 if (tracing_thresh)
408 return 1;
409
410 local_irq_save(flags);
411 cpu = raw_smp_processor_id();
412 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
413 disabled = atomic_inc_return(&data->disabled);
414 if (likely(disabled == 1)) {
415 pc = preempt_count();
416 ret = __trace_graph_entry(tr, trace, flags, pc);
417 } else {
418 ret = 0;
419 }
420
421 atomic_dec(&data->disabled);
422 local_irq_restore(flags);
423
424 return ret;
425}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}
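
/*
 * Usage sketch: tracing_thresh is set from user space (the tracefs file
 * takes microseconds), e.g.
 *
 *	# echo function_graph > /sys/kernel/tracing/current_tracer
 *	# echo 100 > /sys/kernel/tracing/tracing_thresh
 *
 * after which only functions that ran for at least 100 usecs produce
 * output: entries are suppressed in trace_graph_entry() and shorter
 * returns are dropped here.
 */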

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
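
/*
 * Worked example for the centering above: pid 1755 running "sshd"
 * gives len = 4 ("sshd") + 4 ("1755") + 1 ("-") = 9, so spaces = 5,
 * printed as 2 leading and 3 trailing blanks: "  sshd-1755   ".
 */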

static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
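
/*
 * Why leaf detection matters for the output: an entry immediately
 * followed by its matching return folds into a single line,
 *
 *	1)   0.633 us  |    kmem_cache_alloc();
 *
 * while a non-leaf entry prints an open brace whose closing brace
 * (and duration) comes later:
 *
 *	1)             |    do_page_fault() {
 *	...
 *	1) + 10.291 us |    }
 *
 * (Function names and timings are illustrative.)
 */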

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}
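
/*
 * Worked example: t = 5123456789 ns. do_div() leaves t = 5 (seconds)
 * and returns the 123456789 ns remainder, which /1000 turns into
 * 123456 us, so the column reads "    5.123456 |  ".
 */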

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}
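
/*
 * Example output: with funcgraph-irqs enabled, an interrupt handler in
 * the irqentry text section is bracketed by the arrows printed above,
 *
 *	1)             |    ==========>
 *	1)             |    smp_apic_timer_interrupt() {
 *	...
 *	1)             |    <==========
 *
 * (smp_apic_timer_interrupt() is just an illustrative irq entry point.)
 */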

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
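
/*
 * Worked example: duration = 12345 ns. do_div() leaves 12 us with a
 * 345 ns remainder, so this prints "12.345" (len = 6), then " us "
 * and two more blanks to pad the 8-character number field.
 */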

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}
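
/*
 * The mark printed by trace_find_mark() (defined in trace_output.c)
 * flags slow functions; the legend, as documented for ftrace, is
 * '+' over 10 us, '!' over 100 us, '#' over 1 ms, '*' over 10 ms,
 * '@' over 100 ms and '$' over 1 s.
 */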

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at depth + 1. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is a returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. Since this is the
		 * return from a function, we now want the comments
		 * to display at the same level as the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}
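
/*
 * Example: with funcgraph-tail enabled (or when the matching entry was
 * lost), the closing brace above is annotated with the name of the
 * function it closes, rendered as a C comment after the "}"; otherwise
 * a bare "}" is printed and the reader matches braces by indentation.
 */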

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}
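
/*
 * Example: a trace_printk() issued from a traced function shows up in
 * the graph as a comment line, indented one level deeper than the
 * function it was emitted from, with the message wrapped in the
 * comment markers printed above.
 */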

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
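
/*
 * Usage sketch: once registered, the tracer and its options are driven
 * entirely from tracefs, e.g.
 *
 *	# echo function_graph > /sys/kernel/tracing/current_tracer
 *	# echo funcgraph-proc > /sys/kernel/tracing/trace_options
 *	# cat /sys/kernel/tracing/trace
 */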

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);
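
/*
 * Usage sketch: the file created above caps the traced call depth,
 * e.g.
 *
 *	# echo 3 > /sys/kernel/tracing/max_graph_depth
 *
 * A value of 0 (the default) means no limit.
 */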

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);