/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

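/*
 * Per-CPU state used on the output (iterator) side, not while recording;
 * allocated in graph_trace_open() below.
 */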
struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

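/* Number of columns each nesting level shifts the printed call graph by: */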
#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40

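/* Maximum callgraph depth to trace; 0 means no limit. Set via max_graph_depth. */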
static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq() and others to fill in space in
 * the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};
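/*
 * Illustrative output (exact spacing depends on the enabled flags): the
 * FILL values let the irq arrows line up with the DURATION column, e.g.
 *
 *  1)   ==========> |
 *  1)               |  smp_apic_timer_interrupt() {
 */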

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * curr_ret_stack is an index into the ftrace return stack of the
	 * current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering out
	 * specific functions, the index is made negative by subtracting a
	 * huge value (FTRACE_NOTRACE_DEPTH), and ftrace ignores records
	 * with a negative index. The index is recovered when returning
	 * from the filtered function by adding FTRACE_NOTRACE_DEPTH back,
	 * after which functions are recorded normally again.
	 *
	 * curr_ret_stack is initialized to -1 and gets incremented in this
	 * function. So it can be less than -1 only if the function was
	 * filtered out via ftrace_graph_notrace_addr(), which the user can
	 * set from the set_graph_notrace file in debugfs.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover the index to get the original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
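/* Called from the arch's return trampoline (e.g. return_to_handler on x86). */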
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it when it is nested in an enabled function, or is itself enabled. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of the ret stack negative to indicate that it
	 * should ignore further functions. But it needs its own ret stack
	 * entry to recover the original index in order to continue tracing
	 * after returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

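/* Width needed to print the largest CPU number; computed in init_graph_trace(). */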
static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14
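/*
 * The task/pid field is centered in a TRACE_GRAPH_PROCINFO_LENGTH-wide
 * column by print_graph_proc() below, e.g. "  bash-2794   " (illustrative).
 */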

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}


static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1) migration/0--1 => sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
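/*
 * For example, a duration of 3141 ns is printed as "3.141 us", then padded
 * with spaces so the duration column keeps a fixed width.
 */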

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an execution-time overhead to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 msecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 msecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catch here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. Since this is a leaf
		 * function, keep the comments equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth, thus protecting us from
	 * losing the RETURN entry.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
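/*
 * Worked example (hypothetical depths): an irq entry at depth 2 sets
 * depth_irq = 2; every event deeper than that is then suppressed, and a
 * return at depth <= 2 clears depth_irq back to -1, resuming output.
 */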

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at depth + 1. Since this is the
		 * return from a function, we now want the comments
		 * to display at the same level as the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name.
	 */
	if (func_match) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_putc(s, ' ');
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_puts(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_puts(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event
		 * (the one @field points to), thus @field may become
		 * invalid, so we need to save it first.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can safely be saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
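/*
 * Typical usage from userspace (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	echo 5 > /sys/kernel/debug/tracing/max_graph_depth
 *	cat /sys/kernel/debug/tracing/trace
 */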

static __init int init_graph_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_debugfs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);
1/*
2 *
3 * Function graph tracer.
4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7 *
8 */
9#include <linux/debugfs.h>
10#include <linux/uaccess.h>
11#include <linux/ftrace.h>
12#include <linux/slab.h>
13#include <linux/fs.h>
14
15#include "trace.h"
16#include "trace_output.h"
17
18/* When set, irq functions will be ignored */
19static int ftrace_graph_skip_irqs;
20
21struct fgraph_cpu_data {
22 pid_t last_pid;
23 int depth;
24 int depth_irq;
25 int ignore;
26 unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
27};
28
29struct fgraph_data {
30 struct fgraph_cpu_data __percpu *cpu_data;
31
32 /* Place to preserve last processed entry. */
33 struct ftrace_graph_ent_entry ent;
34 struct ftrace_graph_ret_entry ret;
35 int failed;
36 int cpu;
37};
38
39#define TRACE_GRAPH_INDENT 2
40
41/* Flag options */
42#define TRACE_GRAPH_PRINT_OVERRUN 0x1
43#define TRACE_GRAPH_PRINT_CPU 0x2
44#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
45#define TRACE_GRAPH_PRINT_PROC 0x8
46#define TRACE_GRAPH_PRINT_DURATION 0x10
47#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
48#define TRACE_GRAPH_PRINT_IRQS 0x40
49
50static struct tracer_opt trace_opts[] = {
51 /* Display overruns? (for self-debug purpose) */
52 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
53 /* Display CPU ? */
54 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
55 /* Display Overhead ? */
56 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
57 /* Display proc name/pid */
58 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
59 /* Display duration of execution */
60 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
61 /* Display absolute time of an entry */
62 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
63 /* Display interrupts */
64 { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
65 { } /* Empty entry */
66};
67
68static struct tracer_flags tracer_flags = {
69 /* Don't display overruns and proc by default */
70 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
71 TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
72 .opts = trace_opts
73};
74
75static struct trace_array *graph_array;
76
77/*
78 * DURATION column is being also used to display IRQ signs,
79 * following values are used by print_graph_irq and others
80 * to fill in space into DURATION column.
81 */
82enum {
83 DURATION_FILL_FULL = -1,
84 DURATION_FILL_START = -2,
85 DURATION_FILL_END = -3,
86};
87
88static enum print_line_t
89print_graph_duration(unsigned long long duration, struct trace_seq *s,
90 u32 flags);
91
92/* Add a function return address to the trace stack on thread info.*/
93int
94ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
95 unsigned long frame_pointer)
96{
97 unsigned long long calltime;
98 int index;
99
100 if (!current->ret_stack)
101 return -EBUSY;
102
103 /*
104 * We must make sure the ret_stack is tested before we read
105 * anything else.
106 */
107 smp_rmb();
108
109 /* The return trace stack is full */
110 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
111 atomic_inc(¤t->trace_overrun);
112 return -EBUSY;
113 }
114
115 calltime = trace_clock_local();
116
117 index = ++current->curr_ret_stack;
118 barrier();
119 current->ret_stack[index].ret = ret;
120 current->ret_stack[index].func = func;
121 current->ret_stack[index].calltime = calltime;
122 current->ret_stack[index].subtime = 0;
123 current->ret_stack[index].fp = frame_pointer;
124 *depth = index;
125
126 return 0;
127}
128
129/* Retrieve a function return address to the trace stack on thread info.*/
130static void
131ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
132 unsigned long frame_pointer)
133{
134 int index;
135
136 index = current->curr_ret_stack;
137
138 if (unlikely(index < 0)) {
139 ftrace_graph_stop();
140 WARN_ON(1);
141 /* Might as well panic, otherwise we have no where to go */
142 *ret = (unsigned long)panic;
143 return;
144 }
145
146#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
147 /*
148 * The arch may choose to record the frame pointer used
149 * and check it here to make sure that it is what we expect it
150 * to be. If gcc does not set the place holder of the return
151 * address in the frame pointer, and does a copy instead, then
152 * the function graph trace will fail. This test detects this
153 * case.
154 *
155 * Currently, x86_32 with optimize for size (-Os) makes the latest
156 * gcc do the above.
157 */
158 if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
159 ftrace_graph_stop();
160 WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
161 " from func %ps return to %lx\n",
162 current->ret_stack[index].fp,
163 frame_pointer,
164 (void *)current->ret_stack[index].func,
165 current->ret_stack[index].ret);
166 *ret = (unsigned long)panic;
167 return;
168 }
169#endif
170
171 *ret = current->ret_stack[index].ret;
172 trace->func = current->ret_stack[index].func;
173 trace->calltime = current->ret_stack[index].calltime;
174 trace->overrun = atomic_read(¤t->trace_overrun);
175 trace->depth = index;
176}
177
178/*
179 * Send the trace to the ring-buffer.
180 * @return the original return address.
181 */
182unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
183{
184 struct ftrace_graph_ret trace;
185 unsigned long ret;
186
187 ftrace_pop_return_trace(&trace, &ret, frame_pointer);
188 trace.rettime = trace_clock_local();
189 ftrace_graph_return(&trace);
190 barrier();
191 current->curr_ret_stack--;
192
193 if (unlikely(!ret)) {
194 ftrace_graph_stop();
195 WARN_ON(1);
196 /* Might as well panic. What else to do? */
197 ret = (unsigned long)panic;
198 }
199
200 return ret;
201}
202
203int __trace_graph_entry(struct trace_array *tr,
204 struct ftrace_graph_ent *trace,
205 unsigned long flags,
206 int pc)
207{
208 struct ftrace_event_call *call = &event_funcgraph_entry;
209 struct ring_buffer_event *event;
210 struct ring_buffer *buffer = tr->buffer;
211 struct ftrace_graph_ent_entry *entry;
212
213 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
214 return 0;
215
216 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
217 sizeof(*entry), flags, pc);
218 if (!event)
219 return 0;
220 entry = ring_buffer_event_data(event);
221 entry->graph_ent = *trace;
222 if (!filter_current_check_discard(buffer, call, entry, event))
223 ring_buffer_unlock_commit(buffer, event);
224
225 return 1;
226}
227
228static inline int ftrace_graph_ignore_irqs(void)
229{
230 if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
231 return 0;
232
233 return in_irq();
234}
235
236int trace_graph_entry(struct ftrace_graph_ent *trace)
237{
238 struct trace_array *tr = graph_array;
239 struct trace_array_cpu *data;
240 unsigned long flags;
241 long disabled;
242 int ret;
243 int cpu;
244 int pc;
245
246 if (!ftrace_trace_task(current))
247 return 0;
248
249 /* trace it when it is-nested-in or is a function enabled. */
250 if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
251 ftrace_graph_ignore_irqs())
252 return 0;
253
254 local_irq_save(flags);
255 cpu = raw_smp_processor_id();
256 data = tr->data[cpu];
257 disabled = atomic_inc_return(&data->disabled);
258 if (likely(disabled == 1)) {
259 pc = preempt_count();
260 ret = __trace_graph_entry(tr, trace, flags, pc);
261 } else {
262 ret = 0;
263 }
264
265 atomic_dec(&data->disabled);
266 local_irq_restore(flags);
267
268 return ret;
269}
270
271int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
272{
273 if (tracing_thresh)
274 return 1;
275 else
276 return trace_graph_entry(trace);
277}
278
279static void
280__trace_graph_function(struct trace_array *tr,
281 unsigned long ip, unsigned long flags, int pc)
282{
283 u64 time = trace_clock_local();
284 struct ftrace_graph_ent ent = {
285 .func = ip,
286 .depth = 0,
287 };
288 struct ftrace_graph_ret ret = {
289 .func = ip,
290 .depth = 0,
291 .calltime = time,
292 .rettime = time,
293 };
294
295 __trace_graph_entry(tr, &ent, flags, pc);
296 __trace_graph_return(tr, &ret, flags, pc);
297}
298
299void
300trace_graph_function(struct trace_array *tr,
301 unsigned long ip, unsigned long parent_ip,
302 unsigned long flags, int pc)
303{
304 __trace_graph_function(tr, ip, flags, pc);
305}
306
307void __trace_graph_return(struct trace_array *tr,
308 struct ftrace_graph_ret *trace,
309 unsigned long flags,
310 int pc)
311{
312 struct ftrace_event_call *call = &event_funcgraph_exit;
313 struct ring_buffer_event *event;
314 struct ring_buffer *buffer = tr->buffer;
315 struct ftrace_graph_ret_entry *entry;
316
317 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
318 return;
319
320 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
321 sizeof(*entry), flags, pc);
322 if (!event)
323 return;
324 entry = ring_buffer_event_data(event);
325 entry->ret = *trace;
326 if (!filter_current_check_discard(buffer, call, entry, event))
327 ring_buffer_unlock_commit(buffer, event);
328}
329
330void trace_graph_return(struct ftrace_graph_ret *trace)
331{
332 struct trace_array *tr = graph_array;
333 struct trace_array_cpu *data;
334 unsigned long flags;
335 long disabled;
336 int cpu;
337 int pc;
338
339 local_irq_save(flags);
340 cpu = raw_smp_processor_id();
341 data = tr->data[cpu];
342 disabled = atomic_inc_return(&data->disabled);
343 if (likely(disabled == 1)) {
344 pc = preempt_count();
345 __trace_graph_return(tr, trace, flags, pc);
346 }
347 atomic_dec(&data->disabled);
348 local_irq_restore(flags);
349}
350
351void set_graph_array(struct trace_array *tr)
352{
353 graph_array = tr;
354
355 /* Make graph_array visible before we start tracing */
356
357 smp_mb();
358}
359
360void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
361{
362 if (tracing_thresh &&
363 (trace->rettime - trace->calltime < tracing_thresh))
364 return;
365 else
366 trace_graph_return(trace);
367}
368
369static int graph_trace_init(struct trace_array *tr)
370{
371 int ret;
372
373 set_graph_array(tr);
374 if (tracing_thresh)
375 ret = register_ftrace_graph(&trace_graph_thresh_return,
376 &trace_graph_thresh_entry);
377 else
378 ret = register_ftrace_graph(&trace_graph_return,
379 &trace_graph_entry);
380 if (ret)
381 return ret;
382 tracing_start_cmdline_record();
383
384 return 0;
385}
386
387static void graph_trace_reset(struct trace_array *tr)
388{
389 tracing_stop_cmdline_record();
390 unregister_ftrace_graph();
391}
392
393static int max_bytes_for_cpu;
394
395static enum print_line_t
396print_graph_cpu(struct trace_seq *s, int cpu)
397{
398 int ret;
399
400 /*
401 * Start with a space character - to make it stand out
402 * to the right a bit when trace output is pasted into
403 * email:
404 */
405 ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
406 if (!ret)
407 return TRACE_TYPE_PARTIAL_LINE;
408
409 return TRACE_TYPE_HANDLED;
410}
411
412#define TRACE_GRAPH_PROCINFO_LENGTH 14
413
414static enum print_line_t
415print_graph_proc(struct trace_seq *s, pid_t pid)
416{
417 char comm[TASK_COMM_LEN];
418 /* sign + log10(MAX_INT) + '\0' */
419 char pid_str[11];
420 int spaces = 0;
421 int ret;
422 int len;
423 int i;
424
425 trace_find_cmdline(pid, comm);
426 comm[7] = '\0';
427 sprintf(pid_str, "%d", pid);
428
429 /* 1 stands for the "-" character */
430 len = strlen(comm) + strlen(pid_str) + 1;
431
432 if (len < TRACE_GRAPH_PROCINFO_LENGTH)
433 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
434
435 /* First spaces to align center */
436 for (i = 0; i < spaces / 2; i++) {
437 ret = trace_seq_printf(s, " ");
438 if (!ret)
439 return TRACE_TYPE_PARTIAL_LINE;
440 }
441
442 ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
443 if (!ret)
444 return TRACE_TYPE_PARTIAL_LINE;
445
446 /* Last spaces to align center */
447 for (i = 0; i < spaces - (spaces / 2); i++) {
448 ret = trace_seq_printf(s, " ");
449 if (!ret)
450 return TRACE_TYPE_PARTIAL_LINE;
451 }
452 return TRACE_TYPE_HANDLED;
453}
454
455
456static enum print_line_t
457print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
458{
459 if (!trace_seq_putc(s, ' '))
460 return 0;
461
462 return trace_print_lat_fmt(s, entry);
463}
464
465/* If the pid changed since the last trace, output this event */
466static enum print_line_t
467verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
468{
469 pid_t prev_pid;
470 pid_t *last_pid;
471 int ret;
472
473 if (!data)
474 return TRACE_TYPE_HANDLED;
475
476 last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
477
478 if (*last_pid == pid)
479 return TRACE_TYPE_HANDLED;
480
481 prev_pid = *last_pid;
482 *last_pid = pid;
483
484 if (prev_pid == -1)
485 return TRACE_TYPE_HANDLED;
486/*
487 * Context-switch trace line:
488
489 ------------------------------------------
490 | 1) migration/0--1 => sshd-1755
491 ------------------------------------------
492
493 */
494 ret = trace_seq_printf(s,
495 " ------------------------------------------\n");
496 if (!ret)
497 return TRACE_TYPE_PARTIAL_LINE;
498
499 ret = print_graph_cpu(s, cpu);
500 if (ret == TRACE_TYPE_PARTIAL_LINE)
501 return TRACE_TYPE_PARTIAL_LINE;
502
503 ret = print_graph_proc(s, prev_pid);
504 if (ret == TRACE_TYPE_PARTIAL_LINE)
505 return TRACE_TYPE_PARTIAL_LINE;
506
507 ret = trace_seq_printf(s, " => ");
508 if (!ret)
509 return TRACE_TYPE_PARTIAL_LINE;
510
511 ret = print_graph_proc(s, pid);
512 if (ret == TRACE_TYPE_PARTIAL_LINE)
513 return TRACE_TYPE_PARTIAL_LINE;
514
515 ret = trace_seq_printf(s,
516 "\n ------------------------------------------\n\n");
517 if (!ret)
518 return TRACE_TYPE_PARTIAL_LINE;
519
520 return TRACE_TYPE_HANDLED;
521}
522
523static struct ftrace_graph_ret_entry *
524get_return_for_leaf(struct trace_iterator *iter,
525 struct ftrace_graph_ent_entry *curr)
526{
527 struct fgraph_data *data = iter->private;
528 struct ring_buffer_iter *ring_iter = NULL;
529 struct ring_buffer_event *event;
530 struct ftrace_graph_ret_entry *next;
531
532 /*
533 * If the previous output failed to write to the seq buffer,
534 * then we just reuse the data from before.
535 */
536 if (data && data->failed) {
537 curr = &data->ent;
538 next = &data->ret;
539 } else {
540
541 ring_iter = iter->buffer_iter[iter->cpu];
542
543 /* First peek to compare current entry and the next one */
544 if (ring_iter)
545 event = ring_buffer_iter_peek(ring_iter, NULL);
546 else {
547 /*
548 * We need to consume the current entry to see
549 * the next one.
550 */
551 ring_buffer_consume(iter->tr->buffer, iter->cpu,
552 NULL, NULL);
553 event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
554 NULL, NULL);
555 }
556
557 if (!event)
558 return NULL;
559
560 next = ring_buffer_event_data(event);
561
562 if (data) {
563 /*
564 * Save current and next entries for later reference
565 * if the output fails.
566 */
567 data->ent = *curr;
568 /*
569 * If the next event is not a return type, then
570 * we only care about what type it is. Otherwise we can
571 * safely copy the entire event.
572 */
573 if (next->ent.type == TRACE_GRAPH_RET)
574 data->ret = *next;
575 else
576 data->ret.ent.type = next->ent.type;
577 }
578 }
579
580 if (next->ent.type != TRACE_GRAPH_RET)
581 return NULL;
582
583 if (curr->ent.pid != next->ent.pid ||
584 curr->graph_ent.func != next->ret.func)
585 return NULL;
586
587 /* this is a leaf, now advance the iterator */
588 if (ring_iter)
589 ring_buffer_read(ring_iter, NULL);
590
591 return next;
592}
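/*
 * When the entry is immediately followed by its own return event, the
 * caller can collapse the pair into a single line, e.g. an illustrative
 *   1)   1.234 us   |        kfree();
 * instead of an open/close bracket pair around an empty body.
 */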
593
594static int print_graph_abs_time(u64 t, struct trace_seq *s)
595{
596 unsigned long usecs_rem;
597
598 usecs_rem = do_div(t, NSEC_PER_SEC);
599 usecs_rem /= 1000;
600
601 return trace_seq_printf(s, "%5lu.%06lu | ",
602 (unsigned long)t, usecs_rem);
603}
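/*
 * do_div() leaves the whole seconds in t and returns the nanosecond
 * remainder, so e.g. t = 105123456789 ns prints as "  105.123456 | ".
 */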
604
605static enum print_line_t
606print_graph_irq(struct trace_iterator *iter, unsigned long addr,
607 enum trace_type type, int cpu, pid_t pid, u32 flags)
608{
609 int ret;
610 struct trace_seq *s = &iter->seq;
611
612 if (addr < (unsigned long)__irqentry_text_start ||
613 addr >= (unsigned long)__irqentry_text_end)
614 return TRACE_TYPE_UNHANDLED;
615
616 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
617 /* Absolute time */
618 if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
619 ret = print_graph_abs_time(iter->ts, s);
620 if (!ret)
621 return TRACE_TYPE_PARTIAL_LINE;
622 }
623
624 /* Cpu */
625 if (flags & TRACE_GRAPH_PRINT_CPU) {
626 ret = print_graph_cpu(s, cpu);
627 if (ret == TRACE_TYPE_PARTIAL_LINE)
628 return TRACE_TYPE_PARTIAL_LINE;
629 }
630
631 /* Proc */
632 if (flags & TRACE_GRAPH_PRINT_PROC) {
633 ret = print_graph_proc(s, pid);
634 if (ret == TRACE_TYPE_PARTIAL_LINE)
635 return TRACE_TYPE_PARTIAL_LINE;
636 ret = trace_seq_printf(s, " | ");
637 if (!ret)
638 return TRACE_TYPE_PARTIAL_LINE;
639 }
640 }
641
642 /* No overhead */
643 ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
644 if (ret != TRACE_TYPE_HANDLED)
645 return ret;
646
647 if (type == TRACE_GRAPH_ENT)
648 ret = trace_seq_printf(s, "==========>");
649 else
650 ret = trace_seq_printf(s, "<==========");
651
652 if (!ret)
653 return TRACE_TYPE_PARTIAL_LINE;
654
655 ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
656 if (ret != TRACE_TYPE_HANDLED)
657 return ret;
658
659 ret = trace_seq_printf(s, "\n");
660
661 if (!ret)
662 return TRACE_TYPE_PARTIAL_LINE;
663 return TRACE_TYPE_HANDLED;
664}
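/*
 * Example output (illustrative): an interrupt entry appears as a
 * "==========>" line and the matching return as "<==========", padded
 * into the DURATION column by the FILL_START/FILL_END fills above.
 */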
665
666enum print_line_t
667trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
668{
669 unsigned long nsecs_rem = do_div(duration, 1000);
670 /* log10(ULONG_MAX) + '\0' */
671 char msecs_str[21];
672 char nsecs_str[5];
673 int ret, len;
674 int i;
675
676 sprintf(msecs_str, "%lu", (unsigned long) duration);
677
678 /* Print msecs */
679 ret = trace_seq_printf(s, "%s", msecs_str);
680 if (!ret)
681 return TRACE_TYPE_PARTIAL_LINE;
682
683 len = strlen(msecs_str);
684
685 /* Print nsecs (we don't want to exceed 7 digits) */
686 if (len < 7) {
687 size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
688
689 snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
690 ret = trace_seq_printf(s, ".%s", nsecs_str);
691 if (!ret)
692 return TRACE_TYPE_PARTIAL_LINE;
693 len += strlen(nsecs_str);
694 }
695
696 ret = trace_seq_printf(s, " us ");
697 if (!ret)
698 return TRACE_TYPE_PARTIAL_LINE;
699
700 /* Print remaining spaces to fit the row's width */
701 for (i = len; i < 7; i++) {
702 ret = trace_seq_printf(s, " ");
703 if (!ret)
704 return TRACE_TYPE_PARTIAL_LINE;
705 }
706 return TRACE_TYPE_HANDLED;
707}
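/*
 * Example: a 12654 ns duration prints as "12.654 us" plus padding so
 * the numeric part stays seven digits wide; once the microsecond value
 * itself needs seven digits (about one second), the fractional part is
 * dropped.
 */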
708
709static enum print_line_t
710print_graph_duration(unsigned long long duration, struct trace_seq *s,
711 u32 flags)
712{
713 int ret = -1;
714
715 if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
716 !(trace_flags & TRACE_ITER_CONTEXT_INFO))
717 return TRACE_TYPE_HANDLED;
718
719 /* No real data, just filling the column with spaces */
720 switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
721 case FLAGS_FILL_FULL:
722 ret = trace_seq_printf(s, "              |  ");
723 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
724 case FLAGS_FILL_START:
725 ret = trace_seq_printf(s, "  ");
726 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
727 case FLAGS_FILL_END:
728 ret = trace_seq_printf(s, " |");
729 return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
730 }
731
732 /* Signal an overhead of execution time to the output */
733 if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
734 /* Duration exceeded 100 usecs */
735 if (duration > 100000ULL)
736 ret = trace_seq_printf(s, "! ");
737 /* Duration exceeded 10 usecs */
738 else if (duration > 10000ULL)
739 ret = trace_seq_printf(s, "+ ");
740 }
741
742 /*
743 * The -1 means we either did not exceed the duration thresholds
744 * or we don't want to print out the overhead. Either way we need
745 * to fill out the space.
746 */
747 if (ret == -1)
748 ret = trace_seq_printf(s, "  ");
749
750 /* Catch any failure that happened above */
751 if (!ret)
752 return TRACE_TYPE_PARTIAL_LINE;
753
754 ret = trace_print_graph_duration(duration, s);
755 if (ret != TRACE_TYPE_HANDLED)
756 return ret;
757
758 ret = trace_seq_printf(s, "| ");
759 if (!ret)
760 return TRACE_TYPE_PARTIAL_LINE;
761
762 return TRACE_TYPE_HANDLED;
763}
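/*
 * Example of the assembled column (illustrative): a 205500 ns leaf
 * renders roughly as "! 205.500 us  | ", the "! " marking it as over
 * the 100 usec overhead threshold.
 */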
764
765/* Case of a leaf function on its call entry */
766static enum print_line_t
767print_graph_entry_leaf(struct trace_iterator *iter,
768 struct ftrace_graph_ent_entry *entry,
769 struct ftrace_graph_ret_entry *ret_entry,
770 struct trace_seq *s, u32 flags)
771{
772 struct fgraph_data *data = iter->private;
773 struct ftrace_graph_ret *graph_ret;
774 struct ftrace_graph_ent *call;
775 unsigned long long duration;
776 int ret;
777 int i;
778
779 graph_ret = &ret_entry->ret;
780 call = &entry->graph_ent;
781 duration = graph_ret->rettime - graph_ret->calltime;
782
783 if (data) {
784 struct fgraph_cpu_data *cpu_data;
785 int cpu = iter->cpu;
786
787 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
788
789 /*
790 * Comments display at + 1 to depth. Since
791 * this is a leaf function, keep the comments
792 * equal to this depth.
793 */
794 cpu_data->depth = call->depth - 1;
795
796 /* No need to keep this function around for this depth */
797 if (call->depth < FTRACE_RETFUNC_DEPTH)
798 cpu_data->enter_funcs[call->depth] = 0;
799 }
800
801 /* Overhead and duration */
802 ret = print_graph_duration(duration, s, flags);
803 if (ret == TRACE_TYPE_PARTIAL_LINE)
804 return TRACE_TYPE_PARTIAL_LINE;
805
806 /* Function */
807 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
808 ret = trace_seq_printf(s, " ");
809 if (!ret)
810 return TRACE_TYPE_PARTIAL_LINE;
811 }
812
813 ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
814 if (!ret)
815 return TRACE_TYPE_PARTIAL_LINE;
816
817 return TRACE_TYPE_HANDLED;
818}
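/*
 * Example of a leaf line (function name illustrative):
 *   1) + 12.654 us   |        kfree();
 * The "+ " overhead marker and the duration come from
 * print_graph_duration() above.
 */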
819
820static enum print_line_t
821print_graph_entry_nested(struct trace_iterator *iter,
822 struct ftrace_graph_ent_entry *entry,
823 struct trace_seq *s, int cpu, u32 flags)
824{
825 struct ftrace_graph_ent *call = &entry->graph_ent;
826 struct fgraph_data *data = iter->private;
827 int ret;
828 int i;
829
830 if (data) {
831 struct fgraph_cpu_data *cpu_data;
832 int cpu = iter->cpu;
833
834 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
835 cpu_data->depth = call->depth;
836
837 /* Save this function pointer to see if the exit matches */
838 if (call->depth < FTRACE_RETFUNC_DEPTH)
839 cpu_data->enter_funcs[call->depth] = call->func;
840 }
841
842 /* No time */
843 ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
844 if (ret != TRACE_TYPE_HANDLED)
845 return ret;
846
847 /* Function */
848 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
849 ret = trace_seq_printf(s, " ");
850 if (!ret)
851 return TRACE_TYPE_PARTIAL_LINE;
852 }
853
854 ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
855 if (!ret)
856 return TRACE_TYPE_PARTIAL_LINE;
857
858 /*
859 * we already consumed the current entry to check the next one
860 * and see if this is a leaf.
861 */
862 return TRACE_TYPE_NO_CONSUME;
863}
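/*
 * A nested entry opens a bracket and its duration is not known yet, so
 * the column is space-filled, e.g. (illustrative):
 *   1)               |        kmem_cache_free() {
 */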
864
865static enum print_line_t
866print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
867 int type, unsigned long addr, u32 flags)
868{
869 struct fgraph_data *data = iter->private;
870 struct trace_entry *ent = iter->ent;
871 int cpu = iter->cpu;
872 int ret;
873
874 /* Pid */
875 if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
876 return TRACE_TYPE_PARTIAL_LINE;
877
878 if (type) {
879 /* Interrupt */
880 ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
881 if (ret == TRACE_TYPE_PARTIAL_LINE)
882 return TRACE_TYPE_PARTIAL_LINE;
883 }
884
885 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
886 return 0;
887
888 /* Absolute time */
889 if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
890 ret = print_graph_abs_time(iter->ts, s);
891 if (!ret)
892 return TRACE_TYPE_PARTIAL_LINE;
893 }
894
895 /* Cpu */
896 if (flags & TRACE_GRAPH_PRINT_CPU) {
897 ret = print_graph_cpu(s, cpu);
898 if (ret == TRACE_TYPE_PARTIAL_LINE)
899 return TRACE_TYPE_PARTIAL_LINE;
900 }
901
902 /* Proc */
903 if (flags & TRACE_GRAPH_PRINT_PROC) {
904 ret = print_graph_proc(s, ent->pid);
905 if (ret == TRACE_TYPE_PARTIAL_LINE)
906 return TRACE_TYPE_PARTIAL_LINE;
907
908 ret = trace_seq_printf(s, " | ");
909 if (!ret)
910 return TRACE_TYPE_PARTIAL_LINE;
911 }
912
913 /* Latency format */
914 if (trace_flags & TRACE_ITER_LATENCY_FMT) {
915 ret = print_graph_lat_fmt(s, ent);
916 if (ret == TRACE_TYPE_PARTIAL_LINE)
917 return TRACE_TYPE_PARTIAL_LINE;
918 }
919
920 return 0;
921}
922
923/*
924 * Entry check for irq code
925 *
926 * returns 1 if
927 * - we are inside irq code
928 * - we just entered irq code
929 *
930 * returns 0 if
931 * - funcgraph-irqs option is set
932 * - we are not inside irq code
933 */
934static int
935check_irq_entry(struct trace_iterator *iter, u32 flags,
936 unsigned long addr, int depth)
937{
938 int cpu = iter->cpu;
939 int *depth_irq;
940 struct fgraph_data *data = iter->private;
941
942 /*
943 * If we are either displaying irqs, or we got called as
944 * a graph event and private data does not exist,
945 * then we bypass the irq check.
946 */
947 if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
948 (!data))
949 return 0;
950
951 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
952
953 /*
954 * We are inside the irq code
955 */
956 if (*depth_irq >= 0)
957 return 1;
958
959 if ((addr < (unsigned long)__irqentry_text_start) ||
960 (addr >= (unsigned long)__irqentry_text_end))
961 return 0;
962
963 /*
964 * We are entering irq code.
965 */
966 *depth_irq = depth;
967 return 1;
968}
969
970/*
971 * Return check for irq code
972 *
973 * returns 1 if
974 * - we are inside irq code
975 * - we just left irq code
976 *
977 * returns 0 if
978 * - funcgraph-irqs option is set
979 * - we are not inside irq code
980 */
981static int
982check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
983{
984 int cpu = iter->cpu;
985 int *depth_irq;
986 struct fgraph_data *data = iter->private;
987
988 /*
989 * If we are either displaying irqs, or we got called as
990 * a graph event and private data does not exist,
991 * then we bypass the irq check.
992 */
993 if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
994 (!data))
995 return 0;
996
997 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
998
999 /*
1000 * We are not inside the irq code.
1001 */
1002 if (*depth_irq == -1)
1003 return 0;
1004
1005 /*
1006 * We are inside the irq code, and this return takes us back to
1007 * (or above) the depth at which we entered it. Don't trace it and
1008 * clear the entry depth, since we are out of irq code.
1009 *
1010 * This condition ensures that we 'leave the irq code' once
1011 * we are out of the entry depth, protecting us from losing
1012 * the RETURN entry.
1013 */
1014 if (*depth_irq >= depth) {
1015 *depth_irq = -1;
1016 return 1;
1017 }
1018
1019 /*
1020 * We are inside the irq code, and this is not the entry.
1021 */
1022 return 1;
1023}
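/*
 * Together with check_irq_entry() above, this suppresses every event
 * from the first function inside the irq entry text section down to
 * the matching return, so with the funcgraph-irqs option cleared an
 * interrupt produces no output at all.
 */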
1024
1025static enum print_line_t
1026print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
1027 struct trace_iterator *iter, u32 flags)
1028{
1029 struct fgraph_data *data = iter->private;
1030 struct ftrace_graph_ent *call = &field->graph_ent;
1031 struct ftrace_graph_ret_entry *leaf_ret;
1032 enum print_line_t ret;
1033 int cpu = iter->cpu;
1034
1035 if (check_irq_entry(iter, flags, call->func, call->depth))
1036 return TRACE_TYPE_HANDLED;
1037
1038 if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
1039 return TRACE_TYPE_PARTIAL_LINE;
1040
1041 leaf_ret = get_return_for_leaf(iter, field);
1042 if (leaf_ret)
1043 ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
1044 else
1045 ret = print_graph_entry_nested(iter, field, s, cpu, flags);
1046
1047 if (data) {
1048 /*
1049 * If we failed to write our output, then we need to make
1050 * note of it. Because we already consumed our entry.
1051 */
1052 if (s->full) {
1053 data->failed = 1;
1054 data->cpu = cpu;
1055 } else
1056 data->failed = 0;
1057 }
1058
1059 return ret;
1060}
1061
1062static enum print_line_t
1063print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1064 struct trace_entry *ent, struct trace_iterator *iter,
1065 u32 flags)
1066{
1067 unsigned long long duration = trace->rettime - trace->calltime;
1068 struct fgraph_data *data = iter->private;
1069 pid_t pid = ent->pid;
1070 int cpu = iter->cpu;
1071 int func_match = 1;
1072 int ret;
1073 int i;
1074
1075 if (check_irq_return(iter, flags, trace->depth))
1076 return TRACE_TYPE_HANDLED;
1077
1078 if (data) {
1079 struct fgraph_cpu_data *cpu_data;
1080 int cpu = iter->cpu;
1081
1082 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1083
1084 /*
1085 * Comments display at + 1 to depth. This is the
1086 * return from a function, we now want the comments
1087 * to display at the same level of the bracket.
1088 */
1089 cpu_data->depth = trace->depth - 1;
1090
1091 if (trace->depth < FTRACE_RETFUNC_DEPTH) {
1092 if (cpu_data->enter_funcs[trace->depth] != trace->func)
1093 func_match = 0;
1094 cpu_data->enter_funcs[trace->depth] = 0;
1095 }
1096 }
1097
1098 if (print_graph_prologue(iter, s, 0, 0, flags))
1099 return TRACE_TYPE_PARTIAL_LINE;
1100
1101 /* Overhead and duration */
1102 ret = print_graph_duration(duration, s, flags);
1103 if (ret == TRACE_TYPE_PARTIAL_LINE)
1104 return TRACE_TYPE_PARTIAL_LINE;
1105
1106 /* Closing brace */
1107 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
1108 ret = trace_seq_printf(s, " ");
1109 if (!ret)
1110 return TRACE_TYPE_PARTIAL_LINE;
1111 }
1112
1113 /*
1114 * If the return function does not have a matching entry,
1115 * then the entry was lost. Instead of just printing
1116 * the '}' and letting the user guess what function this
1117 * belongs to, write out the function name.
1118 */
1119 if (func_match) {
1120 ret = trace_seq_printf(s, "}\n");
1121 if (!ret)
1122 return TRACE_TYPE_PARTIAL_LINE;
1123 } else {
1124 ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
1125 if (!ret)
1126 return TRACE_TYPE_PARTIAL_LINE;
1127 }
1128
1129 /* Overrun */
1130 if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
1131 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
1132 trace->overrun);
1133 if (!ret)
1134 return TRACE_TYPE_PARTIAL_LINE;
1135 }
1136
1137 ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1138 cpu, pid, flags);
1139 if (ret == TRACE_TYPE_PARTIAL_LINE)
1140 return TRACE_TYPE_PARTIAL_LINE;
1141
1142 return TRACE_TYPE_HANDLED;
1143}
1144
1145static enum print_line_t
1146print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1147 struct trace_iterator *iter, u32 flags)
1148{
1149 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1150 struct fgraph_data *data = iter->private;
1151 struct trace_event *event;
1152 int depth = 0;
1153 int ret;
1154 int i;
1155
1156 if (data)
1157 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1158
1159 if (print_graph_prologue(iter, s, 0, 0, flags))
1160 return TRACE_TYPE_PARTIAL_LINE;
1161
1162 /* No time */
1163 ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
1164 if (ret != TRACE_TYPE_HANDLED)
1165 return ret;
1166
1167 /* Indentation */
1168 if (depth > 0)
1169 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
1170 ret = trace_seq_printf(s, " ");
1171 if (!ret)
1172 return TRACE_TYPE_PARTIAL_LINE;
1173 }
1174
1175 /* The comment */
1176 ret = trace_seq_printf(s, "/* ");
1177 if (!ret)
1178 return TRACE_TYPE_PARTIAL_LINE;
1179
1180 switch (iter->ent->type) {
1181 case TRACE_BPRINT:
1182 ret = trace_print_bprintk_msg_only(iter);
1183 if (ret != TRACE_TYPE_HANDLED)
1184 return ret;
1185 break;
1186 case TRACE_PRINT:
1187 ret = trace_print_printk_msg_only(iter);
1188 if (ret != TRACE_TYPE_HANDLED)
1189 return ret;
1190 break;
1191 default:
1192 event = ftrace_find_event(ent->type);
1193 if (!event)
1194 return TRACE_TYPE_UNHANDLED;
1195
1196 ret = event->funcs->trace(iter, sym_flags, event);
1197 if (ret != TRACE_TYPE_HANDLED)
1198 return ret;
1199 }
1200
1201 /* Strip ending newline */
1202 if (s->buffer[s->len - 1] == '\n') {
1203 s->buffer[s->len - 1] = '\0';
1204 s->len--;
1205 }
1206
1207 ret = trace_seq_printf(s, " */\n");
1208 if (!ret)
1209 return TRACE_TYPE_PARTIAL_LINE;
1210
1211 return TRACE_TYPE_HANDLED;
1212}
1213
1214
1215enum print_line_t
1216print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1217{
1218 struct ftrace_graph_ent_entry *field;
1219 struct fgraph_data *data = iter->private;
1220 struct trace_entry *entry = iter->ent;
1221 struct trace_seq *s = &iter->seq;
1222 int cpu = iter->cpu;
1223 int ret;
1224
1225 if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1226 per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1227 return TRACE_TYPE_HANDLED;
1228 }
1229
1230 /*
1231 * If the last output failed, there's a possibility we need
1232 * to print out the missing entry which would never go out.
1233 */
1234 if (data && data->failed) {
1235 field = &data->ent;
1236 iter->cpu = data->cpu;
1237 ret = print_graph_entry(field, s, iter, flags);
1238 if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1239 per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1240 ret = TRACE_TYPE_NO_CONSUME;
1241 }
1242 iter->cpu = cpu;
1243 return ret;
1244 }
1245
1246 switch (entry->type) {
1247 case TRACE_GRAPH_ENT: {
1248 /*
1249 * print_graph_entry() may consume the current event,
1250 * thus @field may become invalid, so we need to save it.
1251 * sizeof(struct ftrace_graph_ent_entry) is very small,
1252 * so it can safely be saved on the stack.
1253 */
1254 struct ftrace_graph_ent_entry saved;
1255 trace_assign_type(field, entry);
1256 saved = *field;
1257 return print_graph_entry(&saved, s, iter, flags);
1258 }
1259 case TRACE_GRAPH_RET: {
1260 struct ftrace_graph_ret_entry *field;
1261 trace_assign_type(field, entry);
1262 return print_graph_return(&field->ret, s, entry, iter, flags);
1263 }
1264 case TRACE_STACK:
1265 case TRACE_FN:
1266 /* dont trace stack and functions as comments */
1267 return TRACE_TYPE_UNHANDLED;
1268
1269 default:
1270 return print_graph_comment(s, entry, iter, flags);
1271 }
1272
1273 return TRACE_TYPE_HANDLED;
1274}
1275
1276static enum print_line_t
1277print_graph_function(struct trace_iterator *iter)
1278{
1279 return print_graph_function_flags(iter, tracer_flags.val);
1280}
1281
1282static enum print_line_t
1283print_graph_function_event(struct trace_iterator *iter, int flags,
1284 struct trace_event *event)
1285{
1286 return print_graph_function(iter);
1287}
1288
1289static void print_lat_header(struct seq_file *s, u32 flags)
1290{
1291 static const char spaces[] = "                " /* 16 spaces */
1292 "    " /* 4 spaces */
1293 "                 "; /* 17 spaces */
1294 int size = 0;
1295
1296 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1297 size += 16;
1298 if (flags & TRACE_GRAPH_PRINT_CPU)
1299 size += 4;
1300 if (flags & TRACE_GRAPH_PRINT_PROC)
1301 size += 17;
1302
1303 seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1304 seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1305 seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1306 seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1307 seq_printf(s, "#%.*s||| /                      \n", size, spaces);
1308}
1309
1310static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
1311{
1312 int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
1313
1314 if (lat)
1315 print_lat_header(s, flags);
1316
1317 /* 1st line */
1318 seq_printf(s, "#");
1319 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1320 seq_printf(s, "     TIME       ");
1321 if (flags & TRACE_GRAPH_PRINT_CPU)
1322 seq_printf(s, " CPU");
1323 if (flags & TRACE_GRAPH_PRINT_PROC)
1324 seq_printf(s, "  TASK/PID       ");
1325 if (lat)
1326 seq_printf(s, "||||");
1327 if (flags & TRACE_GRAPH_PRINT_DURATION)
1328 seq_printf(s, "  DURATION   ");
1329 seq_printf(s, "               FUNCTION CALLS\n");
1330
1331 /* 2nd line */
1332 seq_printf(s, "#");
1333 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1334 seq_printf(s, "      |         ");
1335 if (flags & TRACE_GRAPH_PRINT_CPU)
1336 seq_printf(s, " |  ");
1337 if (flags & TRACE_GRAPH_PRINT_PROC)
1338 seq_printf(s, "   |    |        ");
1339 if (lat)
1340 seq_printf(s, "||||");
1341 if (flags & TRACE_GRAPH_PRINT_DURATION)
1342 seq_printf(s, "   |   |      ");
1343 seq_printf(s, "               |   |   |   |\n");
1344}
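/*
 * With the default flags the header comes out roughly as:
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */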
1345
1346void print_graph_headers(struct seq_file *s)
1347{
1348 print_graph_headers_flags(s, tracer_flags.val);
1349}
1350
1351void print_graph_headers_flags(struct seq_file *s, u32 flags)
1352{
1353 struct trace_iterator *iter = s->private;
1354
1355 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
1356 return;
1357
1358 if (trace_flags & TRACE_ITER_LATENCY_FMT) {
1359 /* print nothing if the buffers are empty */
1360 if (trace_empty(iter))
1361 return;
1362
1363 print_trace_header(s, iter);
1364 }
1365
1366 __print_graph_headers_flags(s, flags);
1367}
1368
1369void graph_trace_open(struct trace_iterator *iter)
1370{
1371 /* pid and depth on the last trace processed */
1372 struct fgraph_data *data;
1373 int cpu;
1374
1375 iter->private = NULL;
1376
1377 data = kzalloc(sizeof(*data), GFP_KERNEL);
1378 if (!data)
1379 goto out_err;
1380
1381 data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
1382 if (!data->cpu_data)
1383 goto out_err_free;
1384
1385 for_each_possible_cpu(cpu) {
1386 pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1387 int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1388 int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1389 int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1390
1391 *pid = -1;
1392 *depth = 0;
1393 *ignore = 0;
1394 *depth_irq = -1;
1395 }
1396
1397 iter->private = data;
1398
1399 return;
1400
1401 out_err_free:
1402 kfree(data);
1403 out_err:
1404 pr_warning("function graph tracer: not enough memory\n");
1405}
1406
1407void graph_trace_close(struct trace_iterator *iter)
1408{
1409 struct fgraph_data *data = iter->private;
1410
1411 if (data) {
1412 free_percpu(data->cpu_data);
1413 kfree(data);
1414 }
1415}
1416
1417static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
1418{
1419 if (bit == TRACE_GRAPH_PRINT_IRQS)
1420 ftrace_graph_skip_irqs = !set;
1421
1422 return 0;
1423}
1424
1425static struct trace_event_functions graph_functions = {
1426 .trace = print_graph_function_event,
1427};
1428
1429static struct trace_event graph_trace_entry_event = {
1430 .type = TRACE_GRAPH_ENT,
1431 .funcs = &graph_functions,
1432};
1433
1434static struct trace_event graph_trace_ret_event = {
1435 .type = TRACE_GRAPH_RET,
1436 .funcs = &graph_functions
1437};
1438
1439static struct tracer graph_trace __read_mostly = {
1440 .name = "function_graph",
1441 .open = graph_trace_open,
1442 .pipe_open = graph_trace_open,
1443 .close = graph_trace_close,
1444 .pipe_close = graph_trace_close,
1445 .wait_pipe = poll_wait_pipe,
1446 .init = graph_trace_init,
1447 .reset = graph_trace_reset,
1448 .print_line = print_graph_function,
1449 .print_header = print_graph_headers,
1450 .flags = &tracer_flags,
1451 .set_flag = func_graph_set_flag,
1452#ifdef CONFIG_FTRACE_SELFTEST
1453 .selftest = trace_selftest_startup_function_graph,
1454#endif
1455};
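/*
 * Typical usage, assuming the usual debugfs mount point:
 *
 *   # echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   # cat /sys/kernel/debug/tracing/trace
 */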
1456
1457static __init int init_graph_trace(void)
1458{
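/*
 * snprintf(NULL, 0, ...) writes nothing and returns the number of
 * characters the formatted value would need, i.e. the width of the
 * largest possible CPU number.
 */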
1459 max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
1460
1461 if (!register_ftrace_event(&graph_trace_entry_event)) {
1462 pr_warning("Warning: could not register graph trace events\n");
1463 return 1;
1464 }
1465
1466 if (!register_ftrace_event(&graph_trace_ret_event)) {
1467 pr_warning("Warning: could not register graph trace events\n");
1468 return 1;
1469 }
1470
1471 return register_tracer(&graph_trace);
1472}
1473
1474device_initcall(init_graph_trace);