/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
        return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
        kill_ftrace_graph = true;
}
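
/*
 * Illustrative sketch (not part of the original file): the critical
 * paths mentioned above are expected to bail out once the tracer is
 * dead, as ftrace_push_return_trace() does below. A hypothetical hook
 * would follow the same pattern:
 *
 *      if (unlikely(ftrace_graph_is_dead()))
 *              return -EBUSY;
 */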

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        /* Display function name after trailing } */
        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
        /* Include sleep time (scheduled out) between entry and return */
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or tail by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
        .opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq and others to fill in space
 * in the DURATION column.
 */
enum {
        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};
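
/*
 * Example (illustrative, not part of the original file): callers pass
 * these values through the flags argument. print_graph_irq() below
 * brackets its "==========>" marker between
 * print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START) and
 * print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END) so that the
 * DURATION column stays aligned even when no duration is printed.
 */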

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer)
{
        unsigned long long calltime;
        int index;

        if (unlikely(ftrace_graph_is_dead()))
                return -EBUSY;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        /*
         * The curr_ret_stack is an index into the ftrace return stack
         * of the current task. Its value should be in
         * [0, FTRACE_RETFUNC_DEPTH) when the function graph tracer is
         * used. To support filtering out specific functions, it makes
         * the index negative by subtracting a huge value
         * (FTRACE_NOTRACE_DEPTH), so that ftrace ignores records with
         * a negative index. The index is recovered when returning from
         * the filtered function by adding FTRACE_NOTRACE_DEPTH back,
         * and recording then continues normally.
         *
         * The curr_ret_stack is initialized to -1 and gets increased
         * in this function. So it can be less than -1 only if it was
         * filtered out via ftrace_graph_notrace_addr(), which can be
         * set from the set_graph_notrace file in tracefs by the user.
         */
        if (current->curr_ret_stack < -1)
                return -EBUSY;

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        if (ftrace_graph_notrace_addr(func))
                current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
        *depth = current->curr_ret_stack;

        return 0;
}
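
/*
 * Worked example of the set_graph_notrace offset (illustrative, not
 * part of the original file): with curr_ret_stack == 2, entering a
 * notrace'd function stores its frame at index 3 but leaves
 * curr_ret_stack at 3 - FTRACE_NOTRACE_DEPTH, a negative value.
 * Nested functions then see trace->depth < 0 and are skipped by
 * trace_graph_entry(). On return, ftrace_pop_return_trace() adds
 * FTRACE_NOTRACE_DEPTH back to recover index 3, and tracing resumes
 * normally.
 */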

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        /*
         * A negative index here means that it's just returned from a
         * notrace'd function. Recover the index to get the original
         * return address. See ftrace_push_return_trace().
         *
         * TODO: Need to check whether the stack gets corrupted.
         */
        if (index < 0)
                index += FTRACE_NOTRACE_DEPTH;

        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the place holder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 with optimize for size (-Os) makes the latest
         * gcc do the above.
         *
         * Note, -mfentry does not use frame pointers, and this test
         * is not needed if CC_USING_FENTRY is set.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        barrier();
        current->curr_ret_stack--;
        /*
         * The curr_ret_stack can be less than -1 only if it was
         * filtered out and it's about to return from the function.
         * Recover the index and continue to trace normal functions.
         */
        if (current->curr_ret_stack < -1) {
                current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
                return ret;
        }

        /*
         * The trace should run after decrementing the ret counter
         * in case an interrupt were to come in. We don't want to
         * lose the interrupt if max_depth is set.
         */
        ftrace_graph_return(&trace);

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}

int __trace_graph_entry(struct trace_array *tr,
                        struct ftrace_graph_ent *trace,
                        unsigned long flags,
                        int pc)
{
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);

        return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(current))
                return 0;

        /* Trace it if it is nested in an enabled function or is itself enabled. */
        if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
             ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
            (max_depth && trace->depth >= max_depth))
                return 0;

        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of the ret stack negative to indicate that it
         * should ignore further functions. But it needs its own ret
         * stack entry to recover the original index in order to
         * continue tracing after returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
        if (tracing_thresh)
                return 1;
        else
                return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
                       unsigned long ip, unsigned long flags, int pc)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, flags, pc);
        __trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
                     unsigned long ip, unsigned long parent_ip,
                     unsigned long flags, int pc)
{
        __trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
                          struct ftrace_graph_ret *trace,
                          unsigned long flags,
                          int pc)
{
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */

        smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_thresh_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
        graph_trace_reset(tr);
        return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%s-%s", comm, pid_str);

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++)
                trace_seq_putc(s, ' ');
}
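
/*
 * Worked example (illustrative, not part of the original file): for
 * comm "sshd" and pid 1755 the field is "sshd-1755", 9 characters, so
 * spaces = 14 - 9 = 5: two leading and three trailing spaces center
 * it within TRACE_GRAPH_PROCINFO_LENGTH columns.
 */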


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        trace_seq_putc(s, ' ');
        trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;

        if (!data)
                return;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1) migration/0--1 => sshd-1755
 ------------------------------------------

 */
        trace_seq_puts(s, " ------------------------------------------\n");
        print_graph_cpu(s, cpu);
        print_graph_proc(s, prev_pid);
        trace_seq_puts(s, " => ");
        print_graph_proc(s, pid);
        trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                    struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, then
                         * we only care about what type it is. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        trace_seq_printf(s, "%5lu.%06lu | ",
                         (unsigned long)t, usecs_rem);
}
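
/*
 * Worked example (illustrative, not part of the original file): for
 * t = 5123456789 ns, do_div() leaves t == 5 (seconds) and returns a
 * remainder of 123456789 ns, i.e. 123456 us, so the column reads
 * "    5.123456 | ".
 */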

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return;

        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                        print_graph_abs_time(iter->ts, s);

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU)
                        print_graph_cpu(s, cpu);

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        print_graph_proc(s, pid);
                        trace_seq_puts(s, " | ");
                }

                /* Latency format */
                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                        print_graph_lat_fmt(s, ent);
        }

        /* No overhead */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

        if (type == TRACE_GRAPH_ENT)
                trace_seq_puts(s, "==========>");
        else
                trace_seq_puts(s, "<==========");

        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
        trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        trace_seq_printf(s, "%s", usecs_str);

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                trace_seq_printf(s, ".%s", nsecs_str);
                len += strlen(nsecs_str) + 1;
        }

        trace_seq_puts(s, " us ");

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 8; i++)
                trace_seq_putc(s, ' ');
}
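
/*
 * Worked example (illustrative, not part of the original file): a
 * duration of 12345 ns splits into duration == 12 us and
 * nsecs_rem == 345, printed as "12.345 us" plus padding. For a
 * duration of 1234567890 ns the microsecond part "1234567" already
 * has seven digits, so the nanosecond part is dropped to keep the
 * column width.
 */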

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags)
{
        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* No real data, just filling the column with spaces */
        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
        case FLAGS_FILL_FULL:
                trace_seq_puts(s, " | ");
                return;
        case FLAGS_FILL_START:
                trace_seq_puts(s, " ");
                return;
        case FLAGS_FILL_END:
                trace_seq_puts(s, " |");
                return;
        }

        /* Signal an overhead of execution time to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
                trace_seq_printf(s, "%c ", trace_find_mark(duration));
        else
                trace_seq_puts(s, " ");

        trace_print_graph_duration(duration, s);
        trace_seq_puts(s, "| ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                       struct ftrace_graph_ent_entry *entry,
                       struct ftrace_graph_ret_entry *ret_entry,
                       struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps();\n", (void *)call->func);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps() {\n", (void *)call->func);

        if (trace_seq_has_overflowed(s))
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * we already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        struct trace_array *tr = iter->tr;
        int cpu = iter->cpu;

        /* Pid */
        verif_pid(s, ent->pid, cpu, data);

        if (type)
                /* Interrupt */
                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                print_graph_abs_time(iter->ts, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
                print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                print_graph_proc(s, ent->pid);
                trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                print_graph_lat_fmt(s, ent);

        return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the returning entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                  struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        static enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it, because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. Since this is the
                 * return from a function, we now want the comments
                 * to display at the same level as the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        print_graph_prologue(iter, s, 0, 0, flags);

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name. Always do
         * that if the funcgraph-tail option is enabled.
         */
        if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
                trace_seq_puts(s, "}\n");
        else
                trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
                trace_seq_printf(s, " (Overruns: %lu)\n",
                                 trace->overrun);

        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                        cpu, pid, flags);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        struct trace_array *tr = iter->tr;
        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        print_graph_prologue(iter, s, 0, 0, flags);

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
                        trace_seq_putc(s, ' ');

        /* The comment */
        trace_seq_puts(s, "/* ");

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        if (trace_seq_has_overflowed(s))
                goto out;

        /* Strip ending newline */
        if (s->buffer[s->seq.len - 1] == '\n') {
                s->buffer[s->seq.len - 1] = '\0';
                s->seq.len--;
        }

        trace_seq_puts(s, " */\n");
 out:
        return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * it can be safely saved on the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                " /* 16 spaces */
                "    " /* 4 spaces */
                "                 "; /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
        seq_printf(s, "#%.*s||| / \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
                                        struct seq_file *s, u32 flags)
{
        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, " TIME ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, " TASK/PID ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, " DURATION ");
        seq_puts(s, " FUNCTION CALLS\n");

        /* 2nd line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, " | ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " | ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, " | | ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, " | | ");
        seq_puts(s, " | | | |\n");
}

static void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        gfp_t gfpflags;
        int cpu;

        iter->private = NULL;

        /* We can be called in atomic context via ftrace_dump() */
        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

        data = kzalloc(sizeof(*data), gfpflags);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        if (bit == TRACE_GRAPH_SLEEP_TIME)
                ftrace_graph_sleep_time_control(set);

        if (bit == TRACE_GRAPH_GRAPH_TIME)
                ftrace_graph_graph_time_control(set);

        return 0;
}
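
/*
 * Usage sketch (illustrative, not part of the original file): the
 * options above are toggled from userspace through the trace_options
 * file, e.g. assuming tracefs is mounted at /sys/kernel/debug/tracing:
 *
 *      echo funcgraph-proc   > /sys/kernel/debug/tracing/trace_options
 *      echo nofuncgraph-irqs > /sys/kernel/debug/tracing/trace_options
 */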

static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type           = TRACE_GRAPH_ENT,
        .funcs          = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type           = TRACE_GRAPH_RET,
        .funcs          = &graph_functions
};

static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
        .update_thresh  = graph_trace_update_thresh,
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
        int n;

        n = sprintf(buf, "%d\n", max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
        .open           = tracing_open_generic,
        .write          = graph_depth_write,
        .read           = graph_depth_read,
        .llseek         = generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("max_graph_depth", 0644, d_tracer,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_tracefs);
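
/*
 * Usage sketch (illustrative, not part of the original file): the file
 * created above caps the trace depth from userspace; writing 0 removes
 * the limit, since trace_graph_entry() only checks max_depth when it
 * is non-zero:
 *
 *      echo 3 > /sys/kernel/debug/tracing/max_graph_depth
 *      echo 0 > /sys/kernel/debug/tracing/max_graph_depth
 */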

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        if (!register_trace_event(&graph_trace_entry_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_trace_event(&graph_trace_ret_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);
1/*
2 *
3 * Function graph tracer.
4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7 *
8 */
9#include <linux/debugfs.h>
10#include <linux/uaccess.h>
11#include <linux/ftrace.h>
12#include <linux/slab.h>
13#include <linux/fs.h>
14
15#include "trace.h"
16#include "trace_output.h"
17
18/* When set, irq functions will be ignored */
19static int ftrace_graph_skip_irqs;
20
21struct fgraph_cpu_data {
22 pid_t last_pid;
23 int depth;
24 int depth_irq;
25 int ignore;
26 unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
27};
28
29struct fgraph_data {
30 struct fgraph_cpu_data __percpu *cpu_data;
31
32 /* Place to preserve last processed entry. */
33 struct ftrace_graph_ent_entry ent;
34 struct ftrace_graph_ret_entry ret;
35 int failed;
36 int cpu;
37};
38
39#define TRACE_GRAPH_INDENT 2
40
41/* Flag options */
42#define TRACE_GRAPH_PRINT_OVERRUN 0x1
43#define TRACE_GRAPH_PRINT_CPU 0x2
44#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
45#define TRACE_GRAPH_PRINT_PROC 0x8
46#define TRACE_GRAPH_PRINT_DURATION 0x10
47#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
48#define TRACE_GRAPH_PRINT_IRQS 0x40
49
50static struct tracer_opt trace_opts[] = {
51 /* Display overruns? (for self-debug purpose) */
52 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
53 /* Display CPU ? */
54 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
55 /* Display Overhead ? */
56 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
57 /* Display proc name/pid */
58 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
59 /* Display duration of execution */
60 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
61 /* Display absolute time of an entry */
62 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
63 /* Display interrupts */
64 { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
65 { } /* Empty entry */
66};
67
68static struct tracer_flags tracer_flags = {
69 /* Don't display overruns and proc by default */
70 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
71 TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
72 .opts = trace_opts
73};
74
75static struct trace_array *graph_array;
76
77/*
78 * DURATION column is being also used to display IRQ signs,
79 * following values are used by print_graph_irq and others
80 * to fill in space into DURATION column.
81 */
82enum {
83 DURATION_FILL_FULL = -1,
84 DURATION_FILL_START = -2,
85 DURATION_FILL_END = -3,
86};
87
88static enum print_line_t
89print_graph_duration(unsigned long long duration, struct trace_seq *s,
90 u32 flags);
91
92/* Add a function return address to the trace stack on thread info.*/
93int
94ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
95 unsigned long frame_pointer)
96{
97 unsigned long long calltime;
98 int index;
99
100 if (!current->ret_stack)
101 return -EBUSY;
102
103 /*
104 * We must make sure the ret_stack is tested before we read
105 * anything else.
106 */
107 smp_rmb();
108
109 /* The return trace stack is full */
110 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
111 atomic_inc(¤t->trace_overrun);
112 return -EBUSY;
113 }
114
115 calltime = trace_clock_local();
116
117 index = ++current->curr_ret_stack;
118 barrier();
119 current->ret_stack[index].ret = ret;
120 current->ret_stack[index].func = func;
121 current->ret_stack[index].calltime = calltime;
122 current->ret_stack[index].subtime = 0;
123 current->ret_stack[index].fp = frame_pointer;
124 *depth = index;
125
126 return 0;
127}
128
129/* Retrieve a function return address to the trace stack on thread info.*/
130static void
131ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
132 unsigned long frame_pointer)
133{
134 int index;
135
136 index = current->curr_ret_stack;
137
138 if (unlikely(index < 0)) {
139 ftrace_graph_stop();
140 WARN_ON(1);
141 /* Might as well panic, otherwise we have no where to go */
142 *ret = (unsigned long)panic;
143 return;
144 }
145
146#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
147 /*
148 * The arch may choose to record the frame pointer used
149 * and check it here to make sure that it is what we expect it
150 * to be. If gcc does not set the place holder of the return
151 * address in the frame pointer, and does a copy instead, then
152 * the function graph trace will fail. This test detects this
153 * case.
154 *
155 * Currently, x86_32 with optimize for size (-Os) makes the latest
156 * gcc do the above.
157 */
158 if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
159 ftrace_graph_stop();
160 WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
161 " from func %ps return to %lx\n",
162 current->ret_stack[index].fp,
163 frame_pointer,
164 (void *)current->ret_stack[index].func,
165 current->ret_stack[index].ret);
166 *ret = (unsigned long)panic;
167 return;
168 }
169#endif
170
171 *ret = current->ret_stack[index].ret;
172 trace->func = current->ret_stack[index].func;
173 trace->calltime = current->ret_stack[index].calltime;
174 trace->overrun = atomic_read(¤t->trace_overrun);
175 trace->depth = index;
176}
177
178/*
179 * Send the trace to the ring-buffer.
180 * @return the original return address.
181 */
182unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
183{
184 struct ftrace_graph_ret trace;
185 unsigned long ret;
186
187 ftrace_pop_return_trace(&trace, &ret, frame_pointer);
188 trace.rettime = trace_clock_local();
189 ftrace_graph_return(&trace);
190 barrier();
191 current->curr_ret_stack--;
192
193 if (unlikely(!ret)) {
194 ftrace_graph_stop();
195 WARN_ON(1);
196 /* Might as well panic. What else to do? */
197 ret = (unsigned long)panic;
198 }
199
200 return ret;
201}
202
203int __trace_graph_entry(struct trace_array *tr,
204 struct ftrace_graph_ent *trace,
205 unsigned long flags,
206 int pc)
207{
208 struct ftrace_event_call *call = &event_funcgraph_entry;
209 struct ring_buffer_event *event;
210 struct ring_buffer *buffer = tr->buffer;
211 struct ftrace_graph_ent_entry *entry;
212
213 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
214 return 0;
215
216 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
217 sizeof(*entry), flags, pc);
218 if (!event)
219 return 0;
220 entry = ring_buffer_event_data(event);
221 entry->graph_ent = *trace;
222 if (!filter_current_check_discard(buffer, call, entry, event))
223 ring_buffer_unlock_commit(buffer, event);
224
225 return 1;
226}
227
228static inline int ftrace_graph_ignore_irqs(void)
229{
230 if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
231 return 0;
232
233 return in_irq();
234}
235
236int trace_graph_entry(struct ftrace_graph_ent *trace)
237{
238 struct trace_array *tr = graph_array;
239 struct trace_array_cpu *data;
240 unsigned long flags;
241 long disabled;
242 int ret;
243 int cpu;
244 int pc;
245
246 if (!ftrace_trace_task(current))
247 return 0;
248
249 /* trace it when it is-nested-in or is a function enabled. */
250 if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
251 ftrace_graph_ignore_irqs())
252 return 0;
253
254 local_irq_save(flags);
255 cpu = raw_smp_processor_id();
256 data = tr->data[cpu];
257 disabled = atomic_inc_return(&data->disabled);
258 if (likely(disabled == 1)) {
259 pc = preempt_count();
260 ret = __trace_graph_entry(tr, trace, flags, pc);
261 } else {
262 ret = 0;
263 }
264
265 atomic_dec(&data->disabled);
266 local_irq_restore(flags);
267
268 return ret;
269}
270
271int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
272{
273 if (tracing_thresh)
274 return 1;
275 else
276 return trace_graph_entry(trace);
277}
278
279static void
280__trace_graph_function(struct trace_array *tr,
281 unsigned long ip, unsigned long flags, int pc)
282{
283 u64 time = trace_clock_local();
284 struct ftrace_graph_ent ent = {
285 .func = ip,
286 .depth = 0,
287 };
288 struct ftrace_graph_ret ret = {
289 .func = ip,
290 .depth = 0,
291 .calltime = time,
292 .rettime = time,
293 };
294
295 __trace_graph_entry(tr, &ent, flags, pc);
296 __trace_graph_return(tr, &ret, flags, pc);
297}
298
299void
300trace_graph_function(struct trace_array *tr,
301 unsigned long ip, unsigned long parent_ip,
302 unsigned long flags, int pc)
303{
304 __trace_graph_function(tr, ip, flags, pc);
305}
306
307void __trace_graph_return(struct trace_array *tr,
308 struct ftrace_graph_ret *trace,
309 unsigned long flags,
310 int pc)
311{
312 struct ftrace_event_call *call = &event_funcgraph_exit;
313 struct ring_buffer_event *event;
314 struct ring_buffer *buffer = tr->buffer;
315 struct ftrace_graph_ret_entry *entry;
316
317 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
318 return;
319
320 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
321 sizeof(*entry), flags, pc);
322 if (!event)
323 return;
324 entry = ring_buffer_event_data(event);
325 entry->ret = *trace;
326 if (!filter_current_check_discard(buffer, call, entry, event))
327 ring_buffer_unlock_commit(buffer, event);
328}
329
330void trace_graph_return(struct ftrace_graph_ret *trace)
331{
332 struct trace_array *tr = graph_array;
333 struct trace_array_cpu *data;
334 unsigned long flags;
335 long disabled;
336 int cpu;
337 int pc;
338
339 local_irq_save(flags);
340 cpu = raw_smp_processor_id();
341 data = tr->data[cpu];
342 disabled = atomic_inc_return(&data->disabled);
343 if (likely(disabled == 1)) {
344 pc = preempt_count();
345 __trace_graph_return(tr, trace, flags, pc);
346 }
347 atomic_dec(&data->disabled);
348 local_irq_restore(flags);
349}
350
351void set_graph_array(struct trace_array *tr)
352{
353 graph_array = tr;
354
355 /* Make graph_array visible before we start tracing */
356
357 smp_mb();
358}
359
360void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
361{
362 if (tracing_thresh &&
363 (trace->rettime - trace->calltime < tracing_thresh))
364 return;
365 else
366 trace_graph_return(trace);
367}
368
369static int graph_trace_init(struct trace_array *tr)
370{
371 int ret;
372
373 set_graph_array(tr);
374 if (tracing_thresh)
375 ret = register_ftrace_graph(&trace_graph_thresh_return,
376 &trace_graph_thresh_entry);
377 else
378 ret = register_ftrace_graph(&trace_graph_return,
379 &trace_graph_entry);
380 if (ret)
381 return ret;
382 tracing_start_cmdline_record();
383
384 return 0;
385}
386
387static void graph_trace_reset(struct trace_array *tr)
388{
389 tracing_stop_cmdline_record();
390 unregister_ftrace_graph();
391}
392
393static int max_bytes_for_cpu;
394
395static enum print_line_t
396print_graph_cpu(struct trace_seq *s, int cpu)
397{
398 int ret;
399
400 /*
401 * Start with a space character - to make it stand out
402 * to the right a bit when trace output is pasted into
403 * email:
404 */
405 ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
406 if (!ret)
407 return TRACE_TYPE_PARTIAL_LINE;
408
409 return TRACE_TYPE_HANDLED;
410}
411
412#define TRACE_GRAPH_PROCINFO_LENGTH 14
413
414static enum print_line_t
415print_graph_proc(struct trace_seq *s, pid_t pid)
416{
417 char comm[TASK_COMM_LEN];
418 /* sign + log10(MAX_INT) + '\0' */
419 char pid_str[11];
420 int spaces = 0;
421 int ret;
422 int len;
423 int i;
424
425 trace_find_cmdline(pid, comm);
426 comm[7] = '\0';
427 sprintf(pid_str, "%d", pid);
428
429 /* 1 stands for the "-" character */
430 len = strlen(comm) + strlen(pid_str) + 1;
431
432 if (len < TRACE_GRAPH_PROCINFO_LENGTH)
433 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
434
435 /* First spaces to align center */
436 for (i = 0; i < spaces / 2; i++) {
437 ret = trace_seq_printf(s, " ");
438 if (!ret)
439 return TRACE_TYPE_PARTIAL_LINE;
440 }
441
442 ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
443 if (!ret)
444 return TRACE_TYPE_PARTIAL_LINE;
445
446 /* Last spaces to align center */
447 for (i = 0; i < spaces - (spaces / 2); i++) {
448 ret = trace_seq_printf(s, " ");
449 if (!ret)
450 return TRACE_TYPE_PARTIAL_LINE;
451 }
452 return TRACE_TYPE_HANDLED;
453}
454
455
456static enum print_line_t
457print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
458{
459 if (!trace_seq_putc(s, ' '))
460 return 0;
461
462 return trace_print_lat_fmt(s, entry);
463}
464
465/* If the pid changed since the last trace, output this event */
466static enum print_line_t
467verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
468{
469 pid_t prev_pid;
470 pid_t *last_pid;
471 int ret;
472
473 if (!data)
474 return TRACE_TYPE_HANDLED;
475
476 last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
477
478 if (*last_pid == pid)
479 return TRACE_TYPE_HANDLED;
480
481 prev_pid = *last_pid;
482 *last_pid = pid;
483
484 if (prev_pid == -1)
485 return TRACE_TYPE_HANDLED;
486/*
487 * Context-switch trace line:
488
489 ------------------------------------------
490 | 1) migration/0--1 => sshd-1755
491 ------------------------------------------
492
493 */
494 ret = trace_seq_printf(s,
495 " ------------------------------------------\n");
496 if (!ret)
497 return TRACE_TYPE_PARTIAL_LINE;
498
499 ret = print_graph_cpu(s, cpu);
500 if (ret == TRACE_TYPE_PARTIAL_LINE)
501 return TRACE_TYPE_PARTIAL_LINE;
502
503 ret = print_graph_proc(s, prev_pid);
504 if (ret == TRACE_TYPE_PARTIAL_LINE)
505 return TRACE_TYPE_PARTIAL_LINE;
506
507 ret = trace_seq_printf(s, " => ");
508 if (!ret)
509 return TRACE_TYPE_PARTIAL_LINE;
510
511 ret = print_graph_proc(s, pid);
512 if (ret == TRACE_TYPE_PARTIAL_LINE)
513 return TRACE_TYPE_PARTIAL_LINE;
514
515 ret = trace_seq_printf(s,
516 "\n ------------------------------------------\n\n");
517 if (!ret)
518 return TRACE_TYPE_PARTIAL_LINE;
519
520 return TRACE_TYPE_HANDLED;
521}
522
523static struct ftrace_graph_ret_entry *
524get_return_for_leaf(struct trace_iterator *iter,
525 struct ftrace_graph_ent_entry *curr)
526{
527 struct fgraph_data *data = iter->private;
528 struct ring_buffer_iter *ring_iter = NULL;
529 struct ring_buffer_event *event;
530 struct ftrace_graph_ret_entry *next;
531
532 /*
533 * If the previous output failed to write to the seq buffer,
534 * then we just reuse the data from before.
535 */
536 if (data && data->failed) {
537 curr = &data->ent;
538 next = &data->ret;
539 } else {
540
541 ring_iter = iter->buffer_iter[iter->cpu];
542
543 /* First peek to compare current entry and the next one */
544 if (ring_iter)
545 event = ring_buffer_iter_peek(ring_iter, NULL);
546 else {
547 /*
548 * We need to consume the current entry to see
549 * the next one.
550 */
551 ring_buffer_consume(iter->tr->buffer, iter->cpu,
552 NULL, NULL);
553 event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
554 NULL, NULL);
555 }
556
557 if (!event)
558 return NULL;
559
560 next = ring_buffer_event_data(event);
561
562 if (data) {
563 /*
564 * Save current and next entries for later reference
565 * if the output fails.
566 */
567 data->ent = *curr;
568 /*
569 * If the next event is not a return type, then
570 * we only care about what type it is. Otherwise we can
571 * safely copy the entire event.
572 */
573 if (next->ent.type == TRACE_GRAPH_RET)
574 data->ret = *next;
575 else
576 data->ret.ent.type = next->ent.type;
577 }
578 }
579
580 if (next->ent.type != TRACE_GRAPH_RET)
581 return NULL;
582
583 if (curr->ent.pid != next->ent.pid ||
584 curr->graph_ent.func != next->ret.func)
585 return NULL;
586
587 /* this is a leaf, now advance the iterator */
588 if (ring_iter)
589 ring_buffer_read(ring_iter, NULL);
590
591 return next;
592}
593
594static int print_graph_abs_time(u64 t, struct trace_seq *s)
595{
596 unsigned long usecs_rem;
597
598 usecs_rem = do_div(t, NSEC_PER_SEC);
599 usecs_rem /= 1000;
600
601 return trace_seq_printf(s, "%5lu.%06lu | ",
602 (unsigned long)t, usecs_rem);
603}
604
605static enum print_line_t
606print_graph_irq(struct trace_iterator *iter, unsigned long addr,
607 enum trace_type type, int cpu, pid_t pid, u32 flags)
608{
609 int ret;
610 struct trace_seq *s = &iter->seq;
611
612 if (addr < (unsigned long)__irqentry_text_start ||
613 addr >= (unsigned long)__irqentry_text_end)
614 return TRACE_TYPE_UNHANDLED;
615
616 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
617 /* Absolute time */
618 if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
619 ret = print_graph_abs_time(iter->ts, s);
620 if (!ret)
621 return TRACE_TYPE_PARTIAL_LINE;
622 }
623
624 /* Cpu */
625 if (flags & TRACE_GRAPH_PRINT_CPU) {
626 ret = print_graph_cpu(s, cpu);
627 if (ret == TRACE_TYPE_PARTIAL_LINE)
628 return TRACE_TYPE_PARTIAL_LINE;
629 }
630
631 /* Proc */
632 if (flags & TRACE_GRAPH_PRINT_PROC) {
633 ret = print_graph_proc(s, pid);
634 if (ret == TRACE_TYPE_PARTIAL_LINE)
635 return TRACE_TYPE_PARTIAL_LINE;
636 ret = trace_seq_printf(s, " | ");
637 if (!ret)
638 return TRACE_TYPE_PARTIAL_LINE;
639 }
640 }
641
642 /* No overhead */
643	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
644 if (ret != TRACE_TYPE_HANDLED)
645 return ret;
646
647 if (type == TRACE_GRAPH_ENT)
648 ret = trace_seq_printf(s, "==========>");
649 else
650 ret = trace_seq_printf(s, "<==========");
651
652 if (!ret)
653 return TRACE_TYPE_PARTIAL_LINE;
654
655	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
656 if (ret != TRACE_TYPE_HANDLED)
657 return ret;
658
659 ret = trace_seq_printf(s, "\n");
660
661 if (!ret)
662 return TRACE_TYPE_PARTIAL_LINE;
663 return TRACE_TYPE_HANDLED;
664}
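/*
 * The resulting marker lines share the DURATION column, roughly
 * (sketch; exact spacing depends on the enabled flags):
 *
 *  1)   ==========> |
 *  1)               |  smp_apic_timer_interrupt() {
 *	...
 *  1)   <========== |
 */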
665
666enum print_line_t
667trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
668{
669 unsigned long nsecs_rem = do_div(duration, 1000);
670	/* enough for the 20 digits of ULONG_MAX plus '\0' */
671 char msecs_str[21];
672 char nsecs_str[5];
673 int ret, len;
674 int i;
675
676 sprintf(msecs_str, "%lu", (unsigned long) duration);
677
678	/* Print the integer part (usecs) */
679 ret = trace_seq_printf(s, "%s", msecs_str);
680 if (!ret)
681 return TRACE_TYPE_PARTIAL_LINE;
682
683 len = strlen(msecs_str);
684
685	/* Print nsecs (we don't want to exceed 7 digits) */
686 if (len < 7) {
687 size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
688
689 snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
690 ret = trace_seq_printf(s, ".%s", nsecs_str);
691 if (!ret)
692 return TRACE_TYPE_PARTIAL_LINE;
693 len += strlen(nsecs_str);
694 }
695
696 ret = trace_seq_printf(s, " us ");
697 if (!ret)
698 return TRACE_TYPE_PARTIAL_LINE;
699
700 /* Print remaining spaces to fit the row's width */
701 for (i = len; i < 7; i++) {
702 ret = trace_seq_printf(s, " ");
703 if (!ret)
704 return TRACE_TYPE_PARTIAL_LINE;
705 }
706 return TRACE_TYPE_HANDLED;
707}
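/*
 * Worked example: duration = 1234567 ns. do_div() yields 1234 usecs
 * with nsecs_rem = 567, so this prints "1234.567 us "; durations with
 * fewer digits are padded out to the column's fixed seven-digit width.
 */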
708
709static enum print_line_t
710print_graph_duration(unsigned long long duration, struct trace_seq *s,
711 u32 flags)
712{
713 int ret = -1;
714
715 if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
716 !(trace_flags & TRACE_ITER_CONTEXT_INFO))
717 return TRACE_TYPE_HANDLED;
718
719	/* No real data, just filling the column with spaces */
720	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
721	case FLAGS_FILL_FULL:
722		ret = trace_seq_printf(s, "              |  ");
723		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
724	case FLAGS_FILL_START:
725		ret = trace_seq_printf(s, "  ");
726		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
727	case FLAGS_FILL_END:
728		ret = trace_seq_printf(s, " |");
729		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
730	}
731
732	/* Signal an overhead of execution time to the output */
733	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
734		/* Duration exceeded 100 usecs */
735		if (duration > 100000ULL)
736			ret = trace_seq_printf(s, "! ");
737		/* Duration exceeded 10 usecs */
738		else if (duration > 10000ULL)
739			ret = trace_seq_printf(s, "+ ");
740 }
741
742	/*
743	 * The -1 means we either did not exceed the duration thresholds
744	 * or we don't want to print out the overhead. Either way we need
745	 * to fill out the space.
746	 */
747	if (ret == -1)
748		ret = trace_seq_printf(s, "  ");
749
750	/* Catch here any failure that happened above */
751 if (!ret)
752 return TRACE_TYPE_PARTIAL_LINE;
753
754 ret = trace_print_graph_duration(duration, s);
755 if (ret != TRACE_TYPE_HANDLED)
756 return ret;
757
758 ret = trace_seq_printf(s, "| ");
759 if (!ret)
760 return TRACE_TYPE_PARTIAL_LINE;
761
762 return TRACE_TYPE_HANDLED;
763}
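/*
 * With funcgraph-overhead enabled the duration gains a two-character
 * prefix, e.g. (sketch): "+ 56.789 us " for anything over 10 usecs and
 * "! 567.890 us " for anything over 100 usecs; otherwise two plain
 * spaces keep the rows aligned.
 */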
764
765/* Case of a leaf function on its call entry */
766static enum print_line_t
767print_graph_entry_leaf(struct trace_iterator *iter,
768 struct ftrace_graph_ent_entry *entry,
769 struct ftrace_graph_ret_entry *ret_entry,
770 struct trace_seq *s, u32 flags)
771{
772 struct fgraph_data *data = iter->private;
773 struct ftrace_graph_ret *graph_ret;
774 struct ftrace_graph_ent *call;
775 unsigned long long duration;
776 int ret;
777 int i;
778
779 graph_ret = &ret_entry->ret;
780 call = &entry->graph_ent;
781 duration = graph_ret->rettime - graph_ret->calltime;
782
783 if (data) {
784 struct fgraph_cpu_data *cpu_data;
785 int cpu = iter->cpu;
786
787 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
788
789		/*
790		 * Comments display at depth + 1. Since this is a
791		 * leaf function, keep the comments at the same depth
792		 * as this call.
793		 */
794 cpu_data->depth = call->depth - 1;
795
796 /* No need to keep this function around for this depth */
797 if (call->depth < FTRACE_RETFUNC_DEPTH)
798 cpu_data->enter_funcs[call->depth] = 0;
799 }
800
801 /* Overhead and duration */
802 ret = print_graph_duration(duration, s, flags);
803 if (ret == TRACE_TYPE_PARTIAL_LINE)
804 return TRACE_TYPE_PARTIAL_LINE;
805
806 /* Function */
807 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
808 ret = trace_seq_printf(s, " ");
809 if (!ret)
810 return TRACE_TYPE_PARTIAL_LINE;
811 }
812
813 ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
814 if (!ret)
815 return TRACE_TYPE_PARTIAL_LINE;
816
817 return TRACE_TYPE_HANDLED;
818}
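/*
 * A leaf therefore renders as a single closed call, e.g. (sketch):
 *
 *  1)   2.345 us    |    kfree();
 */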
819
820static enum print_line_t
821print_graph_entry_nested(struct trace_iterator *iter,
822 struct ftrace_graph_ent_entry *entry,
823 struct trace_seq *s, int cpu, u32 flags)
824{
825 struct ftrace_graph_ent *call = &entry->graph_ent;
826 struct fgraph_data *data = iter->private;
827 int ret;
828 int i;
829
830 if (data) {
831 struct fgraph_cpu_data *cpu_data;
832 int cpu = iter->cpu;
833
834 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
835 cpu_data->depth = call->depth;
836
837 /* Save this function pointer to see if the exit matches */
838 if (call->depth < FTRACE_RETFUNC_DEPTH)
839 cpu_data->enter_funcs[call->depth] = call->func;
840 }
841
842 /* No time */
843	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
844 if (ret != TRACE_TYPE_HANDLED)
845 return ret;
846
847 /* Function */
848 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
849 ret = trace_seq_printf(s, " ");
850 if (!ret)
851 return TRACE_TYPE_PARTIAL_LINE;
852 }
853
854 ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
855 if (!ret)
856 return TRACE_TYPE_PARTIAL_LINE;
857
858 /*
859 * we already consumed the current entry to check the next one
860 * and see if this is a leaf.
861 */
862 return TRACE_TYPE_NO_CONSUME;
863}
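/*
 * A nested entry opens a bracket and leaves the DURATION column to be
 * filled, e.g. (sketch):
 *
 *  1)               |  do_sys_open() {
 */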
864
865static enum print_line_t
866print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
867 int type, unsigned long addr, u32 flags)
868{
869 struct fgraph_data *data = iter->private;
870 struct trace_entry *ent = iter->ent;
871 int cpu = iter->cpu;
872 int ret;
873
874 /* Pid */
875 if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
876 return TRACE_TYPE_PARTIAL_LINE;
877
878 if (type) {
879 /* Interrupt */
880 ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
881 if (ret == TRACE_TYPE_PARTIAL_LINE)
882 return TRACE_TYPE_PARTIAL_LINE;
883 }
884
885 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
886 return 0;
887
888 /* Absolute time */
889 if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
890 ret = print_graph_abs_time(iter->ts, s);
891 if (!ret)
892 return TRACE_TYPE_PARTIAL_LINE;
893 }
894
895 /* Cpu */
896 if (flags & TRACE_GRAPH_PRINT_CPU) {
897 ret = print_graph_cpu(s, cpu);
898 if (ret == TRACE_TYPE_PARTIAL_LINE)
899 return TRACE_TYPE_PARTIAL_LINE;
900 }
901
902 /* Proc */
903 if (flags & TRACE_GRAPH_PRINT_PROC) {
904 ret = print_graph_proc(s, ent->pid);
905 if (ret == TRACE_TYPE_PARTIAL_LINE)
906 return TRACE_TYPE_PARTIAL_LINE;
907
908 ret = trace_seq_printf(s, " | ");
909 if (!ret)
910 return TRACE_TYPE_PARTIAL_LINE;
911 }
912
913 /* Latency format */
914 if (trace_flags & TRACE_ITER_LATENCY_FMT) {
915 ret = print_graph_lat_fmt(s, ent);
916 if (ret == TRACE_TYPE_PARTIAL_LINE)
917 return TRACE_TYPE_PARTIAL_LINE;
918 }
919
920 return 0;
921}
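/*
 * With funcgraph-abstime, funcgraph-cpu and funcgraph-proc all set,
 * the prologue emits a line prefix along the lines of (sketch):
 *
 *	 1234.567890 |  1)  sshd-1755  |
 */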
922
923/*
924 * Entry check for irq code
925 *
926 * returns 1 if
927 * - we are inside irq code
928 * - we just entered irq code
929 *
930 * returns 0 if
931 * - funcgraph-irqs option is set
932 * - we are not inside irq code
933 */
934static int
935check_irq_entry(struct trace_iterator *iter, u32 flags,
936 unsigned long addr, int depth)
937{
938 int cpu = iter->cpu;
939 int *depth_irq;
940 struct fgraph_data *data = iter->private;
941
942 /*
943 * If we are either displaying irqs, or we got called as
944 * a graph event and private data does not exist,
945 * then we bypass the irq check.
946 */
947 if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
948 (!data))
949 return 0;
950
951 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
952
953 /*
954 * We are inside the irq code
955 */
956 if (*depth_irq >= 0)
957 return 1;
958
959 if ((addr < (unsigned long)__irqentry_text_start) ||
960 (addr >= (unsigned long)__irqentry_text_end))
961 return 0;
962
963 /*
964 * We are entering irq code.
965 */
966 *depth_irq = depth;
967 return 1;
968}
969
970/*
971 * Return check for irq code
972 *
973 * returns 1 if
974 * - we are inside irq code
975 * - we just left irq code
976 *
977 * returns 0 if
978 * - funcgraph-irqs option is set
979 * - we are not inside irq code
980 */
981static int
982check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
983{
984 int cpu = iter->cpu;
985 int *depth_irq;
986 struct fgraph_data *data = iter->private;
987
988 /*
989 * If we are either displaying irqs, or we got called as
990 * a graph event and private data does not exist,
991 * then we bypass the irq check.
992 */
993 if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
994 (!data))
995 return 0;
996
997 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
998
999 /*
1000 * We are not inside the irq code.
1001 */
1002 if (*depth_irq == -1)
1003 return 0;
1004
1005	/*
1006	 * We are inside the irq code, and this is a return entry.
1007	 * Let's not trace it and clear the entry depth, since
1008	 * we are out of irq code.
1009	 *
1010	 * This condition ensures that we 'leave the irq code' once
1011	 * we are out of the entry depth, thus protecting us from
1012	 * losing the RETURN entry.
1013	 */
1014 if (*depth_irq >= depth) {
1015 *depth_irq = -1;
1016 return 1;
1017 }
1018
1019 /*
1020 * We are inside the irq code, and this is not the entry.
1021 */
1022 return 1;
1023}
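/*
 * Together, check_irq_entry() and check_irq_return() hide everything
 * between an irq entry point and its matching (or shallower) return
 * when funcgraph-irqs is off: depth_irq records the depth at which
 * irq code was entered, every event is skipped while it is >= 0, and
 * a return at that depth or less resets it to -1.
 */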
1024
1025static enum print_line_t
1026print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
1027 struct trace_iterator *iter, u32 flags)
1028{
1029 struct fgraph_data *data = iter->private;
1030 struct ftrace_graph_ent *call = &field->graph_ent;
1031 struct ftrace_graph_ret_entry *leaf_ret;
1032	enum print_line_t ret;
1033 int cpu = iter->cpu;
1034
1035 if (check_irq_entry(iter, flags, call->func, call->depth))
1036 return TRACE_TYPE_HANDLED;
1037
1038 if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
1039 return TRACE_TYPE_PARTIAL_LINE;
1040
1041 leaf_ret = get_return_for_leaf(iter, field);
1042 if (leaf_ret)
1043 ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
1044 else
1045 ret = print_graph_entry_nested(iter, field, s, cpu, flags);
1046
1047 if (data) {
1048		/*
1049		 * If we failed to write our output, then we need to make
1050		 * note of it, because we already consumed our entry.
1051		 */
1052 if (s->full) {
1053 data->failed = 1;
1054 data->cpu = cpu;
1055 } else
1056 data->failed = 0;
1057 }
1058
1059 return ret;
1060}
1061
1062static enum print_line_t
1063print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1064 struct trace_entry *ent, struct trace_iterator *iter,
1065 u32 flags)
1066{
1067 unsigned long long duration = trace->rettime - trace->calltime;
1068 struct fgraph_data *data = iter->private;
1069 pid_t pid = ent->pid;
1070 int cpu = iter->cpu;
1071 int func_match = 1;
1072 int ret;
1073 int i;
1074
1075 if (check_irq_return(iter, flags, trace->depth))
1076 return TRACE_TYPE_HANDLED;
1077
1078 if (data) {
1079 struct fgraph_cpu_data *cpu_data;
1080 int cpu = iter->cpu;
1081
1082 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1083
1084		/*
1085		 * Comments display at depth + 1. This is the return
1086		 * from a function, so we now want the comments to
1087		 * display at the same level as the closing bracket.
1088		 */
1089 cpu_data->depth = trace->depth - 1;
1090
1091 if (trace->depth < FTRACE_RETFUNC_DEPTH) {
1092 if (cpu_data->enter_funcs[trace->depth] != trace->func)
1093 func_match = 0;
1094 cpu_data->enter_funcs[trace->depth] = 0;
1095 }
1096 }
1097
1098 if (print_graph_prologue(iter, s, 0, 0, flags))
1099 return TRACE_TYPE_PARTIAL_LINE;
1100
1101 /* Overhead and duration */
1102 ret = print_graph_duration(duration, s, flags);
1103 if (ret == TRACE_TYPE_PARTIAL_LINE)
1104 return TRACE_TYPE_PARTIAL_LINE;
1105
1106 /* Closing brace */
1107 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
1108 ret = trace_seq_printf(s, " ");
1109 if (!ret)
1110 return TRACE_TYPE_PARTIAL_LINE;
1111 }
1112
1113 /*
1114 * If the return function does not have a matching entry,
1115 * then the entry was lost. Instead of just printing
1116 * the '}' and letting the user guess what function this
1117 * belongs to, write out the function name.
1118 */
1119 if (func_match) {
1120 ret = trace_seq_printf(s, "}\n");
1121 if (!ret)
1122 return TRACE_TYPE_PARTIAL_LINE;
1123 } else {
1124 ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
1125 if (!ret)
1126 return TRACE_TYPE_PARTIAL_LINE;
1127 }
1128
1129 /* Overrun */
1130 if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
1131 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
1132 trace->overrun);
1133 if (!ret)
1134 return TRACE_TYPE_PARTIAL_LINE;
1135 }
1136
1137 ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1138 cpu, pid, flags);
1139 if (ret == TRACE_TYPE_PARTIAL_LINE)
1140 return TRACE_TYPE_PARTIAL_LINE;
1141
1142 return TRACE_TYPE_HANDLED;
1143}
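/*
 * Example closes (sketch): with a matching entry the line is just
 *
 *  1) + 23.456 us   |  }
 *
 * while a return whose entry was lost gets the function name appended
 * after the brace as a C comment, so the user can still tell which
 * function just returned.
 */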
1144
1145static enum print_line_t
1146print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1147 struct trace_iterator *iter, u32 flags)
1148{
1149 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1150 struct fgraph_data *data = iter->private;
1151 struct trace_event *event;
1152 int depth = 0;
1153 int ret;
1154 int i;
1155
1156 if (data)
1157 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1158
1159 if (print_graph_prologue(iter, s, 0, 0, flags))
1160 return TRACE_TYPE_PARTIAL_LINE;
1161
1162 /* No time */
1163	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
1164 if (ret != TRACE_TYPE_HANDLED)
1165 return ret;
1166
1167 /* Indentation */
1168 if (depth > 0)
1169 for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
1170 ret = trace_seq_printf(s, " ");
1171 if (!ret)
1172 return TRACE_TYPE_PARTIAL_LINE;
1173 }
1174
1175 /* The comment */
1176 ret = trace_seq_printf(s, "/* ");
1177 if (!ret)
1178 return TRACE_TYPE_PARTIAL_LINE;
1179
1180 switch (iter->ent->type) {
1181 case TRACE_BPRINT:
1182 ret = trace_print_bprintk_msg_only(iter);
1183 if (ret != TRACE_TYPE_HANDLED)
1184 return ret;
1185 break;
1186 case TRACE_PRINT:
1187 ret = trace_print_printk_msg_only(iter);
1188 if (ret != TRACE_TYPE_HANDLED)
1189 return ret;
1190 break;
1191 default:
1192 event = ftrace_find_event(ent->type);
1193 if (!event)
1194 return TRACE_TYPE_UNHANDLED;
1195
1196 ret = event->funcs->trace(iter, sym_flags, event);
1197 if (ret != TRACE_TYPE_HANDLED)
1198 return ret;
1199 }
1200
1201 /* Strip ending newline */
1202 if (s->buffer[s->len - 1] == '\n') {
1203 s->buffer[s->len - 1] = '\0';
1204 s->len--;
1205 }
1206
1207 ret = trace_seq_printf(s, " */\n");
1208 if (!ret)
1209 return TRACE_TYPE_PARTIAL_LINE;
1210
1211 return TRACE_TYPE_HANDLED;
1212}
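/*
 * E.g. a trace_printk() issued inside a traced function shows up one
 * indent level deeper than the surrounding call, wrapped in C comment
 * delimiters, with the DURATION column left blank.
 */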
1213
1214
1215enum print_line_t
1216print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1217{
1218 struct ftrace_graph_ent_entry *field;
1219 struct fgraph_data *data = iter->private;
1220 struct trace_entry *entry = iter->ent;
1221 struct trace_seq *s = &iter->seq;
1222 int cpu = iter->cpu;
1223 int ret;
1224
1225 if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1226 per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1227 return TRACE_TYPE_HANDLED;
1228 }
1229
1230	/*
1231	 * If the last output failed, we may need to print out the
1232	 * missing entry, which would otherwise never be output.
1233	 */
1234 if (data && data->failed) {
1235 field = &data->ent;
1236 iter->cpu = data->cpu;
1237 ret = print_graph_entry(field, s, iter, flags);
1238 if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1239 per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1240 ret = TRACE_TYPE_NO_CONSUME;
1241 }
1242 iter->cpu = cpu;
1243 return ret;
1244 }
1245
1246 switch (entry->type) {
1247 case TRACE_GRAPH_ENT: {
1248		/*
1249		 * print_graph_entry() may consume the current event,
1250		 * thus @field may become invalid, so we need to save it.
1251		 * sizeof(struct ftrace_graph_ent_entry) is very small,
1252		 * so it can safely be saved on the stack.
1253		 */
1254 struct ftrace_graph_ent_entry saved;
1255 trace_assign_type(field, entry);
1256 saved = *field;
1257 return print_graph_entry(&saved, s, iter, flags);
1258 }
1259 case TRACE_GRAPH_RET: {
1260 struct ftrace_graph_ret_entry *field;
1261 trace_assign_type(field, entry);
1262 return print_graph_return(&field->ret, s, entry, iter, flags);
1263 }
1264 case TRACE_STACK:
1265 case TRACE_FN:
1266		/* don't trace stack and functions as comments */
1267 return TRACE_TYPE_UNHANDLED;
1268
1269 default:
1270 return print_graph_comment(s, entry, iter, flags);
1271 }
1272
1273 return TRACE_TYPE_HANDLED;
1274}
1275
1276static enum print_line_t
1277print_graph_function(struct trace_iterator *iter)
1278{
1279 return print_graph_function_flags(iter, tracer_flags.val);
1280}
1281
1282static enum print_line_t
1283print_graph_function_event(struct trace_iterator *iter, int flags,
1284 struct trace_event *event)
1285{
1286 return print_graph_function(iter);
1287}
1288
1289static void print_lat_header(struct seq_file *s, u32 flags)
1290{
1291 static const char spaces[] = " " /* 16 spaces */
1292 " " /* 4 spaces */
1293 " "; /* 17 spaces */
1294 int size = 0;
1295
1296 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1297 size += 16;
1298 if (flags & TRACE_GRAPH_PRINT_CPU)
1299 size += 4;
1300 if (flags & TRACE_GRAPH_PRINT_PROC)
1301 size += 17;
1302
1303 seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
1304 seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
1305 seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1306 seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
1307 seq_printf(s, "#%.*s||| / \n", size, spaces);
1308}
1309
1310static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
1311{
1312 int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
1313
1314 if (lat)
1315 print_lat_header(s, flags);
1316
1317 /* 1st line */
1318 seq_printf(s, "#");
1319 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1320 seq_printf(s, " TIME ");
1321 if (flags & TRACE_GRAPH_PRINT_CPU)
1322 seq_printf(s, " CPU");
1323 if (flags & TRACE_GRAPH_PRINT_PROC)
1324 seq_printf(s, " TASK/PID ");
1325 if (lat)
1326 seq_printf(s, "||||");
1327 if (flags & TRACE_GRAPH_PRINT_DURATION)
1328 seq_printf(s, " DURATION ");
1329 seq_printf(s, " FUNCTION CALLS\n");
1330
1331 /* 2nd line */
1332 seq_printf(s, "#");
1333 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1334 seq_printf(s, " | ");
1335 if (flags & TRACE_GRAPH_PRINT_CPU)
1336 seq_printf(s, " | ");
1337 if (flags & TRACE_GRAPH_PRINT_PROC)
1338 seq_printf(s, " | | ");
1339 if (lat)
1340 seq_printf(s, "||||");
1341 if (flags & TRACE_GRAPH_PRINT_DURATION)
1342 seq_printf(s, " | | ");
1343 seq_printf(s, " | | | |\n");
1344}
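/*
 * With the default flags (cpu, overhead, duration, irqs) the header
 * comes out as (sketch):
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */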
1345
1346void print_graph_headers(struct seq_file *s)
1347{
1348 print_graph_headers_flags(s, tracer_flags.val);
1349}
1350
1351void print_graph_headers_flags(struct seq_file *s, u32 flags)
1352{
1353 struct trace_iterator *iter = s->private;
1354
1355 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
1356 return;
1357
1358 if (trace_flags & TRACE_ITER_LATENCY_FMT) {
1359 /* print nothing if the buffers are empty */
1360 if (trace_empty(iter))
1361 return;
1362
1363 print_trace_header(s, iter);
1364 }
1365
1366 __print_graph_headers_flags(s, flags);
1367}
1368
1369void graph_trace_open(struct trace_iterator *iter)
1370{
1371 /* pid and depth on the last trace processed */
1372 struct fgraph_data *data;
1373 int cpu;
1374
1375 iter->private = NULL;
1376
1377 data = kzalloc(sizeof(*data), GFP_KERNEL);
1378 if (!data)
1379 goto out_err;
1380
1381 data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
1382 if (!data->cpu_data)
1383 goto out_err_free;
1384
1385 for_each_possible_cpu(cpu) {
1386 pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1387 int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1388 int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1389 int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1390
1391 *pid = -1;
1392 *depth = 0;
1393 *ignore = 0;
1394 *depth_irq = -1;
1395 }
1396
1397 iter->private = data;
1398
1399 return;
1400
1401 out_err_free:
1402 kfree(data);
1403 out_err:
1404 pr_warning("function graph tracer: not enough memory\n");
1405}
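/*
 * Note that last_pid starts at -1, which verif_pid() treats as "no
 * previous task": the first event on each cpu is printed without a
 * context-switch banner.
 */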
1406
1407void graph_trace_close(struct trace_iterator *iter)
1408{
1409 struct fgraph_data *data = iter->private;
1410
1411 if (data) {
1412 free_percpu(data->cpu_data);
1413 kfree(data);
1414 }
1415}
1416
1417static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
1418{
1419 if (bit == TRACE_GRAPH_PRINT_IRQS)
1420 ftrace_graph_skip_irqs = !set;
1421
1422 return 0;
1423}
1424
1425static struct trace_event_functions graph_functions = {
1426 .trace = print_graph_function_event,
1427};
1428
1429static struct trace_event graph_trace_entry_event = {
1430 .type = TRACE_GRAPH_ENT,
1431 .funcs = &graph_functions,
1432};
1433
1434static struct trace_event graph_trace_ret_event = {
1435 .type = TRACE_GRAPH_RET,
1436 .funcs = &graph_functions
1437};
1438
1439static struct tracer graph_trace __read_mostly = {
1440 .name = "function_graph",
1441 .open = graph_trace_open,
1442 .pipe_open = graph_trace_open,
1443 .close = graph_trace_close,
1444 .pipe_close = graph_trace_close,
1445 .wait_pipe = poll_wait_pipe,
1446 .init = graph_trace_init,
1447 .reset = graph_trace_reset,
1448 .print_line = print_graph_function,
1449 .print_header = print_graph_headers,
1450 .flags = &tracer_flags,
1451 .set_flag = func_graph_set_flag,
1452#ifdef CONFIG_FTRACE_SELFTEST
1453 .selftest = trace_selftest_startup_function_graph,
1454#endif
1455};
1456
1457static __init int init_graph_trace(void)
1458{
1459 max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
1460
1461 if (!register_ftrace_event(&graph_trace_entry_event)) {
1462 pr_warning("Warning: could not register graph trace events\n");
1463 return 1;
1464 }
1465
1466 if (!register_ftrace_event(&graph_trace_ret_event)) {
1467 pr_warning("Warning: could not register graph trace events\n");
1468 return 1;
1469 }
1470
1471 return register_tracer(&graph_trace);
1472}
1473
1474device_initcall(init_graph_trace);
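/*
 * Typical usage from userspace (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	echo funcgraph-proc > /sys/kernel/debug/tracing/trace_options
 *	cat /sys/kernel/debug/tracing/trace
 */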