/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40

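/*
 * Maximum call depth to trace; 0 means no limit. Writable through the
 * "max_graph_depth" debugfs file created below.
 */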
static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * DURATION column is also being used to display IRQ signs,
 * following values are used by print_graph_irq and others
 * to fill in space into DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack of
	 * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering out
	 * specific functions, it makes the index negative by subtracting a
	 * huge value (FTRACE_NOTRACE_DEPTH), so when ftrace sees a negative
	 * index it will ignore the record. The index gets recovered when
	 * returning from the filtered function by adding FTRACE_NOTRACE_DEPTH
	 * back, and then it'll continue to record functions normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in debugfs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover index to get an original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

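/*
 * Entry callback registered with the function graph infrastructure.
 * Decides whether the current call should be recorded and, if so,
 * writes a TRACE_GRAPH_ENT event into the ring buffer.
 */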
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it if it is nested in, or itself is, an enabled function. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

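/*
 * When tracing_thresh is set, skip writing entry events: only return
 * events whose duration exceeds the threshold end up in the buffer
 * (see trace_graph_thresh_return() below).
 */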
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

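/*
 * Emit a paired entry/return with identical timestamps (i.e. zero
 * duration) so that a single function event can be rendered as a
 * leaf call in the graph output.
 */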
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

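/* Return callback: the counterpart of trace_graph_entry() above. */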
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

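/* Width of the widest possible CPU number, computed at init time. */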
static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}


static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

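/*
 * Peek at the event following @curr. If it is the matching
 * TRACE_GRAPH_RET for the same pid and function, the call had no
 * children and can be printed on a single line as "func();".
 */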
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

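/*
 * Draw an "==========>" / "<==========" marker in the DURATION column
 * when execution enters or leaves the irq entry text section.
 */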
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 msecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 msecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catch here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

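/*
 * Print one TRACE_GRAPH_ENT event, either as a one-line leaf call or
 * as a nested "func() {" opening. If writing to the seq buffer fails,
 * remember the entry so it can be retried on the next invocation.
 */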
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

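/*
 * Print a TRACE_GRAPH_RET event as a closing brace, annotated with the
 * function name when the matching entry was lost from the buffer.
 */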
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name.
	 */
	if (func_match) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

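/*
 * Events other than graph entry/return (printks, other tracers) are
 * rendered as C comments, indented one level below the current depth.
 */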
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_putc(s, ' ');
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_puts(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_puts(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

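/*
 * Handlers for the "max_graph_depth" debugfs file: write a new depth
 * limit, or read back the current one.
 */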
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_debugfs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 *
4 * Function graph tracer.
5 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
6 * Mostly borrowed from function tracer which
7 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
8 *
9 */
10#include <linux/uaccess.h>
11#include <linux/ftrace.h>
12#include <linux/interrupt.h>
13#include <linux/slab.h>
14#include <linux/fs.h>
15
16#include "trace.h"
17#include "trace_output.h"
18
19/* When set, irq functions will be ignored */
20static int ftrace_graph_skip_irqs;
21
22struct fgraph_cpu_data {
23 pid_t last_pid;
24 int depth;
25 int depth_irq;
26 int ignore;
27 unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
28};
29
30struct fgraph_data {
31 struct fgraph_cpu_data __percpu *cpu_data;
32
33 /* Place to preserve last processed entry. */
34 struct ftrace_graph_ent_entry ent;
35 struct ftrace_graph_ret_entry ret;
36 int failed;
37 int cpu;
38};
39
40#define TRACE_GRAPH_INDENT 2
41
42unsigned int fgraph_max_depth;
43
44static struct tracer_opt trace_opts[] = {
45 /* Display overruns? (for self-debug purpose) */
46 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
47 /* Display CPU ? */
48 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
49 /* Display Overhead ? */
50 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
51 /* Display proc name/pid */
52 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
53 /* Display duration of execution */
54 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
55 /* Display absolute time of an entry */
56 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
57 /* Display interrupts */
58 { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
59 /* Display function name after trailing } */
60 { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
61 /* Include sleep time (scheduled out) between entry and return */
62 { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
63
64#ifdef CONFIG_FUNCTION_PROFILER
65 /* Include time within nested functions */
66 { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
67#endif
68
69 { } /* Empty entry */
70};
71
72static struct tracer_flags tracer_flags = {
73 /* Don't display overruns, proc, or tail by default */
74 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
75 TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
76 TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
77 .opts = trace_opts
78};
79
80static struct trace_array *graph_array;
81
82/*
83 * DURATION column is being also used to display IRQ signs,
84 * following values are used by print_graph_irq and others
85 * to fill in space into DURATION column.
86 */
87enum {
88 FLAGS_FILL_FULL = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
89 FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
90 FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
91};
92
93static void
94print_graph_duration(struct trace_array *tr, unsigned long long duration,
95 struct trace_seq *s, u32 flags);
96
97int __trace_graph_entry(struct trace_array *tr,
98 struct ftrace_graph_ent *trace,
99 unsigned int trace_ctx)
100{
101 struct trace_event_call *call = &event_funcgraph_entry;
102 struct ring_buffer_event *event;
103 struct trace_buffer *buffer = tr->array_buffer.buffer;
104 struct ftrace_graph_ent_entry *entry;
105
106 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
107 sizeof(*entry), trace_ctx);
108 if (!event)
109 return 0;
110 entry = ring_buffer_event_data(event);
111 entry->graph_ent = *trace;
112 if (!call_filter_check_discard(call, entry, buffer, event))
113 trace_buffer_unlock_commit_nostack(buffer, event);
114
115 return 1;
116}
117
118static inline int ftrace_graph_ignore_irqs(void)
119{
120 if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
121 return 0;
122
123 return in_irq();
124}
125
126int trace_graph_entry(struct ftrace_graph_ent *trace)
127{
128 struct trace_array *tr = graph_array;
129 struct trace_array_cpu *data;
130 unsigned long flags;
131 unsigned int trace_ctx;
132 long disabled;
133 int ret;
134 int cpu;
135
136 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
137 return 0;
138
139 /*
140 * Do not trace a function if it's filtered by set_graph_notrace.
141 * Make the index of ret stack negative to indicate that it should
142 * ignore further functions. But it needs its own ret stack entry
143 * to recover the original index in order to continue tracing after
144 * returning from the function.
145 */
146 if (ftrace_graph_notrace_addr(trace->func)) {
147 trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
148 /*
149 * Need to return 1 to have the return called
150 * that will clear the NOTRACE bit.
151 */
152 return 1;
153 }
154
155 if (!ftrace_trace_task(tr))
156 return 0;
157
158 if (ftrace_graph_ignore_func(trace))
159 return 0;
160
161 if (ftrace_graph_ignore_irqs())
162 return 0;
163
164 /*
165 * Stop here if tracing_threshold is set. We only write function return
166 * events to the ring buffer.
167 */
168 if (tracing_thresh)
169 return 1;
170
171 local_irq_save(flags);
172 cpu = raw_smp_processor_id();
173 data = per_cpu_ptr(tr->array_buffer.data, cpu);
174 disabled = atomic_inc_return(&data->disabled);
175 if (likely(disabled == 1)) {
176 trace_ctx = tracing_gen_ctx_flags(flags);
177 ret = __trace_graph_entry(tr, trace, trace_ctx);
178 } else {
179 ret = 0;
180 }
181
182 atomic_dec(&data->disabled);
183 local_irq_restore(flags);
184
185 return ret;
186}
187
188static void
189__trace_graph_function(struct trace_array *tr,
190 unsigned long ip, unsigned int trace_ctx)
191{
192 u64 time = trace_clock_local();
193 struct ftrace_graph_ent ent = {
194 .func = ip,
195 .depth = 0,
196 };
197 struct ftrace_graph_ret ret = {
198 .func = ip,
199 .depth = 0,
200 .calltime = time,
201 .rettime = time,
202 };
203
204 __trace_graph_entry(tr, &ent, trace_ctx);
205 __trace_graph_return(tr, &ret, trace_ctx);
206}
207
208void
209trace_graph_function(struct trace_array *tr,
210 unsigned long ip, unsigned long parent_ip,
211 unsigned int trace_ctx)
212{
213 __trace_graph_function(tr, ip, trace_ctx);
214}
215
216void __trace_graph_return(struct trace_array *tr,
217 struct ftrace_graph_ret *trace,
218 unsigned int trace_ctx)
219{
220 struct trace_event_call *call = &event_funcgraph_exit;
221 struct ring_buffer_event *event;
222 struct trace_buffer *buffer = tr->array_buffer.buffer;
223 struct ftrace_graph_ret_entry *entry;
224
225 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
226 sizeof(*entry), trace_ctx);
227 if (!event)
228 return;
229 entry = ring_buffer_event_data(event);
230 entry->ret = *trace;
231 if (!call_filter_check_discard(call, entry, buffer, event))
232 trace_buffer_unlock_commit_nostack(buffer, event);
233}
234
235void trace_graph_return(struct ftrace_graph_ret *trace)
236{
237 struct trace_array *tr = graph_array;
238 struct trace_array_cpu *data;
239 unsigned long flags;
240 unsigned int trace_ctx;
241 long disabled;
242 int cpu;
243
244 ftrace_graph_addr_finish(trace);
245
246 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
247 trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
248 return;
249 }
250
251 local_irq_save(flags);
252 cpu = raw_smp_processor_id();
253 data = per_cpu_ptr(tr->array_buffer.data, cpu);
254 disabled = atomic_inc_return(&data->disabled);
255 if (likely(disabled == 1)) {
256 trace_ctx = tracing_gen_ctx_flags(flags);
257 __trace_graph_return(tr, trace, trace_ctx);
258 }
259 atomic_dec(&data->disabled);
260 local_irq_restore(flags);
261}
262
263void set_graph_array(struct trace_array *tr)
264{
265 graph_array = tr;
266
267 /* Make graph_array visible before we start tracing */
268
269 smp_mb();
270}
271
272static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
273{
274 ftrace_graph_addr_finish(trace);
275
276 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
277 trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
278 return;
279 }
280
281 if (tracing_thresh &&
282 (trace->rettime - trace->calltime < tracing_thresh))
283 return;
284 else
285 trace_graph_return(trace);
286}
287
288static struct fgraph_ops funcgraph_thresh_ops = {
289 .entryfunc = &trace_graph_entry,
290 .retfunc = &trace_graph_thresh_return,
291};
292
293static struct fgraph_ops funcgraph_ops = {
294 .entryfunc = &trace_graph_entry,
295 .retfunc = &trace_graph_return,
296};
297
298static int graph_trace_init(struct trace_array *tr)
299{
300 int ret;
301
302 set_graph_array(tr);
303 if (tracing_thresh)
304 ret = register_ftrace_graph(&funcgraph_thresh_ops);
305 else
306 ret = register_ftrace_graph(&funcgraph_ops);
307 if (ret)
308 return ret;
309 tracing_start_cmdline_record();
310
311 return 0;
312}
313
314static void graph_trace_reset(struct trace_array *tr)
315{
316 tracing_stop_cmdline_record();
317 if (tracing_thresh)
318 unregister_ftrace_graph(&funcgraph_thresh_ops);
319 else
320 unregister_ftrace_graph(&funcgraph_ops);
321}
322
323static int graph_trace_update_thresh(struct trace_array *tr)
324{
325 graph_trace_reset(tr);
326 return graph_trace_init(tr);
327}
328
329static int max_bytes_for_cpu;
330
331static void print_graph_cpu(struct trace_seq *s, int cpu)
332{
333 /*
334 * Start with a space character - to make it stand out
335 * to the right a bit when trace output is pasted into
336 * email:
337 */
338 trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
339}
340
341#define TRACE_GRAPH_PROCINFO_LENGTH 14
342
343static void print_graph_proc(struct trace_seq *s, pid_t pid)
344{
345 char comm[TASK_COMM_LEN];
346 /* sign + log10(MAX_INT) + '\0' */
347 char pid_str[11];
348 int spaces = 0;
349 int len;
350 int i;
351
352 trace_find_cmdline(pid, comm);
353 comm[7] = '\0';
354 sprintf(pid_str, "%d", pid);
355
356 /* 1 stands for the "-" character */
357 len = strlen(comm) + strlen(pid_str) + 1;
358
359 if (len < TRACE_GRAPH_PROCINFO_LENGTH)
360 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
361
362 /* First spaces to align center */
363 for (i = 0; i < spaces / 2; i++)
364 trace_seq_putc(s, ' ');
365
366 trace_seq_printf(s, "%s-%s", comm, pid_str);
367
368 /* Last spaces to align center */
369 for (i = 0; i < spaces - (spaces / 2); i++)
370 trace_seq_putc(s, ' ');
371}
372
373
374static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
375{
376 trace_seq_putc(s, ' ');
377 trace_print_lat_fmt(s, entry);
378 trace_seq_puts(s, " | ");
379}
380
381/* If the pid changed since the last trace, output this event */
382static void
383verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
384{
385 pid_t prev_pid;
386 pid_t *last_pid;
387
388 if (!data)
389 return;
390
391 last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
392
393 if (*last_pid == pid)
394 return;
395
396 prev_pid = *last_pid;
397 *last_pid = pid;
398
399 if (prev_pid == -1)
400 return;
401/*
402 * Context-switch trace line:
403
404 ------------------------------------------
405 | 1) migration/0--1 => sshd-1755
406 ------------------------------------------
407
408 */
409 trace_seq_puts(s, " ------------------------------------------\n");
410 print_graph_cpu(s, cpu);
411 print_graph_proc(s, prev_pid);
412 trace_seq_puts(s, " => ");
413 print_graph_proc(s, pid);
414 trace_seq_puts(s, "\n ------------------------------------------\n\n");
415}
416
417static struct ftrace_graph_ret_entry *
418get_return_for_leaf(struct trace_iterator *iter,
419 struct ftrace_graph_ent_entry *curr)
420{
421 struct fgraph_data *data = iter->private;
422 struct ring_buffer_iter *ring_iter = NULL;
423 struct ring_buffer_event *event;
424 struct ftrace_graph_ret_entry *next;
425
426 /*
427 * If the previous output failed to write to the seq buffer,
428 * then we just reuse the data from before.
429 */
430 if (data && data->failed) {
431 curr = &data->ent;
432 next = &data->ret;
433 } else {
434
435 ring_iter = trace_buffer_iter(iter, iter->cpu);
436
437 /* First peek to compare current entry and the next one */
438 if (ring_iter)
439 event = ring_buffer_iter_peek(ring_iter, NULL);
440 else {
441 /*
442 * We need to consume the current entry to see
443 * the next one.
444 */
445 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
446 NULL, NULL);
447 event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
448 NULL, NULL);
449 }
450
451 if (!event)
452 return NULL;
453
454 next = ring_buffer_event_data(event);
455
456 if (data) {
457 /*
458 * Save current and next entries for later reference
459 * if the output fails.
460 */
461 data->ent = *curr;
462 /*
463 * If the next event is not a return type, then
464 * we only care about what type it is. Otherwise we can
465 * safely copy the entire event.
466 */
467 if (next->ent.type == TRACE_GRAPH_RET)
468 data->ret = *next;
469 else
470 data->ret.ent.type = next->ent.type;
471 }
472 }
473
474 if (next->ent.type != TRACE_GRAPH_RET)
475 return NULL;
476
477 if (curr->ent.pid != next->ent.pid ||
478 curr->graph_ent.func != next->ret.func)
479 return NULL;
480
481 /* this is a leaf, now advance the iterator */
482 if (ring_iter)
483 ring_buffer_iter_advance(ring_iter);
484
485 return next;
486}
487
static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
	unsigned long long usecs;

	usecs = iter->ts - iter->array_buffer->time_start;
	do_div(usecs, NSEC_PER_USEC);

	trace_seq_printf(s, "%9llu us |  ", usecs);
}

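/*
 * Mark entry into (and exit from) code in the irqentry text section, so
 * a hardirq that interrupts a traced function stands out. Roughly:
 *
 *   1)   ==========> |
 *   1)               |  smp_apic_timer_interrupt() {
 *   ...
 *   1)   <========== |
 *
 * (illustrative output; the handler name depends on the platform).
 */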
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Relative time */
		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
			print_graph_rel_time(iter, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

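/*
 * Format a nanosecond duration as microseconds with a fractional part,
 * padded to the fixed width of the DURATION column: 1234567 ns, for
 * example, comes out as "1234.567 us".
 */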
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

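/*
 * Print the DURATION column for one line. The FLAGS_FILL_* modifiers
 * request blank filler instead of a real value: FULL blanks the whole
 * column, while START/END blank only the part before/after an irq
 * marker printed by print_graph_irq().
 */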
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an execution-time overhead to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/*
 * Case of a leaf function on its call entry: the call and its return
 * are folded into a single "func();" line with the duration.
 */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

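/*
 * Case of a non-leaf call: open a bracket, e.g. "do_IRQ() {", and leave
 * the DURATION column blank, since the duration is only known once the
 * matching return entry has been processed.
 */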
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

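/*
 * Print everything that precedes the function column on a trace line:
 * the context-switch box when the pid changed, the irq entry/exit
 * markers, and whichever TIME/CPU/TASK-PID/latency columns @flags and
 * the trace options select.
 */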
static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Relative time */
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		print_graph_rel_time(iter, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
		  struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

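/*
 * Print the closing brace of a nested call together with its duration.
 * If the matching entry event was lost, the function name is appended
 * as a comment so the brace can still be attributed.
 */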
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %u)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

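/*
 * Print a stray event (trace_printk() and friends) that is embedded in
 * the graph as a C-style comment, indented one level deeper than the
 * function currently being traced.
 */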
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}


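/*
 * Main output callback: dispatch on the event type, after replaying a
 * saved entry if the previous line overflowed the seq buffer.
 */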
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "   REL TIME     ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

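/*
 * "max_graph_depth" limits how deep the tracer follows the call chain.
 * For example, assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *   echo 1 > /sys/kernel/tracing/max_graph_depth
 *
 * records only the outermost functions, and echoing 0 removes the limit.
 */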
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("max_graph_depth", 0644, NULL,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);