// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purpose) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        /* Display function name after trailing } */
        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
        /* Include sleep time (scheduled out) between entry and return */
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },

#ifdef CONFIG_FUNCTION_PROFILER
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
#endif

        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or tail by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
        .opts = trace_opts
};

static struct trace_array *graph_array;
/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq and others to fill in space in the
 * DURATION column.
 */
enum {
        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);

int __trace_graph_entry(struct trace_array *tr,
                        struct ftrace_graph_ent *trace,
                        unsigned int trace_ctx)
{
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct trace_buffer *buffer = tr->array_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), trace_ctx);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit_nostack(buffer, event);

        return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_hardirq();
}

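/*
 * Entry callback of the function graph tracer: decides whether the
 * function being entered should be recorded, and if so writes a
 * TRACE_GRAPH_ENT event into the ring buffer.
 */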
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        unsigned int trace_ctx;
        long disabled;
        int ret;
        int cpu;

        if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
                return 0;

        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions. But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func)) {
                trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
                /*
                 * Need to return 1 to have the return called
                 * that will clear the NOTRACE bit.
                 */
                return 1;
        }

        if (!ftrace_trace_task(tr))
                return 0;

        if (ftrace_graph_ignore_func(trace))
                return 0;

        if (ftrace_graph_ignore_irqs())
                return 0;

        /*
         * Stop here if tracing_thresh is set. We only write function return
         * events to the ring buffer.
         */
        if (tracing_thresh)
                return 1;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                trace_ctx = tracing_gen_ctx_flags(flags);
                ret = __trace_graph_entry(tr, trace, trace_ctx);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

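/*
 * Emit a zero-duration entry/return pair for @ip, so a plain function
 * event can be rendered in the graph output.
 */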
static void
__trace_graph_function(struct trace_array *tr,
                       unsigned long ip, unsigned int trace_ctx)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, trace_ctx);
        __trace_graph_return(tr, &ret, trace_ctx);
}

void
trace_graph_function(struct trace_array *tr,
                     unsigned long ip, unsigned long parent_ip,
                     unsigned int trace_ctx)
{
        __trace_graph_function(tr, ip, trace_ctx);
}

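/*
 * Write a TRACE_GRAPH_RET event into the ring buffer; the counterpart
 * of __trace_graph_entry() for function exits.
 */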
void __trace_graph_return(struct trace_array *tr,
                          struct ftrace_graph_ret *trace,
                          unsigned int trace_ctx)
{
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct trace_buffer *buffer = tr->array_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), trace_ctx);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        unsigned int trace_ctx;
        long disabled;
        int cpu;

        ftrace_graph_addr_finish(trace);

        if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
                trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
                return;
        }

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->array_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                trace_ctx = tracing_gen_ctx_flags(flags);
                __trace_graph_return(tr, trace, trace_ctx);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */

        smp_mb();
}

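/*
 * Return callback used when tracing_thresh is set: only record the
 * return event if the function ran for at least tracing_thresh.
 */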
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        ftrace_graph_addr_finish(trace);

        if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
                trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
                return;
        }

        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}

static struct fgraph_ops funcgraph_thresh_ops = {
        .entryfunc = &trace_graph_entry,
        .retfunc = &trace_graph_thresh_return,
};

static struct fgraph_ops funcgraph_ops = {
        .entryfunc = &trace_graph_entry,
        .retfunc = &trace_graph_return,
};

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&funcgraph_thresh_ops);
        else
                ret = register_ftrace_graph(&funcgraph_ops);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        if (tracing_thresh)
                unregister_ftrace_graph(&funcgraph_thresh_ops);
        else
                unregister_ftrace_graph(&funcgraph_ops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
        graph_trace_reset(tr);
        return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%s-%s", comm, pid_str);

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++)
                trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        trace_seq_putc(s, ' ');
        trace_print_lat_fmt(s, entry);
        trace_seq_puts(s, " | ");
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;

        if (!data)
                return;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1) migration/0--1 => sshd-1755
 ------------------------------------------

 */
        trace_seq_puts(s, " ------------------------------------------\n");
        print_graph_cpu(s, cpu);
        print_graph_proc(s, prev_pid);
        trace_seq_puts(s, " => ");
        print_graph_proc(s, pid);
        trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

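/*
 * Peek at the event that follows @curr. If it is the matching
 * TRACE_GRAPH_RET for the same pid and function, @curr is a leaf call
 * and the return entry is handed back so both can be printed as a
 * single "func();" line; otherwise NULL is returned.
 */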
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                    struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, we only
                         * need to record its type so the retry path can
                         * tell it was not a return. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_iter_advance(ring_iter);

        return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        trace_seq_printf(s, "%5lu.%06lu | ",
                         (unsigned long)t, usecs_rem);
}

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
        unsigned long long usecs;

        usecs = iter->ts - iter->array_buffer->time_start;
        do_div(usecs, NSEC_PER_USEC);

        trace_seq_printf(s, "%9llu us | ", usecs);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return;

        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                        print_graph_abs_time(iter->ts, s);

                /* Relative time */
                if (flags & TRACE_GRAPH_PRINT_REL_TIME)
                        print_graph_rel_time(iter, s);

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU)
                        print_graph_cpu(s, cpu);

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        print_graph_proc(s, pid);
                        trace_seq_puts(s, " | ");
                }

                /* Latency format */
                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                        print_graph_lat_fmt(s, ent);
        }

        /* No overhead */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

        if (type == TRACE_GRAPH_ENT)
                trace_seq_puts(s, "==========>");
        else
                trace_seq_puts(s, "<==========");

        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
        trace_seq_putc(s, '\n');
}

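/*
 * Format @duration (in nanoseconds) as a fixed-width "usecs.nsecs us"
 * column, padded so rows line up.
 */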
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);
        /* Print usecs */
        trace_seq_printf(s, "%s", usecs_str);

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                trace_seq_printf(s, ".%s", nsecs_str);
                len += strlen(nsecs_str) + 1;
        }

        trace_seq_puts(s, " us ");

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 8; i++)
                trace_seq_putc(s, ' ');
}

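/*
 * Print the DURATION column: either the overhead mark plus the
 * formatted duration, or spacer text when one of the FLAGS_FILL_*
 * modes is requested.
 */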
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags)
{
        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* No real data, just filling the column with spaces */
        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
        case FLAGS_FILL_FULL:
                trace_seq_puts(s, " | ");
                return;
        case FLAGS_FILL_START:
                trace_seq_puts(s, " ");
                return;
        case FLAGS_FILL_END:
                trace_seq_puts(s, " |");
                return;
        }

        /* Signal an execution time overhead to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
                trace_seq_printf(s, "%c ", trace_find_mark(duration));
        else
                trace_seq_puts(s, " ");

        trace_print_graph_duration(duration, s);
        trace_seq_puts(s, "| ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                       struct ftrace_graph_ent_entry *entry,
                       struct ftrace_graph_ret_entry *ret_entry,
                       struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int cpu = iter->cpu;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps();\n", (void *)call->func);

        print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
                        cpu, iter->ent->pid, flags);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(call->depth < 0))
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps() {\n", (void *)call->func);

        if (trace_seq_has_overflowed(s))
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * we already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

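/*
 * Print everything that precedes the function name on a graph line:
 * pid-change and irq markers, then the time, CPU, proc and latency
 * columns selected by @flags.
 */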
static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        struct trace_array *tr = iter->tr;
        int cpu = iter->cpu;

        /* Pid */
        verif_pid(s, ent->pid, cpu, data);

        if (type)
                /* Interrupt */
                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                print_graph_abs_time(iter->ts, s);

        /* Relative time */
        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
                print_graph_rel_time(iter, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
                print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                print_graph_proc(s, ent->pid);
                trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                print_graph_lat_fmt(s, ent);

        return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

843 * We are inside the irq code, and this is returning entry.
844 * Let's not trace it and clear the entry depth, since
845 * we are out of irq code.
846 *
847 * This condition ensures that we 'leave the irq code' once
848 * we are out of the entry depth. Thus protecting us from
849 * the RETURN entry loss.
850 */
851 if (*depth_irq >= depth) {
852 *depth_irq = -1;
853 return 1;
854 }
855
856 /*
857 * We are inside the irq code, and this is not the entry.
858 */
859 return 1;
860}
861
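/*
 * Print a function entry line: either a collapsed leaf ("func();")
 * when the matching return immediately follows, or an open brace line
 * ("func() {") for a nested call.
 */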
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                  struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        static enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it, because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

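/*
 * Print a function return line: the closing brace at the right depth
 * plus the duration and, when the matching entry was lost or the
 * funcgraph-tail option is set, the function name after the brace.
 */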
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH &&
                    !WARN_ON_ONCE(trace->depth < 0)) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        print_graph_prologue(iter, s, 0, 0, flags);

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name. Always do
         * that if the funcgraph-tail option is enabled.
         */
        if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
                trace_seq_puts(s, "}\n");
        else
                trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
                trace_seq_printf(s, " (Overruns: %u)\n",
                                 trace->overrun);

        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                        cpu, pid, flags);

        return trace_handle_return(s);
}

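/*
 * Print a non-graph event (trace_printk output or another registered
 * event) as a C-style comment, indented to match the current call depth.
 */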
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        struct trace_array *tr = iter->tr;
        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        print_graph_prologue(iter, s, 0, 0, flags);

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
                        trace_seq_putc(s, ' ');

        /* The comment */
        trace_seq_puts(s, "/* ");

        switch (iter->ent->type) {
        case TRACE_BPUTS:
                ret = trace_print_bputs_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        if (trace_seq_has_overflowed(s))
                goto out;

        /* Strip ending newline */
        if (s->buffer[s->seq.len - 1] == '\n') {
                s->buffer[s->seq.len - 1] = '\0';
                s->seq.len--;
        }

        trace_seq_puts(s, " */\n");
 out:
        return trace_handle_return(s);
}


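/*
 * Top-level print routine for the graph tracer: dispatch the current
 * event to the entry, return or comment printer, replaying a saved
 * entry first if the previous output attempt overflowed the seq buffer.
 */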
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * it can be safely saved on the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}

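/*
 * Print the latency-format legend lines, shifted right to clear the
 * optional time, CPU and proc columns.
 */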
static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                " /* 16 spaces */
                "    " /* 4 spaces */
                "                 "; /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s _-----=> irqs-off \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
        seq_printf(s, "#%.*s||| / \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
                                        struct seq_file *s, u32 flags)
{
        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, " TIME ");
        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
                seq_puts(s, " REL TIME ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, " TASK/PID ");
        if (lat)
                seq_puts(s, "|||| ");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, " DURATION ");
        seq_puts(s, " FUNCTION CALLS\n");

        /* 2nd line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, " | ");
        if (flags & TRACE_GRAPH_PRINT_REL_TIME)
                seq_puts(s, " | ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " | ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, " | | ");
        if (lat)
                seq_puts(s, "|||| ");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, " | | ");
        seq_puts(s, " | | | |\n");
}

static void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        gfp_t gfpflags;
        int cpu;

        iter->private = NULL;

        /* We can be called in atomic context via ftrace_dump() */
        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

        data = kzalloc(sizeof(*data), gfpflags);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        if (bit == TRACE_GRAPH_SLEEP_TIME)
                ftrace_graph_sleep_time_control(set);

        if (bit == TRACE_GRAPH_GRAPH_TIME)
                ftrace_graph_graph_time_control(set);

        return 0;
}

static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type           = TRACE_GRAPH_ENT,
        .funcs          = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type           = TRACE_GRAPH_RET,
        .funcs          = &graph_functions
};

static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
        .update_thresh  = graph_trace_update_thresh,
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};


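/*
 * Read/write handlers for the "max_graph_depth" tracefs file, which
 * caps how deep the graph tracer records nested calls; the limit is
 * consumed via ftrace_graph_ignore_func().
 */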
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        fgraph_max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
        int n;

        n = sprintf(buf, "%d\n", fgraph_max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
        .open           = tracing_open_generic,
        .write          = graph_depth_write,
        .read           = graph_depth_read,
        .llseek         = generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
        int ret;

        ret = tracing_init_dentry();
        if (ret)
                return 0;

        trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

        if (!register_trace_event(&graph_trace_entry_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_trace_event(&graph_trace_ret_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 *
4 * Function graph tracer.
5 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
6 * Mostly borrowed from function tracer which
7 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
8 *
9 */
10#include <linux/uaccess.h>
11#include <linux/ftrace.h>
12#include <linux/interrupt.h>
13#include <linux/slab.h>
14#include <linux/fs.h>
15
16#include "trace.h"
17#include "trace_output.h"
18
19/* When set, irq functions will be ignored */
20static int ftrace_graph_skip_irqs;
21
22struct fgraph_cpu_data {
23 pid_t last_pid;
24 int depth;
25 int depth_irq;
26 int ignore;
27 unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];
28};
29
30struct fgraph_data {
31 struct fgraph_cpu_data __percpu *cpu_data;
32
33 /* Place to preserve last processed entry. */
34 struct ftrace_graph_ent_entry ent;
35 struct ftrace_graph_ret_entry ret;
36 int failed;
37 int cpu;
38};
39
40#define TRACE_GRAPH_INDENT 2
41
42unsigned int fgraph_max_depth;
43
44static struct tracer_opt trace_opts[] = {
45 /* Display overruns? (for self-debug purpose) */
46 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
47 /* Display CPU ? */
48 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
49 /* Display Overhead ? */
50 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
51 /* Display proc name/pid */
52 { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
53 /* Display duration of execution */
54 { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
55 /* Display absolute time of an entry */
56 { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
57 /* Display interrupts */
58 { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
59 /* Display function name after trailing } */
60 { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
61 /* Include sleep time (scheduled out) between entry and return */
62 { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
63
64#ifdef CONFIG_FUNCTION_PROFILER
65 /* Include time within nested functions */
66 { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
67#endif
68
69 { } /* Empty entry */
70};
71
72static struct tracer_flags tracer_flags = {
73 /* Don't display overruns, proc, or tail by default */
74 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
75 TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
76 TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
77 .opts = trace_opts
78};
79
80static struct trace_array *graph_array;
81
82/*
83 * DURATION column is being also used to display IRQ signs,
84 * following values are used by print_graph_irq and others
85 * to fill in space into DURATION column.
86 */
87enum {
88 FLAGS_FILL_FULL = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
89 FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
90 FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
91};
92
93static void
94print_graph_duration(struct trace_array *tr, unsigned long long duration,
95 struct trace_seq *s, u32 flags);
96
97int __trace_graph_entry(struct trace_array *tr,
98 struct ftrace_graph_ent *trace,
99 unsigned long flags,
100 int pc)
101{
102 struct trace_event_call *call = &event_funcgraph_entry;
103 struct ring_buffer_event *event;
104 struct ring_buffer *buffer = tr->trace_buffer.buffer;
105 struct ftrace_graph_ent_entry *entry;
106
107 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
108 sizeof(*entry), flags, pc);
109 if (!event)
110 return 0;
111 entry = ring_buffer_event_data(event);
112 entry->graph_ent = *trace;
113 if (!call_filter_check_discard(call, entry, buffer, event))
114 trace_buffer_unlock_commit_nostack(buffer, event);
115
116 return 1;
117}
118
119static inline int ftrace_graph_ignore_irqs(void)
120{
121 if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
122 return 0;
123
124 return in_irq();
125}
126
127int trace_graph_entry(struct ftrace_graph_ent *trace)
128{
129 struct trace_array *tr = graph_array;
130 struct trace_array_cpu *data;
131 unsigned long flags;
132 long disabled;
133 int ret;
134 int cpu;
135 int pc;
136
137 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
138 return 0;
139
140 /*
141 * Do not trace a function if it's filtered by set_graph_notrace.
142 * Make the index of ret stack negative to indicate that it should
143 * ignore further functions. But it needs its own ret stack entry
144 * to recover the original index in order to continue tracing after
145 * returning from the function.
146 */
147 if (ftrace_graph_notrace_addr(trace->func)) {
148 trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
149 /*
150 * Need to return 1 to have the return called
151 * that will clear the NOTRACE bit.
152 */
153 return 1;
154 }
155
156 if (!ftrace_trace_task(tr))
157 return 0;
158
159 if (ftrace_graph_ignore_func(trace))
160 return 0;
161
162 if (ftrace_graph_ignore_irqs())
163 return 0;
164
165 /*
166 * Stop here if tracing_threshold is set. We only write function return
167 * events to the ring buffer.
168 */
169 if (tracing_thresh)
170 return 1;
171
172 local_irq_save(flags);
173 cpu = raw_smp_processor_id();
174 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
175 disabled = atomic_inc_return(&data->disabled);
176 if (likely(disabled == 1)) {
177 pc = preempt_count();
178 ret = __trace_graph_entry(tr, trace, flags, pc);
179 } else {
180 ret = 0;
181 }
182
183 atomic_dec(&data->disabled);
184 local_irq_restore(flags);
185
186 return ret;
187}
188
189static void
190__trace_graph_function(struct trace_array *tr,
191 unsigned long ip, unsigned long flags, int pc)
192{
193 u64 time = trace_clock_local();
194 struct ftrace_graph_ent ent = {
195 .func = ip,
196 .depth = 0,
197 };
198 struct ftrace_graph_ret ret = {
199 .func = ip,
200 .depth = 0,
201 .calltime = time,
202 .rettime = time,
203 };
204
205 __trace_graph_entry(tr, &ent, flags, pc);
206 __trace_graph_return(tr, &ret, flags, pc);
207}
208
209void
210trace_graph_function(struct trace_array *tr,
211 unsigned long ip, unsigned long parent_ip,
212 unsigned long flags, int pc)
213{
214 __trace_graph_function(tr, ip, flags, pc);
215}
216
217void __trace_graph_return(struct trace_array *tr,
218 struct ftrace_graph_ret *trace,
219 unsigned long flags,
220 int pc)
221{
222 struct trace_event_call *call = &event_funcgraph_exit;
223 struct ring_buffer_event *event;
224 struct ring_buffer *buffer = tr->trace_buffer.buffer;
225 struct ftrace_graph_ret_entry *entry;
226
227 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
228 sizeof(*entry), flags, pc);
229 if (!event)
230 return;
231 entry = ring_buffer_event_data(event);
232 entry->ret = *trace;
233 if (!call_filter_check_discard(call, entry, buffer, event))
234 trace_buffer_unlock_commit_nostack(buffer, event);
235}
236
237void trace_graph_return(struct ftrace_graph_ret *trace)
238{
239 struct trace_array *tr = graph_array;
240 struct trace_array_cpu *data;
241 unsigned long flags;
242 long disabled;
243 int cpu;
244 int pc;
245
246 ftrace_graph_addr_finish(trace);
247
248 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
249 trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
250 return;
251 }
252
253 local_irq_save(flags);
254 cpu = raw_smp_processor_id();
255 data = per_cpu_ptr(tr->trace_buffer.data, cpu);
256 disabled = atomic_inc_return(&data->disabled);
257 if (likely(disabled == 1)) {
258 pc = preempt_count();
259 __trace_graph_return(tr, trace, flags, pc);
260 }
261 atomic_dec(&data->disabled);
262 local_irq_restore(flags);
263}
264
265void set_graph_array(struct trace_array *tr)
266{
267 graph_array = tr;
268
269 /* Make graph_array visible before we start tracing */
270
271 smp_mb();
272}
273
274static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
275{
276 ftrace_graph_addr_finish(trace);
277
278 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
279 trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
280 return;
281 }
282
283 if (tracing_thresh &&
284 (trace->rettime - trace->calltime < tracing_thresh))
285 return;
286 else
287 trace_graph_return(trace);
288}
289
290static struct fgraph_ops funcgraph_thresh_ops = {
291 .entryfunc = &trace_graph_entry,
292 .retfunc = &trace_graph_thresh_return,
293};
294
295static struct fgraph_ops funcgraph_ops = {
296 .entryfunc = &trace_graph_entry,
297 .retfunc = &trace_graph_return,
298};
299
300static int graph_trace_init(struct trace_array *tr)
301{
302 int ret;
303
304 set_graph_array(tr);
305 if (tracing_thresh)
306 ret = register_ftrace_graph(&funcgraph_thresh_ops);
307 else
308 ret = register_ftrace_graph(&funcgraph_ops);
309 if (ret)
310 return ret;
311 tracing_start_cmdline_record();
312
313 return 0;
314}
315
316static void graph_trace_reset(struct trace_array *tr)
317{
318 tracing_stop_cmdline_record();
319 if (tracing_thresh)
320 unregister_ftrace_graph(&funcgraph_thresh_ops);
321 else
322 unregister_ftrace_graph(&funcgraph_ops);
323}
324
325static int graph_trace_update_thresh(struct trace_array *tr)
326{
327 graph_trace_reset(tr);
328 return graph_trace_init(tr);
329}
330
331static int max_bytes_for_cpu;
332
333static void print_graph_cpu(struct trace_seq *s, int cpu)
334{
335 /*
336 * Start with a space character - to make it stand out
337 * to the right a bit when trace output is pasted into
338 * email:
339 */
340 trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
341}
342
343#define TRACE_GRAPH_PROCINFO_LENGTH 14
344
345static void print_graph_proc(struct trace_seq *s, pid_t pid)
346{
347 char comm[TASK_COMM_LEN];
348 /* sign + log10(MAX_INT) + '\0' */
349 char pid_str[11];
350 int spaces = 0;
351 int len;
352 int i;
353
354 trace_find_cmdline(pid, comm);
355 comm[7] = '\0';
356 sprintf(pid_str, "%d", pid);
357
358 /* 1 stands for the "-" character */
359 len = strlen(comm) + strlen(pid_str) + 1;
360
361 if (len < TRACE_GRAPH_PROCINFO_LENGTH)
362 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
363
364 /* First spaces to align center */
365 for (i = 0; i < spaces / 2; i++)
366 trace_seq_putc(s, ' ');
367
368 trace_seq_printf(s, "%s-%s", comm, pid_str);
369
370 /* Last spaces to align center */
371 for (i = 0; i < spaces - (spaces / 2); i++)
372 trace_seq_putc(s, ' ');
373}
374
375
376static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
377{
378 trace_seq_putc(s, ' ');
379 trace_print_lat_fmt(s, entry);
380 trace_seq_puts(s, " | ");
381}
382
383/* If the pid changed since the last trace, output this event */
384static void
385verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
386{
387 pid_t prev_pid;
388 pid_t *last_pid;
389
390 if (!data)
391 return;
392
393 last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
394
395 if (*last_pid == pid)
396 return;
397
398 prev_pid = *last_pid;
399 *last_pid = pid;
400
401 if (prev_pid == -1)
402 return;
403/*
404 * Context-switch trace line:
405
406 ------------------------------------------
407 | 1) migration/0--1 => sshd-1755
408 ------------------------------------------
409
410 */
411 trace_seq_puts(s, " ------------------------------------------\n");
412 print_graph_cpu(s, cpu);
413 print_graph_proc(s, prev_pid);
414 trace_seq_puts(s, " => ");
415 print_graph_proc(s, pid);
416 trace_seq_puts(s, "\n ------------------------------------------\n\n");
417}
418
419static struct ftrace_graph_ret_entry *
420get_return_for_leaf(struct trace_iterator *iter,
421 struct ftrace_graph_ent_entry *curr)
422{
423 struct fgraph_data *data = iter->private;
424 struct ring_buffer_iter *ring_iter = NULL;
425 struct ring_buffer_event *event;
426 struct ftrace_graph_ret_entry *next;
427
428 /*
429 * If the previous output failed to write to the seq buffer,
430 * then we just reuse the data from before.
431 */
432 if (data && data->failed) {
433 curr = &data->ent;
434 next = &data->ret;
435 } else {
436
437 ring_iter = trace_buffer_iter(iter, iter->cpu);
438
439 /* First peek to compare current entry and the next one */
440 if (ring_iter)
441 event = ring_buffer_iter_peek(ring_iter, NULL);
442 else {
443 /*
444 * We need to consume the current entry to see
445 * the next one.
446 */
447 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
448 NULL, NULL);
449 event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
450 NULL, NULL);
451 }
452
453 if (!event)
454 return NULL;
455
456 next = ring_buffer_event_data(event);
457
458 if (data) {
459 /*
460 * Save current and next entries for later reference
461 * if the output fails.
462 */
463 data->ent = *curr;
464 /*
465 * If the next event is not a return type, then
466 * we only care about what type it is. Otherwise we can
467 * safely copy the entire event.
468 */
469 if (next->ent.type == TRACE_GRAPH_RET)
470 data->ret = *next;
471 else
472 data->ret.ent.type = next->ent.type;
473 }
474 }
475
476 if (next->ent.type != TRACE_GRAPH_RET)
477 return NULL;
478
479 if (curr->ent.pid != next->ent.pid ||
480 curr->graph_ent.func != next->ret.func)
481 return NULL;
482
483 /* this is a leaf, now advance the iterator */
484 if (ring_iter)
485 ring_buffer_read(ring_iter, NULL);
486
487 return next;
488}
489
490static void print_graph_abs_time(u64 t, struct trace_seq *s)
491{
492 unsigned long usecs_rem;
493
494 usecs_rem = do_div(t, NSEC_PER_SEC);
495 usecs_rem /= 1000;
496
497 trace_seq_printf(s, "%5lu.%06lu | ",
498 (unsigned long)t, usecs_rem);
499}
500
501static void
502print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
503{
504 unsigned long long usecs;
505
506 usecs = iter->ts - iter->trace_buffer->time_start;
507 do_div(usecs, NSEC_PER_USEC);
508
509 trace_seq_printf(s, "%9llu us | ", usecs);
510}
511
512static void
513print_graph_irq(struct trace_iterator *iter, unsigned long addr,
514 enum trace_type type, int cpu, pid_t pid, u32 flags)
515{
516 struct trace_array *tr = iter->tr;
517 struct trace_seq *s = &iter->seq;
518 struct trace_entry *ent = iter->ent;
519
520 if (addr < (unsigned long)__irqentry_text_start ||
521 addr >= (unsigned long)__irqentry_text_end)
522 return;
523
524 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
525 /* Absolute time */
526 if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
527 print_graph_abs_time(iter->ts, s);
528
529 /* Relative time */
530 if (flags & TRACE_GRAPH_PRINT_REL_TIME)
531 print_graph_rel_time(iter, s);
532
533 /* Cpu */
534 if (flags & TRACE_GRAPH_PRINT_CPU)
535 print_graph_cpu(s, cpu);
536
537 /* Proc */
538 if (flags & TRACE_GRAPH_PRINT_PROC) {
539 print_graph_proc(s, pid);
540 trace_seq_puts(s, " | ");
541 }
542
543 /* Latency format */
544 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
545 print_graph_lat_fmt(s, ent);
546 }
547
548 /* No overhead */
549 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
550
551 if (type == TRACE_GRAPH_ENT)
552 trace_seq_puts(s, "==========>");
553 else
554 trace_seq_puts(s, "<==========");
555
556 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
557 trace_seq_putc(s, '\n');
558}
559
560void
561trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
562{
563 unsigned long nsecs_rem = do_div(duration, 1000);
564 /* log10(ULONG_MAX) + '\0' */
565 char usecs_str[21];
566 char nsecs_str[5];
567 int len;
568 int i;
569
570 sprintf(usecs_str, "%lu", (unsigned long) duration);
571
572 /* Print msecs */
573 trace_seq_printf(s, "%s", usecs_str);
574
575 len = strlen(usecs_str);
576
577 /* Print nsecs (we don't want to exceed 7 numbers) */
578 if (len < 7) {
579 size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
580
581 snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
582 trace_seq_printf(s, ".%s", nsecs_str);
583 len += strlen(nsecs_str) + 1;
584 }
585
586 trace_seq_puts(s, " us ");
587
588 /* Print remaining spaces to fit the row's width */
589 for (i = len; i < 8; i++)
590 trace_seq_putc(s, ' ');
591}
592
593static void
594print_graph_duration(struct trace_array *tr, unsigned long long duration,
595 struct trace_seq *s, u32 flags)
596{
597 if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
598 !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
599 return;
600
601 /* No real adata, just filling the column with spaces */
602 switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
603 case FLAGS_FILL_FULL:
604 trace_seq_puts(s, " | ");
605 return;
606 case FLAGS_FILL_START:
607 trace_seq_puts(s, " ");
608 return;
609 case FLAGS_FILL_END:
610 trace_seq_puts(s, " |");
611 return;
612 }
613
614 /* Signal a overhead of time execution to the output */
615 if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
616 trace_seq_printf(s, "%c ", trace_find_mark(duration));
617 else
618 trace_seq_puts(s, " ");
619
620 trace_print_graph_duration(duration, s);
621 trace_seq_puts(s, "| ");
622}
623
624/* Case of a leaf function on its call entry */
625static enum print_line_t
626print_graph_entry_leaf(struct trace_iterator *iter,
627 struct ftrace_graph_ent_entry *entry,
628 struct ftrace_graph_ret_entry *ret_entry,
629 struct trace_seq *s, u32 flags)
630{
631 struct fgraph_data *data = iter->private;
632 struct trace_array *tr = iter->tr;
633 struct ftrace_graph_ret *graph_ret;
634 struct ftrace_graph_ent *call;
635 unsigned long long duration;
636 int cpu = iter->cpu;
637 int i;
638
639 graph_ret = &ret_entry->ret;
640 call = &entry->graph_ent;
641 duration = graph_ret->rettime - graph_ret->calltime;
642
643 if (data) {
644 struct fgraph_cpu_data *cpu_data;
645
646 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
647
648 /*
649 * Comments display at + 1 to depth. Since
650 * this is a leaf function, keep the comments
651 * equal to this depth.
652 */
653 cpu_data->depth = call->depth - 1;
654
655 /* No need to keep this function around for this depth */
656 if (call->depth < FTRACE_RETFUNC_DEPTH &&
657 !WARN_ON_ONCE(call->depth < 0))
658 cpu_data->enter_funcs[call->depth] = 0;
659 }
660
661 /* Overhead and duration */
662 print_graph_duration(tr, duration, s, flags);
663
664 /* Function */
665 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
666 trace_seq_putc(s, ' ');
667
668 trace_seq_printf(s, "%ps();\n", (void *)call->func);
669
670 print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
671 cpu, iter->ent->pid, flags);
672
673 return trace_handle_return(s);
674}
675
676static enum print_line_t
677print_graph_entry_nested(struct trace_iterator *iter,
678 struct ftrace_graph_ent_entry *entry,
679 struct trace_seq *s, int cpu, u32 flags)
680{
681 struct ftrace_graph_ent *call = &entry->graph_ent;
682 struct fgraph_data *data = iter->private;
683 struct trace_array *tr = iter->tr;
684 int i;
685
686 if (data) {
687 struct fgraph_cpu_data *cpu_data;
688 int cpu = iter->cpu;
689
690 cpu_data = per_cpu_ptr(data->cpu_data, cpu);
691 cpu_data->depth = call->depth;
692
693 /* Save this function pointer to see if the exit matches */
694 if (call->depth < FTRACE_RETFUNC_DEPTH &&
695 !WARN_ON_ONCE(call->depth < 0))
696 cpu_data->enter_funcs[call->depth] = call->func;
697 }
698
699 /* No time */
700 print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
701
702 /* Function */
703 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
704 trace_seq_putc(s, ' ');
705
706 trace_seq_printf(s, "%ps() {\n", (void *)call->func);
707
708 if (trace_seq_has_overflowed(s))
709 return TRACE_TYPE_PARTIAL_LINE;
710
711 /*
712 * we already consumed the current entry to check the next one
713 * and see if this is a leaf.
714 */
715 return TRACE_TYPE_NO_CONSUME;
716}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Relative time */
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		print_graph_rel_time(iter, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}
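
/*
 * Illustrative: with funcgraph-abstime, funcgraph-cpu and funcgraph-proc
 * all enabled, the prologue produces the leading columns of each output
 * line, roughly (values made up):
 *
 *   137.345678 |   1)  bash-2347   |
 */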

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
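
/*
 * Illustrative: with funcgraph-irqs disabled, everything between the
 * first function whose address falls inside the
 * [__irqentry_text_start, __irqentry_text_end) section and the matching
 * return at the recorded depth is suppressed, so hard-irq handlers do
 * not interleave with the traced call graph.
 */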

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
		  struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * a note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, so we now want the comments
		 * to display at the same level as the closing bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}
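
/*
 * Illustrative: with funcgraph-tail enabled (or when the entry event
 * was lost), the closing brace is annotated with the name of the
 * returning function rather than printed as a bare "}".
 */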

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}
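
/*
 * Illustrative: a trace_printk() emitted while the graph tracer is
 * active shows up as a comment line at the current depth, delimited
 * the same way C comments are, with no duration in the DURATION
 * column.
 */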

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and function entries as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "   REL TIME     ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   TASK/PID      ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "    |    |       ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}
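
/*
 * Illustrative: with the default flags (CPU, overhead, duration) the
 * header printed above looks roughly like
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */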

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
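
/*
 * Illustrative usage from user space (assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   echo function_graph > /sys/kernel/tracing/current_tracer
 *   echo funcgraph-proc > /sys/kernel/tracing/trace_options
 *   cat /sys/kernel/tracing/trace
 */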

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%u\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
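
/*
 * Illustrative usage: limit how deep the printed graph goes, or lift
 * the limit again (0 means no limit):
 *
 *   echo 3 > /sys/kernel/tracing/max_graph_depth
 *   echo 0 > /sys/kernel/tracing/max_graph_depth
 */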

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);