/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array *wakeup_trace;
static int __read_mostly tracer_enabled;

static struct task_struct *wakeup_task;
static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
static int wakeup_rt;
static int wakeup_dl;
static int tracing_dl = 0;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int wakeup_display_graph(struct trace_array *tr, int set)
{
	return 0;
}
# define is_graph(tr) false
#endif


#ifdef CONFIG_FUNCTION_TRACER

static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, in which case preemption has
 * been disabled and data->disabled has been incremented.
 * Returns 0 if the trace is to be ignored; in that case preemption
 * is not disabled and data->disabled is left unchanged.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}
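
/*
 * Typical caller pattern (a sketch; wakeup_tracer_call() below is the
 * real thing):
 *
 *	if (!func_prolog_preempt_disable(tr, &data, &pc))
 *		return;
 *	... record the event ...
 *	atomic_dec(&data->disabled);
 *	preempt_enable_notrace();
 */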

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}
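
/*
 * Userspace example (a sketch; assumes tracefs is mounted at
 * /sys/kernel/debug/tracing): while one of the wakeup tracers is the
 * current_tracer, writing to "options/function-trace" (or, with the
 * graph tracer built in, "options/display-graph") lands in
 * wakeup_flag_changed() above and switches the function or function
 * graph tracer on the fly.
 */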

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
	return;
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
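
/*
 * Worked example: with tracing_thresh == 0 (the default), a delta is
 * recorded only if it beats the maximum so far in tr->max_latency;
 * with a nonzero tracing_thresh (set via the tracefs "tracing_thresh"
 * file), every delta at or above the threshold is recorded, regardless
 * of the maximum.
 */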

static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = next->state;
	entry->next_cpu = task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = curr->state;
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = wakee->state;
	entry->next_cpu = task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task that might
	 * actually be the same as next.
	 */
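	/* This read barrier pairs with the smp_wmb() in start_wakeup_tracer(). */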
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * The semantics are as follows:
	 * - the wakeup tracer handles all tasks in the system, independently
	 *   of their scheduling class;
	 * - the wakeup_rt tracer handles tasks belonging to the sched_dl and
	 *   sched_rt classes;
	 * - the wakeup_dl tracer handles tasks belonging to the sched_dl
	 *   class only.
	 */
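	/*
	 * For example: under wakeup_rt, a waking SCHED_OTHER task is
	 * rejected by the check below, since it is neither a dl_task()
	 * nor an rt_task(); under plain wakeup, it is traced only if
	 * its priority beats both the current candidate (wakeup_prio)
	 * and the currently running task.
	 */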
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
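	/* This write barrier pairs with the smp_rmb() in probe_wakeup_sched_switch(). */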
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non-overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name = "wakeup",
	.init = wakeup_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name = "wakeup_rt",
	.init = wakeup_rt_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name = "wakeup_dl",
	.init = wakeup_dl_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);
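
/*
 * Userspace usage sketch (assumes tracefs is mounted at
 * /sys/kernel/debug/tracing; these are the standard tracefs files):
 *
 *	# echo 0 > tracing_max_latency
 *	# echo wakeup_rt > current_tracer
 *	# echo 1 > tracing_on
 *	# ... run a real-time workload ...
 *	# echo 0 > tracing_on
 *	# cat tracing_max_latency
 *	# cat trace
 *
 * tracing_max_latency then holds the worst-case wakeup latency seen
 * (in microseconds), and "trace" shows the snapshot of the events
 * that led up to it.
 */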