// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array *wakeup_trace;
static int __read_mostly tracer_enabled;

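/*
 * The task currently being traced, the CPU it was woken on and its
 * priority at wakeup time.  wakeup_rt/wakeup_dl select which tracer
 * flavour is active; tracing_dl is set while a -deadline task is being
 * followed.  The traced-task state is armed under wakeup_lock in
 * probe_wakeup() and torn down in __wakeup_reset().
 */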
static struct task_struct *wakeup_task;
static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
static bool wakeup_rt;
static bool wakeup_dl;
static bool tracing_dl;

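/*
 * Serializes the wakeup_* state above.  It is taken from scheduler
 * tracepoint probes with interrupts already disabled, hence the raw
 * arch_spinlock_t rather than a regular spinlock.
 */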
static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int start_func_tracer(struct trace_array *tr, int graph);
static void stop_func_tracer(struct trace_array *tr, int graph);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue; in that case preemption has been
 * disabled and data->disabled has been incremented.
 * Returns 0 if the trace is to be ignored; in that case preemption is
 * left untouched and data->disabled is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    unsigned int *trace_ctx)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

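/*
 * Called when the display-graph option is flipped: tear down the current
 * function (or function-graph) tracer, clear the trace collected so far,
 * and restart with the newly selected flavour.
 */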
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
			      struct fgraph_ops *gops)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	u64 *calltime;
	int ret = 0;

	if (ftrace_graph_ignore_func(gops, trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return 0;

	calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
	if (!calltime)
		return 0;

	*calltime = trace_clock_local();

	ret = __trace_graph_entry(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace,
				struct fgraph_ops *gops)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	u64 *calltime;
	int size;

	ftrace_graph_addr_finish(gops, trace);

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	calltime = fgraph_retrieve_data(gops->idx, &size);
	if (!calltime)
		return;
	trace->calltime = *calltime;

	__trace_graph_return(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
	return;
}

static struct fgraph_ops fgraph_wakeup_ops = {
	.entryfunc = &wakeup_graph_entry,
	.retfunc = &wakeup_graph_return,
};

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
	else
		iter->private = NULL;
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION | \
			    TRACE_GRAPH_PRINT_OVERHEAD | \
			    TRACE_GRAPH_PRINT_IRQS)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, trace_ctx);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

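/*
 * Hook up either the plain function tracer or the function-graph tracer,
 * but only when function tracing is (or is about to be) enabled for this
 * trace array.
 */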
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_wakeup_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_wakeup_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* else CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, trace_ctx);
	else
		trace_function(tr, ip, parent_ip, trace_ctx);
}

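/*
 * React to trace option changes: the FUNCTION and DISPLAY_GRAPH options
 * need the wakeup tracer's own handling; everything else falls through
 * to the default overwrite-preserving behaviour.
 */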
static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

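/*
 * sched_migrate_task probe: follow the traced task around so that the
 * function tracing callbacks keep filtering on the CPU it currently
 * lives on.
 */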
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

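/*
 * Write a TRACE_CTX (context switch) entry recording pid, priority and
 * state of both the task scheduled out and the task scheduled in.
 */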
static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned int trace_ctx)
{
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = task_state_index(prev);
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = task_state_index(next);
	entry->next_cpu = task_cpu(next);

	trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

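/*
 * Write a TRACE_WAKE entry recording which task woke the one we are
 * about to start tracing.
 */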
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned int trace_ctx)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = task_state_index(curr);
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = task_state_index(wakee);
	entry->next_cpu = task_cpu(wakee);

	trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

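/*
 * sched_switch probe: when the task we armed the tracer for is finally
 * scheduled in, compute the time from its wakeup to now and, if it beats
 * the current maximum (or exceeds the latency threshold), record it as
 * the new worst-case wakeup latency.
 */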
static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next,
			  unsigned int prev_state)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see tracer_enabled = 1 together
	 * with a stale wakeup_task from an earlier trace, which might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);

	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
	tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}

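/*
 * Clear the armed wakeup state and drop the reference on the traced task.
 * Callers hold wakeup_lock.
 */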
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = false;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->array_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

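/*
 * sched_wakeup/sched_wakeup_new probe: decide whether the task being
 * woken is interesting for the current tracer flavour and, if it is
 * higher priority than anything already being followed, arm the tracer
 * on it and record the wakeup timestamp.
 */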
static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	long disabled;
	unsigned int trace_ctx;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * The semantics are as follows:
	 *  - the wakeup tracer handles all tasks in the system, independently
	 *    of their scheduling class;
	 *  - the wakeup_rt tracer handles tasks belonging to the sched_dl and
	 *    sched_rt classes;
	 *  - wakeup_dl handles tasks belonging to the sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !rt_or_dl_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	trace_ctx = tracing_gen_ctx();

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = true;
	else
		tracing_dl = false;

	wakeup_task = get_task_struct(p);

	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}

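/*
 * Attach the scheduler tracepoint probes, reset the tracer state and then
 * start the function (or function-graph) tracer.  Probes registered so
 * far are removed again if a later registration fails.
 */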
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		goto fail_deprobe_sched_switch;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_sched_switch:
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = false;
	wakeup_rt = false;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = false;
	wakeup_rt = true;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = true;
	wakeup_rt = false;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

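/*
 * The three flavours below are selected from user space via tracefs
 * (usually mounted at /sys/kernel/tracing), e.g.:
 *
 *   echo wakeup_rt > current_tracer
 *   echo 1 > tracing_on
 *   cat tracing_max_latency
 *
 * tracing_max_latency then reports the worst wakeup latency observed so
 * far, and the trace that produced it is preserved in the max_tr
 * snapshot buffer (use_max_tr).
 */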
static struct tracer wakeup_tracer __read_mostly =
{
	.name = "wakeup",
	.init = wakeup_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name = "wakeup_rt",
	.init = wakeup_rt_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name = "wakeup_dl",
	.init = wakeup_dl_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);