// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
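/*
 * Usage sketch (assuming tracefs is mounted at /sys/kernel/tracing;
 * older setups use /sys/kernel/debug/tracing):
 *
 *   echo wakeup_rt > /sys/kernel/tracing/current_tracer
 *   cat /sys/kernel/tracing/tracing_max_latency
 *
 * See Documentation/trace/ftrace.rst for the full interface.
 */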
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static int			wakeup_dl;
static int			tracing_dl = 0;

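/*
 * wakeup_lock serializes updates to the wakeup_* tracking state above;
 * the wakeup and sched_switch probes can fire on any CPU concurrently.
 */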
static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int wakeup_display_graph(struct trace_array *tr, int set)
{
	return 0;
}
# define is_graph(tr) false
#endif


#ifdef CONFIG_FUNCTION_TRACER

static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, in which case preemption has
 * been disabled and data->disabled has been incremented.
 * Returns 0 if the trace is to be ignored; in that case preemption
 * is left enabled and data->disabled is unchanged.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

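/*
 * Called back from set_tracer_flag() when a trace option is toggled
 * while one of the wakeup tracers is active.
 */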
static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

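/*
 * Write a context switch event (TRACE_CTX) straight into the ring
 * buffer; the wakeup tracer records these events itself rather than
 * going through the generic tracepoint event path.
 */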
static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = task_state_index(prev);
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = task_state_index(next);
	entry->next_cpu = task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

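/* The wakeup event (TRACE_WAKE) counterpart of the function above. */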
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = task_state_index(curr);
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = task_state_index(wakee);
	entry->next_cpu = task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

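/*
 * The sched_switch tracepoint probe: when the task we are waiting for
 * is switched in, compute the wakeup latency and, if it is a new
 * maximum, record it via update_max_tr().
 */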
static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

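/* Reset the tracking state; wakeup_lock must be held by the caller. */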
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

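/*
 * The sched_wakeup/sched_wakeup_new tracepoint probe: decide whether
 * this wakeup starts a new latency measurement and, if so, record the
 * start timestamp.
 */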
static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * The semantics are as follows:
	 *  - the wakeup tracer handles all tasks in the system,
	 *    independently of their scheduling class;
	 *  - the wakeup_rt tracer handles tasks belonging to the sched_dl
	 *    and sched_rt classes;
	 *  - wakeup_dl handles tasks belonging to the sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

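/* Hook up the scheduler tracepoints and start the function tracer. */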
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_migrate_task\n");
		goto fail_deprobe_sched_switch;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_sched_switch:
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

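/* Only one instance of the wakeup tracers may be active at a time. */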
static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);