// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array *wakeup_trace;
static int __read_mostly tracer_enabled;

static struct task_struct *wakeup_task;
static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
static bool wakeup_rt;
static bool wakeup_dl;
static bool tracing_dl;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int start_func_tracer(struct trace_array *tr, int graph);
static void stop_func_tracer(struct trace_array *tr, int graph);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    unsigned int *trace_ctx)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

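/*
 * Switch between plain function output and function-graph output.
 * The current function tracer is stopped, the trace and the recorded
 * max latency are reset, and the tracer is restarted in the new mode.
 */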
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
			      struct fgraph_ops *gops)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	u64 *calltime;
	int ret = 0;

	if (ftrace_graph_ignore_func(gops, trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return 0;

	calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
	if (!calltime)
		return 0;

	*calltime = trace_clock_local();

	ret = __trace_graph_entry(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace,
				struct fgraph_ops *gops)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	u64 *calltime;
	int size;

	ftrace_graph_addr_finish(gops, trace);

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	calltime = fgraph_retrieve_data(gops->idx, &size);
	if (!calltime)
		return;
	trace->calltime = *calltime;

	__trace_graph_return(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
	return;
}

static struct fgraph_ops fgraph_wakeup_ops = {
	.entryfunc = &wakeup_graph_entry,
	.retfunc = &wakeup_graph_return,
};

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
	else
		iter->private = NULL;
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION | \
			    TRACE_GRAPH_PRINT_OVERHEAD | \
			    TRACE_GRAPH_PRINT_IRQS)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, trace_ctx);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_wakeup_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_wakeup_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* else CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, trace_ctx);
	else
		trace_function(tr, ip, parent_ip, trace_ctx);
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

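/*
 * sched_migrate_task probe: follow the traced task if it moves to
 * another CPU, so the function tracers keep recording on the CPU the
 * task actually runs on.
 */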
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

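/*
 * Write a TRACE_CTX (context switch) entry into the wakeup tracer's
 * ring buffer.
 */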
static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned int trace_ctx)
{
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = task_state_index(prev);
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = task_state_index(next);
	entry->next_cpu = task_cpu(next);

	trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

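/*
 * Write a TRACE_WAKE entry recording the waker (curr) and the task
 * being woken (wakee).
 */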
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned int trace_ctx)
{
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = task_state_index(curr);
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = task_state_index(wakee);
	entry->next_cpu = task_cpu(wakee);

	trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

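/*
 * sched_switch probe: when the task we are waiting for is finally
 * scheduled in, compute the wakeup latency and, if it is a new
 * maximum, snapshot the trace via update_max_tr().
 */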
static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next,
			  unsigned int prev_state)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);

	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
	tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}

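/*
 * Clear the wakeup tracking state. Callers must hold wakeup_lock
 * with interrupts disabled.
 */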
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = false;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->array_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

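/*
 * sched_wakeup/sched_wakeup_new probe: start timing a new wakeup if
 * the woken task is a better candidate than both the current wakeup
 * target and the task running now.
 */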
static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	long disabled;
	unsigned int trace_ctx;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Semantic is like this:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rt class;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !rt_or_dl_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	trace_ctx = tracing_gen_ctx();

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = true;
	else
		tracing_dl = false;

	wakeup_task = get_task_struct(p);

	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (where as schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}

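/*
 * Hook up the scheduler tracepoints and start the function tracer.
 * On failure, any probes that were already registered are torn down
 * via the fail_* labels.
 */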
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		goto fail_deprobe_sched_switch;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_sched_switch:
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

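/* Only one wakeup tracer instance may be active at a time. */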
static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = false;
	wakeup_rt = false;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = false;
	wakeup_rt = true;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = true;
	wakeup_rt = false;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name = "wakeup",
	.init = wakeup_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name = "wakeup_rt",
	.init = wakeup_rt_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name = "wakeup_dl",
	.init = wakeup_dl_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);