v4.17: kernel/trace/trace_sched_wakeup.c
// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static int			wakeup_dl;
static int			tracing_dl = 0;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
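/*
 * Note (not from the original source): a raw arch_spinlock_t is used here
 * rather than a normal spinlock, presumably so that taking the lock from
 * the tracing path cannot itself generate lock/tracing events and recurse
 * into the tracer.
 */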

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int wakeup_display_graph(struct trace_array *tr, int set)
{
	return 0;
}
# define is_graph(tr) false
#endif


#ifdef CONFIG_FUNCTION_TRACER

static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 *            is disabled and data->disabled is incremented.
 *         0 if the trace is to be ignored, and preemption
 *            is not disabled and data->disabled is
 *            kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
	return;
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
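/*
 * Example (not from the original source): with tracing_thresh unset (0),
 * only a delta that beats the current tr->max_latency is recorded, so the
 * trace always holds the single worst wakeup seen so far. With
 * tracing_thresh set to, say, 50000 ns (50 usecs), every wakeup latency of
 * at least 50 usecs is recorded, regardless of the previous maximum.
 */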

static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= task_state_index(prev);
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= task_state_index(next);
	entry->next_cpu	= task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= task_state_index(curr);
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= task_state_index(wakee);
	entry->next_cpu			= task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Semantic is like this:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rt class;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function  (where as schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);
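Everything above is driven from userspace through tracefs. As a rough illustration (a minimal sketch, not part of the kernel source; it assumes tracefs is mounted at /sys/kernel/debug/tracing and the process runs as root), selecting the wakeup_rt tracer and reading back the worst-case latency it recorded looks roughly like this:

/* Minimal sketch: enable the wakeup_rt tracer via tracefs and read the
 * worst-case wakeup latency it recorded. Paths and privileges are
 * assumptions, not part of the file above. */
#include <stdio.h>
#include <string.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char buf[64];
	FILE *f;

	/* reset the recorded maximum, then pick the tracer */
	write_str("/sys/kernel/debug/tracing/tracing_max_latency", "0");
	write_str("/sys/kernel/debug/tracing/current_tracer", "wakeup_rt");

	/* ... let RT workloads run for a while ... */

	f = fopen("/sys/kernel/debug/tracing/tracing_max_latency", "r");
	if (f && fgets(buf, sizeof(buf), f)) {
		buf[strcspn(buf, "\n")] = '\0';
		printf("max wakeup latency: %s usecs\n", buf);
	}
	if (f)
		fclose(f);

	/* switch tracing off again */
	write_str("/sys/kernel/debug/tracing/current_tracer", "nop");
	return 0;
}

The same steps work from a shell by echoing into current_tracer; tracing_max_latency reports microseconds, and writing 0 resets it.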
v5.14.15: kernel/trace/trace_sched_wakeup.c
// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static bool			wakeup_rt;
static bool			wakeup_dl;
static bool			tracing_dl;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int start_func_tracer(struct trace_array *tr, int graph);
static void stop_func_tracer(struct trace_array *tr, int graph);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 *            is disabled and data->disabled is incremented.
 *         0 if the trace is to be ignored, and preemption
 *            is not disabled and data->disabled is
 *            kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    unsigned int *trace_ctx)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int ret = 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return 0;

	ret = __trace_graph_entry(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	__trace_graph_return(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
	return;
}

static struct fgraph_ops fgraph_wakeup_ops = {
	.entryfunc = &wakeup_graph_entry,
	.retfunc = &wakeup_graph_return,
};

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_CPU |  \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION | \
			    TRACE_GRAPH_PRINT_OVERHEAD | \
			    TRACE_GRAPH_PRINT_IRQS)
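/*
 * Note (comparison drawn from the two listings in this document): relative
 * to the v4.17 flag set above, TRACE_GRAPH_PRINT_ABS_TIME has been replaced
 * by TRACE_GRAPH_PRINT_REL_TIME, and the CPU, overhead and IRQ flags have
 * been added to the graph output.
 */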

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, trace_ctx);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_wakeup_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_wakeup_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* else CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, trace_ctx);
	else
		trace_function(tr, ip, parent_ip, trace_ctx);
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_context_switch;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= task_state_index(prev);
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= task_state_index(next);
	entry->next_cpu	= task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= task_state_index(curr);
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= task_state_index(wakee);
	entry->next_cpu			= task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);

	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
	tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}

static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = false;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->array_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	long disabled;
	unsigned int trace_ctx;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Semantic is like this:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rt class;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	trace_ctx = tracing_gen_ctx();

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = true;
	else
		tracing_dl = false;

	wakeup_task = get_task_struct(p);

	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function  (where as schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		goto fail_deprobe_sched_switch;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_sched_switch:
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = false;
	wakeup_rt = false;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = false;
	wakeup_rt = true;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = true;
	wakeup_rt = false;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);
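Placed side by side, the main interface change between the two versions is how the tracing context is captured: the v4.17 code threads a preempt count and an irq-flags word through every helper, while v5.14.15 folds both into a single trace_ctx word generated up front. Other visible changes in the listings include trace_buffer becoming array_buffer, a struct fgraph_ops replacing the entry/return function pair passed to register_ftrace_graph(), bool replacing int for the wakeup_rt/wakeup_dl/tracing_dl flags, the added __trace_stack() calls, and the fail_deprobe_sched_switch unwind label in start_wakeup_tracer(). Schematically (a sketch distilled from the two listings; the example_call_site_* helpers are hypothetical, not kernel functions):

/* v4.17: context is carried as two values */
static void example_call_site_v4_17(struct trace_array *tr,
				    unsigned long ip, unsigned long parent_ip)
{
	unsigned long flags;
	int pc = preempt_count();

	local_save_flags(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
}

/* v5.14.15: one packed context word */
static void example_call_site_v5_14(struct trace_array *tr,
				    unsigned long ip, unsigned long parent_ip)
{
	unsigned int trace_ctx = tracing_gen_ctx();

	trace_function(tr, ip, parent_ip, trace_ctx);
}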