v3.5.6
  1/*
  2 * trace task wakeup timings
  3 *
  4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  6 *
  7 * Based on code from the latency_tracer, that is:
  8 *
  9 *  Copyright (C) 2004-2006 Ingo Molnar
 10 *  Copyright (C) 2004 William Lee Irwin III
 11 */
 12#include <linux/module.h>
 13#include <linux/fs.h>
 14#include <linux/debugfs.h>
 15#include <linux/kallsyms.h>
 16#include <linux/uaccess.h>
 17#include <linux/ftrace.h>
 18#include <trace/events/sched.h>
 19
 20#include "trace.h"
 21
 22static struct trace_array	*wakeup_trace;
 23static int __read_mostly	tracer_enabled;
 24
 25static struct task_struct	*wakeup_task;
 26static int			wakeup_cpu;
 27static int			wakeup_current_cpu;
 28static unsigned			wakeup_prio = -1;
 29static int			wakeup_rt;
 30
 31static arch_spinlock_t wakeup_lock =
 32	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 33
 34static void wakeup_reset(struct trace_array *tr);
 35static void __wakeup_reset(struct trace_array *tr);
 36static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
 37static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 38
 39static int save_lat_flag;
 40
 41#define TRACE_DISPLAY_GRAPH     1
 42
 43static struct tracer_opt trace_opts[] = {
 44#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 45	/* display latency trace as call graph */
 46	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
 47#endif
 48	{ } /* Empty entry */
 49};
 50
 51static struct tracer_flags tracer_flags = {
 52	.val  = 0,
 53	.opts = trace_opts,
 54};
 55
 56#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
 57
 58#ifdef CONFIG_FUNCTION_TRACER
 59
 60/*
 61 * Prologue for the wakeup function tracers.
 62 *
 63 * Returns 1 if it is OK to continue, and preemption
 64 *            is disabled and data->disabled is incremented.
 65 *         0 if the trace is to be ignored, and preemption
 66 *            is not disabled and data->disabled is
 67 *            kept the same.
 68 *
 69 * Note, this function is also used outside this ifdef but
 70 *  inside the #ifdef of the function graph tracer below.
 71 *  This is OK, since the function graph tracer is
 72 *  dependent on the function tracer.
 73 */
 74static int
 75func_prolog_preempt_disable(struct trace_array *tr,
 76			    struct trace_array_cpu **data,
 77			    int *pc)
 78{
 79	long disabled;
 80	int cpu;
 81
 82	if (likely(!wakeup_task))
 83		return 0;
 84
 85	*pc = preempt_count();
 86	preempt_disable_notrace();
 87
 88	cpu = raw_smp_processor_id();
 89	if (cpu != wakeup_current_cpu)
 90		goto out_enable;
 91
 92	*data = tr->data[cpu];
 93	disabled = atomic_inc_return(&(*data)->disabled);
 94	if (unlikely(disabled != 1))
 95		goto out;
 96
 97	return 1;
 98
 99out:
100	atomic_dec(&(*data)->disabled);
101
102out_enable:
103	preempt_enable_notrace();
104	return 0;
105}
106
107/*
108 * wakeup uses its own tracer function to keep the overhead down:
109 */
110static void
111wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
112{
113	struct trace_array *tr = wakeup_trace;
114	struct trace_array_cpu *data;
115	unsigned long flags;
116	int pc;
117
118	if (!func_prolog_preempt_disable(tr, &data, &pc))
119		return;
120
121	local_irq_save(flags);
122	trace_function(tr, ip, parent_ip, flags, pc);
123	local_irq_restore(flags);
124
125	atomic_dec(&data->disabled);
126	preempt_enable_notrace();
127}
128
129static struct ftrace_ops trace_ops __read_mostly =
130{
131	.func = wakeup_tracer_call,
132	.flags = FTRACE_OPS_FL_GLOBAL,
133};
134#endif /* CONFIG_FUNCTION_TRACER */
135
136static int start_func_tracer(int graph)
137{
138	int ret;
139
140	if (!graph)
141		ret = register_ftrace_function(&trace_ops);
142	else
143		ret = register_ftrace_graph(&wakeup_graph_return,
144					    &wakeup_graph_entry);
145
146	if (!ret && tracing_is_enabled())
147		tracer_enabled = 1;
148	else
149		tracer_enabled = 0;
150
151	return ret;
152}
153
154static void stop_func_tracer(int graph)
155{
156	tracer_enabled = 0;
157
158	if (!graph)
159		unregister_ftrace_function(&trace_ops);
160	else
161		unregister_ftrace_graph();
162}
163
164#ifdef CONFIG_FUNCTION_GRAPH_TRACER
165static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
166{
167
168	if (!(bit & TRACE_DISPLAY_GRAPH))
169		return -EINVAL;
170
171	if (!(is_graph() ^ set))
172		return 0;
173
174	stop_func_tracer(!set);
175
176	wakeup_reset(wakeup_trace);
177	tracing_max_latency = 0;
178
179	return start_func_tracer(set);
180}
181
182static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
183{
184	struct trace_array *tr = wakeup_trace;
185	struct trace_array_cpu *data;
186	unsigned long flags;
187	int pc, ret = 0;
188
189	if (!func_prolog_preempt_disable(tr, &data, &pc))
190		return 0;
191
192	local_save_flags(flags);
193	ret = __trace_graph_entry(tr, trace, flags, pc);
194	atomic_dec(&data->disabled);
195	preempt_enable_notrace();
196
197	return ret;
198}
199
200static void wakeup_graph_return(struct ftrace_graph_ret *trace)
201{
202	struct trace_array *tr = wakeup_trace;
203	struct trace_array_cpu *data;
204	unsigned long flags;
205	int pc;
206
207	if (!func_prolog_preempt_disable(tr, &data, &pc))
208		return;
209
210	local_save_flags(flags);
211	__trace_graph_return(tr, trace, flags, pc);
212	atomic_dec(&data->disabled);
213
214	preempt_enable_notrace();
215	return;
216}
217
218static void wakeup_trace_open(struct trace_iterator *iter)
219{
220	if (is_graph())
221		graph_trace_open(iter);
222}
223
224static void wakeup_trace_close(struct trace_iterator *iter)
225{
226	if (iter->private)
227		graph_trace_close(iter);
228}
229
230#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
231			    TRACE_GRAPH_PRINT_ABS_TIME | \
232			    TRACE_GRAPH_PRINT_DURATION)
233
234static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
235{
236	/*
237	 * In graph mode call the graph tracer output function,
238	 * otherwise go with the TRACE_FN event handler
239	 */
240	if (is_graph())
241		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
242
243	return TRACE_TYPE_UNHANDLED;
244}
245
246static void wakeup_print_header(struct seq_file *s)
247{
248	if (is_graph())
249		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
250	else
251		trace_default_header(s);
252}
253
254static void
255__trace_function(struct trace_array *tr,
256		 unsigned long ip, unsigned long parent_ip,
257		 unsigned long flags, int pc)
258{
259	if (is_graph())
260		trace_graph_function(tr, ip, parent_ip, flags, pc);
261	else
262		trace_function(tr, ip, parent_ip, flags, pc);
263}
264#else
265#define __trace_function trace_function
266
267static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
268{
269	return -EINVAL;
270}
271
272static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
273{
274	return -1;
275}
276
277static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
278{
279	return TRACE_TYPE_UNHANDLED;
280}
281
282static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
283static void wakeup_trace_open(struct trace_iterator *iter) { }
284static void wakeup_trace_close(struct trace_iterator *iter) { }
285
286#ifdef CONFIG_FUNCTION_TRACER
287static void wakeup_print_header(struct seq_file *s)
288{
289	trace_default_header(s);
290}
291#else
292static void wakeup_print_header(struct seq_file *s)
293{
294	trace_latency_header(s);
295}
296#endif /* CONFIG_FUNCTION_TRACER */
297#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
298
299/*
300 * Should this new latency be reported/recorded?
301 */
302static int report_latency(cycle_t delta)
303{
304	if (tracing_thresh) {
305		if (delta < tracing_thresh)
306			return 0;
307	} else {
308		if (delta <= tracing_max_latency)
309			return 0;
310	}
311	return 1;
312}
313
314static void
315probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
316{
317	if (task != wakeup_task)
318		return;
319
320	wakeup_current_cpu = cpu;
321}
322
323static void notrace
324probe_wakeup_sched_switch(void *ignore,
325			  struct task_struct *prev, struct task_struct *next)
326{
327	struct trace_array_cpu *data;
328	cycle_t T0, T1, delta;
329	unsigned long flags;
330	long disabled;
331	int cpu;
332	int pc;
333
334	tracing_record_cmdline(prev);
335
336	if (unlikely(!tracer_enabled))
337		return;
338
339	/*
340	 * When we start a new trace, we set wakeup_task to NULL
341	 * and then set tracer_enabled = 1. We want to make sure
342	 * that another CPU does not see the tracer_enabled = 1
343	 * and the wakeup_task with an older task, that might
344	 * actually be the same as next.
345	 */
346	smp_rmb();
347
348	if (next != wakeup_task)
349		return;
350
351	pc = preempt_count();
352
353	/* disable local data, not wakeup_cpu data */
354	cpu = raw_smp_processor_id();
355	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
356	if (likely(disabled != 1))
357		goto out;
358
359	local_irq_save(flags);
360	arch_spin_lock(&wakeup_lock);
361
362	/* We could race with grabbing wakeup_lock */
363	if (unlikely(!tracer_enabled || next != wakeup_task))
364		goto out_unlock;
365
366	/* The task we are waiting for is waking up */
367	data = wakeup_trace->data[wakeup_cpu];
368
369	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
370	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
371
372	T0 = data->preempt_timestamp;
373	T1 = ftrace_now(cpu);
374	delta = T1-T0;
375
376	if (!report_latency(delta))
377		goto out_unlock;
378
379	if (likely(!is_tracing_stopped())) {
380		tracing_max_latency = delta;
381		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
382	}
383
384out_unlock:
385	__wakeup_reset(wakeup_trace);
386	arch_spin_unlock(&wakeup_lock);
387	local_irq_restore(flags);
388out:
389	atomic_dec(&wakeup_trace->data[cpu]->disabled);
390}
391
392static void __wakeup_reset(struct trace_array *tr)
393{
394	wakeup_cpu = -1;
395	wakeup_prio = -1;
396
397	if (wakeup_task)
398		put_task_struct(wakeup_task);
399
400	wakeup_task = NULL;
401}
402
403static void wakeup_reset(struct trace_array *tr)
404{
405	unsigned long flags;
406
407	tracing_reset_online_cpus(tr);
408
409	local_irq_save(flags);
410	arch_spin_lock(&wakeup_lock);
411	__wakeup_reset(tr);
412	arch_spin_unlock(&wakeup_lock);
413	local_irq_restore(flags);
414}
415
416static void
417probe_wakeup(void *ignore, struct task_struct *p, int success)
418{
419	struct trace_array_cpu *data;
420	int cpu = smp_processor_id();
421	unsigned long flags;
422	long disabled;
423	int pc;
424
425	if (likely(!tracer_enabled))
426		return;
427
428	tracing_record_cmdline(p);
429	tracing_record_cmdline(current);
430
431	if ((wakeup_rt && !rt_task(p)) ||
432			p->prio >= wakeup_prio ||
433			p->prio >= current->prio)
434		return;
435
436	pc = preempt_count();
437	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
438	if (unlikely(disabled != 1))
439		goto out;
440
441	/* interrupts should be off from try_to_wake_up */
442	arch_spin_lock(&wakeup_lock);
443
444	/* check for races. */
445	if (!tracer_enabled || p->prio >= wakeup_prio)
446		goto out_locked;
447
448	/* reset the trace */
449	__wakeup_reset(wakeup_trace);
450
451	wakeup_cpu = task_cpu(p);
452	wakeup_current_cpu = wakeup_cpu;
453	wakeup_prio = p->prio;
454
455	wakeup_task = p;
456	get_task_struct(wakeup_task);
457
458	local_save_flags(flags);
459
460	data = wakeup_trace->data[wakeup_cpu];
461	data->preempt_timestamp = ftrace_now(cpu);
462	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
463
464	/*
465	 * We must be careful in using CALLER_ADDR2. But since wake_up
466 * is not called by an assembly function (whereas schedule is)
467	 * it should be safe to use it here.
468	 */
469	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
470
471out_locked:
472	arch_spin_unlock(&wakeup_lock);
473out:
474	atomic_dec(&wakeup_trace->data[cpu]->disabled);
475}
476
477static void start_wakeup_tracer(struct trace_array *tr)
478{
479	int ret;
480
481	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
482	if (ret) {
483		pr_info("wakeup trace: Couldn't activate tracepoint"
484			" probe to kernel_sched_wakeup\n");
485		return;
486	}
487
488	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
489	if (ret) {
490		pr_info("wakeup trace: Couldn't activate tracepoint"
491			" probe to kernel_sched_wakeup_new\n");
492		goto fail_deprobe;
493	}
494
495	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
496	if (ret) {
497		pr_info("sched trace: Couldn't activate tracepoint"
498			" probe to kernel_sched_switch\n");
499		goto fail_deprobe_wake_new;
500	}
501
502	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
503	if (ret) {
504		pr_info("wakeup trace: Couldn't activate tracepoint"
505			" probe to kernel_sched_migrate_task\n");
506		return;
507	}
508
509	wakeup_reset(tr);
510
511	/*
512	 * Don't let the tracer_enabled = 1 show up before
513	 * the wakeup_task is reset. This may be overkill since
514	 * wakeup_reset does a spin_unlock after setting the
515	 * wakeup_task to NULL, but I want to be safe.
516	 * This is a slow path anyway.
517	 */
518	smp_wmb();
519
520	if (start_func_tracer(is_graph()))
521		printk(KERN_ERR "failed to start wakeup tracer\n");
522
523	return;
524fail_deprobe_wake_new:
525	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
526fail_deprobe:
527	unregister_trace_sched_wakeup(probe_wakeup, NULL);
528}
529
530static void stop_wakeup_tracer(struct trace_array *tr)
531{
532	tracer_enabled = 0;
533	stop_func_tracer(is_graph());
534	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
535	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
536	unregister_trace_sched_wakeup(probe_wakeup, NULL);
537	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
538}
539
540static int __wakeup_tracer_init(struct trace_array *tr)
541{
542	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
543	trace_flags |= TRACE_ITER_LATENCY_FMT;
544
545	tracing_max_latency = 0;
546	wakeup_trace = tr;
547	start_wakeup_tracer(tr);
548	return 0;
549}
550
551static int wakeup_tracer_init(struct trace_array *tr)
552{
553	wakeup_rt = 0;
554	return __wakeup_tracer_init(tr);
555}
556
557static int wakeup_rt_tracer_init(struct trace_array *tr)
558{
559	wakeup_rt = 1;
560	return __wakeup_tracer_init(tr);
561}
562
563static void wakeup_tracer_reset(struct trace_array *tr)
564{
565	stop_wakeup_tracer(tr);
566	/* make sure we put back any tasks we are tracing */
567	wakeup_reset(tr);
568
569	if (!save_lat_flag)
570		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
571}
572
573static void wakeup_tracer_start(struct trace_array *tr)
574{
575	wakeup_reset(tr);
576	tracer_enabled = 1;
577}
578
579static void wakeup_tracer_stop(struct trace_array *tr)
580{
581	tracer_enabled = 0;
582}
583
584static struct tracer wakeup_tracer __read_mostly =
585{
586	.name		= "wakeup",
587	.init		= wakeup_tracer_init,
588	.reset		= wakeup_tracer_reset,
589	.start		= wakeup_tracer_start,
590	.stop		= wakeup_tracer_stop,
591	.print_max	= 1,
592	.print_header	= wakeup_print_header,
593	.print_line	= wakeup_print_line,
594	.flags		= &tracer_flags,
595	.set_flag	= wakeup_set_flag,
596#ifdef CONFIG_FTRACE_SELFTEST
597	.selftest    = trace_selftest_startup_wakeup,
598#endif
599	.open		= wakeup_trace_open,
600	.close		= wakeup_trace_close,
601	.use_max_tr	= 1,
602};
603
604static struct tracer wakeup_rt_tracer __read_mostly =
605{
606	.name		= "wakeup_rt",
607	.init		= wakeup_rt_tracer_init,
608	.reset		= wakeup_tracer_reset,
609	.start		= wakeup_tracer_start,
610	.stop		= wakeup_tracer_stop,
611	.wait_pipe	= poll_wait_pipe,
612	.print_max	= 1,
613	.print_header	= wakeup_print_header,
614	.print_line	= wakeup_print_line,
615	.flags		= &tracer_flags,
616	.set_flag	= wakeup_set_flag,
617#ifdef CONFIG_FTRACE_SELFTEST
618	.selftest    = trace_selftest_startup_wakeup,
619#endif
620	.open		= wakeup_trace_open,
621	.close		= wakeup_trace_close,
622	.use_max_tr	= 1,
623};
624
625__init static int init_wakeup_tracer(void)
626{
627	int ret;
628
629	ret = register_tracer(&wakeup_tracer);
630	if (ret)
631		return ret;
632
633	ret = register_tracer(&wakeup_rt_tracer);
634	if (ret)
635		return ret;
636
637	return 0;
638}
639device_initcall(init_wakeup_tracer);
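
Editorial usage note (not part of the kernel source): both versions of this file implement tracers that are driven entirely through tracefs control files rather than a C API. Below is a minimal userspace sketch for exercising the v3.5-era tracer, assuming debugfs is mounted at /sys/kernel/debug (so the tracing directory is /sys/kernel/debug/tracing), CONFIG_SCHED_TRACER=y, and an arbitrary ten-second measurement window. Writing "wakeup_rt" to current_tracer ends up in wakeup_rt_tracer_init() above, and tracing_max_latency is the watermark that probe_wakeup_sched_switch() updates, reported in microseconds.

/*
 * Minimal sketch: arm the wakeup_rt tracer, wait a while, then read
 * back the worst-case wakeup latency recorded so far.
 */
#include <stdio.h>
#include <unistd.h>

static const char *tdir = "/sys/kernel/debug/tracing";	/* assumed mount point */

static int write_str(const char *file, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", tdir, file);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	char path[256], buf[64];
	FILE *f;

	write_str("tracing_max_latency", "0");		/* reset the watermark */
	if (write_str("current_tracer", "wakeup_rt"))	/* -> wakeup_rt_tracer_init() */
		return 1;

	sleep(10);					/* let some RT wakeups happen */

	snprintf(path, sizeof(path), "%s/tracing_max_latency", tdir);
	f = fopen(path, "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("worst wakeup latency (us): %s", buf);
		fclose(f);
	}

	write_str("current_tracer", "nop");		/* disarm the tracer */
	return 0;
}

The per-wakeup trace itself, in either function or function-graph form depending on the display-graph option handled by wakeup_set_flag() above, can then be read from the trace file in the same directory.
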
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * trace task wakeup timings
  4 *
  5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  7 *
  8 * Based on code from the latency_tracer, that is:
  9 *
 10 *  Copyright (C) 2004-2006 Ingo Molnar
 11 *  Copyright (C) 2004 Nadia Yvette Chambers
 12 */
 13#include <linux/module.h>
 14#include <linux/kallsyms.h>
 15#include <linux/uaccess.h>
 16#include <linux/ftrace.h>
 17#include <linux/sched/rt.h>
 18#include <linux/sched/deadline.h>
 19#include <trace/events/sched.h>
 20#include "trace.h"
 21
 22static struct trace_array	*wakeup_trace;
 23static int __read_mostly	tracer_enabled;
 24
 25static struct task_struct	*wakeup_task;
 26static int			wakeup_cpu;
 27static int			wakeup_current_cpu;
 28static unsigned			wakeup_prio = -1;
 29static int			wakeup_rt;
 30static int			wakeup_dl;
 31static int			tracing_dl = 0;
 32
 33static arch_spinlock_t wakeup_lock =
 34	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 35
 36static void wakeup_reset(struct trace_array *tr);
 37static void __wakeup_reset(struct trace_array *tr);
 38static int start_func_tracer(struct trace_array *tr, int graph);
 39static void stop_func_tracer(struct trace_array *tr, int graph);
 40
 41static int save_flags;
 42
 43#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 44# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
 45#else
 46# define is_graph(tr) false
 47#endif
 48
 49#ifdef CONFIG_FUNCTION_TRACER
 50
 51static bool function_enabled;
 52
 53/*
 54 * Prologue for the wakeup function tracers.
 55 *
 56 * Returns 1 if it is OK to continue, and preemption
 57 *            is disabled and data->disabled is incremented.
 58 *         0 if the trace is to be ignored, and preemption
 59 *            is not disabled and data->disabled is
 60 *            kept the same.
 61 *
 62 * Note, this function is also used outside this ifdef but
 63 *  inside the #ifdef of the function graph tracer below.
 64 *  This is OK, since the function graph tracer is
 65 *  dependent on the function tracer.
 66 */
 67static int
 68func_prolog_preempt_disable(struct trace_array *tr,
 69			    struct trace_array_cpu **data,
 70			    int *pc)
 71{
 72	long disabled;
 73	int cpu;
 74
 75	if (likely(!wakeup_task))
 76		return 0;
 77
 78	*pc = preempt_count();
 79	preempt_disable_notrace();
 80
 81	cpu = raw_smp_processor_id();
 82	if (cpu != wakeup_current_cpu)
 83		goto out_enable;
 84
 85	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
 86	disabled = atomic_inc_return(&(*data)->disabled);
 87	if (unlikely(disabled != 1))
 88		goto out;
 89
 90	return 1;
 91
 92out:
 93	atomic_dec(&(*data)->disabled);
 94
 95out_enable:
 96	preempt_enable_notrace();
 97	return 0;
 98}
 99
100#ifdef CONFIG_FUNCTION_GRAPH_TRACER
101
102static int wakeup_display_graph(struct trace_array *tr, int set)
103{
104	if (!(is_graph(tr) ^ set))
105		return 0;
106
107	stop_func_tracer(tr, !set);
108
109	wakeup_reset(wakeup_trace);
110	tr->max_latency = 0;
111
112	return start_func_tracer(tr, set);
113}
114
115static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
116{
117	struct trace_array *tr = wakeup_trace;
118	struct trace_array_cpu *data;
119	unsigned long flags;
120	int pc, ret = 0;
121
122	if (ftrace_graph_ignore_func(trace))
123		return 0;
124	/*
125	 * Do not trace a function if it's filtered by set_graph_notrace.
126	 * Make the index of ret stack negative to indicate that it should
127	 * ignore further functions.  But it needs its own ret stack entry
128	 * to recover the original index in order to continue tracing after
129	 * returning from the function.
130	 */
131	if (ftrace_graph_notrace_addr(trace->func))
132		return 1;
133
134	if (!func_prolog_preempt_disable(tr, &data, &pc))
135		return 0;
136
137	local_save_flags(flags);
138	ret = __trace_graph_entry(tr, trace, flags, pc);
139	atomic_dec(&data->disabled);
140	preempt_enable_notrace();
141
142	return ret;
143}
144
145static void wakeup_graph_return(struct ftrace_graph_ret *trace)
146{
147	struct trace_array *tr = wakeup_trace;
148	struct trace_array_cpu *data;
149	unsigned long flags;
150	int pc;
151
152	ftrace_graph_addr_finish(trace);
153
154	if (!func_prolog_preempt_disable(tr, &data, &pc))
155		return;
156
157	local_save_flags(flags);
158	__trace_graph_return(tr, trace, flags, pc);
159	atomic_dec(&data->disabled);
160
161	preempt_enable_notrace();
162	return;
163}
164
165static struct fgraph_ops fgraph_wakeup_ops = {
166	.entryfunc = &wakeup_graph_entry,
167	.retfunc = &wakeup_graph_return,
168};
169
170static void wakeup_trace_open(struct trace_iterator *iter)
171{
172	if (is_graph(iter->tr))
173		graph_trace_open(iter);
174}
175
176static void wakeup_trace_close(struct trace_iterator *iter)
177{
178	if (iter->private)
179		graph_trace_close(iter);
180}
181
182#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
183			    TRACE_GRAPH_PRINT_CPU |  \
184			    TRACE_GRAPH_PRINT_REL_TIME | \
185			    TRACE_GRAPH_PRINT_DURATION | \
186			    TRACE_GRAPH_PRINT_OVERHEAD | \
187			    TRACE_GRAPH_PRINT_IRQS)
188
189static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
190{
191	/*
192	 * In graph mode call the graph tracer output function,
193	 * otherwise go with the TRACE_FN event handler
194	 */
195	if (is_graph(iter->tr))
196		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
197
198	return TRACE_TYPE_UNHANDLED;
199}
200
201static void wakeup_print_header(struct seq_file *s)
202{
203	if (is_graph(wakeup_trace))
204		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
205	else
206		trace_default_header(s);
207}
208#endif /* else CONFIG_FUNCTION_GRAPH_TRACER */
209
210/*
211 * wakeup uses its own tracer function to keep the overhead down:
212 */
213static void
214wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
215		   struct ftrace_ops *op, struct pt_regs *pt_regs)
216{
217	struct trace_array *tr = wakeup_trace;
218	struct trace_array_cpu *data;
219	unsigned long flags;
220	int pc;
221
222	if (!func_prolog_preempt_disable(tr, &data, &pc))
223		return;
224
225	local_irq_save(flags);
226	trace_function(tr, ip, parent_ip, flags, pc);
227	local_irq_restore(flags);
228
229	atomic_dec(&data->disabled);
230	preempt_enable_notrace();
231}
232
233static int register_wakeup_function(struct trace_array *tr, int graph, int set)
234{
235	int ret;
236
237	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
238	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
239		return 0;
240
241	if (graph)
242		ret = register_ftrace_graph(&fgraph_wakeup_ops);
243	else
244		ret = register_ftrace_function(tr->ops);
245
246	if (!ret)
247		function_enabled = true;
248
249	return ret;
250}
251
252static void unregister_wakeup_function(struct trace_array *tr, int graph)
253{
254	if (!function_enabled)
255		return;
256
257	if (graph)
258		unregister_ftrace_graph(&fgraph_wakeup_ops);
259	else
260		unregister_ftrace_function(tr->ops);
261
262	function_enabled = false;
263}
264
265static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
266{
267	if (!(mask & TRACE_ITER_FUNCTION))
268		return 0;
269
270	if (set)
271		register_wakeup_function(tr, is_graph(tr), 1);
272	else
273		unregister_wakeup_function(tr, is_graph(tr));
274	return 1;
275}
276#else /* CONFIG_FUNCTION_TRACER */
277static int register_wakeup_function(struct trace_array *tr, int graph, int set)
278{
279	return 0;
280}
281static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
282static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
283{
284	return 0;
285}
286#endif /* else CONFIG_FUNCTION_TRACER */
287
288#ifndef CONFIG_FUNCTION_GRAPH_TRACER
289static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
290{
291	return TRACE_TYPE_UNHANDLED;
292}
293
294static void wakeup_trace_open(struct trace_iterator *iter) { }
295static void wakeup_trace_close(struct trace_iterator *iter) { }
296
297static void wakeup_print_header(struct seq_file *s)
298{
299	trace_default_header(s);
300}
301#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */
302
303static void
304__trace_function(struct trace_array *tr,
305		 unsigned long ip, unsigned long parent_ip,
306		 unsigned long flags, int pc)
307{
308	if (is_graph(tr))
309		trace_graph_function(tr, ip, parent_ip, flags, pc);
310	else
311		trace_function(tr, ip, parent_ip, flags, pc);
312}
313
314static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
315{
316	struct tracer *tracer = tr->current_trace;
317
318	if (wakeup_function_set(tr, mask, set))
319		return 0;
320
321#ifdef CONFIG_FUNCTION_GRAPH_TRACER
322	if (mask & TRACE_ITER_DISPLAY_GRAPH)
323		return wakeup_display_graph(tr, set);
324#endif
325
326	return trace_keep_overwrite(tracer, mask, set);
327}
328
329static int start_func_tracer(struct trace_array *tr, int graph)
330{
331	int ret;
332
333	ret = register_wakeup_function(tr, graph, 0);
334
335	if (!ret && tracing_is_enabled())
336		tracer_enabled = 1;
337	else
338		tracer_enabled = 0;
339
340	return ret;
341}
342
343static void stop_func_tracer(struct trace_array *tr, int graph)
344{
345	tracer_enabled = 0;
346
347	unregister_wakeup_function(tr, graph);
348}
349
350/*
351 * Should this new latency be reported/recorded?
352 */
353static bool report_latency(struct trace_array *tr, u64 delta)
354{
355	if (tracing_thresh) {
356		if (delta < tracing_thresh)
357			return false;
358	} else {
359		if (delta <= tr->max_latency)
360			return false;
361	}
362	return true;
363}
364
365static void
366probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
367{
368	if (task != wakeup_task)
369		return;
370
371	wakeup_current_cpu = cpu;
372}
373
374static void
375tracing_sched_switch_trace(struct trace_array *tr,
376			   struct task_struct *prev,
377			   struct task_struct *next,
378			   unsigned long flags, int pc)
379{
380	struct trace_event_call *call = &event_context_switch;
381	struct trace_buffer *buffer = tr->array_buffer.buffer;
382	struct ring_buffer_event *event;
383	struct ctx_switch_entry *entry;
384
385	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
386					  sizeof(*entry), flags, pc);
387	if (!event)
388		return;
389	entry	= ring_buffer_event_data(event);
390	entry->prev_pid			= prev->pid;
391	entry->prev_prio		= prev->prio;
392	entry->prev_state		= task_state_index(prev);
393	entry->next_pid			= next->pid;
394	entry->next_prio		= next->prio;
395	entry->next_state		= task_state_index(next);
396	entry->next_cpu	= task_cpu(next);
397
398	if (!call_filter_check_discard(call, entry, buffer, event))
399		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
400}
401
402static void
403tracing_sched_wakeup_trace(struct trace_array *tr,
404			   struct task_struct *wakee,
405			   struct task_struct *curr,
406			   unsigned long flags, int pc)
407{
408	struct trace_event_call *call = &event_wakeup;
409	struct ring_buffer_event *event;
410	struct ctx_switch_entry *entry;
411	struct trace_buffer *buffer = tr->array_buffer.buffer;
412
413	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
414					  sizeof(*entry), flags, pc);
415	if (!event)
416		return;
417	entry	= ring_buffer_event_data(event);
418	entry->prev_pid			= curr->pid;
419	entry->prev_prio		= curr->prio;
420	entry->prev_state		= task_state_index(curr);
421	entry->next_pid			= wakee->pid;
422	entry->next_prio		= wakee->prio;
423	entry->next_state		= task_state_index(wakee);
424	entry->next_cpu			= task_cpu(wakee);
425
426	if (!call_filter_check_discard(call, entry, buffer, event))
427		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
428}
429
430static void notrace
431probe_wakeup_sched_switch(void *ignore, bool preempt,
432			  struct task_struct *prev, struct task_struct *next)
433{
434	struct trace_array_cpu *data;
435	u64 T0, T1, delta;
436	unsigned long flags;
437	long disabled;
438	int cpu;
439	int pc;
440
441	tracing_record_cmdline(prev);
442
443	if (unlikely(!tracer_enabled))
444		return;
445
446	/*
447	 * When we start a new trace, we set wakeup_task to NULL
448	 * and then set tracer_enabled = 1. We want to make sure
449	 * that another CPU does not see the tracer_enabled = 1
450	 * and the wakeup_task with an older task, that might
451	 * actually be the same as next.
452	 */
453	smp_rmb();
454
455	if (next != wakeup_task)
456		return;
457
458	pc = preempt_count();
459
460	/* disable local data, not wakeup_cpu data */
461	cpu = raw_smp_processor_id();
462	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
463	if (likely(disabled != 1))
464		goto out;
465
466	local_irq_save(flags);
467	arch_spin_lock(&wakeup_lock);
468
469	/* We could race with grabbing wakeup_lock */
470	if (unlikely(!tracer_enabled || next != wakeup_task))
471		goto out_unlock;
472
473	/* The task we are waiting for is waking up */
474	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
475
476	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
477	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
478	__trace_stack(wakeup_trace, flags, 0, pc);
479
480	T0 = data->preempt_timestamp;
481	T1 = ftrace_now(cpu);
482	delta = T1-T0;
483
484	if (!report_latency(wakeup_trace, delta))
485		goto out_unlock;
486
487	if (likely(!is_tracing_stopped())) {
488		wakeup_trace->max_latency = delta;
489		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
490	}
491
492out_unlock:
493	__wakeup_reset(wakeup_trace);
494	arch_spin_unlock(&wakeup_lock);
495	local_irq_restore(flags);
496out:
497	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
498}
499
500static void __wakeup_reset(struct trace_array *tr)
501{
502	wakeup_cpu = -1;
503	wakeup_prio = -1;
504	tracing_dl = 0;
505
506	if (wakeup_task)
507		put_task_struct(wakeup_task);
508
509	wakeup_task = NULL;
510}
511
512static void wakeup_reset(struct trace_array *tr)
513{
514	unsigned long flags;
515
516	tracing_reset_online_cpus(&tr->array_buffer);
517
518	local_irq_save(flags);
519	arch_spin_lock(&wakeup_lock);
520	__wakeup_reset(tr);
521	arch_spin_unlock(&wakeup_lock);
522	local_irq_restore(flags);
523}
524
525static void
526probe_wakeup(void *ignore, struct task_struct *p)
527{
528	struct trace_array_cpu *data;
529	int cpu = smp_processor_id();
530	unsigned long flags;
531	long disabled;
532	int pc;
533
534	if (likely(!tracer_enabled))
535		return;
536
537	tracing_record_cmdline(p);
538	tracing_record_cmdline(current);
539
540	/*
541	 * The semantics are as follows:
542	 *  - wakeup tracer handles all tasks in the system, independently
543	 *    from their scheduling class;
544	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
545	 *    sched_rt class;
546	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
547	 */
548	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
549	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
550	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
551		return;
552
553	pc = preempt_count();
554	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
555	if (unlikely(disabled != 1))
556		goto out;
557
558	/* interrupts should be off from try_to_wake_up */
559	arch_spin_lock(&wakeup_lock);
560
561	/* check for races. */
562	if (!tracer_enabled || tracing_dl ||
563	    (!dl_task(p) && p->prio >= wakeup_prio))
564		goto out_locked;
565
566	/* reset the trace */
567	__wakeup_reset(wakeup_trace);
568
569	wakeup_cpu = task_cpu(p);
570	wakeup_current_cpu = wakeup_cpu;
571	wakeup_prio = p->prio;
572
573	/*
574	 * Once you start tracing a -deadline task, don't bother tracing
575	 * another task until the first one wakes up.
576	 */
577	if (dl_task(p))
578		tracing_dl = 1;
579	else
580		tracing_dl = 0;
581
582	wakeup_task = get_task_struct(p);
583
584	local_save_flags(flags);
585
586	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
587	data->preempt_timestamp = ftrace_now(cpu);
588	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
589	__trace_stack(wakeup_trace, flags, 0, pc);
590
591	/*
592	 * We must be careful in using CALLER_ADDR2. But since wake_up
593 * is not called by an assembly function (whereas schedule is)
594	 * it should be safe to use it here.
595	 */
596	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
597
598out_locked:
599	arch_spin_unlock(&wakeup_lock);
600out:
601	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
602}
603
604static void start_wakeup_tracer(struct trace_array *tr)
605{
606	int ret;
607
608	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
609	if (ret) {
610		pr_info("wakeup trace: Couldn't activate tracepoint"
611			" probe to kernel_sched_wakeup\n");
612		return;
613	}
614
615	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
616	if (ret) {
617		pr_info("wakeup trace: Couldn't activate tracepoint"
618			" probe to kernel_sched_wakeup_new\n");
619		goto fail_deprobe;
620	}
621
622	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
623	if (ret) {
624		pr_info("sched trace: Couldn't activate tracepoint"
625			" probe to kernel_sched_switch\n");
626		goto fail_deprobe_wake_new;
627	}
628
629	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
630	if (ret) {
631		pr_info("wakeup trace: Couldn't activate tracepoint"
632			" probe to kernel_sched_migrate_task\n");
633		goto fail_deprobe_sched_switch;
634	}
635
636	wakeup_reset(tr);
637
638	/*
639	 * Don't let the tracer_enabled = 1 show up before
640	 * the wakeup_task is reset. This may be overkill since
641	 * wakeup_reset does a spin_unlock after setting the
642	 * wakeup_task to NULL, but I want to be safe.
643	 * This is a slow path anyway.
644	 */
645	smp_wmb();
646
647	if (start_func_tracer(tr, is_graph(tr)))
648		printk(KERN_ERR "failed to start wakeup tracer\n");
649
650	return;
651fail_deprobe_sched_switch:
652	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
653fail_deprobe_wake_new:
654	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
655fail_deprobe:
656	unregister_trace_sched_wakeup(probe_wakeup, NULL);
657}
658
659static void stop_wakeup_tracer(struct trace_array *tr)
660{
661	tracer_enabled = 0;
662	stop_func_tracer(tr, is_graph(tr));
663	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
664	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
665	unregister_trace_sched_wakeup(probe_wakeup, NULL);
666	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
667}
668
669static bool wakeup_busy;
670
671static int __wakeup_tracer_init(struct trace_array *tr)
672{
673	save_flags = tr->trace_flags;
674
675	/* non overwrite screws up the latency tracers */
676	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
677	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
678
679	tr->max_latency = 0;
680	wakeup_trace = tr;
681	ftrace_init_array_ops(tr, wakeup_tracer_call);
682	start_wakeup_tracer(tr);
683
684	wakeup_busy = true;
685	return 0;
686}
687
688static int wakeup_tracer_init(struct trace_array *tr)
689{
690	if (wakeup_busy)
691		return -EBUSY;
692
693	wakeup_dl = 0;
694	wakeup_rt = 0;
695	return __wakeup_tracer_init(tr);
696}
697
698static int wakeup_rt_tracer_init(struct trace_array *tr)
699{
700	if (wakeup_busy)
701		return -EBUSY;
702
703	wakeup_dl = 0;
704	wakeup_rt = 1;
705	return __wakeup_tracer_init(tr);
706}
707
708static int wakeup_dl_tracer_init(struct trace_array *tr)
709{
710	if (wakeup_busy)
711		return -EBUSY;
712
713	wakeup_dl = 1;
714	wakeup_rt = 0;
715	return __wakeup_tracer_init(tr);
716}
717
718static void wakeup_tracer_reset(struct trace_array *tr)
719{
720	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
721	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
722
723	stop_wakeup_tracer(tr);
724	/* make sure we put back any tasks we are tracing */
725	wakeup_reset(tr);
726
727	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
728	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
729	ftrace_reset_array_ops(tr);
730	wakeup_busy = false;
731}
732
733static void wakeup_tracer_start(struct trace_array *tr)
734{
735	wakeup_reset(tr);
736	tracer_enabled = 1;
737}
738
739static void wakeup_tracer_stop(struct trace_array *tr)
740{
741	tracer_enabled = 0;
742}
743
744static struct tracer wakeup_tracer __read_mostly =
745{
746	.name		= "wakeup",
747	.init		= wakeup_tracer_init,
748	.reset		= wakeup_tracer_reset,
749	.start		= wakeup_tracer_start,
750	.stop		= wakeup_tracer_stop,
751	.print_max	= true,
752	.print_header	= wakeup_print_header,
753	.print_line	= wakeup_print_line,
754	.flag_changed	= wakeup_flag_changed,
755#ifdef CONFIG_FTRACE_SELFTEST
756	.selftest    = trace_selftest_startup_wakeup,
757#endif
758	.open		= wakeup_trace_open,
759	.close		= wakeup_trace_close,
760	.allow_instances = true,
761	.use_max_tr	= true,
762};
763
764static struct tracer wakeup_rt_tracer __read_mostly =
765{
766	.name		= "wakeup_rt",
767	.init		= wakeup_rt_tracer_init,
768	.reset		= wakeup_tracer_reset,
769	.start		= wakeup_tracer_start,
770	.stop		= wakeup_tracer_stop,
771	.print_max	= true,
772	.print_header	= wakeup_print_header,
773	.print_line	= wakeup_print_line,
774	.flag_changed	= wakeup_flag_changed,
775#ifdef CONFIG_FTRACE_SELFTEST
776	.selftest    = trace_selftest_startup_wakeup,
777#endif
778	.open		= wakeup_trace_open,
779	.close		= wakeup_trace_close,
780	.allow_instances = true,
781	.use_max_tr	= true,
782};
783
784static struct tracer wakeup_dl_tracer __read_mostly =
785{
786	.name		= "wakeup_dl",
787	.init		= wakeup_dl_tracer_init,
788	.reset		= wakeup_tracer_reset,
789	.start		= wakeup_tracer_start,
790	.stop		= wakeup_tracer_stop,
791	.print_max	= true,
792	.print_header	= wakeup_print_header,
793	.print_line	= wakeup_print_line,
794	.flag_changed	= wakeup_flag_changed,
795#ifdef CONFIG_FTRACE_SELFTEST
796	.selftest    = trace_selftest_startup_wakeup,
797#endif
798	.open		= wakeup_trace_open,
799	.close		= wakeup_trace_close,
800	.allow_instances = true,
801	.use_max_tr	= true,
802};
803
804__init static int init_wakeup_tracer(void)
805{
806	int ret;
807
808	ret = register_tracer(&wakeup_tracer);
809	if (ret)
810		return ret;
811
812	ret = register_tracer(&wakeup_rt_tracer);
813	if (ret)
814		return ret;
815
816	ret = register_tracer(&wakeup_dl_tracer);
817	if (ret)
818		return ret;
819
820	return 0;
821}
822core_initcall(init_wakeup_tracer);
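
Editorial note on the v5.9 version: besides the sched_dl support and the open-coded sched_switch/sched_wakeup event writers, the tracers now set .allow_instances = true, so they can be bound to a tracefs instance instead of only the top-level buffer; the wakeup_busy flag in __wakeup_tracer_init() still ensures that only one wakeup-family tracer runs at a time (a second init returns -EBUSY). A minimal sketch follows, assuming tracefs is mounted at /sys/kernel/tracing; the instance name "wakeup-demo" is made up, and the per-instance tracing_max_latency file only exists on kernels built with CONFIG_TRACER_MAX_TRACE.

/* Sketch: run the wakeup_dl tracer inside its own tracefs instance. */
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

#define INST "/sys/kernel/tracing/instances/wakeup-demo"	/* hypothetical name */

static int write_str(const char *file, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), INST "/%s", file);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* mkdir under instances/ creates a new trace_array */
	if (mkdir(INST, 0755) && errno != EEXIST)
		return 1;

	/*
	 * This reaches wakeup_dl_tracer_init(); enabling any wakeup
	 * tracer in a second instance at the same time would fail
	 * with -EBUSY because of wakeup_busy.
	 */
	if (write_str("current_tracer", "wakeup_dl"))
		return 1;

	sleep(10);				/* let SCHED_DEADLINE wakeups happen */

	write_str("current_tracer", "nop");	/* clears wakeup_busy via wakeup_tracer_reset() */
	rmdir(INST);				/* removing the directory destroys the instance */
	return 0;
}

Apart from the instance lifecycle, the control files behave exactly as they do in the top-level tracing directory.
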