kernel/trace/trace_irqsoff.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * trace irqs off critical timings
  4 *
  5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  7 *
  8 * From code in the latency_tracer, that is:
  9 *
 10 *  Copyright (C) 2004-2006 Ingo Molnar
 11 *  Copyright (C) 2004 Nadia Yvette Chambers
 12 */
 13#include <linux/kallsyms.h>
 14#include <linux/uaccess.h>
 15#include <linux/module.h>
 16#include <linux/ftrace.h>
 17#include <linux/kprobes.h>
 18
 19#include "trace.h"
 20
 21#include <trace/events/preemptirq.h>
 22
 23#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
 24static struct trace_array		*irqsoff_trace __read_mostly;
 25static int				tracer_enabled __read_mostly;
 26
 27static DEFINE_PER_CPU(int, tracing_cpu);
 28
 29static DEFINE_RAW_SPINLOCK(max_trace_lock);
 30
 31enum {
 32	TRACER_IRQS_OFF		= (1 << 1),
 33	TRACER_PREEMPT_OFF	= (1 << 2),
 34};
 35
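/*
 * trace_type (below) is a bitmask of these values: the irqsoff and
 * preemptoff tracers each set one bit, and the combined preemptirqsoff
 * tracer sets both (see preemptirqsoff_tracer_init() near the end of
 * this file).
 */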
 36static int trace_type __read_mostly;
 37
 38static int save_flags;
 39
 40static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
 41static int start_irqsoff_tracer(struct trace_array *tr, int graph);
 42
 43#ifdef CONFIG_PREEMPT_TRACER
 44static inline int
 45preempt_trace(int pc)
 46{
 47	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
 48}
 49#else
 50# define preempt_trace(pc) (0)
 51#endif
 52
 53#ifdef CONFIG_IRQSOFF_TRACER
 54static inline int
 55irq_trace(void)
 56{
 57	return ((trace_type & TRACER_IRQS_OFF) &&
 58		irqs_disabled());
 59}
 60#else
 61# define irq_trace() (0)
 62#endif
 63
 64#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 65static int irqsoff_display_graph(struct trace_array *tr, int set);
 66# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
 67#else
 68static inline int irqsoff_display_graph(struct trace_array *tr, int set)
 69{
 70	return -EINVAL;
 71}
 72# define is_graph(tr) false
 73#endif
 74
 75/*
 76 * Sequence count - we record it when starting a measurement and
 77 * skip the latency if the sequence has changed - some other section
 78 * did a maximum and could disturb our measurement with serial console
 79 * printouts, etc. Truly coinciding maximum latencies should be rare
 80 * and what happens together happens separately as well, so this doesn't
 81 * decrease the validity of the maximum found:
 82 */
 83static __cacheline_aligned_in_smp	unsigned long max_sequence;
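/*
 * An illustrative sketch of the protocol (not part of the original
 * source):
 *
 *	start:	data->critical_sequence = max_sequence;
 *	stop:	if (data->critical_sequence != max_sequence)
 *			skip;		// someone else recorded a max meanwhile
 *		else
 *			record max, max_sequence++;
 *
 * See start_critical_timing() and check_critical_timing() below.
 */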
 84
 85#ifdef CONFIG_FUNCTION_TRACER
 86/*
 87 * Prologue for the preempt and irqs off function tracers.
 88 *
 89 * Returns 1 if it is OK to continue, and data->disabled is
 90 *            incremented.
 91 *         0 if the trace is to be ignored, and data->disabled
 92 *            is kept the same.
 93 *
 94 * Note, this function is also used outside this ifdef but
 95 *  inside the #ifdef of the function graph tracer below.
 96 *  This is OK, since the function graph tracer is
 97 *  dependent on the function tracer.
 98 */
 99static int func_prolog_dec(struct trace_array *tr,
100			   struct trace_array_cpu **data,
101			   unsigned long *flags)
102{
103	long disabled;
104	int cpu;
105
106	/*
107	 * Does not matter if we preempt. We test the flags
108	 * afterward, to see if irqs are disabled or not.
109	 * If we preempt and get a false positive, the flags
110	 * test will fail.
111	 */
112	cpu = raw_smp_processor_id();
113	if (likely(!per_cpu(tracing_cpu, cpu)))
114		return 0;
115
116	local_save_flags(*flags);
117	/*
118	 * Slight chance to get a false positive on tracing_cpu,
119	 * although I'm starting to think there isn't a chance.
120	 * Leave this for now just to be paranoid.
121	 */
122	if (!irqs_disabled_flags(*flags) && !preempt_count())
123		return 0;
124
125	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
126	disabled = atomic_inc_return(&(*data)->disabled);
127
128	if (likely(disabled == 1))
129		return 1;
130
131	atomic_dec(&(*data)->disabled);
132
133	return 0;
134}
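/*
 * The atomic_inc_return() == 1 test above means only the outermost
 * entry on this CPU proceeds; anything that re-enters while the
 * counter is held (say, an interrupt firing inside the tracer) sees
 * disabled > 1 and backs out via the atomic_dec().
 */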
135
136/*
137 * irqsoff uses its own tracer function to keep the overhead down:
138 */
139static void
140irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
141		    struct ftrace_ops *op, struct ftrace_regs *fregs)
142{
143	struct trace_array *tr = irqsoff_trace;
144	struct trace_array_cpu *data;
145	unsigned long flags;
146	unsigned int trace_ctx;
147
148	if (!func_prolog_dec(tr, &data, &flags))
149		return;
150
151	trace_ctx = tracing_gen_ctx_flags(flags);
152
153	trace_function(tr, ip, parent_ip, trace_ctx);
154
155	atomic_dec(&data->disabled);
156}
157#endif /* CONFIG_FUNCTION_TRACER */
158
159#ifdef CONFIG_FUNCTION_GRAPH_TRACER
160static int irqsoff_display_graph(struct trace_array *tr, int set)
161{
162	int cpu;
163
164	if (!(is_graph(tr) ^ set))
165		return 0;
166
167	stop_irqsoff_tracer(irqsoff_trace, !set);
168
169	for_each_possible_cpu(cpu)
170		per_cpu(tracing_cpu, cpu) = 0;
171
172	tr->max_latency = 0;
173	tracing_reset_online_cpus(&irqsoff_trace->array_buffer);
174
175	return start_irqsoff_tracer(irqsoff_trace, set);
176}
177
178static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
179{
180	struct trace_array *tr = irqsoff_trace;
181	struct trace_array_cpu *data;
182	unsigned long flags;
183	unsigned int trace_ctx;
184	int ret;
185
186	if (ftrace_graph_ignore_func(trace))
187		return 0;
188	/*
189	 * Do not trace a function if it's filtered by set_graph_notrace.
190	 * Make the index of ret stack negative to indicate that it should
191	 * ignore further functions.  But it needs its own ret stack entry
192	 * to recover the original index in order to continue tracing after
193	 * returning from the function.
194	 */
195	if (ftrace_graph_notrace_addr(trace->func))
196		return 1;
197
198	if (!func_prolog_dec(tr, &data, &flags))
199		return 0;
200
201	trace_ctx = tracing_gen_ctx_flags(flags);
202	ret = __trace_graph_entry(tr, trace, trace_ctx);
203	atomic_dec(&data->disabled);
204
205	return ret;
206}
207
208static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
209{
210	struct trace_array *tr = irqsoff_trace;
211	struct trace_array_cpu *data;
212	unsigned long flags;
213	unsigned int trace_ctx;
214
215	ftrace_graph_addr_finish(trace);
216
217	if (!func_prolog_dec(tr, &data, &flags))
218		return;
219
220	trace_ctx = tracing_gen_ctx_flags(flags);
221	__trace_graph_return(tr, trace, trace_ctx);
222	atomic_dec(&data->disabled);
223}
224
225static struct fgraph_ops fgraph_ops = {
226	.entryfunc		= &irqsoff_graph_entry,
227	.retfunc		= &irqsoff_graph_return,
228};
229
230static void irqsoff_trace_open(struct trace_iterator *iter)
231{
232	if (is_graph(iter->tr))
233		graph_trace_open(iter);
234
235}
236
237static void irqsoff_trace_close(struct trace_iterator *iter)
238{
239	if (iter->private)
240		graph_trace_close(iter);
241}
242
243#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
244			    TRACE_GRAPH_PRINT_PROC | \
245			    TRACE_GRAPH_PRINT_REL_TIME | \
246			    TRACE_GRAPH_PRINT_DURATION)
247
248static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
249{
250	/*
251	 * In graph mode call the graph tracer output function,
252	 * otherwise go with the TRACE_FN event handler
253	 */
254	if (is_graph(iter->tr))
255		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
256
257	return TRACE_TYPE_UNHANDLED;
258}
259
260static void irqsoff_print_header(struct seq_file *s)
261{
262	struct trace_array *tr = irqsoff_trace;
263
264	if (is_graph(tr))
265		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
266	else
267		trace_default_header(s);
268}
269
270static void
271__trace_function(struct trace_array *tr,
272		 unsigned long ip, unsigned long parent_ip,
273		 unsigned int trace_ctx)
274{
275	if (is_graph(tr))
276		trace_graph_function(tr, ip, parent_ip, trace_ctx);
277	else
278		trace_function(tr, ip, parent_ip, trace_ctx);
279}
280
281#else
282#define __trace_function trace_function
283
284static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
285{
286	return TRACE_TYPE_UNHANDLED;
287}
288
289static void irqsoff_trace_open(struct trace_iterator *iter) { }
290static void irqsoff_trace_close(struct trace_iterator *iter) { }
291
292#ifdef CONFIG_FUNCTION_TRACER
293static void irqsoff_print_header(struct seq_file *s)
294{
295	trace_default_header(s);
296}
297#else
298static void irqsoff_print_header(struct seq_file *s)
299{
300	trace_latency_header(s);
301}
302#endif /* CONFIG_FUNCTION_TRACER */
303#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
304
305/*
306 * Should this new latency be reported/recorded?
307 */
308static bool report_latency(struct trace_array *tr, u64 delta)
309{
310	if (tracing_thresh) {
311		if (delta < tracing_thresh)
312			return false;
313	} else {
314		if (delta <= tr->max_latency)
315			return false;
316	}
317	return true;
318}
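/*
 * In other words: with a nonzero tracing_thresh, every critical
 * section lasting at least tracing_thresh is reported, independent of
 * the current maximum; with tracing_thresh at zero, only a strictly
 * new maximum (delta > tr->max_latency) is reported.
 */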
319
320static void
321check_critical_timing(struct trace_array *tr,
322		      struct trace_array_cpu *data,
323		      unsigned long parent_ip,
324		      int cpu)
325{
326	u64 T0, T1, delta;
327	unsigned long flags;
328	unsigned int trace_ctx;
329
330	T0 = data->preempt_timestamp;
331	T1 = ftrace_now(cpu);
332	delta = T1-T0;
333
334	trace_ctx = tracing_gen_ctx();
335
336	if (!report_latency(tr, delta))
337		goto out;
338
339	raw_spin_lock_irqsave(&max_trace_lock, flags);
340
341	/* check if we are still the max latency */
342	if (!report_latency(tr, delta))
343		goto out_unlock;
344
345	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
346	/* Skip 5 functions to get to the irq/preempt enable function */
347	__trace_stack(tr, trace_ctx, 5);
348
349	if (data->critical_sequence != max_sequence)
350		goto out_unlock;
351
352	data->critical_end = parent_ip;
353
354	if (likely(!is_tracing_stopped())) {
355		tr->max_latency = delta;
356		update_max_tr_single(tr, current, cpu);
357	}
358
359	max_sequence++;
360
361out_unlock:
362	raw_spin_unlock_irqrestore(&max_trace_lock, flags);
363
364out:
365	data->critical_sequence = max_sequence;
366	data->preempt_timestamp = ftrace_now(cpu);
367	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
368}
369
370static nokprobe_inline void
371start_critical_timing(unsigned long ip, unsigned long parent_ip)
372{
373	int cpu;
374	struct trace_array *tr = irqsoff_trace;
375	struct trace_array_cpu *data;
376
377	if (!tracer_enabled || !tracing_is_enabled())
378		return;
379
380	cpu = raw_smp_processor_id();
381
382	if (per_cpu(tracing_cpu, cpu))
383		return;
384
385	data = per_cpu_ptr(tr->array_buffer.data, cpu);
386
387	if (unlikely(!data) || atomic_read(&data->disabled))
388		return;
389
390	atomic_inc(&data->disabled);
391
392	data->critical_sequence = max_sequence;
393	data->preempt_timestamp = ftrace_now(cpu);
394	data->critical_start = parent_ip ? : ip;
395
396	__trace_function(tr, ip, parent_ip, tracing_gen_ctx());
397
398	per_cpu(tracing_cpu, cpu) = 1;
399
400	atomic_dec(&data->disabled);
401}
402
403static nokprobe_inline void
404stop_critical_timing(unsigned long ip, unsigned long parent_ip)
405{
406	int cpu;
407	struct trace_array *tr = irqsoff_trace;
408	struct trace_array_cpu *data;
409	unsigned int trace_ctx;
410
411	cpu = raw_smp_processor_id();
412	/* Always clear the tracing cpu on stopping the trace */
413	if (unlikely(per_cpu(tracing_cpu, cpu)))
414		per_cpu(tracing_cpu, cpu) = 0;
415	else
416		return;
417
418	if (!tracer_enabled || !tracing_is_enabled())
419		return;
420
421	data = per_cpu_ptr(tr->array_buffer.data, cpu);
422
423	if (unlikely(!data) ||
424	    !data->critical_start || atomic_read(&data->disabled))
425		return;
426
427	atomic_inc(&data->disabled);
428
429	trace_ctx = tracing_gen_ctx();
430	__trace_function(tr, ip, parent_ip, trace_ctx);
431	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
432	data->critical_start = 0;
433	atomic_dec(&data->disabled);
434}
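/*
 * Putting the pieces together (an illustrative summary): the
 * tracer_hardirqs_off()/tracer_preempt_off() hooks below call
 * start_critical_timing(), which stamps data->preempt_timestamp and
 * marks the CPU in tracing_cpu; the matching *_on() hooks call
 * stop_critical_timing(), whose check_critical_timing() turns the two
 * timestamps into a delta and records it when report_latency()
 * accepts it.
 */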
435
436/* start and stop critical timings, used to stop the measurement (in idle) */
437void start_critical_timings(void)
438{
439	if (preempt_trace(preempt_count()) || irq_trace())
440		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
441}
442EXPORT_SYMBOL_GPL(start_critical_timings);
443NOKPROBE_SYMBOL(start_critical_timings);
444
445void stop_critical_timings(void)
446{
447	if (preempt_trace(preempt_count()) || irq_trace())
448		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
449}
450EXPORT_SYMBOL_GPL(stop_critical_timings);
451NOKPROBE_SYMBOL(stop_critical_timings);
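/*
 * A minimal sketch of the intended caller pattern, assuming a
 * simplified idle loop (the real call sites are in the idle path,
 * e.g. kernel/sched/idle.c):
 *
 *	stop_critical_timings();	// time spent idle is not a latency
 *	arch_cpu_idle();		// may sit with irqs disabled
 *	start_critical_timings();	// resume measuring after wakeup
 */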
452
453#ifdef CONFIG_FUNCTION_TRACER
454static bool function_enabled;
455
456static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
457{
458	int ret;
459
460	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
461	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
462		return 0;
463
464	if (graph)
465		ret = register_ftrace_graph(&fgraph_ops);
466	else
467		ret = register_ftrace_function(tr->ops);
468
469	if (!ret)
470		function_enabled = true;
471
472	return ret;
473}
474
475static void unregister_irqsoff_function(struct trace_array *tr, int graph)
476{
477	if (!function_enabled)
478		return;
479
480	if (graph)
481		unregister_ftrace_graph(&fgraph_ops);
482	else
483		unregister_ftrace_function(tr->ops);
484
485	function_enabled = false;
486}
487
488static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
489{
490	if (!(mask & TRACE_ITER_FUNCTION))
491		return 0;
492
493	if (set)
494		register_irqsoff_function(tr, is_graph(tr), 1);
495	else
496		unregister_irqsoff_function(tr, is_graph(tr));
497	return 1;
498}
499#else
500static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
501{
502	return 0;
503}
504static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
505static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
506{
507	return 0;
508}
509#endif /* CONFIG_FUNCTION_TRACER */
510
511static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
512{
513	struct tracer *tracer = tr->current_trace;
514
515	if (irqsoff_function_set(tr, mask, set))
516		return 0;
517
518#ifdef CONFIG_FUNCTION_GRAPH_TRACER
519	if (mask & TRACE_ITER_DISPLAY_GRAPH)
520		return irqsoff_display_graph(tr, set);
521#endif
522
523	return trace_keep_overwrite(tracer, mask, set);
524}
525
526static int start_irqsoff_tracer(struct trace_array *tr, int graph)
527{
528	int ret;
529
530	ret = register_irqsoff_function(tr, graph, 0);
531
532	if (!ret && tracing_is_enabled())
533		tracer_enabled = 1;
534	else
535		tracer_enabled = 0;
536
537	return ret;
538}
539
540static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
541{
542	tracer_enabled = 0;
543
544	unregister_irqsoff_function(tr, graph);
545}
546
547static bool irqsoff_busy;
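/*
 * irqsoff_busy makes the tracers in this file mutually exclusive:
 * __irqsoff_tracer_init() below returns -EBUSY while any one of
 * irqsoff, preemptoff or preemptirqsoff is already active.
 */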
548
549static int __irqsoff_tracer_init(struct trace_array *tr)
550{
551	if (irqsoff_busy)
552		return -EBUSY;
553
554	save_flags = tr->trace_flags;
555
556	/* non-overwrite screws up the latency tracers */
557	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
558	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
559	/* without pause, we will produce garbage if another latency occurs */
560	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);
561
562	tr->max_latency = 0;
563	irqsoff_trace = tr;
564	/* make sure that the tracer is visible */
565	smp_wmb();
566
567	ftrace_init_array_ops(tr, irqsoff_tracer_call);
568
569	/* Only toplevel instance supports graph tracing */
570	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
571				      is_graph(tr))))
572		printk(KERN_ERR "failed to start irqsoff tracer\n");
573
574	irqsoff_busy = true;
575	return 0;
576}
577
578static void __irqsoff_tracer_reset(struct trace_array *tr)
579{
580	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
581	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
582	int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;
583
584	stop_irqsoff_tracer(tr, is_graph(tr));
585
586	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
587	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
588	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
589	ftrace_reset_array_ops(tr);
590
591	irqsoff_busy = false;
592}
593
594static void irqsoff_tracer_start(struct trace_array *tr)
595{
596	tracer_enabled = 1;
597}
598
599static void irqsoff_tracer_stop(struct trace_array *tr)
600{
601	tracer_enabled = 0;
602}
603
604#ifdef CONFIG_IRQSOFF_TRACER
605/*
606 * We are only interested in hardirq on/off events:
607 */
608void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
609{
610	if (!preempt_trace(preempt_count()) && irq_trace())
611		stop_critical_timing(a0, a1);
612}
613NOKPROBE_SYMBOL(tracer_hardirqs_on);
614
615void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
616{
617	if (!preempt_trace(preempt_count()) && irq_trace())
618		start_critical_timing(a0, a1);
619}
620NOKPROBE_SYMBOL(tracer_hardirqs_off);
621
622static int irqsoff_tracer_init(struct trace_array *tr)
623{
624	trace_type = TRACER_IRQS_OFF;
625
626	return __irqsoff_tracer_init(tr);
627}
628
629static void irqsoff_tracer_reset(struct trace_array *tr)
630{
631	__irqsoff_tracer_reset(tr);
632}
633
634static struct tracer irqsoff_tracer __read_mostly =
635{
636	.name		= "irqsoff",
637	.init		= irqsoff_tracer_init,
638	.reset		= irqsoff_tracer_reset,
639	.start		= irqsoff_tracer_start,
640	.stop		= irqsoff_tracer_stop,
641	.print_max	= true,
642	.print_header   = irqsoff_print_header,
643	.print_line     = irqsoff_print_line,
644	.flag_changed	= irqsoff_flag_changed,
645#ifdef CONFIG_FTRACE_SELFTEST
646	.selftest    = trace_selftest_startup_irqsoff,
647#endif
648	.open           = irqsoff_trace_open,
649	.close          = irqsoff_trace_close,
650	.allow_instances = true,
651	.use_max_tr	= true,
652};
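/*
 * Example usage from user space (a sketch; assumes tracefs mounted at
 * the usual /sys/kernel/tracing):
 *
 *	# echo 0 > /sys/kernel/tracing/tracing_thresh
 *	# echo irqsoff > /sys/kernel/tracing/current_tracer
 *	# echo 1 > /sys/kernel/tracing/tracing_on
 *	... run the workload ...
 *	# echo 0 > /sys/kernel/tracing/tracing_on
 *	# cat /sys/kernel/tracing/tracing_max_latency	# worst case seen, in usecs
 *	# cat /sys/kernel/tracing/trace			# the recorded critical section
 */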
653#endif /*  CONFIG_IRQSOFF_TRACER */
654
655#ifdef CONFIG_PREEMPT_TRACER
656void tracer_preempt_on(unsigned long a0, unsigned long a1)
657{
658	if (preempt_trace(preempt_count()) && !irq_trace())
659		stop_critical_timing(a0, a1);
660}
661
662void tracer_preempt_off(unsigned long a0, unsigned long a1)
663{
664	if (preempt_trace(preempt_count()) && !irq_trace())
665		start_critical_timing(a0, a1);
666}
667
668static int preemptoff_tracer_init(struct trace_array *tr)
669{
670	trace_type = TRACER_PREEMPT_OFF;
671
672	return __irqsoff_tracer_init(tr);
673}
674
675static void preemptoff_tracer_reset(struct trace_array *tr)
676{
677	__irqsoff_tracer_reset(tr);
678}
679
680static struct tracer preemptoff_tracer __read_mostly =
681{
682	.name		= "preemptoff",
683	.init		= preemptoff_tracer_init,
684	.reset		= preemptoff_tracer_reset,
685	.start		= irqsoff_tracer_start,
686	.stop		= irqsoff_tracer_stop,
687	.print_max	= true,
688	.print_header   = irqsoff_print_header,
689	.print_line     = irqsoff_print_line,
690	.flag_changed	= irqsoff_flag_changed,
691#ifdef CONFIG_FTRACE_SELFTEST
692	.selftest    = trace_selftest_startup_preemptoff,
693#endif
694	.open		= irqsoff_trace_open,
695	.close		= irqsoff_trace_close,
696	.allow_instances = true,
697	.use_max_tr	= true,
698};
699#endif /* CONFIG_PREEMPT_TRACER */
700
701#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
702
703static int preemptirqsoff_tracer_init(struct trace_array *tr)
704{
705	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
706
707	return __irqsoff_tracer_init(tr);
708}
709
710static void preemptirqsoff_tracer_reset(struct trace_array *tr)
711{
712	__irqsoff_tracer_reset(tr);
713}
714
715static struct tracer preemptirqsoff_tracer __read_mostly =
716{
717	.name		= "preemptirqsoff",
718	.init		= preemptirqsoff_tracer_init,
719	.reset		= preemptirqsoff_tracer_reset,
720	.start		= irqsoff_tracer_start,
721	.stop		= irqsoff_tracer_stop,
722	.print_max	= true,
723	.print_header   = irqsoff_print_header,
724	.print_line     = irqsoff_print_line,
725	.flag_changed	= irqsoff_flag_changed,
726#ifdef CONFIG_FTRACE_SELFTEST
727	.selftest    = trace_selftest_startup_preemptirqsoff,
728#endif
729	.open		= irqsoff_trace_open,
730	.close		= irqsoff_trace_close,
731	.allow_instances = true,
732	.use_max_tr	= true,
733};
734#endif
735
736__init static int init_irqsoff_tracer(void)
737{
738#ifdef CONFIG_IRQSOFF_TRACER
739	register_tracer(&irqsoff_tracer);
740#endif
741#ifdef CONFIG_PREEMPT_TRACER
742	register_tracer(&preemptoff_tracer);
743#endif
744#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
745	register_tracer(&preemptirqsoff_tracer);
746#endif
747
748	return 0;
749}
750core_initcall(init_irqsoff_tracer);
751#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
kernel/trace/trace_irqsoff.c (v5.4)
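The v5.4 version of the same file follows. The notable differences from the v6.2 listing above: the tracing context is threaded through as a separate (flags, pc) pair instead of a single trace_ctx word, the per-CPU buffers live in tr->trace_buffer rather than tr->array_buffer, the ftrace callback takes struct pt_regs instead of struct ftrace_regs, start/stop_critical_timing() take pc as an argument, and v5.4 predates the TRACE_ITER_PAUSE_ON_TRACE flag set in the newer __irqsoff_tracer_init().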
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * trace irqs off critical timings
  4 *
  5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  7 *
  8 * From code in the latency_tracer, that is:
  9 *
 10 *  Copyright (C) 2004-2006 Ingo Molnar
 11 *  Copyright (C) 2004 Nadia Yvette Chambers
 12 */
 13#include <linux/kallsyms.h>
 14#include <linux/uaccess.h>
 15#include <linux/module.h>
 16#include <linux/ftrace.h>
 17#include <linux/kprobes.h>
 18
 19#include "trace.h"
 20
 21#include <trace/events/preemptirq.h>
 22
 23#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
 24static struct trace_array		*irqsoff_trace __read_mostly;
 25static int				tracer_enabled __read_mostly;
 26
 27static DEFINE_PER_CPU(int, tracing_cpu);
 28
 29static DEFINE_RAW_SPINLOCK(max_trace_lock);
 30
 31enum {
 32	TRACER_IRQS_OFF		= (1 << 1),
 33	TRACER_PREEMPT_OFF	= (1 << 2),
 34};
 35
 36static int trace_type __read_mostly;
 37
 38static int save_flags;
 39
 40static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
 41static int start_irqsoff_tracer(struct trace_array *tr, int graph);
 42
 43#ifdef CONFIG_PREEMPT_TRACER
 44static inline int
 45preempt_trace(int pc)
 46{
 47	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
 48}
 49#else
 50# define preempt_trace(pc) (0)
 51#endif
 52
 53#ifdef CONFIG_IRQSOFF_TRACER
 54static inline int
 55irq_trace(void)
 56{
 57	return ((trace_type & TRACER_IRQS_OFF) &&
 58		irqs_disabled());
 59}
 60#else
 61# define irq_trace() (0)
 62#endif
 63
 64#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 65static int irqsoff_display_graph(struct trace_array *tr, int set);
 66# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
 67#else
 68static inline int irqsoff_display_graph(struct trace_array *tr, int set)
 69{
 70	return -EINVAL;
 71}
 72# define is_graph(tr) false
 73#endif
 74
 75/*
 76 * Sequence count - we record it when starting a measurement and
 77 * skip the latency if the sequence has changed - some other section
 78 * did a maximum and could disturb our measurement with serial console
 79 * printouts, etc. Truly coinciding maximum latencies should be rare
 80 * and what happens together happens separately as well, so this doesn't
 81 * decrease the validity of the maximum found:
 82 */
 83static __cacheline_aligned_in_smp	unsigned long max_sequence;
 84
 85#ifdef CONFIG_FUNCTION_TRACER
 86/*
 87 * Prologue for the preempt and irqs off function tracers.
 88 *
 89 * Returns 1 if it is OK to continue, and data->disabled is
 90 *            incremented.
 91 *         0 if the trace is to be ignored, and data->disabled
 92 *            is kept the same.
 93 *
 94 * Note, this function is also used outside this ifdef but
 95 *  inside the #ifdef of the function graph tracer below.
 96 *  This is OK, since the function graph tracer is
 97 *  dependent on the function tracer.
 98 */
 99static int func_prolog_dec(struct trace_array *tr,
100			   struct trace_array_cpu **data,
101			   unsigned long *flags)
102{
103	long disabled;
104	int cpu;
105
106	/*
107	 * Does not matter if we preempt. We test the flags
108	 * afterward, to see if irqs are disabled or not.
109	 * If we preempt and get a false positive, the flags
110	 * test will fail.
111	 */
112	cpu = raw_smp_processor_id();
113	if (likely(!per_cpu(tracing_cpu, cpu)))
114		return 0;
115
116	local_save_flags(*flags);
117	/*
118	 * Slight chance to get a false positive on tracing_cpu,
119	 * although I'm starting to think there isn't a chance.
120	 * Leave this for now just to be paranoid.
121	 */
122	if (!irqs_disabled_flags(*flags) && !preempt_count())
123		return 0;
124
125	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
126	disabled = atomic_inc_return(&(*data)->disabled);
127
128	if (likely(disabled == 1))
129		return 1;
130
131	atomic_dec(&(*data)->disabled);
132
133	return 0;
134}
135
136/*
137 * irqsoff uses its own tracer function to keep the overhead down:
138 */
139static void
140irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
141		    struct ftrace_ops *op, struct pt_regs *pt_regs)
142{
143	struct trace_array *tr = irqsoff_trace;
144	struct trace_array_cpu *data;
145	unsigned long flags;
146
147	if (!func_prolog_dec(tr, &data, &flags))
148		return;
149
150	trace_function(tr, ip, parent_ip, flags, preempt_count());
151
152	atomic_dec(&data->disabled);
153}
154#endif /* CONFIG_FUNCTION_TRACER */
155
156#ifdef CONFIG_FUNCTION_GRAPH_TRACER
157static int irqsoff_display_graph(struct trace_array *tr, int set)
158{
159	int cpu;
160
161	if (!(is_graph(tr) ^ set))
162		return 0;
163
164	stop_irqsoff_tracer(irqsoff_trace, !set);
165
166	for_each_possible_cpu(cpu)
167		per_cpu(tracing_cpu, cpu) = 0;
168
169	tr->max_latency = 0;
170	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
171
172	return start_irqsoff_tracer(irqsoff_trace, set);
173}
174
175static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
176{
177	struct trace_array *tr = irqsoff_trace;
178	struct trace_array_cpu *data;
179	unsigned long flags;
180	int ret;
181	int pc;
182
183	if (ftrace_graph_ignore_func(trace))
184		return 0;
185	/*
186	 * Do not trace a function if it's filtered by set_graph_notrace.
187	 * Make the index of ret stack negative to indicate that it should
188	 * ignore further functions.  But it needs its own ret stack entry
189	 * to recover the original index in order to continue tracing after
190	 * returning from the function.
191	 */
192	if (ftrace_graph_notrace_addr(trace->func))
193		return 1;
194
195	if (!func_prolog_dec(tr, &data, &flags))
196		return 0;
197
198	pc = preempt_count();
199	ret = __trace_graph_entry(tr, trace, flags, pc);
200	atomic_dec(&data->disabled);
201
202	return ret;
203}
204
205static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
206{
207	struct trace_array *tr = irqsoff_trace;
208	struct trace_array_cpu *data;
209	unsigned long flags;
210	int pc;
211
212	ftrace_graph_addr_finish(trace);
213
214	if (!func_prolog_dec(tr, &data, &flags))
215		return;
216
217	pc = preempt_count();
218	__trace_graph_return(tr, trace, flags, pc);
219	atomic_dec(&data->disabled);
220}
221
222static struct fgraph_ops fgraph_ops = {
223	.entryfunc		= &irqsoff_graph_entry,
224	.retfunc		= &irqsoff_graph_return,
225};
226
227static void irqsoff_trace_open(struct trace_iterator *iter)
228{
229	if (is_graph(iter->tr))
230		graph_trace_open(iter);
231
232}
233
234static void irqsoff_trace_close(struct trace_iterator *iter)
235{
236	if (iter->private)
237		graph_trace_close(iter);
238}
239
240#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
241			    TRACE_GRAPH_PRINT_PROC | \
242			    TRACE_GRAPH_PRINT_REL_TIME | \
243			    TRACE_GRAPH_PRINT_DURATION)
244
245static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
246{
247	/*
248	 * In graph mode call the graph tracer output function,
249	 * otherwise go with the TRACE_FN event handler
250	 */
251	if (is_graph(iter->tr))
252		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
253
254	return TRACE_TYPE_UNHANDLED;
255}
256
257static void irqsoff_print_header(struct seq_file *s)
258{
259	struct trace_array *tr = irqsoff_trace;
260
261	if (is_graph(tr))
262		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
263	else
264		trace_default_header(s);
265}
266
267static void
268__trace_function(struct trace_array *tr,
269		 unsigned long ip, unsigned long parent_ip,
270		 unsigned long flags, int pc)
271{
272	if (is_graph(tr))
273		trace_graph_function(tr, ip, parent_ip, flags, pc);
274	else
275		trace_function(tr, ip, parent_ip, flags, pc);
276}
277
278#else
279#define __trace_function trace_function
280
281static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
282{
283	return TRACE_TYPE_UNHANDLED;
284}
285
286static void irqsoff_trace_open(struct trace_iterator *iter) { }
287static void irqsoff_trace_close(struct trace_iterator *iter) { }
288
289#ifdef CONFIG_FUNCTION_TRACER
290static void irqsoff_print_header(struct seq_file *s)
291{
292	trace_default_header(s);
293}
294#else
295static void irqsoff_print_header(struct seq_file *s)
296{
297	trace_latency_header(s);
298}
299#endif /* CONFIG_FUNCTION_TRACER */
300#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
301
302/*
303 * Should this new latency be reported/recorded?
304 */
305static bool report_latency(struct trace_array *tr, u64 delta)
306{
307	if (tracing_thresh) {
308		if (delta < tracing_thresh)
309			return false;
310	} else {
311		if (delta <= tr->max_latency)
312			return false;
313	}
314	return true;
315}
316
317static void
318check_critical_timing(struct trace_array *tr,
319		      struct trace_array_cpu *data,
320		      unsigned long parent_ip,
321		      int cpu)
322{
323	u64 T0, T1, delta;
324	unsigned long flags;
325	int pc;
326
327	T0 = data->preempt_timestamp;
328	T1 = ftrace_now(cpu);
329	delta = T1-T0;
330
331	local_save_flags(flags);
332
333	pc = preempt_count();
334
335	if (!report_latency(tr, delta))
336		goto out;
337
338	raw_spin_lock_irqsave(&max_trace_lock, flags);
339
340	/* check if we are still the max latency */
341	if (!report_latency(tr, delta))
342		goto out_unlock;
343
344	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
345	/* Skip 5 functions to get to the irq/preempt enable function */
346	__trace_stack(tr, flags, 5, pc);
347
348	if (data->critical_sequence != max_sequence)
349		goto out_unlock;
350
351	data->critical_end = parent_ip;
352
353	if (likely(!is_tracing_stopped())) {
354		tr->max_latency = delta;
355		update_max_tr_single(tr, current, cpu);
356	}
357
358	max_sequence++;
359
360out_unlock:
361	raw_spin_unlock_irqrestore(&max_trace_lock, flags);
362
363out:
364	data->critical_sequence = max_sequence;
365	data->preempt_timestamp = ftrace_now(cpu);
366	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
367}
368
369static nokprobe_inline void
370start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
371{
372	int cpu;
373	struct trace_array *tr = irqsoff_trace;
374	struct trace_array_cpu *data;
375	unsigned long flags;
376
377	if (!tracer_enabled || !tracing_is_enabled())
378		return;
379
380	cpu = raw_smp_processor_id();
381
382	if (per_cpu(tracing_cpu, cpu))
383		return;
384
385	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
386
387	if (unlikely(!data) || atomic_read(&data->disabled))
388		return;
389
390	atomic_inc(&data->disabled);
391
392	data->critical_sequence = max_sequence;
393	data->preempt_timestamp = ftrace_now(cpu);
394	data->critical_start = parent_ip ? : ip;
395
396	local_save_flags(flags);
397
398	__trace_function(tr, ip, parent_ip, flags, pc);
399
400	per_cpu(tracing_cpu, cpu) = 1;
401
402	atomic_dec(&data->disabled);
403}
404
405static nokprobe_inline void
406stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
407{
408	int cpu;
409	struct trace_array *tr = irqsoff_trace;
410	struct trace_array_cpu *data;
411	unsigned long flags;
412
413	cpu = raw_smp_processor_id();
414	/* Always clear the tracing cpu on stopping the trace */
415	if (unlikely(per_cpu(tracing_cpu, cpu)))
416		per_cpu(tracing_cpu, cpu) = 0;
417	else
418		return;
419
420	if (!tracer_enabled || !tracing_is_enabled())
421		return;
422
423	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
424
425	if (unlikely(!data) ||
426	    !data->critical_start || atomic_read(&data->disabled))
427		return;
428
429	atomic_inc(&data->disabled);
430
431	local_save_flags(flags);
432	__trace_function(tr, ip, parent_ip, flags, pc);
433	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
434	data->critical_start = 0;
435	atomic_dec(&data->disabled);
436}
437
438/* start and stop critical timings, used to stop the measurement (in idle) */
439void start_critical_timings(void)
440{
441	int pc = preempt_count();
442
443	if (preempt_trace(pc) || irq_trace())
444		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
445}
446EXPORT_SYMBOL_GPL(start_critical_timings);
447NOKPROBE_SYMBOL(start_critical_timings);
448
449void stop_critical_timings(void)
450{
451	int pc = preempt_count();
452
453	if (preempt_trace(pc) || irq_trace())
454		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
455}
456EXPORT_SYMBOL_GPL(stop_critical_timings);
457NOKPROBE_SYMBOL(stop_critical_timings);
458
459#ifdef CONFIG_FUNCTION_TRACER
460static bool function_enabled;
461
462static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
463{
464	int ret;
465
466	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
467	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
468		return 0;
469
470	if (graph)
471		ret = register_ftrace_graph(&fgraph_ops);
472	else
473		ret = register_ftrace_function(tr->ops);
474
475	if (!ret)
476		function_enabled = true;
477
478	return ret;
479}
480
481static void unregister_irqsoff_function(struct trace_array *tr, int graph)
482{
483	if (!function_enabled)
484		return;
485
486	if (graph)
487		unregister_ftrace_graph(&fgraph_ops);
488	else
489		unregister_ftrace_function(tr->ops);
490
491	function_enabled = false;
492}
493
494static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
495{
496	if (!(mask & TRACE_ITER_FUNCTION))
497		return 0;
498
499	if (set)
500		register_irqsoff_function(tr, is_graph(tr), 1);
501	else
502		unregister_irqsoff_function(tr, is_graph(tr));
503	return 1;
504}
505#else
506static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
507{
508	return 0;
509}
510static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
511static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
512{
513	return 0;
514}
515#endif /* CONFIG_FUNCTION_TRACER */
516
517static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
518{
519	struct tracer *tracer = tr->current_trace;
520
521	if (irqsoff_function_set(tr, mask, set))
522		return 0;
523
524#ifdef CONFIG_FUNCTION_GRAPH_TRACER
525	if (mask & TRACE_ITER_DISPLAY_GRAPH)
526		return irqsoff_display_graph(tr, set);
527#endif
528
529	return trace_keep_overwrite(tracer, mask, set);
530}
531
532static int start_irqsoff_tracer(struct trace_array *tr, int graph)
533{
534	int ret;
535
536	ret = register_irqsoff_function(tr, graph, 0);
537
538	if (!ret && tracing_is_enabled())
539		tracer_enabled = 1;
540	else
541		tracer_enabled = 0;
542
543	return ret;
544}
545
546static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
547{
548	tracer_enabled = 0;
549
550	unregister_irqsoff_function(tr, graph);
551}
552
553static bool irqsoff_busy;
554
555static int __irqsoff_tracer_init(struct trace_array *tr)
556{
557	if (irqsoff_busy)
558		return -EBUSY;
559
560	save_flags = tr->trace_flags;
561
562	/* non-overwrite screws up the latency tracers */
563	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
564	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
565
566	tr->max_latency = 0;
567	irqsoff_trace = tr;
568	/* make sure that the tracer is visible */
569	smp_wmb();
570
571	ftrace_init_array_ops(tr, irqsoff_tracer_call);
572
573	/* Only toplevel instance supports graph tracing */
574	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
575				      is_graph(tr))))
576		printk(KERN_ERR "failed to start irqsoff tracer\n");
577
578	irqsoff_busy = true;
579	return 0;
580}
581
582static void __irqsoff_tracer_reset(struct trace_array *tr)
583{
584	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
585	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
586
587	stop_irqsoff_tracer(tr, is_graph(tr));
588
589	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
590	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
591	ftrace_reset_array_ops(tr);
592
593	irqsoff_busy = false;
594}
595
596static void irqsoff_tracer_start(struct trace_array *tr)
597{
598	tracer_enabled = 1;
599}
600
601static void irqsoff_tracer_stop(struct trace_array *tr)
602{
603	tracer_enabled = 0;
604}
605
606#ifdef CONFIG_IRQSOFF_TRACER
607/*
608 * We are only interested in hardirq on/off events:
609 */
610void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
611{
612	unsigned int pc = preempt_count();
613
614	if (!preempt_trace(pc) && irq_trace())
615		stop_critical_timing(a0, a1, pc);
616}
617NOKPROBE_SYMBOL(tracer_hardirqs_on);
618
619void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
620{
621	unsigned int pc = preempt_count();
622
623	if (!preempt_trace(pc) && irq_trace())
624		start_critical_timing(a0, a1, pc);
625}
626NOKPROBE_SYMBOL(tracer_hardirqs_off);
627
628static int irqsoff_tracer_init(struct trace_array *tr)
629{
630	trace_type = TRACER_IRQS_OFF;
631
632	return __irqsoff_tracer_init(tr);
633}
634
635static void irqsoff_tracer_reset(struct trace_array *tr)
636{
637	__irqsoff_tracer_reset(tr);
638}
639
640static struct tracer irqsoff_tracer __read_mostly =
641{
642	.name		= "irqsoff",
643	.init		= irqsoff_tracer_init,
644	.reset		= irqsoff_tracer_reset,
645	.start		= irqsoff_tracer_start,
646	.stop		= irqsoff_tracer_stop,
647	.print_max	= true,
648	.print_header   = irqsoff_print_header,
649	.print_line     = irqsoff_print_line,
650	.flag_changed	= irqsoff_flag_changed,
651#ifdef CONFIG_FTRACE_SELFTEST
652	.selftest    = trace_selftest_startup_irqsoff,
653#endif
654	.open           = irqsoff_trace_open,
655	.close          = irqsoff_trace_close,
656	.allow_instances = true,
657	.use_max_tr	= true,
658};
659#endif /*  CONFIG_IRQSOFF_TRACER */
660
661#ifdef CONFIG_PREEMPT_TRACER
662void tracer_preempt_on(unsigned long a0, unsigned long a1)
663{
664	int pc = preempt_count();
665
666	if (preempt_trace(pc) && !irq_trace())
667		stop_critical_timing(a0, a1, pc);
668}
669
670void tracer_preempt_off(unsigned long a0, unsigned long a1)
671{
672	int pc = preempt_count();
673
674	if (preempt_trace(pc) && !irq_trace())
675		start_critical_timing(a0, a1, pc);
676}
677
678static int preemptoff_tracer_init(struct trace_array *tr)
679{
680	trace_type = TRACER_PREEMPT_OFF;
681
682	return __irqsoff_tracer_init(tr);
683}
684
685static void preemptoff_tracer_reset(struct trace_array *tr)
686{
687	__irqsoff_tracer_reset(tr);
688}
689
690static struct tracer preemptoff_tracer __read_mostly =
691{
692	.name		= "preemptoff",
693	.init		= preemptoff_tracer_init,
694	.reset		= preemptoff_tracer_reset,
695	.start		= irqsoff_tracer_start,
696	.stop		= irqsoff_tracer_stop,
697	.print_max	= true,
698	.print_header   = irqsoff_print_header,
699	.print_line     = irqsoff_print_line,
700	.flag_changed	= irqsoff_flag_changed,
701#ifdef CONFIG_FTRACE_SELFTEST
702	.selftest    = trace_selftest_startup_preemptoff,
703#endif
704	.open		= irqsoff_trace_open,
705	.close		= irqsoff_trace_close,
706	.allow_instances = true,
707	.use_max_tr	= true,
708};
709#endif /* CONFIG_PREEMPT_TRACER */
710
711#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
712
713static int preemptirqsoff_tracer_init(struct trace_array *tr)
714{
715	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
716
717	return __irqsoff_tracer_init(tr);
718}
719
720static void preemptirqsoff_tracer_reset(struct trace_array *tr)
721{
722	__irqsoff_tracer_reset(tr);
723}
724
725static struct tracer preemptirqsoff_tracer __read_mostly =
726{
727	.name		= "preemptirqsoff",
728	.init		= preemptirqsoff_tracer_init,
729	.reset		= preemptirqsoff_tracer_reset,
730	.start		= irqsoff_tracer_start,
731	.stop		= irqsoff_tracer_stop,
732	.print_max	= true,
733	.print_header   = irqsoff_print_header,
734	.print_line     = irqsoff_print_line,
735	.flag_changed	= irqsoff_flag_changed,
736#ifdef CONFIG_FTRACE_SELFTEST
737	.selftest    = trace_selftest_startup_preemptirqsoff,
738#endif
739	.open		= irqsoff_trace_open,
740	.close		= irqsoff_trace_close,
741	.allow_instances = true,
742	.use_max_tr	= true,
743};
744#endif
745
746__init static int init_irqsoff_tracer(void)
747{
748#ifdef CONFIG_IRQSOFF_TRACER
749	register_tracer(&irqsoff_tracer);
750#endif
751#ifdef CONFIG_PREEMPT_TRACER
752	register_tracer(&preemptoff_tracer);
753#endif
754#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
755	register_tracer(&preemptirqsoff_tracer);
756#endif
757
758	return 0;
759}
760core_initcall(init_irqsoff_tracer);
761#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */