// SPDX-License-Identifier: GPL-2.0
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>

#include "trace.h"

#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

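/*
 * trace_type selects which critical sections are timed. It is set by
 * the tracer init functions below: TRACER_IRQS_OFF for "irqsoff",
 * TRACER_PREEMPT_OFF for "preemptoff", and both bits for
 * "preemptirqsoff".
 */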
static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(int pc)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
}
#else
# define preempt_trace(pc) (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif
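/*
 * The display-graph behaviour is toggled at runtime through the tracer
 * options. A minimal sketch, assuming tracefs is mounted at the usual
 * location:
 *
 *	echo 1 > /sys/kernel/tracing/options/display-graph
 */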

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 * incremented; returns 0 if the trace is to be ignored, and
 * data->disabled is left unchanged.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}
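/*
 * Every successful func_prolog_dec() must be paired with a matching
 * atomic_dec(&data->disabled) once the event has been recorded, as in
 * irqsoff_tracer_call() below:
 *
 *	if (!func_prolog_dec(tr, &data, &flags))
 *		return;
 *	trace_function(tr, ip, parent_ip, flags, preempt_count());
 *	atomic_dec(&data->disabled);
 */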

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static struct fgraph_ops fgraph_ops = {
	.entryfunc		= &irqsoff_graph_entry,
	.retfunc		= &irqsoff_graph_return,
};

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
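/*
 * Both thresholds are driven from tracefs. A minimal sketch, assuming
 * the usual mount point (the files take microseconds):
 *
 *	echo 100 > /sys/kernel/tracing/tracing_thresh	# record all >= 100us
 *	echo 0 > /sys/kernel/tracing/tracing_thresh	# record only new maxima
 *	cat /sys/kernel/tracing/tracing_max_latency
 */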

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, pc);

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip, int pc)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, pc);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
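/*
 * start_critical_timing() and stop_critical_timing() hand off through
 * the per-cpu tracing_cpu flag: it is set to 1 when a critical section
 * opens and cleared when it closes, so a nested "off" event on the same
 * CPU does not restart the measurement, and a stray "on" without a
 * matching "off" returns early above.
 */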

/* start and stop critical timings, used to suspend measurement (e.g. in idle) */
void start_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);

void stop_critical_timings(void)
{
	int pc = preempt_count();

	if (preempt_trace(pc) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);
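/*
 * Typical use of this tracer from userspace, as a sketch (the tracefs
 * mount point may differ):
 *
 *	echo irqsoff > /sys/kernel/tracing/current_tracer
 *	echo 1 > /sys/kernel/tracing/tracing_on
 *	... run the workload ...
 *	cat /sys/kernel/tracing/tracing_max_latency
 *	cat /sys/kernel/tracing/trace
 */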

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non-overwrite mode screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only the top-level instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
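/*
 * The ->start and ->stop callbacks above only flip tracer_enabled;
 * they are invoked when tracing as a whole is restarted or stopped
 * (for instance via the tracing_on control), and leave the registered
 * function hooks in place.
 */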

#ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		stop_critical_timing(a0, a1, pc);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);

void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	unsigned int pc = preempt_count();

	if (!preempt_trace(pc) && irq_trace())
		start_critical_timing(a0, a1, pc);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);

static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		stop_critical_timing(a0, a1, pc);
}

void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	int pc = preempt_count();

	if (preempt_trace(pc) && !irq_trace())
		start_critical_timing(a0, a1, pc);
}

static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= preemptoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= preemptirqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
#endif

__init static int init_irqsoff_tracer(void)
{
#ifdef CONFIG_IRQSOFF_TRACER
	register_tracer(&irqsoff_tracer);
#endif
#ifdef CONFIG_PREEMPT_TRACER
	register_tracer(&preemptoff_tracer);
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
	register_tracer(&preemptirqsoff_tracer);
#endif

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* IRQSOFF_TRACER || PREEMPT_TRACER */