// SPDX-License-Identifier: GPL-2.0
/*
 * preemptoff and irqoff tracepoints
 *
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
 */

#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include "trace.h"

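/*
 * Defining CREATE_TRACE_POINTS before including the event header below
 * instantiates the preemptirq tracepoints in this translation unit rather
 * than merely declaring them.
 */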
#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
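/*
 * Rough illustration of the intended calling sequence in the low level
 * entry code (a sketch for orientation only, not copied verbatim from
 * kernel/entry/common.c):
 *
 *	// interrupt/syscall entry, IRQs traced as disabled:
 *	lockdep_hardirqs_off(CALLER_ADDR0);
 *	... RCU / context tracking starts watching ...
 *	trace_hardirqs_off_finish();
 *
 *	// return to user space, IRQs about to be traced as enabled:
 *	trace_hardirqs_on_prepare();
 *	lockdep_hardirqs_on_prepare();
 *	... RCU / context tracking stops watching ...
 *	lockdep_hardirqs_on(CALLER_ADDR0);
 */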
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);

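/*
 * Called when hard interrupts are about to be traced as enabled. The
 * per-cpu tracing_irq_cpu flag ensures the irq_enable tracepoint and the
 * irqsoff tracer hook fire only once per IRQs-off section; lockdep is
 * notified unconditionally.
 */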
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into a RCU on and a RCU off section.
 */
void trace_hardirqs_off_finish(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		if (!in_nmi())
			trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
	}

}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);

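/*
 * Called when hard interrupts are traced as being disabled. lockdep is
 * notified first; the irq_disable tracepoint and the irqsoff tracer hook
 * then fire only for the first disable of a section, again guarded by
 * tracing_irq_cpu.
 */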
void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);

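/*
 * Variant of trace_hardirqs_on() for callers (typically architecture code)
 * that pass the interesting call site in explicitly instead of relying on
 * CALLER_ADDR1.
 */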
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		if (!in_nmi())
			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
NOKPROBE_SYMBOL(trace_hardirqs_on_caller);

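/*
 * Variant of trace_hardirqs_off() taking the call site as an explicit
 * argument, mirroring trace_hardirqs_on_caller().
 */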
__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	lockdep_hardirqs_off(caller_addr);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
		if (!in_nmi())
			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE

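/*
 * Preemption is being re-enabled: emit the preempt_enable tracepoint
 * (skipped in NMI context, where the _rcuidle variant cannot be used) and
 * call the preempt-off tracer hook. a0/a1 are the call site and its parent
 * as supplied by the caller.
 */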
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_enable_rcuidle(a0, a1);
	tracer_preempt_on(a0, a1);
}

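/*
 * Counterpart of trace_preempt_on(): preemption is being disabled, so emit
 * the preempt_disable tracepoint and notify the preempt-off tracer.
 */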
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (!in_nmi())
		trace_preempt_disable_rcuidle(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif