1// SPDX-License-Identifier: GPL-2.0
2/*
3 * trace context switch
4 *
5 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
6 *
7 */
8#include <linux/module.h>
9#include <linux/kallsyms.h>
10#include <linux/uaccess.h>
11#include <linux/ftrace.h>
12#include <trace/events/sched.h>
13
14#include "trace.h"
15
16#define RECORD_CMDLINE 1
17#define RECORD_TGID 2
18
19static int sched_cmdline_ref;
20static int sched_tgid_ref;
21static DEFINE_MUTEX(sched_register_mutex);
22
23static void
24probe_sched_switch(void *ignore, bool preempt,
25 struct task_struct *prev, struct task_struct *next)
26{
27 int flags;
28
29 flags = (RECORD_TGID * !!sched_tgid_ref) +
30 (RECORD_CMDLINE * !!sched_cmdline_ref);
31
32 if (!flags)
33 return;
34 tracing_record_taskinfo_sched_switch(prev, next, flags);
35}
36
37static void
38probe_sched_wakeup(void *ignore, struct task_struct *wakee)
39{
40 int flags;
41
42 flags = (RECORD_TGID * !!sched_tgid_ref) +
43 (RECORD_CMDLINE * !!sched_cmdline_ref);
44
45 if (!flags)
46 return;
47 tracing_record_taskinfo(current, flags);
48}
49
50static int tracing_sched_register(void)
51{
52 int ret;
53
54 ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
55 if (ret) {
56 pr_info("wakeup trace: Couldn't activate tracepoint"
57 " probe to kernel_sched_wakeup\n");
58 return ret;
59 }
60
61 ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
62 if (ret) {
63 pr_info("wakeup trace: Couldn't activate tracepoint"
64 " probe to kernel_sched_wakeup_new\n");
65 goto fail_deprobe;
66 }
67
68 ret = register_trace_sched_switch(probe_sched_switch, NULL);
69 if (ret) {
70 pr_info("sched trace: Couldn't activate tracepoint"
71 " probe to kernel_sched_switch\n");
72 goto fail_deprobe_wake_new;
73 }
74
75 return ret;
76fail_deprobe_wake_new:
77 unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
78fail_deprobe:
79 unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
80 return ret;
81}
82
/*
 * Detach all three probes. Done in the reverse order of
 * tracing_sched_register() above.
 */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}
89
90static void tracing_start_sched_switch(int ops)
91{
92 bool sched_register = (!sched_cmdline_ref && !sched_tgid_ref);
93 mutex_lock(&sched_register_mutex);
94
95 switch (ops) {
96 case RECORD_CMDLINE:
97 sched_cmdline_ref++;
98 break;
99
100 case RECORD_TGID:
101 sched_tgid_ref++;
102 break;
103 }
104
105 if (sched_register && (sched_cmdline_ref || sched_tgid_ref))
106 tracing_sched_register();
107 mutex_unlock(&sched_register_mutex);
108}
109
110static void tracing_stop_sched_switch(int ops)
111{
112 mutex_lock(&sched_register_mutex);
113
114 switch (ops) {
115 case RECORD_CMDLINE:
116 sched_cmdline_ref--;
117 break;
118
119 case RECORD_TGID:
120 sched_tgid_ref--;
121 break;
122 }
123
124 if (!sched_cmdline_ref && !sched_tgid_ref)
125 tracing_sched_unregister();
126 mutex_unlock(&sched_register_mutex);
127}
128
/* Enable recording of task comms (cmdlines); refcounted. */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch(RECORD_CMDLINE);
}
133
/* Disable recording of task comms; pairs with tracing_start_cmdline_record(). */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch(RECORD_CMDLINE);
}
138
/* Enable recording of task tgids; refcounted. */
void tracing_start_tgid_record(void)
{
	tracing_start_sched_switch(RECORD_TGID);
}
143
/* Disable recording of task tgids; pairs with tracing_start_tgid_record(). */
void tracing_stop_tgid_record(void)
{
	tracing_stop_sched_switch(RECORD_TGID);
}
1/*
2 * trace context switch
3 *
4 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
5 *
6 */
7#include <linux/module.h>
8#include <linux/fs.h>
9#include <linux/debugfs.h>
10#include <linux/kallsyms.h>
11#include <linux/uaccess.h>
12#include <linux/ftrace.h>
13#include <trace/events/sched.h>
14
15#include "trace.h"
16
17static struct trace_array *ctx_trace;
18static int __read_mostly tracer_enabled;
19static int sched_ref;
20static DEFINE_MUTEX(sched_register_mutex);
21static int sched_stopped;
22
23
/*
 * Write a TRACE_CTX (context switch) entry into @tr's ring buffer,
 * describing the switch from @prev to @next.
 *
 * Reserves space on the ring buffer, fills in the ctx_switch_entry
 * fields from the two tasks, and commits the event unless the event
 * filter discards it. A failed reservation is silently dropped.
 */
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;	/* buffer full or disabled: drop the event */
	entry = ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= prev->state;
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= next->state;
	entry->next_cpu			= task_cpu(next);

	/* Commit only if the event filter does not discard it. */
	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
51
/*
 * sched_switch tracepoint probe (old ctx-switch tracer).
 *
 * Always records the comms of both tasks while the probe is attached
 * (sched_ref != 0); only writes a trace entry when the tracer itself is
 * enabled and not stopped. IRQs are disabled around the buffer write,
 * and the per-cpu "disabled" counter is honored.
 */
static void
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (unlikely(!sched_ref))
		return;

	/* Record comms even when the tracer is not actively writing. */
	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* Skip if tracing is disabled for this cpu's buffer. */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}
79
/*
 * Write a TRACE_WAKE entry into @tr's ring buffer: @curr (the waker)
 * goes in the prev_* fields, @wakee in the next_* fields.
 *
 * Unlike tracing_sched_switch_trace(), this commits via
 * ring_buffer_unlock_commit() and then records kernel and user stack
 * traces explicitly.
 */
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;	/* buffer full or disabled: drop the event */
	entry = ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= curr->state;
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= wakee->state;
	entry->next_cpu			= task_cpu(wakee);

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
	/*
	 * NOTE(review): the stack traces are recorded even when the event
	 * above was discarded by the filter — confirm this is intended.
	 */
	ftrace_trace_stack(tr->buffer, flags, 6, pc);
	ftrace_trace_userstack(tr->buffer, flags, pc);
}
109
/*
 * sched_wakeup tracepoint probe (old ctx-switch tracer).
 *
 * Records the waker's comm whenever the probe is attached; writes a
 * wakeup trace entry only when the tracer is enabled and not stopped,
 * with IRQs disabled and the per-cpu "disabled" counter honored.
 */
static void
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (unlikely(!sched_ref))
		return;

	/* Record the waker's comm even when not actively writing. */
	tracing_record_cmdline(current);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* Skip if tracing is disabled for this cpu's buffer. */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}
136
/*
 * Attach the probes to the sched_wakeup, sched_wakeup_new and
 * sched_switch tracepoints. On failure, probes already attached are
 * detached again (goto-based unwind) and the error is returned.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
	return ret;
}
169
/*
 * Detach all three probes, in the reverse order of
 * tracing_sched_register() above.
 */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}
176
177static void tracing_start_sched_switch(void)
178{
179 mutex_lock(&sched_register_mutex);
180 if (!(sched_ref++))
181 tracing_sched_register();
182 mutex_unlock(&sched_register_mutex);
183}
184
185static void tracing_stop_sched_switch(void)
186{
187 mutex_lock(&sched_register_mutex);
188 if (!(--sched_ref))
189 tracing_sched_unregister();
190 mutex_unlock(&sched_register_mutex);
191}
192
/* Enable cmdline recording by taking a reference on the sched probes. */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
197
/* Disable cmdline recording; pairs with tracing_start_cmdline_record(). */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
202
203/**
204 * tracing_start_sched_switch_record - start tracing context switches
205 *
206 * Turns on context switch tracing for a tracer.
207 */
208void tracing_start_sched_switch_record(void)
209{
210 if (unlikely(!ctx_trace)) {
211 WARN_ON(1);
212 return;
213 }
214
215 tracing_start_sched_switch();
216
217 mutex_lock(&sched_register_mutex);
218 tracer_enabled++;
219 mutex_unlock(&sched_register_mutex);
220}
221
222/**
223 * tracing_stop_sched_switch_record - start tracing context switches
224 *
225 * Turns off context switch tracing for a tracer.
226 */
227void tracing_stop_sched_switch_record(void)
228{
229 mutex_lock(&sched_register_mutex);
230 tracer_enabled--;
231 WARN_ON(tracer_enabled < 0);
232 mutex_unlock(&sched_register_mutex);
233
234 tracing_stop_sched_switch();
235}
236
237/**
238 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
239 * @tr: trace array pointer to assign
240 *
241 * Some tracers might want to record the context switches in their
242 * trace. This function lets those tracers assign the trace array
243 * to use.
244 */
245void tracing_sched_switch_assign_trace(struct trace_array *tr)
246{
247 ctx_trace = tr;
248}
249