Linux Audio

Check our new training course

Loading...
v4.6
  1/*
  2 * trace context switch
  3 *
  4 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
  5 *
  6 */
  7#include <linux/module.h>
 
 
  8#include <linux/kallsyms.h>
  9#include <linux/uaccess.h>
 10#include <linux/ftrace.h>
 11#include <trace/events/sched.h>
 12
 13#include "trace.h"
 14
 
 
/* Reference count of users who need the sched tracepoint probes active. */
static int			sched_ref;
/* Serializes sched_ref changes and tracepoint (un)registration. */
static DEFINE_MUTEX(sched_register_mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 17
 18static void
 19probe_sched_switch(void *ignore, bool preempt,
 20		   struct task_struct *prev, struct task_struct *next)
 21{
 
 
 
 
 
 22	if (unlikely(!sched_ref))
 23		return;
 24
 25	tracing_record_cmdline(prev);
 26	tracing_record_cmdline(next);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 27}
 28
 29static void
 30probe_sched_wakeup(void *ignore, struct task_struct *wakee)
 31{
 
 
 
 
 32	if (unlikely(!sched_ref))
 33		return;
 34
 35	tracing_record_cmdline(current);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 36}
 37
 38static int tracing_sched_register(void)
 39{
 40	int ret;
 41
 42	ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
 43	if (ret) {
 44		pr_info("wakeup trace: Couldn't activate tracepoint"
 45			" probe to kernel_sched_wakeup\n");
 46		return ret;
 47	}
 48
 49	ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
 50	if (ret) {
 51		pr_info("wakeup trace: Couldn't activate tracepoint"
 52			" probe to kernel_sched_wakeup_new\n");
 53		goto fail_deprobe;
 54	}
 55
 56	ret = register_trace_sched_switch(probe_sched_switch, NULL);
 57	if (ret) {
 58		pr_info("sched trace: Couldn't activate tracepoint"
 59			" probe to kernel_sched_switch\n");
 60		goto fail_deprobe_wake_new;
 61	}
 62
 63	return ret;
 64fail_deprobe_wake_new:
 65	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
 66fail_deprobe:
 67	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
 68	return ret;
 69}
 70
/*
 * Detach all three probes, in reverse order of their registration in
 * tracing_sched_register().
 */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}
 77
/*
 * Take a reference on the sched tracepoint probes, registering them on
 * the 0 -> 1 transition.
 *
 * NOTE(review): the return value of tracing_sched_register() is
 * ignored, so sched_ref is bumped even if registration failed —
 * TODO confirm this is intentional.
 */
static void tracing_start_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(sched_ref++))
		tracing_sched_register();
	mutex_unlock(&sched_register_mutex);
}
 85
/*
 * Drop a reference on the sched tracepoint probes, unregistering them
 * on the 1 -> 0 transition.  Must balance a prior call to
 * tracing_start_sched_switch(); an unbalanced stop drives sched_ref
 * negative.
 */
static void tracing_stop_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(--sched_ref))
		tracing_sched_unregister();
	mutex_unlock(&sched_register_mutex);
}
 93
/*
 * Start recording task comm (cmdline) information via the sched
 * tracepoint probes.
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
 98
/*
 * Stop recording task comm (cmdline) information; pairs with
 * tracing_start_cmdline_record().
 */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
v3.15
  1/*
  2 * trace context switch
  3 *
  4 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
  5 *
  6 */
  7#include <linux/module.h>
  8#include <linux/fs.h>
  9#include <linux/debugfs.h>
 10#include <linux/kallsyms.h>
 11#include <linux/uaccess.h>
 12#include <linux/ftrace.h>
 13#include <trace/events/sched.h>
 14
 15#include "trace.h"
 16
/* Trace array used by the context-switch tracer (set via assign_trace). */
static struct trace_array	*ctx_trace;
/* Non-zero while a tracer wants switch/wakeup entries written out. */
static int __read_mostly	tracer_enabled;
/* Reference count of users who need the sched tracepoint probes active. */
static int			sched_ref;
/* Serializes sched_ref/tracer_enabled changes and (un)registration. */
static DEFINE_MUTEX(sched_register_mutex);
/* Presumably set elsewhere when sched tracing is paused — only read here. */
static int			sched_stopped;
 23
/*
 * Write a TRACE_CTX (context switch) entry into @tr's ring buffer.
 * @tr:    trace array whose buffer receives the entry
 * @prev:  task being switched out
 * @next:  task being switched in
 * @flags: irq flags captured at the time of the event
 * @pc:    preempt count captured at the time of the event
 */
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;	/* reservation failed; nothing to write */
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= prev->state;
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= next->state;
	entry->next_cpu	= task_cpu(next);

	/* Commit the reserved event unless the event filter discards it. */
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
 51
/*
 * sched_switch tracepoint probe: always records both tasks' comms
 * while referenced; additionally writes a TRACE_CTX entry when the
 * tracer is enabled and not stopped.
 */
static void
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	/* Comm recording above happens even when entry writing is off. */
	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	/* irqs off so the cpu id and its per-cpu data stay consistent. */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);

	/* Skip the write while tracing is disabled on this CPU. */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}
 79
/*
 * Write a TRACE_WAKE entry into @tr's ring buffer describing @curr
 * waking up @wakee.  The entry reuses ctx_switch_entry: the "prev"
 * fields hold the waker, the "next" fields hold the wakee.
 */
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;	/* reservation failed; nothing to write */
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= curr->state;
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= wakee->state;
	entry->next_cpu			= task_cpu(wakee);

	/* Commit the reserved event unless the event filter discards it. */
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
107
/*
 * sched_wakeup tracepoint probe: always records the waker's comm
 * (current) while referenced; additionally writes a TRACE_WAKE entry
 * when the tracer is enabled and not stopped.
 */
static void
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(current);

	/* Comm recording above happens even when entry writing is off. */
	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	/* irqs off so the cpu id and its per-cpu data stay consistent. */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);

	/* Skip the write while tracing is disabled on this CPU. */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}
134
/*
 * Attach the probes to the sched_wakeup, sched_wakeup_new and
 * sched_switch tracepoints.  On failure, probes registered so far are
 * rolled back via the goto-unwind chain before returning the error.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
	return ret;
}
167
/*
 * Detach all three probes, in reverse order of their registration in
 * tracing_sched_register().
 */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}
174
/*
 * Take a reference on the sched tracepoint probes, registering them on
 * the 0 -> 1 transition.
 *
 * NOTE(review): the return value of tracing_sched_register() is
 * ignored, so sched_ref is bumped even if registration failed —
 * TODO confirm this is intentional.
 */
static void tracing_start_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(sched_ref++))
		tracing_sched_register();
	mutex_unlock(&sched_register_mutex);
}
182
/*
 * Drop a reference on the sched tracepoint probes, unregistering them
 * on the 1 -> 0 transition.  Must balance a prior call to
 * tracing_start_sched_switch().
 */
static void tracing_stop_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(--sched_ref))
		tracing_sched_unregister();
	mutex_unlock(&sched_register_mutex);
}
190
/*
 * Start recording task comm (cmdline) information via the sched
 * tracepoint probes.
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
195
/*
 * Stop recording task comm (cmdline) information; pairs with
 * tracing_start_cmdline_record().
 */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
200
/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.  A trace array must
 * have been assigned via tracing_sched_switch_assign_trace() first;
 * otherwise this warns and does nothing.
 */
void tracing_start_sched_switch_record(void)
{
	if (unlikely(!ctx_trace)) {
		WARN_ON(1);
		return;
	}

	/* Register the probes (refcounted) before enabling entry writes. */
	tracing_start_sched_switch();

	mutex_lock(&sched_register_mutex);
	tracer_enabled++;
	mutex_unlock(&sched_register_mutex);
}
219
/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.  Must balance a
 * prior tracing_start_sched_switch_record() call; the WARN_ON fires
 * if stops outnumber starts.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	WARN_ON(tracer_enabled < 0);
	mutex_unlock(&sched_register_mutex);

	/* Disable entry writes before dropping the probe reference. */
	tracing_stop_sched_switch();
}
234
/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}
247