v4.6
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;
/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We checked and allowed creation of the parent, so allow its
	 * children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		/*
		 * We don't allow user space callchains for the function
		 * trace event, due to issues with page faults while
		 * tracing the page fault handler itself, and due to the
		 * overall trickiness of the code involved.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * so only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
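
/*
 * For reference, a minimal hypothetical perf_perm callback (the name
 * and policy here are illustrative only, not from this file): an event
 * that should stay root-only regardless of the generic rules in
 * perf_trace_event_perm() above could provide
 *
 *	static int my_event_perf_perm(struct trace_event_call *call,
 *				      struct perf_event *p_event)
 *	{
 *		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
 *	}
 *
 * in its trace_event_call, and the check at the top of
 * perf_trace_event_perm() would enforce it before anything else.
 */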

static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
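
/*
 * Back-of-the-envelope cost of the shared scratch buffers allocated
 * above (a sketch, assuming the usual PERF_NR_CONTEXTS == 4 and
 * PERF_MAX_TRACE_SIZE == 2048):
 *
 *	4 contexts * 2048 bytes = 8 KiB per possible CPU
 *
 * allocated once by the first registered event id and shared by all of
 * them; one buffer per recursion context, so events firing from task,
 * softirq, hardirq and NMI context don't trample each other's records.
 * total_ref_count keeps the buffers alive until the last event id is
 * unregistered.
 */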

static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}
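
/*
 * Lifecycle sketch (how perf core is expected to drive the hooks in
 * this file; see the tracepoint pmu in kernel/events/core.c):
 *
 *	perf_event_open(2)  -> perf_trace_init()    once per event
 *	sched in / out      -> perf_trace_add/del() every (de)schedule
 *	last fd closed      -> perf_trace_destroy()
 *
 * so the perm/reg/open sequence above is setup-time work, while
 * add/del below must stay cheap.
 */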

int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
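
/*
 * How user space reaches this entry point (a sketch, error handling
 * omitted): read the event id exported by tracefs, e.g. from
 * /sys/kernel/debug/tracing/events/sched/sched_switch/id, then pass
 * it as attr.config to perf_event_open(2):
 *
 *	struct perf_event_attr attr = {
 *		.type        = PERF_TYPE_TRACEPOINT,
 *		.size        = sizeof(attr),
 *		.config      = tracepoint_id,    // value read from "id"
 *		.sample_type = PERF_SAMPLE_RAW,  // triggers the perm checks
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * attr.config is the event_id matched against tp_event->event.type in
 * the loop above.
 */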

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

void *perf_trace_buf_prepare(int size, unsigned short type,
			     struct pt_regs **regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
			"perf buffer not large enough"))
		return NULL;

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[*rctxp]);
	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from alignment, to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
NOKPROBE_SYMBOL(perf_trace_buf_prepare);
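
/*
 * The intended calling pattern, which perf_ftrace_function_call()
 * below follows ("my_size" is an illustrative, 8-byte-aligned record
 * size, not a name from this file):
 *
 *	entry = perf_trace_buf_prepare(my_size, event_type, NULL, &rctx);
 *	if (!entry)
 *		return;
 *	// fill in the event-specific fields after the common header
 *	perf_trace_buf_submit(entry, my_size, rctx, addr, count,
 *			      regs, head, task);
 *
 * every successful prepare should be paired with a submit (or another
 * path that releases rctx), since prepare claims the per-context
 * recursion slot returned by perf_swevent_get_recursion_context().
 */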

#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}

int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */
v3.1
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * so only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret;
	int cpu;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	ret = -ENOMEM;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	hlist_del_rcu(&p_event->hlist_entry);
}

void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	mutex_lock(&event_mutex);
	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
	mutex_unlock(&event_mutex);
}

__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from alignment, to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);