v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * trace event based perf event profiling/tracing
  4 *
  5 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
  6 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
  7 */
  8
  9#include <linux/module.h>
 10#include <linux/kprobes.h>
 11#include "trace.h"
 12#include "trace_probe.h"
 13
 14static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
 15
 16/*
  17 * Force it to be aligned to unsigned long to avoid misaligned access
  18 * surprises
 19 */
 20typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
 21	perf_trace_t;
 22
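The typedef above just names a fixed-size byte array whose alignment is that of unsigned long, so each per-CPU scratch record starts on a natural word boundary. A rough expansion as a sketch, assuming the PERF_MAX_TRACE_SIZE of 2048 bytes used by kernels of this era and 8-byte longs (the name below is illustrative, not the kernel's):

/* roughly: one 2 KiB, unsigned-long-aligned scratch record per context */
typedef unsigned long perf_trace_t_sketch[2048 / sizeof(unsigned long)];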
 23/* Count the events in use (per event id, not per instance) */
 24static int	total_ref_count;
 25
 26static int perf_trace_event_perm(struct trace_event_call *tp_event,
 27				 struct perf_event *p_event)
 28{
 29	if (tp_event->perf_perm) {
 30		int ret = tp_event->perf_perm(tp_event, p_event);
 31		if (ret)
 32			return ret;
 33	}
 34
 35	/*
  36	 * We checked and allowed the parent to be created,
  37	 * so allow children without checking.
 38	 */
 39	if (p_event->parent)
 40		return 0;
 41
 42	/*
 43	 * It's ok to check current process (owner) permissions in here,
 44	 * because code below is called only via perf_event_open syscall.
 45	 */
 46
 47	/* The ftrace function trace is allowed only for root. */
 48	if (ftrace_event_is_function(tp_event)) {
 49		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
 50			return -EPERM;
 51
 52		if (!is_sampling_event(p_event))
 53			return 0;
 54
 55		/*
  56		 * We don't allow user space callchains for the function trace
  57		 * event, due to issues with page faults while tracing the page
  58		 * fault handler, and its overall trickiness.
 59		 */
 60		if (!p_event->attr.exclude_callchain_user)
 61			return -EINVAL;
 62
 63		/*
 64		 * Same reason to disable user stack dump as for user space
 65		 * callchains above.
 66		 */
 67		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
 68			return -EINVAL;
 69	}
 70
 71	/* No tracing, just counting, so no obvious leak */
 72	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
 73		return 0;
 74
 75	/* Some events are ok to be traced by non-root users... */
 76	if (p_event->attach_state == PERF_ATTACH_TASK) {
 77		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
 78			return 0;
 79	}
 80
 81	/*
 82	 * ...otherwise raw tracepoint data can be a severe data leak,
 83	 * only allow root to have these.
 84	 */
 85	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
 86		return -EPERM;
 87
 88	return 0;
 89}
 90
 91static int perf_trace_event_reg(struct trace_event_call *tp_event,
 92				struct perf_event *p_event)
 93{
 94	struct hlist_head __percpu *list;
 95	int ret = -ENOMEM;
 96	int cpu;
 97
 98	p_event->tp_event = tp_event;
 99	if (tp_event->perf_refcount++ > 0)
100		return 0;
101
102	list = alloc_percpu(struct hlist_head);
103	if (!list)
104		goto fail;
105
106	for_each_possible_cpu(cpu)
107		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));
108
109	tp_event->perf_events = list;
110
111	if (!total_ref_count) {
112		char __percpu *buf;
113		int i;
114
115		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
116			buf = (char __percpu *)alloc_percpu(perf_trace_t);
117			if (!buf)
118				goto fail;
119
120			perf_trace_buf[i] = buf;
121		}
122	}
123
124	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
125	if (ret)
126		goto fail;
127
128	total_ref_count++;
129	return 0;
130
131fail:
132	if (!total_ref_count) {
133		int i;
134
135		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
136			free_percpu(perf_trace_buf[i]);
137			perf_trace_buf[i] = NULL;
138		}
139	}
140
141	if (!--tp_event->perf_refcount) {
142		free_percpu(tp_event->perf_events);
143		tp_event->perf_events = NULL;
144	}
145
146	return ret;
147}
148
149static void perf_trace_event_unreg(struct perf_event *p_event)
150{
151	struct trace_event_call *tp_event = p_event->tp_event;
152	int i;
153
154	if (--tp_event->perf_refcount > 0)
155		goto out;
156
157	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);
158
159	/*
160	 * Ensure our callback won't be called anymore. The buffers
161	 * will be freed after that.
162	 */
163	tracepoint_synchronize_unregister();
164
165	free_percpu(tp_event->perf_events);
166	tp_event->perf_events = NULL;
167
168	if (!--total_ref_count) {
169		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
170			free_percpu(perf_trace_buf[i]);
171			perf_trace_buf[i] = NULL;
172		}
173	}
174out:
175	module_put(tp_event->mod);
176}
177
178static int perf_trace_event_open(struct perf_event *p_event)
179{
180	struct trace_event_call *tp_event = p_event->tp_event;
181	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
182}
183
184static void perf_trace_event_close(struct perf_event *p_event)
185{
186	struct trace_event_call *tp_event = p_event->tp_event;
187	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
188}
189
190static int perf_trace_event_init(struct trace_event_call *tp_event,
191				 struct perf_event *p_event)
192{
193	int ret;
194
195	ret = perf_trace_event_perm(tp_event, p_event);
196	if (ret)
197		return ret;
198
199	ret = perf_trace_event_reg(tp_event, p_event);
200	if (ret)
201		return ret;
202
203	ret = perf_trace_event_open(p_event);
204	if (ret) {
205		perf_trace_event_unreg(p_event);
206		return ret;
207	}
208
209	return 0;
210}
211
212int perf_trace_init(struct perf_event *p_event)
213{
214	struct trace_event_call *tp_event;
215	u64 event_id = p_event->attr.config;
216	int ret = -EINVAL;
217
218	mutex_lock(&event_mutex);
219	list_for_each_entry(tp_event, &ftrace_events, list) {
220		if (tp_event->event.type == event_id &&
221		    tp_event->class && tp_event->class->reg &&
222		    try_module_get(tp_event->mod)) {
223			ret = perf_trace_event_init(tp_event, p_event);
224			if (ret)
225				module_put(tp_event->mod);
226			break;
227		}
228	}
229	mutex_unlock(&event_mutex);
230
231	return ret;
232}
233
234void perf_trace_destroy(struct perf_event *p_event)
235{
236	mutex_lock(&event_mutex);
237	perf_trace_event_close(p_event);
238	perf_trace_event_unreg(p_event);
239	mutex_unlock(&event_mutex);
240}
241
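perf_trace_init() above is the entry point for PERF_TYPE_TRACEPOINT events: attr.config carries the tracepoint's numeric id, which is matched against tp_event->event.type, and PERF_SAMPLE_RAW is what triggers the paranoia check in perf_trace_event_perm(). A hedged user-space sketch of how such an event is opened; the sched_switch path is only an example, and the id file may live under /sys/kernel/debug/tracing on older systems:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Illustrative only: open sched:sched_switch as a raw-sample tracepoint event. */
static int open_tracepoint_event(void)
{
	struct perf_event_attr attr;
	unsigned long long id;
	FILE *f;

	/* This id is what perf_trace_init() compares against tp_event->event.type. */
	f = fopen("/sys/kernel/tracing/events/sched/sched_switch/id", "r");
	if (!f)
		return -1;
	if (fscanf(f, "%llu", &id) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.size = sizeof(attr);
	attr.config = id;
	attr.sample_type = PERF_SAMPLE_RAW;	/* raw records: gated by perf_trace_event_perm() */
	attr.sample_period = 1;

	/* pid 0 = calling task, cpu -1 = any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}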
242#ifdef CONFIG_KPROBE_EVENTS
243int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
244{
245	int ret;
246	char *func = NULL;
247	struct trace_event_call *tp_event;
248
249	if (p_event->attr.kprobe_func) {
250		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
251		if (!func)
252			return -ENOMEM;
253		ret = strncpy_from_user(
254			func, u64_to_user_ptr(p_event->attr.kprobe_func),
255			KSYM_NAME_LEN);
256		if (ret == KSYM_NAME_LEN)
257			ret = -E2BIG;
258		if (ret < 0)
259			goto out;
260
261		if (func[0] == '\0') {
262			kfree(func);
263			func = NULL;
264		}
265	}
266
267	tp_event = create_local_trace_kprobe(
268		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
269		p_event->attr.probe_offset, is_retprobe);
270	if (IS_ERR(tp_event)) {
271		ret = PTR_ERR(tp_event);
272		goto out;
273	}
274
275	mutex_lock(&event_mutex);
276	ret = perf_trace_event_init(tp_event, p_event);
277	if (ret)
278		destroy_local_trace_kprobe(tp_event);
279	mutex_unlock(&event_mutex);
280out:
281	kfree(func);
282	return ret;
283}
284
285void perf_kprobe_destroy(struct perf_event *p_event)
286{
287	mutex_lock(&event_mutex);
288	perf_trace_event_close(p_event);
289	perf_trace_event_unreg(p_event);
290	mutex_unlock(&event_mutex);
291
292	destroy_local_trace_kprobe(p_event->tp_event);
293}
294#endif /* CONFIG_KPROBE_EVENTS */
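perf_kprobe_init() is reached when the attr targets the dynamically registered "kprobe" PMU rather than PERF_TYPE_TRACEPOINT. A hedged user-space sketch; the symbol name is only an example, and the PMU type id has to be read from sysfs at run time:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Illustrative only: place a kprobe on do_sys_open via the "kprobe" PMU. */
static int open_kprobe_event(void)
{
	struct perf_event_attr attr;
	unsigned int type;
	FILE *f;

	/* Dynamic PMUs have no fixed type id; read it from sysfs. */
	f = fopen("/sys/bus/event_source/devices/kprobe/type", "r");
	if (!f)
		return -1;
	if (fscanf(f, "%u", &type) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.size = sizeof(attr);
	/* This user pointer is what perf_kprobe_init() strncpy_from_user()s. */
	attr.kprobe_func = (uint64_t)(uintptr_t)"do_sys_open";
	attr.probe_offset = 0;
	/* Bit 0 of attr.config (the PMU's format/retprobe field) requests a retprobe. */

	/* pid -1 + cpu 0: system-wide on CPU 0 (kprobes require privilege anyway). */
	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}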
295
296#ifdef CONFIG_UPROBE_EVENTS
297int perf_uprobe_init(struct perf_event *p_event,
298		     unsigned long ref_ctr_offset, bool is_retprobe)
299{
300	int ret;
301	char *path = NULL;
302	struct trace_event_call *tp_event;
303
304	if (!p_event->attr.uprobe_path)
305		return -EINVAL;
306
307	path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
308			    PATH_MAX);
309	if (IS_ERR(path)) {
310		ret = PTR_ERR(path);
311		return (ret == -EINVAL) ? -E2BIG : ret;
312	}
313	if (path[0] == '\0') {
314		ret = -EINVAL;
315		goto out;
316	}
317
318	tp_event = create_local_trace_uprobe(path, p_event->attr.probe_offset,
319					     ref_ctr_offset, is_retprobe);
320	if (IS_ERR(tp_event)) {
321		ret = PTR_ERR(tp_event);
322		goto out;
323	}
324
325	/*
 326	 * local trace_uprobe needs to hold event_mutex to call
327	 * uprobe_buffer_enable() and uprobe_buffer_disable().
328	 * event_mutex is not required for local trace_kprobes.
329	 */
330	mutex_lock(&event_mutex);
331	ret = perf_trace_event_init(tp_event, p_event);
332	if (ret)
333		destroy_local_trace_uprobe(tp_event);
334	mutex_unlock(&event_mutex);
335out:
336	kfree(path);
337	return ret;
338}
339
340void perf_uprobe_destroy(struct perf_event *p_event)
341{
342	mutex_lock(&event_mutex);
343	perf_trace_event_close(p_event);
344	perf_trace_event_unreg(p_event);
345	mutex_unlock(&event_mutex);
346	destroy_local_trace_uprobe(p_event->tp_event);
347}
348#endif /* CONFIG_UPROBE_EVENTS */
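The uprobe path mirrors the kprobe one, except that perf_uprobe_init() takes a file path plus an offset inside that file, and a ref_ctr_offset is passed in separately by its caller. Another illustrative sketch (same includes as the kprobe example above; the library path and offset are made up):

/* Illustrative only: a uprobe at a made-up file offset inside libc, on one task. */
static int open_uprobe_event(pid_t target_pid)
{
	struct perf_event_attr attr;
	unsigned int type;
	FILE *f;

	f = fopen("/sys/bus/event_source/devices/uprobe/type", "r");
	if (!f)
		return -1;
	if (fscanf(f, "%u", &type) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.size = sizeof(attr);
	/* This user pointer is what perf_uprobe_init() strndup_user()s. */
	attr.uprobe_path = (uint64_t)(uintptr_t)"/usr/lib/libc.so.6";
	attr.probe_offset = 0x9d0c0;	/* made-up file offset of the probed instruction */

	return syscall(__NR_perf_event_open, &attr, target_pid, -1, -1, 0);
}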
349
350int perf_trace_add(struct perf_event *p_event, int flags)
351{
352	struct trace_event_call *tp_event = p_event->tp_event;
353
354	if (!(flags & PERF_EF_START))
355		p_event->hw.state = PERF_HES_STOPPED;
356
357	/*
 358	 * If TRACE_REG_PERF_ADD returns false, no custom action was performed
359	 * and we need to take the default action of enqueueing our event on
360	 * the right per-cpu hlist.
361	 */
362	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
363		struct hlist_head __percpu *pcpu_list;
364		struct hlist_head *list;
365
366		pcpu_list = tp_event->perf_events;
367		if (WARN_ON_ONCE(!pcpu_list))
368			return -EINVAL;
369
370		list = this_cpu_ptr(pcpu_list);
371		hlist_add_head_rcu(&p_event->hlist_entry, list);
372	}
373
374	return 0;
375}
376
377void perf_trace_del(struct perf_event *p_event, int flags)
378{
379	struct trace_event_call *tp_event = p_event->tp_event;
380
381	/*
 382	 * If TRACE_REG_PERF_DEL returns false, no custom action was performed
383	 * and we need to take the default action of dequeueing our event from
384	 * the right per-cpu hlist.
385	 */
386	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
387		hlist_del_rcu(&p_event->hlist_entry);
388}
389
390void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
391{
392	char *raw_data;
393	int rctx;
394
395	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
396
397	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
398		      "perf buffer not large enough"))
399		return NULL;
400
401	*rctxp = rctx = perf_swevent_get_recursion_context();
402	if (rctx < 0)
403		return NULL;
404
405	if (regs)
406		*regs = this_cpu_ptr(&__perf_regs[rctx]);
407	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);
408
409	/* zero the dead bytes from align to not leak stack to user */
410	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
411	return raw_data;
412}
413EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
414NOKPROBE_SYMBOL(perf_trace_buf_alloc);
415
416void perf_trace_buf_update(void *record, u16 type)
417{
418	struct trace_entry *entry = record;
419	int pc = preempt_count();
420	unsigned long flags;
421
422	local_save_flags(flags);
423	tracing_generic_entry_update(entry, type, flags, pc);
424}
425NOKPROBE_SYMBOL(perf_trace_buf_update);
426
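perf_trace_buf_alloc() hands out one of the per-recursion-context per-CPU scratch records declared at the top of this file, perf_trace_buf_update() stamps the common trace_entry header (type, irq flags, preempt count) into such a record, and perf_trace_buf_submit() pushes it into the attached perf events via perf_tp_event(). A minimal caller sketch, modeled on perf_ftrace_function_call() below; struct my_entry, my_event_type and my_event_head are hypothetical stand-ins for a real event's record layout, type id and per-CPU hlist:

struct my_entry {			/* hypothetical record layout */
	struct trace_entry	ent;
	unsigned long		value;
};

static void my_perf_probe(struct hlist_head *my_event_head, u16 my_event_type,
			  unsigned long value)
{
	struct my_entry *entry;
	struct pt_regs *regs;
	int rctx;
	/* payload rounded to u64, minus the trailing u32 that perf itself adds */
	int size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64)) - sizeof(u32);

	entry = perf_trace_buf_alloc(size, &regs, &rctx);
	if (!entry)
		return;			/* recursion or over-sized record: drop it */

	perf_fetch_caller_regs(regs);	/* buf_alloc only hands back the regs slot */
	perf_trace_buf_update(entry, my_event_type);	/* fill the common header */
	entry->value = value;

	perf_trace_buf_submit(entry, size, rctx, my_event_type, 1, regs,
			      my_event_head, NULL);
}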
427#ifdef CONFIG_FUNCTION_TRACER
428static void
429perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
430			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
431{
432	struct ftrace_entry *entry;
433	struct perf_event *event;
434	struct hlist_head head;
435	struct pt_regs regs;
436	int rctx;
437
438	if ((unsigned long)ops->private != smp_processor_id())
439		return;
440
441	event = container_of(ops, struct perf_event, ftrace_ops);
442
443	/*
444	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
445	 * the perf code does is hlist_for_each_entry_rcu(), so we can
446	 * get away with simply setting the @head.first pointer in order
447	 * to create a singular list.
448	 */
449	head.first = &event->hlist_entry;
450
451#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
452		    sizeof(u64)) - sizeof(u32))
453
454	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);
455
456	memset(&regs, 0, sizeof(regs));
457	perf_fetch_caller_regs(&regs);
458
459	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
460	if (!entry)
461		return;
462
463	entry->ip = ip;
464	entry->parent_ip = parent_ip;
465	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
466			      1, &regs, &head, NULL);
467
468#undef ENTRY_SIZE
469}
470
471static int perf_ftrace_function_register(struct perf_event *event)
472{
473	struct ftrace_ops *ops = &event->ftrace_ops;
474
475	ops->flags   = FTRACE_OPS_FL_RCU;
476	ops->func    = perf_ftrace_function_call;
477	ops->private = (void *)(unsigned long)nr_cpu_ids;
478
479	return register_ftrace_function(ops);
480}
481
482static int perf_ftrace_function_unregister(struct perf_event *event)
483{
484	struct ftrace_ops *ops = &event->ftrace_ops;
485	int ret = unregister_ftrace_function(ops);
486	ftrace_free_filter(ops);
487	return ret;
488}
489
490int perf_ftrace_event_register(struct trace_event_call *call,
491			       enum trace_reg type, void *data)
492{
493	struct perf_event *event = data;
494
495	switch (type) {
496	case TRACE_REG_REGISTER:
497	case TRACE_REG_UNREGISTER:
498		break;
499	case TRACE_REG_PERF_REGISTER:
500	case TRACE_REG_PERF_UNREGISTER:
501		return 0;
502	case TRACE_REG_PERF_OPEN:
503		return perf_ftrace_function_register(data);
504	case TRACE_REG_PERF_CLOSE:
505		return perf_ftrace_function_unregister(data);
506	case TRACE_REG_PERF_ADD:
507		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
508		return 1;
509	case TRACE_REG_PERF_DEL:
510		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
511		return 1;
512	}
513
514	return -EINVAL;
515}
516#endif /* CONFIG_FUNCTION_TRACER */
v3.15
 
  1/*
  2 * trace event based perf event profiling/tracing
  3 *
  4 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
  5 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
  6 */
  7
  8#include <linux/module.h>
  9#include <linux/kprobes.h>
 10#include "trace.h"
 11
 12static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
 13
 14/*
  15 * Force it to be aligned to unsigned long to avoid misaligned access
  16 * surprises
 17 */
 18typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
 19	perf_trace_t;
 20
 21/* Count the events in use (per event id, not per instance) */
 22static int	total_ref_count;
 23
 24static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
 25				 struct perf_event *p_event)
 26{
 27	if (tp_event->perf_perm) {
 28		int ret = tp_event->perf_perm(tp_event, p_event);
 29		if (ret)
 30			return ret;
 31	}
 32
 33	/* The ftrace function trace is allowed only for root. */
 34	if (ftrace_event_is_function(tp_event)) {
 35		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
 36			return -EPERM;
 37
 38		/*
  39		 * We don't allow user space callchains for the function trace
  40		 * event, due to issues with page faults while tracing the page
  41		 * fault handler, and its overall trickiness.
 42		 */
 43		if (!p_event->attr.exclude_callchain_user)
 44			return -EINVAL;
 45
 46		/*
 47		 * Same reason to disable user stack dump as for user space
 48		 * callchains above.
 49		 */
 50		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
 51			return -EINVAL;
 52	}
 53
 54	/* No tracing, just counting, so no obvious leak */
 55	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
 56		return 0;
 57
 58	/* Some events are ok to be traced by non-root users... */
 59	if (p_event->attach_state == PERF_ATTACH_TASK) {
 60		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
 61			return 0;
 62	}
 63
 64	/*
 65	 * ...otherwise raw tracepoint data can be a severe data leak,
 66	 * only allow root to have these.
 67	 */
 68	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
 69		return -EPERM;
 70
 71	return 0;
 72}
 73
 74static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
 75				struct perf_event *p_event)
 76{
 77	struct hlist_head __percpu *list;
 78	int ret = -ENOMEM;
 79	int cpu;
 80
 81	p_event->tp_event = tp_event;
 82	if (tp_event->perf_refcount++ > 0)
 83		return 0;
 84
 85	list = alloc_percpu(struct hlist_head);
 86	if (!list)
 87		goto fail;
 88
 89	for_each_possible_cpu(cpu)
 90		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));
 91
 92	tp_event->perf_events = list;
 93
 94	if (!total_ref_count) {
 95		char __percpu *buf;
 96		int i;
 97
 98		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
 99			buf = (char __percpu *)alloc_percpu(perf_trace_t);
100			if (!buf)
101				goto fail;
102
103			perf_trace_buf[i] = buf;
104		}
105	}
106
107	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
108	if (ret)
109		goto fail;
110
111	total_ref_count++;
112	return 0;
113
114fail:
115	if (!total_ref_count) {
116		int i;
117
118		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
119			free_percpu(perf_trace_buf[i]);
120			perf_trace_buf[i] = NULL;
121		}
122	}
123
124	if (!--tp_event->perf_refcount) {
125		free_percpu(tp_event->perf_events);
126		tp_event->perf_events = NULL;
127	}
128
129	return ret;
130}
131
132static void perf_trace_event_unreg(struct perf_event *p_event)
133{
134	struct ftrace_event_call *tp_event = p_event->tp_event;
135	int i;
136
137	if (--tp_event->perf_refcount > 0)
138		goto out;
139
140	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);
141
142	/*
143	 * Ensure our callback won't be called anymore. The buffers
144	 * will be freed after that.
145	 */
146	tracepoint_synchronize_unregister();
147
148	free_percpu(tp_event->perf_events);
149	tp_event->perf_events = NULL;
150
151	if (!--total_ref_count) {
152		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
153			free_percpu(perf_trace_buf[i]);
154			perf_trace_buf[i] = NULL;
155		}
156	}
157out:
158	module_put(tp_event->mod);
159}
160
161static int perf_trace_event_open(struct perf_event *p_event)
162{
163	struct ftrace_event_call *tp_event = p_event->tp_event;
164	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
165}
166
167static void perf_trace_event_close(struct perf_event *p_event)
168{
169	struct ftrace_event_call *tp_event = p_event->tp_event;
170	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
171}
172
173static int perf_trace_event_init(struct ftrace_event_call *tp_event,
174				 struct perf_event *p_event)
175{
176	int ret;
177
178	ret = perf_trace_event_perm(tp_event, p_event);
179	if (ret)
180		return ret;
181
182	ret = perf_trace_event_reg(tp_event, p_event);
183	if (ret)
184		return ret;
185
186	ret = perf_trace_event_open(p_event);
187	if (ret) {
188		perf_trace_event_unreg(p_event);
189		return ret;
190	}
191
192	return 0;
193}
194
195int perf_trace_init(struct perf_event *p_event)
196{
197	struct ftrace_event_call *tp_event;
198	u64 event_id = p_event->attr.config;
199	int ret = -EINVAL;
200
201	mutex_lock(&event_mutex);
202	list_for_each_entry(tp_event, &ftrace_events, list) {
203		if (tp_event->event.type == event_id &&
204		    tp_event->class && tp_event->class->reg &&
205		    try_module_get(tp_event->mod)) {
206			ret = perf_trace_event_init(tp_event, p_event);
207			if (ret)
208				module_put(tp_event->mod);
209			break;
210		}
211	}
212	mutex_unlock(&event_mutex);
213
214	return ret;
215}
216
217void perf_trace_destroy(struct perf_event *p_event)
218{
219	mutex_lock(&event_mutex);
220	perf_trace_event_close(p_event);
221	perf_trace_event_unreg(p_event);
222	mutex_unlock(&event_mutex);
223}
224
225int perf_trace_add(struct perf_event *p_event, int flags)
226{
227	struct ftrace_event_call *tp_event = p_event->tp_event;
228	struct hlist_head __percpu *pcpu_list;
229	struct hlist_head *list;
230
231	pcpu_list = tp_event->perf_events;
232	if (WARN_ON_ONCE(!pcpu_list))
233		return -EINVAL;
234
235	if (!(flags & PERF_EF_START))
236		p_event->hw.state = PERF_HES_STOPPED;
237
238	list = this_cpu_ptr(pcpu_list);
239	hlist_add_head_rcu(&p_event->hlist_entry, list);
240
241	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
242}
243
244void perf_trace_del(struct perf_event *p_event, int flags)
245{
246	struct ftrace_event_call *tp_event = p_event->tp_event;
247	hlist_del_rcu(&p_event->hlist_entry);
248	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
249}
250
251__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
252				       struct pt_regs *regs, int *rctxp)
253{
254	struct trace_entry *entry;
255	unsigned long flags;
256	char *raw_data;
257	int pc;
258
259	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
260
261	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
262			"perf buffer not large enough"))
263		return NULL;
264
265	pc = preempt_count();
266
267	*rctxp = perf_swevent_get_recursion_context();
268	if (*rctxp < 0)
269		return NULL;
270
271	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);
272
273	/* zero the dead bytes from align to not leak stack to user */
274	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
275
276	entry = (struct trace_entry *)raw_data;
277	local_save_flags(flags);
278	tracing_generic_entry_update(entry, flags, pc);
279	entry->type = type;
280
281	return raw_data;
282}
283EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
284
285#ifdef CONFIG_FUNCTION_TRACER
286static void
287perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
288			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
289{
290	struct ftrace_entry *entry;
291	struct hlist_head *head;
292	struct pt_regs regs;
293	int rctx;
294
295	head = this_cpu_ptr(event_function.perf_events);
296	if (hlist_empty(head))
297		return;
298
299#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
300		    sizeof(u64)) - sizeof(u32))
301
302	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);
303
304	perf_fetch_caller_regs(&regs);
305
306	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
307	if (!entry)
308		return;
309
310	entry->ip = ip;
311	entry->parent_ip = parent_ip;
312	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
313			      1, &regs, head, NULL);
314
315#undef ENTRY_SIZE
316}
317
318static int perf_ftrace_function_register(struct perf_event *event)
319{
320	struct ftrace_ops *ops = &event->ftrace_ops;
321
322	ops->flags |= FTRACE_OPS_FL_CONTROL;
323	ops->func = perf_ftrace_function_call;
324	return register_ftrace_function(ops);
325}
326
327static int perf_ftrace_function_unregister(struct perf_event *event)
328{
329	struct ftrace_ops *ops = &event->ftrace_ops;
330	int ret = unregister_ftrace_function(ops);
331	ftrace_free_filter(ops);
332	return ret;
333}
334
335static void perf_ftrace_function_enable(struct perf_event *event)
336{
337	ftrace_function_local_enable(&event->ftrace_ops);
338}
339
340static void perf_ftrace_function_disable(struct perf_event *event)
341{
342	ftrace_function_local_disable(&event->ftrace_ops);
343}
344
345int perf_ftrace_event_register(struct ftrace_event_call *call,
346			       enum trace_reg type, void *data)
347{
348	switch (type) {
349	case TRACE_REG_REGISTER:
350	case TRACE_REG_UNREGISTER:
351		break;
352	case TRACE_REG_PERF_REGISTER:
353	case TRACE_REG_PERF_UNREGISTER:
354		return 0;
355	case TRACE_REG_PERF_OPEN:
356		return perf_ftrace_function_register(data);
357	case TRACE_REG_PERF_CLOSE:
358		return perf_ftrace_function_unregister(data);
359	case TRACE_REG_PERF_ADD:
360		perf_ftrace_function_enable(data);
361		return 0;
362	case TRACE_REG_PERF_DEL:
363		perf_ftrace_function_disable(data);
364		return 0;
365	}
366
367	return -EINVAL;
368}
369#endif /* CONFIG_FUNCTION_TRACER */