v3.15
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		/*
		 * We don't allow user space callchains for function trace
		 * events, due to issues with page faults while tracing the
		 * page fault handler and its overall tricky nature.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}
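
For orientation: perf_trace_init() above is entered from the perf_event_open() syscall when attr.type is PERF_TYPE_TRACEPOINT, with attr.config carrying the tracepoint id that debugfs exports per event. A minimal userspace sketch of that path (the sched_switch id file is an example; error handling is trimmed, and the open may require root depending on perf_event_paranoid):

/* Hedged sketch: open a tracepoint as a counting perf event, which drives
 * the perf_trace_init()/perf_trace_destroy() path above. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	FILE *f;
	int id, fd;

	/* The tracepoint id is what ends up in p_event->attr.config. */
	f = fopen("/sys/kernel/debug/tracing/events/sched/sched_switch/id", "r");
	if (!f || fscanf(f, "%d", &id) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.size = sizeof(attr);
	attr.config = id;	/* matched against tp_event->event.type */

	/* pid = 0, cpu = -1: count for this task on any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("sched_switch fired %lld times\n", count);
	close(fd);	/* teardown runs through perf_trace_destroy() */
	return 0;
}

Because this example never sets PERF_SAMPLE_RAW, perf_trace_event_perm() takes the "just counting" early return above, so no raw tracepoint payload is exposed to the caller.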

int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
			"perf buffer not large enough"))
		return NULL;

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
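
The prepare/submit pairing anchored by perf_trace_buf_prepare() is what the tracepoint glue generated from include/trace/ftrace.h uses in this release, as do the kprobe, uprobe and syscall perf handlers. A simplified, hedged sketch of such a handler follows; my_perf_handler and struct my_entry are illustrative names, not kernel symbols:

/* Hedged sketch of a v3.15-style perf trace handler. perf_trace_buf_prepare()
 * already stamps the trace_entry header (including entry->type), so only the
 * payload fields remain to be filled before submit. */
struct my_entry {			/* illustrative payload layout */
	struct trace_entry	ent;	/* header, filled by buf_prepare */
	unsigned long		val;	/* example tracepoint field */
};

static void my_perf_handler(struct ftrace_event_call *event_call)
{
	struct my_entry *entry;
	struct pt_regs __regs;
	struct hlist_head *head;
	int entry_size, rctx;

	head = this_cpu_ptr(event_call->perf_events);
	if (hlist_empty(head))
		return;

	/* u64-align the record, mirroring the ENTRY_SIZE macro below */
	entry_size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64))
		     - sizeof(u32);

	perf_fetch_caller_regs(&__regs);
	entry = perf_trace_buf_prepare(entry_size, event_call->event.type,
				       &__regs, &rctx);
	if (!entry)
		return;

	entry->val = 0;			/* assign tracepoint fields here */

	/* addr = 0, count = 1, task = NULL, as in the ftrace caller below */
	perf_trace_buf_submit(entry, entry_size, rctx, 0, 1, &__regs,
			      head, NULL);
}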

#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_CONTROL;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}

int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */
v4.17
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"
#include "trace_probe.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We checked and allowed the parent to be created, so allow
	 * children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's OK to check the current process (owner) permissions here,
	 * because this code is only called via the perf_event_open()
	 * syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for function trace
		 * events, due to issues with page faults while tracing the
		 * page fault handler and its overall tricky nature.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

#ifdef CONFIG_KPROBE_EVENTS
int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *func = NULL;
	struct trace_event_call *tp_event;

	if (p_event->attr.kprobe_func) {
		func = kzalloc(KSYM_NAME_LEN, GFP_KERNEL);
		if (!func)
			return -ENOMEM;
		ret = strncpy_from_user(
			func, u64_to_user_ptr(p_event->attr.kprobe_func),
			KSYM_NAME_LEN);
		if (ret == KSYM_NAME_LEN)
			ret = -E2BIG;
		if (ret < 0)
			goto out;

		if (func[0] == '\0') {
			kfree(func);
			func = NULL;
		}
	}

	tp_event = create_local_trace_kprobe(
		func, (void *)(unsigned long)(p_event->attr.kprobe_addr),
		p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_kprobe(tp_event);
out:
	kfree(func);
	return ret;
}

void perf_kprobe_destroy(struct perf_event *p_event)
{
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);

	destroy_local_trace_kprobe(p_event->tp_event);
}
#endif /* CONFIG_KPROBE_EVENTS */
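
perf_kprobe_init() above is reached through the dynamic "kprobe" PMU introduced in this release: userspace resolves the PMU type from sysfs and passes the symbol name through attr.kprobe_func (which aliases attr.config1) and the offset through attr.probe_offset (which aliases attr.config2). A hedged userspace sketch, assuming kernel headers new enough to name these union members; the probed symbol is an example and error handling is trimmed:

/* Hedged sketch: attach a counting kprobe via the perf "kprobe" PMU.
 * Needs CAP_SYS_ADMIN, matching the permission checks above. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	FILE *f;
	int type, fd;

	/* Dynamic PMU type; perf core routes this to perf_kprobe_init(). */
	f = fopen("/sys/bus/event_source/devices/kprobe/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.size = sizeof(attr);
	/* user pointer to a NUL-terminated symbol name (aliases config1) */
	attr.kprobe_func = (unsigned long)"do_sys_open";
	attr.probe_offset = 0;		/* aliases config2 */

	/* pid = -1, cpu = 0: system-wide counting on CPU 0 */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("do_sys_open hit %lld times\n", count);
	close(fd);	/* perf_kprobe_destroy() runs on release */
	return 0;
}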

#ifdef CONFIG_UPROBE_EVENTS
int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe)
{
	int ret;
	char *path = NULL;
	struct trace_event_call *tp_event;

	if (!p_event->attr.uprobe_path)
		return -EINVAL;
	path = kzalloc(PATH_MAX, GFP_KERNEL);
	if (!path)
		return -ENOMEM;
	ret = strncpy_from_user(
		path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
	if (ret == PATH_MAX) {
		ret = -E2BIG;
		goto out;	/* must free path, not return directly */
	}
	if (ret < 0)
		goto out;
	if (path[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	tp_event = create_local_trace_uprobe(
		path, p_event->attr.probe_offset, is_retprobe);
	if (IS_ERR(tp_event)) {
		ret = PTR_ERR(tp_event);
		goto out;
	}

	/*
	 * A local trace_uprobe needs to hold event_mutex to call
	 * uprobe_buffer_enable() and uprobe_buffer_disable().
	 * event_mutex is not required for local trace_kprobes.
	 */
	mutex_lock(&event_mutex);
	ret = perf_trace_event_init(tp_event, p_event);
	if (ret)
		destroy_local_trace_uprobe(tp_event);
	mutex_unlock(&event_mutex);
out:
	kfree(path);
	return ret;
}

void perf_uprobe_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
	destroy_local_trace_uprobe(p_event->tp_event);
}
#endif /* CONFIG_UPROBE_EVENTS */
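
The uprobe path is symmetric from userspace: attr.uprobe_path (again aliasing config1) points at the path string that the strncpy_from_user() above copies in, and attr.probe_offset gives the file offset of the instruction to probe. Relative to the kprobe sketch above, only the PMU type and these two fields change; the values below are illustrative:

	/* As in the kprobe sketch, but with the "uprobe" PMU type read
	 * from /sys/bus/event_source/devices/uprobe/type. */
	attr.type = type;				/* "uprobe" PMU */
	attr.uprobe_path = (unsigned long)"/bin/bash";	/* aliases config1 */
	attr.probe_offset = 0x4245c0;	/* file offset, e.g. from nm/readelf */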

int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	/*
	 * If TRACE_REG_PERF_ADD returns false, no custom action was performed
	 * and we need to take the default action of enqueueing our event on
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event)) {
		struct hlist_head __percpu *pcpu_list;
		struct hlist_head *list;

		pcpu_list = tp_event->perf_events;
		if (WARN_ON_ONCE(!pcpu_list))
			return -EINVAL;

		list = this_cpu_ptr(pcpu_list);
		hlist_add_head_rcu(&p_event->hlist_entry, list);
	}

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;

	/*
	 * If TRACE_REG_PERF_DEL returns false, no custom action was performed
	 * and we need to take the default action of dequeueing our event from
	 * the right per-cpu hlist.
	 */
	if (!tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event))
		hlist_del_rcu(&p_event->hlist_entry);
}

void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;
	int pc = preempt_count();
	unsigned long flags;

	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;
}
NOKPROBE_SYMBOL(perf_trace_buf_update);
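
perf_trace_buf_alloc() and perf_trace_buf_update() split the v3.15-era perf_trace_buf_prepare() in two, so that a BPF program can reject an event before the trace_entry header is stamped; in this release the header update runs from perf_tp_event(), which perf_trace_buf_submit() calls with the event type. A hedged sketch of the resulting caller pattern, modeled on the generated tracepoint glue; my_perf_handler and struct my_entry are illustrative names:

/* Hedged sketch of a v4.17-style perf trace handler. The handler only
 * fills the payload; the header is stamped later via
 * perf_trace_buf_update() on the submit path. */
struct my_entry {			/* illustrative payload layout */
	struct trace_entry	ent;	/* header, stamped by buf_update */
	unsigned long		val;	/* example tracepoint field */
};

static void my_perf_handler(struct trace_event_call *event_call)
{
	struct my_entry *entry;
	struct hlist_head *head;
	struct pt_regs *regs;
	int size, rctx;

	head = this_cpu_ptr(event_call->perf_events);
	if (hlist_empty(head))
		return;

	/* u64-align the record so the zeroed padding stays in bounds */
	size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64)) - sizeof(u32);

	/* buf_alloc also hands back a per-context pt_regs scratch slot */
	entry = perf_trace_buf_alloc(size, &regs, &rctx);
	if (!entry)
		return;

	perf_fetch_caller_regs(regs);
	entry->val = 0;			/* assign tracepoint fields here */

	perf_trace_buf_submit(entry, size, rctx, event_call->event.type,
			      1, regs, head, NULL);
}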

#ifdef CONFIG_FUNCTION_TRACER
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct perf_event *event;
	struct hlist_head head;
	struct pt_regs regs;
	int rctx;

	if ((unsigned long)ops->private != smp_processor_id())
		return;

	event = container_of(ops, struct perf_event, ftrace_ops);

	/*
	 * @event->hlist entry is NULL (per INIT_HLIST_NODE), and all
	 * the perf code does is hlist_for_each_entry_rcu(), so we can
	 * get away with simply setting the @head.first pointer in order
	 * to create a singular list.
	 */
	head.first = &event->hlist_entry;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, &head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags   = FTRACE_OPS_FL_RCU;
	ops->func    = perf_ftrace_function_call;
	ops->private = (void *)(unsigned long)nr_cpu_ids;

	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	struct perf_event *event = data;

	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		event->ftrace_ops.private = (void *)(unsigned long)smp_processor_id();
		return 1;
	case TRACE_REG_PERF_DEL:
		event->ftrace_ops.private = (void *)(unsigned long)nr_cpu_ids;
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */