/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force the buffer to be aligned to unsigned long to avoid misaligned
 * access surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
        perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

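/*
 * Check whether @p_event may use @tp_event: consult the event's own
 * perf_perm() hook, restrict the function trace event to privileged
 * users without user-space callchains or stack dumps, and require
 * CAP_SYS_ADMIN for raw sample data unless the event is marked
 * TRACE_EVENT_FL_CAP_ANY and attached to a task.
 */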
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        if (tp_event->perf_perm) {
                int ret = tp_event->perf_perm(tp_event, p_event);
                if (ret)
                        return ret;
        }

        /* The ftrace function trace is allowed only for root. */
        if (ftrace_event_is_function(tp_event)) {
                if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                        return -EPERM;

                /*
                 * We don't allow user space callchains for the function
                 * trace event, due to issues with page faults while tracing
                 * the page fault handler and its overall trickiness.
                 */
                if (!p_event->attr.exclude_callchain_user)
                        return -EINVAL;

                /*
                 * Same reason to disable user stack dump as for user space
                 * callchains above.
                 */
                if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
                        return -EINVAL;
        }

        /* No tracing, just counting, so no obvious leak */
        if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
                return 0;

        /* Some events are ok to be traced by non-root users... */
        if (p_event->attach_state == PERF_ATTACH_TASK) {
                if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
                        return 0;
        }

        /*
         * ...otherwise raw tracepoint data can be a severe data leak,
         * only allow root to have these.
         */
        if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

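/*
 * First-reference setup for @tp_event: allocate its per-CPU hlist of
 * perf events, allocate the shared per-context perf_trace_buf buffers
 * if no other event id uses them yet, and register the event with the
 * tracing core via TRACE_REG_PERF_REGISTER.  Subsequent callers only
 * bump the refcount.
 */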
static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
                                struct perf_event *p_event)
{
        struct hlist_head __percpu *list;
        int ret = -ENOMEM;
        int cpu;

        p_event->tp_event = tp_event;
        if (tp_event->perf_refcount++ > 0)
                return 0;

        list = alloc_percpu(struct hlist_head);
        if (!list)
                goto fail;

        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

        tp_event->perf_events = list;

        if (!total_ref_count) {
                char __percpu *buf;
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        buf = (char __percpu *)alloc_percpu(perf_trace_t);
                        if (!buf)
                                goto fail;

                        perf_trace_buf[i] = buf;
                }
        }

        ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
        if (ret)
                goto fail;

        total_ref_count++;
        return 0;

fail:
        if (!total_ref_count) {
                int i;

                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }

        if (!--tp_event->perf_refcount) {
                free_percpu(tp_event->perf_events);
                tp_event->perf_events = NULL;
        }

        return ret;
}

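/*
 * Drop one reference on the trace event.  On the last reference,
 * unregister the perf callback, wait for in-flight probes with
 * tracepoint_synchronize_unregister(), then free the per-CPU hlist
 * and, once no event id is left at all, the shared buffers.
 */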
static void perf_trace_event_unreg(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        int i;

        if (--tp_event->perf_refcount > 0)
                goto out;

        tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

        /*
         * Ensure our callback won't be called anymore. The buffers
         * will be freed after that.
         */
        tracepoint_synchronize_unregister();

        free_percpu(tp_event->perf_events);
        tp_event->perf_events = NULL;

        if (!--total_ref_count) {
                for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
        }
out:
        module_put(tp_event->mod);
}

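/* Per-event-instance open hook, forwarded as TRACE_REG_PERF_OPEN. */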
static int perf_trace_event_open(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

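/* Per-event-instance close hook, forwarded as TRACE_REG_PERF_CLOSE. */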
static void perf_trace_event_close(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

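/*
 * Bind @p_event to @tp_event: check permissions, take a registration
 * reference and open the event instance, undoing the registration if
 * the open fails.
 */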
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
                                 struct perf_event *p_event)
{
        int ret;

        ret = perf_trace_event_perm(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_reg(tp_event, p_event);
        if (ret)
                return ret;

        ret = perf_trace_event_open(p_event);
        if (ret) {
                perf_trace_event_unreg(p_event);
                return ret;
        }

        return 0;
}

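/*
 * Entry point from the perf core: look up the trace event whose id
 * matches attr.config under event_mutex, pin its module, and initialize
 * the binding.  The module reference is dropped again if init fails.
 */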
int perf_trace_init(struct perf_event *p_event)
{
        struct ftrace_event_call *tp_event;
        u64 event_id = p_event->attr.config;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(tp_event, &ftrace_events, list) {
                if (tp_event->event.type == event_id &&
                    tp_event->class && tp_event->class->reg &&
                    try_module_get(tp_event->mod)) {
                        ret = perf_trace_event_init(tp_event, p_event);
                        if (ret)
                                module_put(tp_event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}

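/* Tear down a perf trace event: close the instance and drop its reference. */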
void perf_trace_destroy(struct perf_event *p_event)
{
        mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
        mutex_unlock(&event_mutex);
}

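/*
 * Schedule the event in on this CPU: add it to the event's per-CPU
 * hlist (so the probe can find it) and forward TRACE_REG_PERF_ADD.
 * Without PERF_EF_START the event is queued in the stopped state.
 */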
int perf_trace_add(struct perf_event *p_event, int flags)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        struct hlist_head __percpu *pcpu_list;
        struct hlist_head *list;

        pcpu_list = tp_event->perf_events;
        if (WARN_ON_ONCE(!pcpu_list))
                return -EINVAL;

        if (!(flags & PERF_EF_START))
                p_event->hw.state = PERF_HES_STOPPED;

        list = this_cpu_ptr(pcpu_list);
        hlist_add_head_rcu(&p_event->hlist_entry, list);

        return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

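/*
 * Schedule the event out on this CPU: unlink it from the per-CPU hlist
 * and forward TRACE_REG_PERF_DEL.
 */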
void perf_trace_del(struct perf_event *p_event, int flags)
{
        struct ftrace_event_call *tp_event = p_event->tp_event;
        hlist_del_rcu(&p_event->hlist_entry);
        tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

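/*
 * Grab a recursion-safe per-CPU buffer slot and initialize the common
 * trace_entry header in it.  Returns the raw buffer (with the recursion
 * context in *rctxp) or NULL if the size exceeds PERF_MAX_TRACE_SIZE or
 * no recursion context is available.  Callers hand the filled buffer on
 * via perf_trace_buf_submit().
 */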
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
                                       struct pt_regs *regs, int *rctxp)
{
        struct trace_entry *entry;
        unsigned long flags;
        char *raw_data;
        int pc;

        BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

        if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                      "perf buffer not large enough"))
                return NULL;

        pc = preempt_count();

        *rctxp = perf_swevent_get_recursion_context();
        if (*rctxp < 0)
                return NULL;

        raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

        /* zero the dead alignment bytes so we don't leak stack data to user space */
        memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

        entry = (struct trace_entry *)raw_data;
        local_save_flags(flags);
        tracing_generic_entry_update(entry, flags, pc);
        entry->type = type;

        return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);

#ifdef CONFIG_FUNCTION_TRACER
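/*
 * ftrace callback for the perf function trace event: record an
 * ftrace_entry (ip, parent_ip) into the perf trace buffer and submit it
 * to every perf event hashed on this CPU.
 */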
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
        struct ftrace_entry *entry;
        struct hlist_head *head;
        struct pt_regs regs;
        int rctx;

        head = this_cpu_ptr(event_function.perf_events);
        if (hlist_empty(head))
                return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
                    sizeof(u64)) - sizeof(u32))

        BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

        perf_fetch_caller_regs(&regs);

        entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
        if (!entry)
                return;

        entry->ip = ip;
        entry->parent_ip = parent_ip;
        perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
                              1, &regs, head, NULL);

#undef ENTRY_SIZE
}

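/*
 * Attach/detach the per-event ftrace_ops whose callback is
 * perf_ftrace_function_call.  The ops is marked FTRACE_OPS_FL_CONTROL
 * so it can be enabled and disabled locally via the helpers below.
 */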
static int perf_ftrace_function_register(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;

        ops->flags |= FTRACE_OPS_FL_CONTROL;
        ops->func = perf_ftrace_function_call;
        return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
        struct ftrace_ops *ops = &event->ftrace_ops;
        int ret = unregister_ftrace_function(ops);
        ftrace_free_filter(ops);
        return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
        ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
        ftrace_function_local_disable(&event->ftrace_ops);
}

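/*
 * reg() callback of the function trace event: map the TRACE_REG_PERF_*
 * requests used above onto the ftrace_ops helpers.  Ftrace-side
 * REGISTER/UNREGISTER requests fall through and are rejected with
 * -EINVAL.
 */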
int perf_ftrace_event_register(struct ftrace_event_call *call,
                               enum trace_reg type, void *data)
{
        switch (type) {
        case TRACE_REG_REGISTER:
        case TRACE_REG_UNREGISTER:
                break;
        case TRACE_REG_PERF_REGISTER:
        case TRACE_REG_PERF_UNREGISTER:
                return 0;
        case TRACE_REG_PERF_OPEN:
                return perf_ftrace_function_register(data);
        case TRACE_REG_PERF_CLOSE:
                return perf_ftrace_function_unregister(data);
        case TRACE_REG_PERF_ADD:
                perf_ftrace_function_enable(data);
                return 0;
        case TRACE_REG_PERF_DEL:
                perf_ftrace_function_disable(data);
                return 0;
        }

        return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */