/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 * @author Robert Richter <robert.richter@amd.com>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */

#include <linux/file.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/gfp.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_var_t marked_cpus;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long flags;
	struct task_struct *task = data;
	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);
	return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *mpnt;

	mmap_read_lock(mm);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		mmap_read_unlock(mm);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	mmap_read_unlock(mm);
	return 0;
}


/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return 0;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return 0;
}


static struct notifier_block task_free_nb = {
	.notifier_call = task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call = task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call = munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call = module_load_notify,
};

static void free_all_tasks(void)
{
	/* make sure we don't leak task structs */
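	/* Calling process_task_mortuary() twice drains both lists: the
	 * first pass frees everything already on dead_tasks and moves
	 * dying_tasks onto dead_tasks, the second pass frees those too.
	 */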
	process_task_mortuary();
	process_task_mortuary();
}

int sync_start(void)
{
	int err;

	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
		return -ENOMEM;

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;

	start_cpu_work();

out:
	return err;
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
	free_all_tasks();
out1:
	free_cpumask_var(marked_cpus);
	goto out;
}


void sync_stop(void)
{
	end_cpu_work();
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	barrier();	/* do all of the above first */

	flush_cpu_work();

	free_all_tasks();
	free_cpumask_var(marked_cpus);
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
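/* A dentry with DCACHE_COOKIE set already has a cookie allocated for it,
 * so its address is used as the cookie value directly instead of calling
 * get_dcookie() (see fs/dcookies.c for how cookies are allocated).
 */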
static inline unsigned long fast_get_dcookie(const struct path *path)
{
	unsigned long cookie;

	if (path->dentry->d_flags & DCACHE_COOKIE)
		return (unsigned long)path->dentry;
	get_dcookie(path, &cookie);
	return cookie;
}


/* Look up the dcookie for the task's mm->exe_file,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
	unsigned long cookie = NO_COOKIE;
	struct file *exe_file;

	if (!mm)
		goto done;

	exe_file = get_mm_exe_file(mm);
	if (!exe_file)
		goto done;

	cookie = fast_get_dcookie(&exe_file->f_path);
	fput(exe_file);
done:
	return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 *
 * The caller must ensure the mm is not NULL (i.e. not a kernel thread).
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;
	mmap_read_unlock(mm);

	return cookie;
}

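/* Records in the event buffer are either bare (offset, event) sample
 * pairs or an ESCAPE_CODE followed by one of the *_CODE markers and its
 * arguments. The switch records below let the userspace reader track
 * which CPU, task, kernel/user mode and dcookie the following samples
 * belong to; last_cookie avoids emitting redundant cookie switches.
 */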
static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
	add_event_entry(ESCAPE_CODE);
	if (in_kernel)
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
	else
		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}


static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}


static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}

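/* Forward an extended sample (an escape code plus its payload, as queued
 * in the CPU buffer) into the event buffer, converting its pc into a
 * dcookie/offset pair when an mm is available.
 */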
static void add_data(struct op_entry *entry, struct mm_struct *mm)
{
	unsigned long code, pc, val;
	unsigned long cookie;
	off_t offset;

	if (!op_cpu_buffer_get_data(entry, &code))
		return;
	if (!op_cpu_buffer_get_data(entry, &pc))
		return;
	if (!op_cpu_buffer_get_size(entry))
		return;

	if (mm) {
		cookie = lookup_dcookie(mm, pc, &offset);

		if (cookie == NO_COOKIE)
			offset = pc;
		if (cookie == INVALID_COOKIE) {
			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
			offset = pc;
		}
		if (cookie != last_cookie) {
			add_cookie_switch(cookie);
			last_cookie = cookie;
		}
	} else
		offset = pc;

	add_event_entry(ESCAPE_CODE);
	add_event_entry(code);
	add_event_entry(offset);	/* Offset from Dcookie */

	while (op_cpu_buffer_get_data(entry, &val))
		add_event_entry(val);
}

static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}


/*
 * Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace. Return 0 on failure.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
	unsigned long cookie;
	off_t offset;

	if (in_kernel) {
		add_sample_entry(s->eip, s->event);
		return 1;
	}

	/* add userspace sample */

	if (!mm) {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
		return 0;
	}

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}


static void release_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	mmput(mm);
}

static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct *task;
	struct task_struct *ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}


static void mark_done(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpumask_test_cpu(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once
	 */
	process_task_mortuary();

	cpumask_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal, the code switch to sb_sample_start at first kernel enter/exit
 * switch so we need a fifth state and some special handling in sync_buffer()
 */
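/* States of the per-buffer sync state machine: samples are only copied
 * out while in sb_bt_start (inside a backtrace) or sb_sample_start
 * (normal sampling); sb_buffer_start covers the stretch before the first
 * kernel/user switch is seen, and sb_bt_ignore drops the remainder of a
 * backtrace whose leading sample could not be added.
 */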
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_lock and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
	struct mm_struct *mm = NULL;
	struct mm_struct *oldmm;
	unsigned long val;
	struct task_struct *new;
	unsigned long cookie = 0;
	int in_kernel = 1;
	sync_buffer_state state = sb_buffer_start;
	unsigned int i;
	unsigned long available;
	unsigned long flags;
	struct op_entry entry;
	struct op_sample *sample;

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	op_cpu_buffer_reset(cpu);
	available = op_cpu_buffer_entries(cpu);

	for (i = 0; i < available; ++i) {
		sample = op_cpu_buffer_read_entry(&entry, cpu);
		if (!sample)
			break;

		if (is_code(sample->eip)) {
			flags = sample->event;
			if (flags & TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
			}
			if (flags & KERNEL_CTX_SWITCH) {
				/* kernel/userspace switch */
				in_kernel = flags & IS_KERNEL;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_kernel_ctx_switch(flags & IS_KERNEL);
			}
			if (flags & USER_CTX_SWITCH
			    && op_cpu_buffer_get_data(&entry, &val)) {
				/* userspace context switch */
				new = (struct task_struct *)val;
				oldmm = mm;
				release_mm(oldmm);
				mm = get_task_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
			if (op_cpu_buffer_get_size(&entry))
				add_data(&entry, mm);
			continue;
		}

		if (state < sb_bt_start)
			/* ignore sample */
			continue;

		if (add_sample(mm, sample, in_kernel))
			continue;

		/* ignore backtraces if failed to add a sample */
		if (state == sb_bt_start) {
			state = sb_bt_ignore;
			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
		}
	}
	release_mm(mm);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}

/* This function can be used to add a buffer worth of data directly to
 * the kernel buffer. The buffer is assumed to be a circular buffer.
 * Take the entries from index start up to (but not including) index
 * stop, wrapping at index max.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
		       unsigned int stop, unsigned int max)
{
	int i;

	i = start;

	mutex_lock(&buffer_mutex);
	while (i != stop) {
		add_event_entry(buf[i++]);

		if (i >= max)
			i = 0;
	}

	mutex_unlock(&buffer_mutex);
}