/*
 * Performance events core code:
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>

#include "internal.h"

#include <asm/irq_regs.h>

typedef int (*remote_function_f)(void *);

struct remote_function_call {
	struct task_struct *p;
	remote_function_f func;
	void *info;
	int ret;
};

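/*
 * IPI callback: runs on the target CPU with IRQs disabled. For a task
 * target it leaves tfc->ret untouched (-EAGAIN) if the task has moved to
 * another CPU, and sets -ESRCH if the task is no longer current; only
 * when both checks pass does it invoke tfc->func.
 */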
static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		/* -EAGAIN */
		if (task_cpu(p) != smp_processor_id())
			return;

		/*
		 * Now that we're on the right CPU with IRQs disabled, we can
		 * test if we hit the right task without races.
		 */

		tfc->ret = -ESRCH; /* No such (running) process */
		if (p != current)
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p: the task to evaluate
 * @func: the function to be called
 * @info: the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p = p,
		.func = func,
		.info = info,
		.ret = -EAGAIN,
	};
	int ret;

	do {
		ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
		if (!ret)
			ret = data.ret;
	} while (ret == -EAGAIN);

	return ret;
}

/**
 * cpu_function_call - call a function on a given cpu
 * @cpu: the cpu on which to run the function
 * @func: the function to be called
 * @info: the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p = NULL,
		.func = func,
		.info = info,
		.ret = -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

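/*
 * Lock ordering: the per-CPU context lock is always taken first; the
 * task context lock (when present) nests inside it.
 */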
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#define TASK_TOMBSTONE ((void *)-1L)

static bool is_kernel_event(struct perf_event *event)
{
	return READ_ONCE(event->owner) == TASK_TOMBSTONE;
}

/*
 * On task ctx scheduling...
 *
 * When !ctx->nr_events a task context will not be scheduled. This means
 * we can disable the scheduler hooks (for performance) without leaving
 * pending task ctx state.
 *
 * This however results in two special cases:
 *
 *  - removing the last event from a task ctx; this is relatively
 *    straightforward and is done in __perf_remove_from_context.
 *
 *  - adding the first event to a task ctx; this is tricky because we cannot
 *    rely on ctx->is_active and therefore cannot use event_function_call().
 *    See perf_install_in_context().
 *
 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
 */

typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
			struct perf_event_context *, void *);

struct event_function_struct {
	struct perf_event *event;
	event_f func;
	void *data;
};

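/*
 * Cross-CPU call target for event_function_call(): re-takes the context
 * locks and re-validates that we are still on the task/CPU the caller
 * aimed at before invoking the actual event function.
 */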
static int event_function(void *info)
{
	struct event_function_struct *efs = info;
	struct perf_event *event = efs->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	int ret = 0;

	WARN_ON_ONCE(!irqs_disabled());

	perf_ctx_lock(cpuctx, task_ctx);
	/*
	 * Since we do the IPI call without holding ctx->lock things can have
	 * changed; double-check that we hit the task we set out to hit.
	 */
	if (ctx->task) {
		if (ctx->task != current) {
			ret = -ESRCH;
			goto unlock;
		}

		/*
		 * We only use event_function_call() on established contexts,
		 * and event_function() is only ever called when active (or
		 * rather, we'll have bailed in task_function_call() or the
		 * above ctx->task != current test), therefore we must have
		 * ctx->is_active here.
		 */
		WARN_ON_ONCE(!ctx->is_active);
		/*
		 * And since we have ctx->is_active, cpuctx->task_ctx must
		 * match.
		 */
		WARN_ON_ONCE(task_ctx != ctx);
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	efs->func(event, cpuctx, ctx, efs->data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);

	return ret;
}

static void event_function_local(struct perf_event *event, event_f func, void *data)
{
	struct event_function_struct efs = {
		.event = event,
		.func = func,
		.data = data,
	};

	int ret = event_function(&efs);
	WARN_ON_ONCE(ret);
}

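/*
 * Run @func against @event on the CPU where its context is active. For
 * task contexts this retries the IPI until the context is observed
 * inactive, at which point @func can safely be run locally under
 * ctx->lock.
 */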
static void event_function_call(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
	struct event_function_struct efs = {
		.event = event,
		.func = func,
		.data = data,
	};

	if (!event->parent) {
		/*
		 * If this is a !child event, we must hold ctx::mutex to
		 * stabilize the event->ctx relation. See
		 * perf_event_ctx_lock().
		 */
		lockdep_assert_held(&ctx->mutex);
	}

	if (!task) {
		cpu_function_call(event->cpu, event_function, &efs);
		return;
	}

	if (task == TASK_TOMBSTONE)
		return;

again:
	if (!task_function_call(task, event_function, &efs))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * Reload the task pointer, it might have been changed by
	 * a concurrent perf_event_context_sched_out().
	 */
	task = ctx->task;
	if (task == TASK_TOMBSTONE) {
		raw_spin_unlock_irq(&ctx->lock);
		return;
	}
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto again;
	}
	func(event, NULL, ctx, data);
	raw_spin_unlock_irq(&ctx->lock);
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_TIME = 0x4,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */

static void perf_sched_delayed(struct work_struct *work);
DEFINE_STATIC_KEY_FALSE(perf_sched_events);
static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;

static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 2;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

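/*
 * Recompute the per-sample time budget: perf_sample_allowed_ns is the
 * configured percentage of perf_sample_period_ns, clamped to at least
 * 1ns so throttling stays enabled.
 */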
static void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	tmp = div_u64(tmp, 100);
	if (!tmp)
		tmp = 1;

	WRITE_ONCE(perf_sample_allowed_ns, tmp);
}

static int perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	if (sysctl_perf_cpu_time_max_percent == 100 ||
	    sysctl_perf_cpu_time_max_percent == 0) {
		printk(KERN_WARNING
		       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
		WRITE_ONCE(perf_sample_allowed_ns, 0);
	} else {
		update_perf_cpu_limits();
	}

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done. This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static u64 __report_avg;
static u64 __report_allowed;

static void perf_duration_warn(struct irq_work *w)
{
	printk_ratelimited(KERN_WARNING
		"perf: interrupt took too long (%lld > %lld), lowering "
		"kernel.perf_event_max_sample_rate to %d\n",
		__report_avg, __report_allowed,
		sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

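/*
 * Called with the duration of the sample we just handled: maintain a
 * decaying average of per-sample cost and, when that average exceeds
 * the allowed budget, lower kernel.perf_event_max_sample_rate and warn.
 */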
void perf_sample_event_took(u64 sample_len_ns)
{
	u64 max_len = READ_ONCE(perf_sample_allowed_ns);
	u64 running_len;
	u64 avg_len;
	u32 max;

	if (max_len == 0)
		return;

	/* Decay the counter by 1 average sample. */
	running_len = __this_cpu_read(running_sample_length);
	running_len -= running_len/NR_ACCUMULATED_SAMPLES;
	running_len += sample_len_ns;
	__this_cpu_write(running_sample_length, running_len);

	/*
	 * Note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_len = running_len/NR_ACCUMULATED_SAMPLES;
	if (avg_len <= max_len)
		return;

	__report_avg = avg_len;
	__report_allowed = max_len;

	/*
	 * Compute a throttle threshold 25% below the current duration.
	 */
	avg_len += avg_len / 4;
	max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
	if (avg_len < max)
		max /= (u32)avg_len;
	else
		max = 1;

	WRITE_ONCE(perf_sample_allowed_ns, avg_len);
	WRITE_ONCE(max_samples_per_tick, max);

	sysctl_perf_event_sample_rate = max * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf: interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     __report_avg, __report_allowed,
			     sysctl_perf_event_sample_rate);
	}
}

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void)	{ }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline u64 perf_event_clock(struct perf_event *event)
{
	return event->clock();
}

#ifdef CONFIG_CGROUP_PERF

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive. An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups. If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current, event->ctx);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task, ctx);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
static void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroups
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->unique_pmu != pmu)
			continue; /* ensure we process each cpuctx once */

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to allow
				 * event_filter_match() to not have to pass
				 * task around
				 * we pass the cpuctx->ctx to perf_cgroup_from_task()
				 * because cgroup events are only per-cpu
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	rcu_read_lock();
	/*
	 * we come here when we know perf_cgroup_events > 0
	 * we do not need to pass the ctx here because we know
	 * we are holding the rcu lock
	 */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(next, NULL);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do not touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);

	rcu_read_unlock();
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	rcu_read_lock();
	/*
	 * we come here when we know perf_cgroup_events > 0
	 * we do not need to pass the ctx here because we know
	 * we are holding the rcu lock
	 */
	cgrp1 = perf_cgroup_from_task(task, NULL);
	cgrp2 = perf_cgroup_from_task(prev, NULL);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * the cgroup during ctxsw. Cgroup events were not scheduled
	 * out during the previous ctxsw if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);

	rcu_read_unlock();
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = css_tryget_online_from_dir(f.file->f_path.dentry,
					 &perf_event_cgrp_subsys);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_cgroup_mark_enabled() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

/*
 * set default to be dependent on timer tick just
 * like original code
 */
#define PERF_CPU_HRTIMER (1000 / HZ)
/*
 * function must be called with interrupts disabled
 */
static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
{
	struct perf_cpu_context *cpuctx;
	int rotations = 0;

	WARN_ON(!irqs_disabled());

	cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
	rotations = perf_rotate_context(cpuctx);

	raw_spin_lock(&cpuctx->hrtimer_lock);
	if (rotations)
		hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
	else
		cpuctx->hrtimer_active = 0;
	raw_spin_unlock(&cpuctx->hrtimer_lock);

	return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
}

static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	u64 interval;

	/* no multiplexing needed for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	/*
	 * check default is sane, if not set then force to
	 * default interval (1/tick)
	 */
	interval = pmu->hrtimer_interval_ms;
	if (interval < 1)
		interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;

	cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);

	raw_spin_lock_init(&cpuctx->hrtimer_lock);
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	timer->function = perf_mux_hrtimer_handler;
}

static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
{
	struct hrtimer *timer = &cpuctx->hrtimer;
	struct pmu *pmu = cpuctx->ctx.pmu;
	unsigned long flags;

	/* not for SW PMU */
	if (pmu->task_ctx_nr == perf_sw_context)
		return 0;

	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
	if (!cpuctx->hrtimer_active) {
		cpuctx->hrtimer_active = 1;
		hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
	}
	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);

	return 0;
}

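/*
 * PMU disable/enable nest via a per-CPU count: only the outermost
 * perf_pmu_disable() and the matching final perf_pmu_enable() reach the
 * PMU callbacks.
 */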
void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}

static DEFINE_PER_CPU(struct list_head, active_ctx_list);

/*
 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
 * perf_event_task_tick() are fully serialized because they're strictly cpu
 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
 * disabled, while perf_event_task_tick is called from IRQ context.
 */
static void perf_event_ctx_activate(struct perf_event_context *ctx)
{
	struct list_head *head = this_cpu_ptr(&active_ctx_list);

	WARN_ON(!irqs_disabled());

	WARN_ON(!list_empty(&ctx->active_ctx_list));

	list_add(&ctx->active_ctx_list, head);
}

static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
{
	WARN_ON(!irqs_disabled());

	WARN_ON(list_empty(&ctx->active_ctx_list));

	list_del_init(&ctx->active_ctx_list);
}

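/*
 * Context lifetime is governed by ctx->refcount. get_ctx() may only be
 * used on a context known to still be alive, hence the WARN when
 * atomic_inc_not_zero() fails. put_ctx() drops the parent reference and
 * the task reference (unless tombstoned) before freeing via RCU.
 */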
static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx->task_ctx_data);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task && ctx->task != TASK_TOMBSTONE)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

/*
 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
 * perf_pmu_migrate_context() we need some magic.
 *
 * Those places that change perf_event::ctx will hold both
 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
 *
 * Lock ordering is by mutex address. There are two other sites where
 * perf_event_context::mutex nests and those are:
 *
 *  - perf_event_exit_task_context()	[ child , 0 ]
 *      perf_event_exit_event()
 *        put_event()			[ parent, 1 ]
 *
 *  - perf_event_init_context()		[ parent, 0 ]
 *      inherit_task_group()
 *        inherit_group()
 *          inherit_event()
 *            perf_event_alloc()
 *              perf_init_event()
 *                perf_try_init_event()	[ child , 1 ]
 *
 * While it appears there is an obvious deadlock here (the parent and child
 * nesting levels are inverted between the two), this is in fact safe because
 * life-time rules separate them. That is, an exiting task cannot fork, and a
 * spawning task cannot (yet) exit.
 *
 * But remember that these are parent<->child context relations, and
 * migration does not affect children, therefore these two orderings should not
 * interact.
 *
 * The change in perf_event::ctx does not affect children (as claimed above)
 * because the sys_perf_event_open() case will install a new event and break
 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
 * concerned with cpuctx and that doesn't have children.
 *
 * The places that change perf_event::ctx will issue:
 *
 *   perf_remove_from_context();
 *   synchronize_rcu();
 *   perf_install_in_context();
 *
 * to effect the change. The remove_from_context() + synchronize_rcu() should
 * quiesce the event, after which we can install it in the new location. This
 * means that only external vectors (perf_fops, prctl) can perturb the event
 * while in transit. Therefore all such accessors should also acquire
 * perf_event_context::mutex to serialize against this.
 *
 * However; because event->ctx can change while we're waiting to acquire
 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
 * function.
 *
 * Lock order:
 *    cred_guard_mutex
 *	task_struct::perf_event_mutex
 *	  perf_event_context::mutex
 *	    perf_event::child_mutex;
 *	      perf_event_context::lock
 *	    perf_event::mmap_mutex
 *	    mmap_sem
 */
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
{
	struct perf_event_context *ctx;

again:
	rcu_read_lock();
	ctx = ACCESS_ONCE(event->ctx);
	if (!atomic_inc_not_zero(&ctx->refcount)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	mutex_lock_nested(&ctx->mutex, nesting);
	if (event->ctx != ctx) {
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
		goto again;
	}

	return ctx;
}

static inline struct perf_event_context *
perf_event_ctx_lock(struct perf_event *event)
{
	return perf_event_ctx_lock_nested(event, 0);
}

static void perf_event_ctx_unlock(struct perf_event *event,
				  struct perf_event_context *ctx)
{
	mutex_unlock(&ctx->mutex);
	put_ctx(ctx);
}

/*
 * This must be done under the ctx->lock, so as to serialize against
 * context_equiv(); therefore we cannot call put_ctx() since that might end up
 * calling scheduler related locks and ctx->lock nests inside those.
 */
static __must_check struct perf_event_context *
unclone_ctx(struct perf_event_context *ctx)
{
	struct perf_event_context *parent_ctx = ctx->parent_ctx;

	lockdep_assert_held(&ctx->lock);

	if (parent_ctx)
		ctx->parent_ctx = NULL;
	ctx->generation++;

	return parent_ctx;
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 *
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was irqs-enabled -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section has interrupts disabled.
	 */
	local_irq_save(*flags);
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock(&ctx->lock);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock(&ctx->lock);
			rcu_read_unlock();
			local_irq_restore(*flags);
			goto retry;
		}

		if (ctx->task == TASK_TOMBSTONE ||
		    !atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock(&ctx->lock);
			ctx = NULL;
		} else {
			WARN_ON_ONCE(ctx->task != task);
		}
	}
	rcu_read_unlock();
	if (!ctx)
		local_irq_restore(*flags);
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	lockdep_assert_held(&ctx->lock);

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;

	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_cgroup_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;
}

/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;
	else
		return &ctx->flexible_groups;
}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	lockdep_assert_held(&ctx->lock);

	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;

	ctx->generation++;
}

/*
 * Initialize event state based on the perf_event_attr::disabled.
 */
static inline void perf_event__state_init(struct perf_event *event)
{
	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
					      PERF_EVENT_STATE_INACTIVE;
}

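/*
 * Compute the size of the read() payload implied by the event's
 * read_format: one u64 value per counter (@nr_siblings + 1 for group
 * reads), plus the optional enabled/running times and per-counter IDs.
 */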
static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}

static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
{
	struct perf_sample_data *data;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		size += sizeof(data->weight);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		size += sizeof(data->data_src.val);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		size += sizeof(data->txn);

	event->header_size = size;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__header_size(struct perf_event *event)
{
	__perf_event_read_size(event,
			       event->group_leader->nr_siblings);
	__perf_event_header_size(event, event->attr.sample_type);
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static bool perf_event_validate_size(struct perf_event *event)
{
	/*
	 * The values computed here will be over-written when we actually
	 * attach the event.
	 */
	__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
	__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
	perf_event__id_header_size(event);

	/*
	 * Sum the lot; should not exceed the 64k limit we have on records.
	 * Conservative limit to allow for callchains and other variable fields.
	 */
	if (event->read_size + event->header_size +
	    event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
		return false;

	return true;
}

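/*
 * Attach a non-leader event to its group leader's sibling list. A group
 * consisting purely of software events keeps PERF_GROUP_SOFTWARE;
 * attaching a hardware event clears the flag. Header sizes are
 * recomputed for the whole group afterwards.
 */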
static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	WARN_ON_ONCE(group_leader->ctx != event->ctx);

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
	    !is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		/*
		 * Because cgroup events are always per-cpu events, this will
		 * always be called from the right CPU.
		 */
		cpuctx = __get_cpu_context(ctx);
		/*
		 * If there are no more cgroup events then clear cgrp to avoid
		 * stale pointer in update_cgrp_time_from_cpuctx().
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

	ctx->generation++;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;

		WARN_ON_ONCE(sibling->ctx != event->ctx);
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

static bool is_orphaned_event(struct perf_event *event)
{
	return event->state == PERF_EVENT_STATE_DEAD;
}

static inline int pmu_filter_match(struct perf_event *event)
{
	struct pmu *pmu = event->pmu;

	return pmu->filter_match ? pmu->filter_match(event) : 1;
}

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event) && pmu_filter_match(event);
}

static void
event_sched_out(struct perf_event *event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;

	WARN_ON_ONCE(event->ctx != ctx);
	lockdep_assert_held(&ctx->lock);

	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	perf_pmu_disable(event->pmu);

	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;
	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	if (!--ctx->nr_active)
		perf_event_ctx_deactivate(ctx);
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;

	perf_pmu_enable(event->pmu);
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

#define DETACH_GROUP	0x01UL

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static void
__perf_remove_from_context(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   struct perf_event_context *ctx,
			   void *info)
{
	unsigned long flags = (unsigned long)info;

	event_sched_out(event, cpuctx, ctx);
	if (flags & DETACH_GROUP)
		perf_group_detach(event);
	list_del_event(event, ctx);

	if (!ctx->nr_events && ctx->is_active) {
		ctx->is_active = 0;
		if (ctx->task) {
			WARN_ON_ONCE(cpuctx->task_ctx != ctx);
			cpuctx->task_ctx = NULL;
		}
	}
}

/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
{
	lockdep_assert_held(&event->ctx->mutex);

	event_function_call(event, __perf_remove_from_context, (void *)flags);
}

/*
 * Cross CPU call to disable a performance event
 */
static void __perf_event_disable(struct perf_event *event,
				 struct perf_cpu_context *cpuctx,
				 struct perf_event_context *ctx,
				 void *info)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return;

	update_context_time(ctx);
	update_cgrp_time_from_event(event);
	update_group_times(event);
	if (event == event->group_leader)
		group_sched_out(event, cpuctx, ctx);
	else
		event_sched_out(event, cpuctx, ctx);
	event->state = PERF_EVENT_STATE_OFF;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in perf_event_exit_event().
 *
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
static void _perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	raw_spin_lock_irq(&ctx->lock);
	if (event->state <= PERF_EVENT_STATE_OFF) {
		raw_spin_unlock_irq(&ctx->lock);
		return;
	}
	raw_spin_unlock_irq(&ctx->lock);

	event_function_call(event, __perf_event_disable, NULL);
}

void perf_event_disable_local(struct perf_event *event)
{
	event_function_local(event, __perf_event_disable, NULL);
}

/*
 * Strictly speaking kernel users cannot create groups and therefore this
 * interface does not need the perf_event_ctx_lock() magic.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx;

	ctx = perf_event_ctx_lock(event);
	_perf_event_disable(event);
	perf_event_ctx_unlock(event, ctx);
}
EXPORT_SYMBOL_GPL(perf_event_disable);

static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *	tstamp - ctx->timestamp
	 * is equivalent to
	 *	tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);
static void perf_log_itrace_start(struct perf_event *event);

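/*
 * Install a single event onto the PMU: unthrottle it if needed, mark it
 * ACTIVE, and call pmu->add(). Returns -EAGAIN and reverts the event to
 * INACTIVE when the PMU refuses it.
 */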
1917static int
1918event_sched_in(struct perf_event *event,
1919 struct perf_cpu_context *cpuctx,
1920 struct perf_event_context *ctx)
1921{
1922 u64 tstamp = perf_event_time(event);
1923 int ret = 0;
1924
1925 lockdep_assert_held(&ctx->lock);
1926
1927 if (event->state <= PERF_EVENT_STATE_OFF)
1928 return 0;
1929
1930 event->state = PERF_EVENT_STATE_ACTIVE;
1931 event->oncpu = smp_processor_id();
1932
1933 /*
1934 * Unthrottle events, since we scheduled we might have missed several
1935 * ticks already, also for a heavily scheduling task there is little
1936 * guarantee it'll get a tick in a timely manner.
1937 */
1938 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1939 perf_log_throttle(event, 1);
1940 event->hw.interrupts = 0;
1941 }
1942
1943 /*
1944 * The new state must be visible before we turn it on in the hardware:
1945 */
1946 smp_wmb();
1947
1948 perf_pmu_disable(event->pmu);
1949
1950 perf_set_shadow_time(event, ctx, tstamp);
1951
1952 perf_log_itrace_start(event);
1953
1954 if (event->pmu->add(event, PERF_EF_START)) {
1955 event->state = PERF_EVENT_STATE_INACTIVE;
1956 event->oncpu = -1;
1957 ret = -EAGAIN;
1958 goto out;
1959 }
1960
1961 event->tstamp_running += tstamp - event->tstamp_stopped;
1962
1963 if (!is_software_event(event))
1964 cpuctx->active_oncpu++;
1965 if (!ctx->nr_active++)
1966 perf_event_ctx_activate(ctx);
1967 if (event->attr.freq && event->attr.sample_freq)
1968 ctx->nr_freq++;
1969
1970 if (event->attr.exclusive)
1971 cpuctx->exclusive = 1;
1972
1973out:
1974 perf_pmu_enable(event->pmu);
1975
1976 return ret;
1977}
1978
1979static int
1980group_sched_in(struct perf_event *group_event,
1981 struct perf_cpu_context *cpuctx,
1982 struct perf_event_context *ctx)
1983{
1984 struct perf_event *event, *partial_group = NULL;
1985 struct pmu *pmu = ctx->pmu;
1986 u64 now = ctx->time;
1987 bool simulate = false;
1988
1989 if (group_event->state == PERF_EVENT_STATE_OFF)
1990 return 0;
1991
1992 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
1993
1994 if (event_sched_in(group_event, cpuctx, ctx)) {
1995 pmu->cancel_txn(pmu);
1996 perf_mux_hrtimer_restart(cpuctx);
1997 return -EAGAIN;
1998 }
1999
2000 /*
2001 * Schedule in siblings as one group (if any):
2002 */
2003 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2004 if (event_sched_in(event, cpuctx, ctx)) {
2005 partial_group = event;
2006 goto group_error;
2007 }
2008 }
2009
2010 if (!pmu->commit_txn(pmu))
2011 return 0;
2012
2013group_error:
2014 /*
2015 * Groups can be scheduled in as one unit only, so undo any
2016 * partial group before returning:
2017 * The events up to the failed event are scheduled out normally,
2018 * tstamp_stopped will be updated.
2019 *
2020 * The failed events and the remaining siblings need to have
2021 * their timings updated as if they had gone through event_sched_in()
2022 * and event_sched_out(). This is required to get consistent timings
2023 * across the group. This also takes care of the case where the group
2024 * could never be scheduled by ensuring tstamp_stopped is set to mark
2025 * the time the event was actually stopped, such that time delta
2026 * calculation in update_event_times() is correct.
2027 */
2028 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2029 if (event == partial_group)
2030 simulate = true;
2031
2032 if (simulate) {
2033 event->tstamp_running += now - event->tstamp_stopped;
2034 event->tstamp_stopped = now;
2035 } else {
2036 event_sched_out(event, cpuctx, ctx);
2037 }
2038 }
2039 event_sched_out(group_event, cpuctx, ctx);
2040
2041 pmu->cancel_txn(pmu);
2042
2043 perf_mux_hrtimer_restart(cpuctx);
2044
2045 return -EAGAIN;
2046}
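/*
 * A hedged sketch of the ->start_txn()/->commit_txn()/->cancel_txn()
 * contract that group_sched_in() drives above. The sketch_* names are
 * hypothetical; only perf_pmu_disable()/perf_pmu_enable() and
 * PERF_PMU_TXN_ADD are real:
 *
 *	static void sketch_start_txn(struct pmu *pmu, unsigned int flags)
 *	{
 *		if (flags & PERF_PMU_TXN_ADD)
 *			perf_pmu_disable(pmu);	// batch the ->add() calls
 *	}
 *
 *	static int sketch_commit_txn(struct pmu *pmu)
 *	{
 *		if (!sketch_group_fits_on_hw())	// hypothetical check
 *			return -EAGAIN;		// makes group_sched_in() unwind
 *		perf_pmu_enable(pmu);
 *		return 0;
 *	}
 *
 *	static void sketch_cancel_txn(struct pmu *pmu)
 *	{
 *		perf_pmu_enable(pmu);		// undo start_txn()
 *	}
 */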
2047
2048/*
2049 * Work out whether we can put this event group on the CPU now.
2050 */
2051static int group_can_go_on(struct perf_event *event,
2052 struct perf_cpu_context *cpuctx,
2053 int can_add_hw)
2054{
2055 /*
2056 * Groups consisting entirely of software events can always go on.
2057 */
2058 if (event->group_flags & PERF_GROUP_SOFTWARE)
2059 return 1;
2060 /*
2061 * If an exclusive group is already on, no other hardware
2062 * events can go on.
2063 */
2064 if (cpuctx->exclusive)
2065 return 0;
2066 /*
2067 * If this group is exclusive and there are already
2068 * events on the CPU, it can't go on.
2069 */
2070 if (event->attr.exclusive && cpuctx->active_oncpu)
2071 return 0;
2072 /*
2073 * Otherwise, try to add it if all previous groups were able
2074 * to go on.
2075 */
2076 return can_add_hw;
2077}
2078
2079static void add_event_to_ctx(struct perf_event *event,
2080 struct perf_event_context *ctx)
2081{
2082 u64 tstamp = perf_event_time(event);
2083
2084 list_add_event(event, ctx);
2085 perf_group_attach(event);
2086 event->tstamp_enabled = tstamp;
2087 event->tstamp_running = tstamp;
2088 event->tstamp_stopped = tstamp;
2089}
2090
2091static void ctx_sched_out(struct perf_event_context *ctx,
2092 struct perf_cpu_context *cpuctx,
2093 enum event_type_t event_type);
2094static void
2095ctx_sched_in(struct perf_event_context *ctx,
2096 struct perf_cpu_context *cpuctx,
2097 enum event_type_t event_type,
2098 struct task_struct *task);
2099
2100static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2101 struct perf_event_context *ctx)
2102{
2103 if (!cpuctx->task_ctx)
2104 return;
2105
2106 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2107 return;
2108
2109 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2110}
2111
2112static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2113 struct perf_event_context *ctx,
2114 struct task_struct *task)
2115{
2116 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2117 if (ctx)
2118 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2119 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2120 if (ctx)
2121 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2122}
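/*
 * For illustration, the call order above is what yields the scheduling
 * priority documented further down:
 *
 *	cpu pinned -> task pinned -> cpu flexible -> task flexible
 *
 * so when the hardware is over-committed it is always a flexible group
 * that ends up multiplexed, never a pinned one.
 */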
2123
2124static void ctx_resched(struct perf_cpu_context *cpuctx,
2125 struct perf_event_context *task_ctx)
2126{
2127 perf_pmu_disable(cpuctx->ctx.pmu);
2128 if (task_ctx)
2129 task_ctx_sched_out(cpuctx, task_ctx);
2130 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
2131 perf_event_sched_in(cpuctx, task_ctx, current);
2132 perf_pmu_enable(cpuctx->ctx.pmu);
2133}
2134
2135/*
2136 * Cross CPU call to install and enable a performance event
2137 *
2138 * Very similar to remote_function() + event_function() but cannot assume that
2139 * things like ctx->is_active and cpuctx->task_ctx are set.
2140 */
2141static int __perf_install_in_context(void *info)
2142{
2143 struct perf_event *event = info;
2144 struct perf_event_context *ctx = event->ctx;
2145 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2146 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2147 bool activate = true;
2148 int ret = 0;
2149
2150 raw_spin_lock(&cpuctx->ctx.lock);
2151 if (ctx->task) {
2152 raw_spin_lock(&ctx->lock);
2153 task_ctx = ctx;
2154
2155 /* If we're on the wrong CPU, try again */
2156 if (task_cpu(ctx->task) != smp_processor_id()) {
2157 ret = -ESRCH;
2158 goto unlock;
2159 }
2160
2161 /*
2162 * If we're on the right CPU, see if the task we target is
2163 * current, if not we don't have to activate the ctx, a future
2164 * context switch will do that for us.
2165 */
2166 if (ctx->task != current)
2167 activate = false;
2168 else
2169 WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2170
2171 } else if (task_ctx) {
2172 raw_spin_lock(&task_ctx->lock);
2173 }
2174
2175 if (activate) {
2176 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2177 add_event_to_ctx(event, ctx);
2178 ctx_resched(cpuctx, task_ctx);
2179 } else {
2180 add_event_to_ctx(event, ctx);
2181 }
2182
2183unlock:
2184 perf_ctx_unlock(cpuctx, task_ctx);
2185
2186 return ret;
2187}
2188
2189/*
2190 * Attach a performance event to a context.
2191 *
2192 * Very similar to event_function_call, see comment there.
2193 */
2194static void
2195perf_install_in_context(struct perf_event_context *ctx,
2196 struct perf_event *event,
2197 int cpu)
2198{
2199 struct task_struct *task = READ_ONCE(ctx->task);
2200
2201 lockdep_assert_held(&ctx->mutex);
2202
2203 event->ctx = ctx;
2204 if (event->cpu != -1)
2205 event->cpu = cpu;
2206
2207 if (!task) {
2208 cpu_function_call(cpu, __perf_install_in_context, event);
2209 return;
2210 }
2211
2212 /*
2213 * Should not happen; we validate the ctx is still alive before calling.
2214 */
2215 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2216 return;
2217
2218 /*
2219 * Installing events is tricky because we cannot rely on ctx->is_active
2220 * to be set in case this is the nr_events 0 -> 1 transition.
2221 */
2222again:
2223 /*
2224 * Cannot use task_function_call() because we need to run on the task's
2225 * CPU regardless of whether it is current or not.
2226 */
2227 if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
2228 return;
2229
2230 raw_spin_lock_irq(&ctx->lock);
2231 task = ctx->task;
2232 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
2233 /*
2234 * Cannot happen because we already checked above (which also
2235 * cannot happen), and we hold ctx->mutex, which serializes us
2236 * against perf_event_exit_task_context().
2237 */
2238 raw_spin_unlock_irq(&ctx->lock);
2239 return;
2240 }
2241 raw_spin_unlock_irq(&ctx->lock);
2242 /*
2243 * Since !ctx->is_active doesn't mean anything, we must IPI
2244 * unconditionally.
2245 */
2246 goto again;
2247}
2248
2249/*
2250 * Put an event into inactive state and update time fields.
2251 * Enabling the leader of a group effectively enables all
2252 * the group members that aren't explicitly disabled, so we
2253 * have to update their ->tstamp_enabled also.
2254 * Note: this works for group members as well as group leaders
2255 * since the non-leader members' sibling_lists will be empty.
2256 */
2257static void __perf_event_mark_enabled(struct perf_event *event)
2258{
2259 struct perf_event *sub;
2260 u64 tstamp = perf_event_time(event);
2261
2262 event->state = PERF_EVENT_STATE_INACTIVE;
2263 event->tstamp_enabled = tstamp - event->total_time_enabled;
2264 list_for_each_entry(sub, &event->sibling_list, group_entry) {
2265 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
2266 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
2267 }
2268}
2269
2270/*
2271 * Cross CPU call to enable a performance event
2272 */
2273static void __perf_event_enable(struct perf_event *event,
2274 struct perf_cpu_context *cpuctx,
2275 struct perf_event_context *ctx,
2276 void *info)
2277{
2278 struct perf_event *leader = event->group_leader;
2279 struct perf_event_context *task_ctx;
2280
2281 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2282 event->state <= PERF_EVENT_STATE_ERROR)
2283 return;
2284
2285 if (ctx->is_active)
2286 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2287
2288 __perf_event_mark_enabled(event);
2289
2290 if (!ctx->is_active)
2291 return;
2292
2293 if (!event_filter_match(event)) {
2294 if (is_cgroup_event(event))
2295 perf_cgroup_defer_enabled(event);
2296 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2297 return;
2298 }
2299
2300 /*
2301 * If the event is in a group and isn't the group leader,
2302 * then don't put it on unless the group is on.
2303 */
2304 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2305 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2306 return;
2307 }
2308
2309 task_ctx = cpuctx->task_ctx;
2310 if (ctx->task)
2311 WARN_ON_ONCE(task_ctx != ctx);
2312
2313 ctx_resched(cpuctx, task_ctx);
2314}
2315
2316/*
2317 * Enable an event.
2318 *
2319 * If event->ctx is a cloned context, callers must make sure that
2320 * every task struct that event->ctx->task could possibly point to
2321 * remains valid. This condition is satisfied when called through
2322 * perf_event_for_each_child or perf_event_for_each as described
2323 * for perf_event_disable.
2324 */
2325static void _perf_event_enable(struct perf_event *event)
2326{
2327 struct perf_event_context *ctx = event->ctx;
2328
2329 raw_spin_lock_irq(&ctx->lock);
2330 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2331 event->state < PERF_EVENT_STATE_ERROR) {
2332 raw_spin_unlock_irq(&ctx->lock);
2333 return;
2334 }
2335
2336 /*
2337 * If the event is in error state, clear that first.
2338 *
2339 * That way, if we see the event in error state below, we know that it
2340 * has gone back into error state, as distinct from the task having
2341 * been scheduled away before the cross-call arrived.
2342 */
2343 if (event->state == PERF_EVENT_STATE_ERROR)
2344 event->state = PERF_EVENT_STATE_OFF;
2345 raw_spin_unlock_irq(&ctx->lock);
2346
2347 event_function_call(event, __perf_event_enable, NULL);
2348}
2349
2350/*
2351 * See perf_event_disable();
2352 */
2353void perf_event_enable(struct perf_event *event)
2354{
2355 struct perf_event_context *ctx;
2356
2357 ctx = perf_event_ctx_lock(event);
2358 _perf_event_enable(event);
2359 perf_event_ctx_unlock(event, ctx);
2360}
2361EXPORT_SYMBOL_GPL(perf_event_enable);
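/*
 * A hedged usage sketch for the export above: an in-kernel user could
 * pair perf_event_enable() with a counter created via the real
 * perf_event_create_kernel_counter() API. Error handling is elided and
 * the NULL overflow handler makes this a counting, not sampling, event:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.size		= sizeof(attr),
 *		.disabled	= 1,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, raw_smp_processor_id(),
 *						 NULL, NULL, NULL);
 *	if (!IS_ERR(event))
 *		perf_event_enable(event);
 */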
2362
2363static int _perf_event_refresh(struct perf_event *event, int refresh)
2364{
2365 /*
2366 * not supported on inherited events
2367 */
2368 if (event->attr.inherit || !is_sampling_event(event))
2369 return -EINVAL;
2370
2371 atomic_add(refresh, &event->event_limit);
2372 _perf_event_enable(event);
2373
2374 return 0;
2375}
2376
2377/*
2378 * See perf_event_disable()
2379 */
2380int perf_event_refresh(struct perf_event *event, int refresh)
2381{
2382 struct perf_event_context *ctx;
2383 int ret;
2384
2385 ctx = perf_event_ctx_lock(event);
2386 ret = _perf_event_refresh(event, refresh);
2387 perf_event_ctx_unlock(event, ctx);
2388
2389 return ret;
2390}
2391EXPORT_SYMBOL_GPL(perf_event_refresh);
2392
2393static void ctx_sched_out(struct perf_event_context *ctx,
2394 struct perf_cpu_context *cpuctx,
2395 enum event_type_t event_type)
2396{
2397 int is_active = ctx->is_active;
2398 struct perf_event *event;
2399
2400 lockdep_assert_held(&ctx->lock);
2401
2402 if (likely(!ctx->nr_events)) {
2403 /*
2404 * See __perf_remove_from_context().
2405 */
2406 WARN_ON_ONCE(ctx->is_active);
2407 if (ctx->task)
2408 WARN_ON_ONCE(cpuctx->task_ctx);
2409 return;
2410 }
2411
2412 ctx->is_active &= ~event_type;
2413 if (!(ctx->is_active & EVENT_ALL))
2414 ctx->is_active = 0;
2415
2416 if (ctx->task) {
2417 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2418 if (!ctx->is_active)
2419 cpuctx->task_ctx = NULL;
2420 }
2421
2422 /*
2423 * Always update time if it was set; not only when it changes.
2424 * Otherwise we can 'forget' to update time for any but the last
2425 * context we sched out. For example:
2426 *
2427 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
2428 * ctx_sched_out(.event_type = EVENT_PINNED)
2429 *
2430 * would only update time for the pinned events.
2431 */
2432 if (is_active & EVENT_TIME) {
2433 /* update (and stop) ctx time */
2434 update_context_time(ctx);
2435 update_cgrp_time_from_cpuctx(cpuctx);
2436 }
2437
2438 is_active ^= ctx->is_active; /* changed bits */
2439
2440 if (!ctx->nr_active || !(is_active & EVENT_ALL))
2441 return;
2442
2443 perf_pmu_disable(ctx->pmu);
2444 if (is_active & EVENT_PINNED) {
2445 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2446 group_sched_out(event, cpuctx, ctx);
2447 }
2448
2449 if (is_active & EVENT_FLEXIBLE) {
2450 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
2451 group_sched_out(event, cpuctx, ctx);
2452 }
2453 perf_pmu_enable(ctx->pmu);
2454}
2455
2456/*
2457 * Test whether two contexts are equivalent, i.e. whether they have both been
2458 * cloned from the same version of the same context.
2459 *
2460 * Equivalence is measured using a generation number in the context that is
2461 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2462 * and list_del_event().
2463 */
2464static int context_equiv(struct perf_event_context *ctx1,
2465 struct perf_event_context *ctx2)
2466{
2467 lockdep_assert_held(&ctx1->lock);
2468 lockdep_assert_held(&ctx2->lock);
2469
2470 /* Pinning disables the swap optimization */
2471 if (ctx1->pin_count || ctx2->pin_count)
2472 return 0;
2473
2474 /* If ctx1 is the parent of ctx2 */
2475 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2476 return 1;
2477
2478 /* If ctx2 is the parent of ctx1 */
2479 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2480 return 1;
2481
2482 /*
2483 * If ctx1 and ctx2 have the same parent, we flatten the parent
2484 * hierarchy, see perf_event_init_context().
2485 */
2486 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2487 ctx1->parent_gen == ctx2->parent_gen)
2488 return 1;
2489
2490 /* Unmatched */
2491 return 0;
2492}
2493
2494static void __perf_event_sync_stat(struct perf_event *event,
2495 struct perf_event *next_event)
2496{
2497 u64 value;
2498
2499 if (!event->attr.inherit_stat)
2500 return;
2501
2502 /*
2503 * Update the event value; we cannot use perf_event_read()
2504 * because we're in the middle of a context switch and have IRQs
2505 * disabled, which upsets smp_call_function_single(). However,
2506 * we know the event must be on the current CPU, therefore we
2507 * don't need to use it.
2508 */
2509 switch (event->state) {
2510 case PERF_EVENT_STATE_ACTIVE:
2511 event->pmu->read(event);
2512 /* fall-through */
2513
2514 case PERF_EVENT_STATE_INACTIVE:
2515 update_event_times(event);
2516 break;
2517
2518 default:
2519 break;
2520 }
2521
2522 /*
2523 * In order to keep per-task stats reliable we need to flip the event
2524 * values when we flip the contexts.
2525 */
2526 value = local64_read(&next_event->count);
2527 value = local64_xchg(&event->count, value);
2528 local64_set(&next_event->count, value);
2529
2530 swap(event->total_time_enabled, next_event->total_time_enabled);
2531 swap(event->total_time_running, next_event->total_time_running);
2532
2533 /*
2534 * Since we swizzled the values, update the user visible data too.
2535 */
2536 perf_event_update_userpage(event);
2537 perf_event_update_userpage(next_event);
2538}
2539
2540static void perf_event_sync_stat(struct perf_event_context *ctx,
2541 struct perf_event_context *next_ctx)
2542{
2543 struct perf_event *event, *next_event;
2544
2545 if (!ctx->nr_stat)
2546 return;
2547
2548 update_context_time(ctx);
2549
2550 event = list_first_entry(&ctx->event_list,
2551 struct perf_event, event_entry);
2552
2553 next_event = list_first_entry(&next_ctx->event_list,
2554 struct perf_event, event_entry);
2555
2556 while (&event->event_entry != &ctx->event_list &&
2557 &next_event->event_entry != &next_ctx->event_list) {
2558
2559 __perf_event_sync_stat(event, next_event);
2560
2561 event = list_next_entry(event, event_entry);
2562 next_event = list_next_entry(next_event, event_entry);
2563 }
2564}
2565
2566static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2567 struct task_struct *next)
2568{
2569 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
2570 struct perf_event_context *next_ctx;
2571 struct perf_event_context *parent, *next_parent;
2572 struct perf_cpu_context *cpuctx;
2573 int do_switch = 1;
2574
2575 if (likely(!ctx))
2576 return;
2577
2578 cpuctx = __get_cpu_context(ctx);
2579 if (!cpuctx->task_ctx)
2580 return;
2581
2582 rcu_read_lock();
2583 next_ctx = next->perf_event_ctxp[ctxn];
2584 if (!next_ctx)
2585 goto unlock;
2586
2587 parent = rcu_dereference(ctx->parent_ctx);
2588 next_parent = rcu_dereference(next_ctx->parent_ctx);
2589
2590 /* If neither context has a parent context, they cannot be clones. */
2591 if (!parent && !next_parent)
2592 goto unlock;
2593
2594 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
2595 /*
2596 * Looks like the two contexts are clones, so we might be
2597 * able to optimize the context switch. We lock both
2598 * contexts and check that they are clones under the
2599 * lock (including re-checking that neither has been
2600 * uncloned in the meantime). It doesn't matter which
2601 * order we take the locks because no other cpu could
2602 * be trying to lock both of these tasks.
2603 */
2604 raw_spin_lock(&ctx->lock);
2605 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2606 if (context_equiv(ctx, next_ctx)) {
2607 WRITE_ONCE(ctx->task, next);
2608 WRITE_ONCE(next_ctx->task, task);
2609
2610 swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
2611
2612 /*
2613 * RCU_INIT_POINTER here is safe because we've not
2614 * modified the ctx and the above modifications of
2615 * ctx->task and ctx->task_ctx_data are immaterial
2616 * since those values are always verified under
2617 * ctx->lock which we're now holding.
2618 */
2619 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
2620 RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
2621
2622 do_switch = 0;
2623
2624 perf_event_sync_stat(ctx, next_ctx);
2625 }
2626 raw_spin_unlock(&next_ctx->lock);
2627 raw_spin_unlock(&ctx->lock);
2628 }
2629unlock:
2630 rcu_read_unlock();
2631
2632 if (do_switch) {
2633 raw_spin_lock(&ctx->lock);
2634 task_ctx_sched_out(cpuctx, ctx);
2635 raw_spin_unlock(&ctx->lock);
2636 }
2637}
2638
2639void perf_sched_cb_dec(struct pmu *pmu)
2640{
2641 this_cpu_dec(perf_sched_cb_usages);
2642}
2643
2644void perf_sched_cb_inc(struct pmu *pmu)
2645{
2646 this_cpu_inc(perf_sched_cb_usages);
2647}
2648
2649/*
2650 * This function provides the context switch callback to the lower code
2651 * layer. It is invoked ONLY when the context switch callback is enabled.
2652 */
2653static void perf_pmu_sched_task(struct task_struct *prev,
2654 struct task_struct *next,
2655 bool sched_in)
2656{
2657 struct perf_cpu_context *cpuctx;
2658 struct pmu *pmu;
2659 unsigned long flags;
2660
2661 if (prev == next)
2662 return;
2663
2664 local_irq_save(flags);
2665
2666 rcu_read_lock();
2667
2668 list_for_each_entry_rcu(pmu, &pmus, entry) {
2669 if (pmu->sched_task) {
2670 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2671
2672 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2673
2674 perf_pmu_disable(pmu);
2675
2676 pmu->sched_task(cpuctx->task_ctx, sched_in);
2677
2678 perf_pmu_enable(pmu);
2679
2680 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2681 }
2682 }
2683
2684 rcu_read_unlock();
2685
2686 local_irq_restore(flags);
2687}
2688
2689static void perf_event_switch(struct task_struct *task,
2690 struct task_struct *next_prev, bool sched_in);
2691
2692#define for_each_task_context_nr(ctxn) \
2693 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2694
2695/*
2696 * Called from scheduler to remove the events of the current task,
2697 * with interrupts disabled.
2698 *
2699 * We stop each event and update the event value in event->count.
2700 *
2701 * This does not protect us against NMI, but disable()
2702 * sets the disabled bit in the control field of event _before_
2703 * accessing the event control register. If an NMI hits, then it will
2704 * not restart the event.
2705 */
2706void __perf_event_task_sched_out(struct task_struct *task,
2707 struct task_struct *next)
2708{
2709 int ctxn;
2710
2711 if (__this_cpu_read(perf_sched_cb_usages))
2712 perf_pmu_sched_task(task, next, false);
2713
2714 if (atomic_read(&nr_switch_events))
2715 perf_event_switch(task, next, false);
2716
2717 for_each_task_context_nr(ctxn)
2718 perf_event_context_sched_out(task, ctxn, next);
2719
2720 /*
2721 * If cgroup events exist on this CPU, then we need
2722 * to check if we have to switch out PMU state;
2723 * cgroup events are system-wide mode only.
2724 */
2725 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
2726 perf_cgroup_sched_out(task, next);
2727}
2728
2729/*
2730 * Called with IRQs disabled
2731 */
2732static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2733 enum event_type_t event_type)
2734{
2735 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2736}
2737
2738static void
2739ctx_pinned_sched_in(struct perf_event_context *ctx,
2740 struct perf_cpu_context *cpuctx)
2741{
2742 struct perf_event *event;
2743
2744 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2745 if (event->state <= PERF_EVENT_STATE_OFF)
2746 continue;
2747 if (!event_filter_match(event))
2748 continue;
2749
2750 /* may need to reset tstamp_enabled */
2751 if (is_cgroup_event(event))
2752 perf_cgroup_mark_enabled(event, ctx);
2753
2754 if (group_can_go_on(event, cpuctx, 1))
2755 group_sched_in(event, cpuctx, ctx);
2756
2757 /*
2758 * If this pinned group hasn't been scheduled,
2759 * put it in error state.
2760 */
2761 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2762 update_group_times(event);
2763 event->state = PERF_EVENT_STATE_ERROR;
2764 }
2765 }
2766}
2767
2768static void
2769ctx_flexible_sched_in(struct perf_event_context *ctx,
2770 struct perf_cpu_context *cpuctx)
2771{
2772 struct perf_event *event;
2773 int can_add_hw = 1;
2774
2775 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2776 /* Ignore events in OFF or ERROR state */
2777 if (event->state <= PERF_EVENT_STATE_OFF)
2778 continue;
2779 /*
2780 * Listen to the 'cpu' scheduling filter constraint
2781 * of events:
2782 */
2783 if (!event_filter_match(event))
2784 continue;
2785
2786 /* may need to reset tstamp_enabled */
2787 if (is_cgroup_event(event))
2788 perf_cgroup_mark_enabled(event, ctx);
2789
2790 if (group_can_go_on(event, cpuctx, can_add_hw)) {
2791 if (group_sched_in(event, cpuctx, ctx))
2792 can_add_hw = 0;
2793 }
2794 }
2795}
2796
2797static void
2798ctx_sched_in(struct perf_event_context *ctx,
2799 struct perf_cpu_context *cpuctx,
2800 enum event_type_t event_type,
2801 struct task_struct *task)
2802{
2803 int is_active = ctx->is_active;
2804 u64 now;
2805
2806 lockdep_assert_held(&ctx->lock);
2807
2808 if (likely(!ctx->nr_events))
2809 return;
2810
2811 ctx->is_active |= (event_type | EVENT_TIME);
2812 if (ctx->task) {
2813 if (!is_active)
2814 cpuctx->task_ctx = ctx;
2815 else
2816 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2817 }
2818
2819 is_active ^= ctx->is_active; /* changed bits */
2820
2821 if (is_active & EVENT_TIME) {
2822 /* start ctx time */
2823 now = perf_clock();
2824 ctx->timestamp = now;
2825 perf_cgroup_set_timestamp(task, ctx);
2826 }
2827
2828 /*
2829 * First go through the list and put on any pinned groups
2830 * in order to give them the best chance of going on.
2831 */
2832 if (is_active & EVENT_PINNED)
2833 ctx_pinned_sched_in(ctx, cpuctx);
2834
2835 /* Then walk through the lower prio flexible groups */
2836 if (is_active & EVENT_FLEXIBLE)
2837 ctx_flexible_sched_in(ctx, cpuctx);
2838}
2839
2840static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
2841 enum event_type_t event_type,
2842 struct task_struct *task)
2843{
2844 struct perf_event_context *ctx = &cpuctx->ctx;
2845
2846 ctx_sched_in(ctx, cpuctx, event_type, task);
2847}
2848
2849static void perf_event_context_sched_in(struct perf_event_context *ctx,
2850 struct task_struct *task)
2851{
2852 struct perf_cpu_context *cpuctx;
2853
2854 cpuctx = __get_cpu_context(ctx);
2855 if (cpuctx->task_ctx == ctx)
2856 return;
2857
2858 perf_ctx_lock(cpuctx, ctx);
2859 perf_pmu_disable(ctx->pmu);
2860 /*
2861 * We want to keep the following priority order:
2862 * cpu pinned (that don't need to move), task pinned,
2863 * cpu flexible, task flexible.
2864 */
2865 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2866 perf_event_sched_in(cpuctx, ctx, task);
2867 perf_pmu_enable(ctx->pmu);
2868 perf_ctx_unlock(cpuctx, ctx);
2869}
2870
2871/*
2872 * Called from scheduler to add the events of the current task
2873 * with interrupts disabled.
2874 *
2875 * We restore the event value and then enable it.
2876 *
2877 * This does not protect us against NMI, but enable()
2878 * sets the enabled bit in the control field of event _before_
2879 * accessing the event control register. If an NMI hits, then it will
2880 * keep the event running.
2881 */
2882void __perf_event_task_sched_in(struct task_struct *prev,
2883 struct task_struct *task)
2884{
2885 struct perf_event_context *ctx;
2886 int ctxn;
2887
2888 /*
2889 * If cgroup events exist on this CPU, then we need to check if we have
2890 * to switch in PMU state; cgroup events are system-wide mode only.
2891 *
2892 * Since cgroup events are CPU events, we must schedule these in before
2893 * we schedule in the task events.
2894 */
2895 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
2896 perf_cgroup_sched_in(prev, task);
2897
2898 for_each_task_context_nr(ctxn) {
2899 ctx = task->perf_event_ctxp[ctxn];
2900 if (likely(!ctx))
2901 continue;
2902
2903 perf_event_context_sched_in(ctx, task);
2904 }
2905
2906 if (atomic_read(&nr_switch_events))
2907 perf_event_switch(task, prev, true);
2908
2909 if (__this_cpu_read(perf_sched_cb_usages))
2910 perf_pmu_sched_task(prev, task, true);
2911}
2912
2913static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2914{
2915 u64 frequency = event->attr.sample_freq;
2916 u64 sec = NSEC_PER_SEC;
2917 u64 divisor, dividend;
2918
2919 int count_fls, nsec_fls, frequency_fls, sec_fls;
2920
2921 count_fls = fls64(count);
2922 nsec_fls = fls64(nsec);
2923 frequency_fls = fls64(frequency);
2924 sec_fls = 30;
2925
2926 /*
2927 * We got @count in @nsec, with a target of sample_freq HZ
2928 * the target period becomes:
2929 *
2930 * @count * 10^9
2931 * period = -------------------
2932 * @nsec * sample_freq
2933 *
2934 */
2935
2936 /*
2937 * Reduce accuracy by one bit such that @a and @b converge
2938 * to a similar magnitude.
2939 */
2940#define REDUCE_FLS(a, b) \
2941do { \
2942 if (a##_fls > b##_fls) { \
2943 a >>= 1; \
2944 a##_fls--; \
2945 } else { \
2946 b >>= 1; \
2947 b##_fls--; \
2948 } \
2949} while (0)
2950
2951 /*
2952 * Reduce accuracy until either term fits in a u64, then proceed with
2953 * the other, so that finally we can do a u64/u64 division.
2954 */
2955 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2956 REDUCE_FLS(nsec, frequency);
2957 REDUCE_FLS(sec, count);
2958 }
2959
2960 if (count_fls + sec_fls > 64) {
2961 divisor = nsec * frequency;
2962
2963 while (count_fls + sec_fls > 64) {
2964 REDUCE_FLS(count, sec);
2965 divisor >>= 1;
2966 }
2967
2968 dividend = count * sec;
2969 } else {
2970 dividend = count * sec;
2971
2972 while (nsec_fls + frequency_fls > 64) {
2973 REDUCE_FLS(nsec, frequency);
2974 dividend >>= 1;
2975 }
2976
2977 divisor = nsec * frequency;
2978 }
2979
2980 if (!divisor)
2981 return dividend;
2982
2983 return div64_u64(dividend, divisor);
2984}
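/*
 * Worked example for the formula above: @count = 1,000,000 events in
 * @nsec = 10,000,000 ns (i.e. 1e8 events/sec) with sample_freq = 1000
 * gives
 *
 *	period = (1e6 * 1e9) / (1e7 * 1000) = 100,000
 *
 * i.e. one sample every 100,000 events, which at the observed event
 * rate is indeed ~1000 samples/sec.
 */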
2985
2986static DEFINE_PER_CPU(int, perf_throttled_count);
2987static DEFINE_PER_CPU(u64, perf_throttled_seq);
2988
2989static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
2990{
2991 struct hw_perf_event *hwc = &event->hw;
2992 s64 period, sample_period;
2993 s64 delta;
2994
2995 period = perf_calculate_period(event, nsec, count);
2996
2997 delta = (s64)(period - hwc->sample_period);
2998 delta = (delta + 7) / 8; /* low pass filter */
2999
3000 sample_period = hwc->sample_period + delta;
3001
3002 if (!sample_period)
3003 sample_period = 1;
3004
3005 hwc->sample_period = sample_period;
3006
3007 if (local64_read(&hwc->period_left) > 8*sample_period) {
3008 if (disable)
3009 event->pmu->stop(event, PERF_EF_UPDATE);
3010
3011 local64_set(&hwc->period_left, 0);
3012
3013 if (disable)
3014 event->pmu->start(event, PERF_EF_RELOAD);
3015 }
3016}
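/*
 * For illustration, the (delta + 7) / 8 low pass filter above applies
 * only ~1/8th of the estimation error per invocation. E.g. with
 * hwc->sample_period == 1000 and a newly computed period of 1800:
 *
 *	delta = 800;  delta = (800 + 7) / 8 = 100;
 *	sample_period = 1000 + 100 = 1100;
 *
 * so repeated ticks converge on 1800 geometrically instead of chasing
 * every transient in the interrupt rate.
 */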
3017
3018/*
3019 * combine freq adjustment with unthrottling to avoid two passes over the
3020 * events. At the same time, make sure that having freq events does not
3021 * change the rate of unthrottling, as that would introduce bias.
3022 */
3023static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
3024 int needs_unthr)
3025{
3026 struct perf_event *event;
3027 struct hw_perf_event *hwc;
3028 u64 now, period = TICK_NSEC;
3029 s64 delta;
3030
3031 /*
3032 * We only need to iterate over all events if:
3033 * - the context has events in frequency mode (needs freq adjust), or
3034 * - there are events to unthrottle on this CPU
3035 */
3036 if (!(ctx->nr_freq || needs_unthr))
3037 return;
3038
3039 raw_spin_lock(&ctx->lock);
3040 perf_pmu_disable(ctx->pmu);
3041
3042 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3043 if (event->state != PERF_EVENT_STATE_ACTIVE)
3044 continue;
3045
3046 if (!event_filter_match(event))
3047 continue;
3048
3049 perf_pmu_disable(event->pmu);
3050
3051 hwc = &event->hw;
3052
3053 if (hwc->interrupts == MAX_INTERRUPTS) {
3054 hwc->interrupts = 0;
3055 perf_log_throttle(event, 1);
3056 event->pmu->start(event, 0);
3057 }
3058
3059 if (!event->attr.freq || !event->attr.sample_freq)
3060 goto next;
3061
3062 /*
3063 * stop the event and update event->count
3064 */
3065 event->pmu->stop(event, PERF_EF_UPDATE);
3066
3067 now = local64_read(&event->count);
3068 delta = now - hwc->freq_count_stamp;
3069 hwc->freq_count_stamp = now;
3070
3071 /*
3072 * Restart the event; reload only if the value has
3073 * changed.
3074 * We have already stopped the event, so tell that
3075 * to perf_adjust_period() to avoid stopping it
3076 * twice.
3077 */
3078 if (delta > 0)
3079 perf_adjust_period(event, period, delta, false);
3080
3081 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
3082 next:
3083 perf_pmu_enable(event->pmu);
3084 }
3085
3086 perf_pmu_enable(ctx->pmu);
3087 raw_spin_unlock(&ctx->lock);
3088}
3089
3090/*
3091 * Round-robin a context's events:
3092 */
3093static void rotate_ctx(struct perf_event_context *ctx)
3094{
3095 /*
3096 * Rotate the first entry of the non-pinned groups to the end of the
3097 * list. Rotation might be disabled by the inheritance code.
3098 */
3099 if (!ctx->rotate_disable)
3100 list_rotate_left(&ctx->flexible_groups);
3101}
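/*
 * For illustration: with flexible groups [A, B, C] competing for a PMU
 * that cannot hold them all, successive rotations walk the list
 *
 *	[A, B, C] -> [B, C, A] -> [C, A, B] -> ...
 *
 * so every group gets scheduled eventually; userspace scales the counts
 * by time_enabled/time_running to compensate for the multiplexing.
 */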
3102
3103static int perf_rotate_context(struct perf_cpu_context *cpuctx)
3104{
3105 struct perf_event_context *ctx = NULL;
3106 int rotate = 0;
3107
3108 if (cpuctx->ctx.nr_events) {
3109 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
3110 rotate = 1;
3111 }
3112
3113 ctx = cpuctx->task_ctx;
3114 if (ctx && ctx->nr_events) {
3115 if (ctx->nr_events != ctx->nr_active)
3116 rotate = 1;
3117 }
3118
3119 if (!rotate)
3120 goto done;
3121
3122 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3123 perf_pmu_disable(cpuctx->ctx.pmu);
3124
3125 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3126 if (ctx)
3127 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
3128
3129 rotate_ctx(&cpuctx->ctx);
3130 if (ctx)
3131 rotate_ctx(ctx);
3132
3133 perf_event_sched_in(cpuctx, ctx, current);
3134
3135 perf_pmu_enable(cpuctx->ctx.pmu);
3136 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
3137done:
3138
3139 return rotate;
3140}
3141
3142void perf_event_task_tick(void)
3143{
3144 struct list_head *head = this_cpu_ptr(&active_ctx_list);
3145 struct perf_event_context *ctx, *tmp;
3146 int throttled;
3147
3148 WARN_ON(!irqs_disabled());
3149
3150 __this_cpu_inc(perf_throttled_seq);
3151 throttled = __this_cpu_xchg(perf_throttled_count, 0);
3152 tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
3153
3154 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
3155 perf_adjust_freq_unthr_context(ctx, throttled);
3156}
3157
3158static int event_enable_on_exec(struct perf_event *event,
3159 struct perf_event_context *ctx)
3160{
3161 if (!event->attr.enable_on_exec)
3162 return 0;
3163
3164 event->attr.enable_on_exec = 0;
3165 if (event->state >= PERF_EVENT_STATE_INACTIVE)
3166 return 0;
3167
3168 __perf_event_mark_enabled(event);
3169
3170 return 1;
3171}
3172
3173/*
3174 * Enable all of a task's events that have been marked enable-on-exec.
3175 * This expects task == current.
3176 */
3177static void perf_event_enable_on_exec(int ctxn)
3178{
3179 struct perf_event_context *ctx, *clone_ctx = NULL;
3180 struct perf_cpu_context *cpuctx;
3181 struct perf_event *event;
3182 unsigned long flags;
3183 int enabled = 0;
3184
3185 local_irq_save(flags);
3186 ctx = current->perf_event_ctxp[ctxn];
3187 if (!ctx || !ctx->nr_events)
3188 goto out;
3189
3190 cpuctx = __get_cpu_context(ctx);
3191 perf_ctx_lock(cpuctx, ctx);
3192 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
3193 list_for_each_entry(event, &ctx->event_list, event_entry)
3194 enabled |= event_enable_on_exec(event, ctx);
3195
3196 /*
3197 * Unclone and reschedule this context if we enabled any event.
3198 */
3199 if (enabled) {
3200 clone_ctx = unclone_ctx(ctx);
3201 ctx_resched(cpuctx, ctx);
3202 }
3203 perf_ctx_unlock(cpuctx, ctx);
3204
3205out:
3206 local_irq_restore(flags);
3207
3208 if (clone_ctx)
3209 put_ctx(clone_ctx);
3210}
3211
3212void perf_event_exec(void)
3213{
3214 int ctxn;
3215
3216 rcu_read_lock();
3217 for_each_task_context_nr(ctxn)
3218 perf_event_enable_on_exec(ctxn);
3219 rcu_read_unlock();
3220}
3221
3222struct perf_read_data {
3223 struct perf_event *event;
3224 bool group;
3225 int ret;
3226};
3227
3228/*
3229 * Cross CPU call to read the hardware event
3230 */
3231static void __perf_event_read(void *info)
3232{
3233 struct perf_read_data *data = info;
3234 struct perf_event *sub, *event = data->event;
3235 struct perf_event_context *ctx = event->ctx;
3236 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
3237 struct pmu *pmu = event->pmu;
3238
3239 /*
3240 * If this is a task context, we need to check whether it is
3241 * the current task context of this CPU. If not, it has been
3242 * scheduled out before the smp call arrived. In that case
3243 * event->count would have been updated to a recent sample
3244 * when the event was scheduled out.
3245 */
3246 if (ctx->task && cpuctx->task_ctx != ctx)
3247 return;
3248
3249 raw_spin_lock(&ctx->lock);
3250 if (ctx->is_active) {
3251 update_context_time(ctx);
3252 update_cgrp_time_from_event(event);
3253 }
3254
3255 update_event_times(event);
3256 if (event->state != PERF_EVENT_STATE_ACTIVE)
3257 goto unlock;
3258
3259 if (!data->group) {
3260 pmu->read(event);
3261 data->ret = 0;
3262 goto unlock;
3263 }
3264
3265 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
3266
3267 pmu->read(event);
3268
3269 list_for_each_entry(sub, &event->sibling_list, group_entry) {
3270 update_event_times(sub);
3271 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
3272 /*
3273 * Use sibling's PMU rather than @event's since
3274 * the sibling could be on a different (e.g. software) PMU.
3275 */
3276 sub->pmu->read(sub);
3277 }
3278 }
3279
3280 data->ret = pmu->commit_txn(pmu);
3281
3282unlock:
3283 raw_spin_unlock(&ctx->lock);
3284}
3285
3286static inline u64 perf_event_count(struct perf_event *event)
3287{
3288 if (event->pmu->count)
3289 return event->pmu->count(event);
3290
3291 return __perf_event_count(event);
3292}
3293
3294/*
3295 * NMI-safe method to read a local event, that is an event that
3296 * is:
3297 * - either for the current task, or for this CPU
3298 * - does not have inherit set, because inherited task events
3299 * will not be local and we cannot read them atomically
3300 * - must not have a pmu::count method
3301 */
3302u64 perf_event_read_local(struct perf_event *event)
3303{
3304 unsigned long flags;
3305 u64 val;
3306
3307 /*
3308 * Disabling interrupts avoids all counter scheduling (context
3309 * switches, timer based rotation and IPIs).
3310 */
3311 local_irq_save(flags);
3312
3313 /* If this is a per-task event, it must be for current */
3314 WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
3315 event->hw.target != current);
3316
3317 /* If this is a per-CPU event, it must be for this CPU */
3318 WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
3319 event->cpu != smp_processor_id());
3320
3321 /*
3322 * It must not be an event with inherit set; we cannot read
3323 * all child counters from atomic context.
3324 */
3325 WARN_ON_ONCE(event->attr.inherit);
3326
3327 /*
3328 * It must not have a pmu::count method, those are not
3329 * NMI safe.
3330 */
3331 WARN_ON_ONCE(event->pmu->count);
3332
3333 /*
3334 * If the event is currently on this CPU, it's either a per-task event,
3335 * or local to this CPU. Furthermore it means it's ACTIVE (otherwise
3336 * oncpu == -1).
3337 */
3338 if (event->oncpu == smp_processor_id())
3339 event->pmu->read(event);
3340
3341 val = local64_read(&event->count);
3342 local_irq_restore(flags);
3343
3344 return val;
3345}
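/*
 * A hedged usage sketch: from contexts where sending an IPI is
 * impossible (the BPF helper bpf_perf_event_read() is a real caller),
 * the count can be sampled directly; section_to_measure() is
 * hypothetical:
 *
 *	u64 before, after;
 *
 *	before = perf_event_read_local(event);
 *	section_to_measure();
 *	after = perf_event_read_local(event);
 *	// after - before == events elapsed over the section
 */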
3346
3347static int perf_event_read(struct perf_event *event, bool group)
3348{
3349 int ret = 0;
3350
3351 /*
3352 * If event is enabled and currently active on a CPU, update the
3353 * value in the event structure:
3354 */
3355 if (event->state == PERF_EVENT_STATE_ACTIVE) {
3356 struct perf_read_data data = {
3357 .event = event,
3358 .group = group,
3359 .ret = 0,
3360 };
3361 smp_call_function_single(event->oncpu,
3362 __perf_event_read, &data, 1);
3363 ret = data.ret;
3364 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
3365 struct perf_event_context *ctx = event->ctx;
3366 unsigned long flags;
3367
3368 raw_spin_lock_irqsave(&ctx->lock, flags);
3369 /*
3370 * We may read while the context is not active
3371 * (e.g., the thread is blocked); in that case
3372 * we cannot update the context time.
3373 */
3374 if (ctx->is_active) {
3375 update_context_time(ctx);
3376 update_cgrp_time_from_event(event);
3377 }
3378 if (group)
3379 update_group_times(event);
3380 else
3381 update_event_times(event);
3382 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3383 }
3384
3385 return ret;
3386}
3387
3388/*
3389 * Initialize the perf_event context in a task_struct:
3390 */
3391static void __perf_event_init_context(struct perf_event_context *ctx)
3392{
3393 raw_spin_lock_init(&ctx->lock);
3394 mutex_init(&ctx->mutex);
3395 INIT_LIST_HEAD(&ctx->active_ctx_list);
3396 INIT_LIST_HEAD(&ctx->pinned_groups);
3397 INIT_LIST_HEAD(&ctx->flexible_groups);
3398 INIT_LIST_HEAD(&ctx->event_list);
3399 atomic_set(&ctx->refcount, 1);
3400}
3401
3402static struct perf_event_context *
3403alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3404{
3405 struct perf_event_context *ctx;
3406
3407 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3408 if (!ctx)
3409 return NULL;
3410
3411 __perf_event_init_context(ctx);
3412 if (task) {
3413 ctx->task = task;
3414 get_task_struct(task);
3415 }
3416 ctx->pmu = pmu;
3417
3418 return ctx;
3419}
3420
3421static struct task_struct *
3422find_lively_task_by_vpid(pid_t vpid)
3423{
3424 struct task_struct *task;
3425
3426 rcu_read_lock();
3427 if (!vpid)
3428 task = current;
3429 else
3430 task = find_task_by_vpid(vpid);
3431 if (task)
3432 get_task_struct(task);
3433 rcu_read_unlock();
3434
3435 if (!task)
3436 return ERR_PTR(-ESRCH);
3437
3438 return task;
3439}
3440
3441/*
3442 * Returns a matching context with refcount and pincount.
3443 */
3444static struct perf_event_context *
3445find_get_context(struct pmu *pmu, struct task_struct *task,
3446 struct perf_event *event)
3447{
3448 struct perf_event_context *ctx, *clone_ctx = NULL;
3449 struct perf_cpu_context *cpuctx;
3450 void *task_ctx_data = NULL;
3451 unsigned long flags;
3452 int ctxn, err;
3453 int cpu = event->cpu;
3454
3455 if (!task) {
3456 /* Must be root to operate on a CPU event: */
3457 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3458 return ERR_PTR(-EACCES);
3459
3460 /*
3461 * We could be clever and allow attaching an event to an
3462 * offline CPU and activate it when the CPU comes up, but
3463 * that's for later.
3464 */
3465 if (!cpu_online(cpu))
3466 return ERR_PTR(-ENODEV);
3467
3468 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
3469 ctx = &cpuctx->ctx;
3470 get_ctx(ctx);
3471 ++ctx->pin_count;
3472
3473 return ctx;
3474 }
3475
3476 err = -EINVAL;
3477 ctxn = pmu->task_ctx_nr;
3478 if (ctxn < 0)
3479 goto errout;
3480
3481 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
3482 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
3483 if (!task_ctx_data) {
3484 err = -ENOMEM;
3485 goto errout;
3486 }
3487 }
3488
3489retry:
3490 ctx = perf_lock_task_context(task, ctxn, &flags);
3491 if (ctx) {
3492 clone_ctx = unclone_ctx(ctx);
3493 ++ctx->pin_count;
3494
3495 if (task_ctx_data && !ctx->task_ctx_data) {
3496 ctx->task_ctx_data = task_ctx_data;
3497 task_ctx_data = NULL;
3498 }
3499 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3500
3501 if (clone_ctx)
3502 put_ctx(clone_ctx);
3503 } else {
3504 ctx = alloc_perf_context(pmu, task);
3505 err = -ENOMEM;
3506 if (!ctx)
3507 goto errout;
3508
3509 if (task_ctx_data) {
3510 ctx->task_ctx_data = task_ctx_data;
3511 task_ctx_data = NULL;
3512 }
3513
3514 err = 0;
3515 mutex_lock(&task->perf_event_mutex);
3516 /*
3517 * If the task has already passed perf_event_exit_task(),
3518 * we must see PF_EXITING; it takes this mutex too.
3519 */
3520 if (task->flags & PF_EXITING)
3521 err = -ESRCH;
3522 else if (task->perf_event_ctxp[ctxn])
3523 err = -EAGAIN;
3524 else {
3525 get_ctx(ctx);
3526 ++ctx->pin_count;
3527 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
3528 }
3529 mutex_unlock(&task->perf_event_mutex);
3530
3531 if (unlikely(err)) {
3532 put_ctx(ctx);
3533
3534 if (err == -EAGAIN)
3535 goto retry;
3536 goto errout;
3537 }
3538 }
3539
3540 kfree(task_ctx_data);
3541 return ctx;
3542
3543errout:
3544 kfree(task_ctx_data);
3545 return ERR_PTR(err);
3546}
3547
3548static void perf_event_free_filter(struct perf_event *event);
3549static void perf_event_free_bpf_prog(struct perf_event *event);
3550
3551static void free_event_rcu(struct rcu_head *head)
3552{
3553 struct perf_event *event;
3554
3555 event = container_of(head, struct perf_event, rcu_head);
3556 if (event->ns)
3557 put_pid_ns(event->ns);
3558 perf_event_free_filter(event);
3559 kfree(event);
3560}
3561
3562static void ring_buffer_attach(struct perf_event *event,
3563 struct ring_buffer *rb);
3564
3565static void unaccount_event_cpu(struct perf_event *event, int cpu)
3566{
3567 if (event->parent)
3568 return;
3569
3570 if (is_cgroup_event(event))
3571 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3572}
3573
3574#ifdef CONFIG_NO_HZ_FULL
3575static DEFINE_SPINLOCK(nr_freq_lock);
3576#endif
3577
3578static void unaccount_freq_event_nohz(void)
3579{
3580#ifdef CONFIG_NO_HZ_FULL
3581 spin_lock(&nr_freq_lock);
3582 if (atomic_dec_and_test(&nr_freq_events))
3583 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
3584 spin_unlock(&nr_freq_lock);
3585#endif
3586}
3587
3588static void unaccount_freq_event(void)
3589{
3590 if (tick_nohz_full_enabled())
3591 unaccount_freq_event_nohz();
3592 else
3593 atomic_dec(&nr_freq_events);
3594}
3595
3596static void unaccount_event(struct perf_event *event)
3597{
3598 bool dec = false;
3599
3600 if (event->parent)
3601 return;
3602
3603 if (event->attach_state & PERF_ATTACH_TASK)
3604 dec = true;
3605 if (event->attr.mmap || event->attr.mmap_data)
3606 atomic_dec(&nr_mmap_events);
3607 if (event->attr.comm)
3608 atomic_dec(&nr_comm_events);
3609 if (event->attr.task)
3610 atomic_dec(&nr_task_events);
3611 if (event->attr.freq)
3612 unaccount_freq_event();
3613 if (event->attr.context_switch) {
3614 dec = true;
3615 atomic_dec(&nr_switch_events);
3616 }
3617 if (is_cgroup_event(event))
3618 dec = true;
3619 if (has_branch_stack(event))
3620 dec = true;
3621
3622 if (dec) {
3623 if (!atomic_add_unless(&perf_sched_count, -1, 1))
3624 schedule_delayed_work(&perf_sched_work, HZ);
3625 }
3626
3627 unaccount_event_cpu(event, event->cpu);
3628}
3629
3630static void perf_sched_delayed(struct work_struct *work)
3631{
3632 mutex_lock(&perf_sched_mutex);
3633 if (atomic_dec_and_test(&perf_sched_count))
3634 static_branch_disable(&perf_sched_events);
3635 mutex_unlock(&perf_sched_mutex);
3636}
3637
3638/*
3639 * The following implement mutual exclusion of events on "exclusive" pmus
3640 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
3641 * at a time, so we disallow creating events that might conflict, namely:
3642 *
3643 * 1) cpu-wide events in the presence of per-task events,
3644 * 2) per-task events in the presence of cpu-wide events,
3645 * 3) two matching events on the same context.
3646 *
3647 * The former two cases are handled in the allocation path (perf_event_alloc(),
3648 * _free_event()), the latter -- before the first perf_install_in_context().
3649 */
3650static int exclusive_event_init(struct perf_event *event)
3651{
3652 struct pmu *pmu = event->pmu;
3653
3654 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3655 return 0;
3656
3657 /*
3658 * Prevent co-existence of per-task and cpu-wide events on the
3659 * same exclusive pmu.
3660 *
3661 * Negative pmu::exclusive_cnt means there are cpu-wide
3662 * events on this "exclusive" pmu, positive means there are
3663 * per-task events.
3664 *
3665 * Since this is called in perf_event_alloc() path, event::ctx
3666 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
3667 * to mean "per-task event", because unlike other attach states it
3668 * never gets cleared.
3669 */
3670 if (event->attach_state & PERF_ATTACH_TASK) {
3671 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
3672 return -EBUSY;
3673 } else {
3674 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
3675 return -EBUSY;
3676 }
3677
3678 return 0;
3679}
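/*
 * For illustration, pmu::exclusive_cnt thus acts as a signed mode
 * counter:
 *
 *	 0	no events; either kind may attach
 *	+n	n per-task events; atomic_dec_unless_positive() fails,
 *		so a cpu-wide event gets -EBUSY
 *	-n	n cpu-wide events; atomic_inc_unless_negative() fails,
 *		so a per-task event gets -EBUSY
 */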
3680
3681static void exclusive_event_destroy(struct perf_event *event)
3682{
3683 struct pmu *pmu = event->pmu;
3684
3685 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3686 return;
3687
3688 /* see comment in exclusive_event_init() */
3689 if (event->attach_state & PERF_ATTACH_TASK)
3690 atomic_dec(&pmu->exclusive_cnt);
3691 else
3692 atomic_inc(&pmu->exclusive_cnt);
3693}
3694
3695static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
3696{
3697 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) &&
3698 (e1->cpu == e2->cpu ||
3699 e1->cpu == -1 ||
3700 e2->cpu == -1))
3701 return true;
3702 return false;
3703}
3704
3705/* Called under the same ctx::mutex as perf_install_in_context() */
3706static bool exclusive_event_installable(struct perf_event *event,
3707 struct perf_event_context *ctx)
3708{
3709 struct perf_event *iter_event;
3710 struct pmu *pmu = event->pmu;
3711
3712 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3713 return true;
3714
3715 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
3716 if (exclusive_event_match(iter_event, event))
3717 return false;
3718 }
3719
3720 return true;
3721}
3722
3723static void _free_event(struct perf_event *event)
3724{
3725 irq_work_sync(&event->pending);
3726
3727 unaccount_event(event);
3728
3729 if (event->rb) {
3730 /*
3731 * Can happen when we close an event with re-directed output.
3732 *
3733 * Since we have a 0 refcount, perf_mmap_close() will skip
3734 * over us; possibly making our ring_buffer_put() the last.
3735 */
3736 mutex_lock(&event->mmap_mutex);
3737 ring_buffer_attach(event, NULL);
3738 mutex_unlock(&event->mmap_mutex);
3739 }
3740
3741 if (is_cgroup_event(event))
3742 perf_detach_cgroup(event);
3743
3744 if (!event->parent) {
3745 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3746 put_callchain_buffers();
3747 }
3748
3749 perf_event_free_bpf_prog(event);
3750
3751 if (event->destroy)
3752 event->destroy(event);
3753
3754 if (event->ctx)
3755 put_ctx(event->ctx);
3756
3757 if (event->pmu) {
3758 exclusive_event_destroy(event);
3759 module_put(event->pmu->module);
3760 }
3761
3762 call_rcu(&event->rcu_head, free_event_rcu);
3763}
3764
3765/*
3766 * Used to free events which have a known refcount of 1, such as in error paths
3767 * where the event isn't exposed yet, and for inherited events.
3768 */
3769static void free_event(struct perf_event *event)
3770{
3771 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
3772 "unexpected event refcount: %ld; ptr=%p\n",
3773 atomic_long_read(&event->refcount), event)) {
3774 /* leak to avoid use-after-free */
3775 return;
3776 }
3777
3778 _free_event(event);
3779}
3780
3781/*
3782 * Remove user event from the owner task.
3783 */
3784static void perf_remove_from_owner(struct perf_event *event)
3785{
3786 struct task_struct *owner;
3787
3788 rcu_read_lock();
3789 /*
3790 * Matches the smp_store_release() in perf_event_exit_task(). If we
3791 * observe !owner it means the list deletion is complete and we can
3792 * indeed free this event, otherwise we need to serialize on
3793 * owner->perf_event_mutex.
3794 */
3795 owner = lockless_dereference(event->owner);
3796 if (owner) {
3797 /*
3798 * Since delayed_put_task_struct() also drops the last
3799 * task reference we can safely take a new reference
3800 * while holding the rcu_read_lock().
3801 */
3802 get_task_struct(owner);
3803 }
3804 rcu_read_unlock();
3805
3806 if (owner) {
3807 /*
3808 * If we're here through perf_event_exit_task() we're already
3809 * holding ctx->mutex which would be an inversion wrt. the
3810 * normal lock order.
3811 *
3812 * However we can safely take this lock because it's the child
3813 * ctx->mutex.
3814 */
3815 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
3816
3817 /*
3818 * We have to re-check the event->owner field; if it is cleared
3819 * we raced with perf_event_exit_task(), acquiring the mutex
3820 * ensured they're done, and we can proceed with freeing the
3821 * event.
3822 */
3823 if (event->owner) {
3824 list_del_init(&event->owner_entry);
3825 smp_store_release(&event->owner, NULL);
3826 }
3827 mutex_unlock(&owner->perf_event_mutex);
3828 put_task_struct(owner);
3829 }
3830}
3831
3832static void put_event(struct perf_event *event)
3833{
3834 if (!atomic_long_dec_and_test(&event->refcount))
3835 return;
3836
3837 _free_event(event);
3838}
3839
3840/*
3841 * Kill an event dead; while event:refcount will preserve the event
3842 * object, it will not preserve its functionality. Once the last 'user'
3843 * gives up the object, we'll destroy the thing.
3844 */
3845int perf_event_release_kernel(struct perf_event *event)
3846{
3847 struct perf_event_context *ctx = event->ctx;
3848 struct perf_event *child, *tmp;
3849
3850 /*
3851 * If we got here through err_file: fput(event_file); we will not have
3852 * attached to a context yet.
3853 */
3854 if (!ctx) {
3855 WARN_ON_ONCE(event->attach_state &
3856 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
3857 goto no_ctx;
3858 }
3859
3860 if (!is_kernel_event(event))
3861 perf_remove_from_owner(event);
3862
3863 ctx = perf_event_ctx_lock(event);
3864 WARN_ON_ONCE(ctx->parent_ctx);
3865 perf_remove_from_context(event, DETACH_GROUP);
3866
3867 raw_spin_lock_irq(&ctx->lock);
3868 /*
3869 * Mark this event as STATE_DEAD; there is no external reference to it
3870 * anymore.
3871 *
3872 * Anybody acquiring event->child_mutex after the below loop _must_
3873 * also see this, most importantly inherit_event() which will avoid
3874 * placing more children on the list.
3875 *
3876 * Thus this guarantees that we will in fact observe and kill _ALL_
3877 * child events.
3878 */
3879 event->state = PERF_EVENT_STATE_DEAD;
3880 raw_spin_unlock_irq(&ctx->lock);
3881
3882 perf_event_ctx_unlock(event, ctx);
3883
3884again:
3885 mutex_lock(&event->child_mutex);
3886 list_for_each_entry(child, &event->child_list, child_list) {
3887
3888 /*
3889 * Cannot change, child events are not migrated, see the
3890 * comment with perf_event_ctx_lock_nested().
3891 */
3892 ctx = lockless_dereference(child->ctx);
3893 /*
3894 * Since child_mutex nests inside ctx::mutex, we must jump
3895 * through hoops. We start by grabbing a reference on the ctx.
3896 *
3897 * Since the event cannot get freed while we hold the
3898 * child_mutex, the context must also exist and have a !0
3899 * reference count.
3900 */
3901 get_ctx(ctx);
3902
3903 /*
3904 * Now that we have a ctx ref, we can drop child_mutex, and
3905 * acquire ctx::mutex without fear of it going away. Then we
3906 * can re-acquire child_mutex.
3907 */
3908 mutex_unlock(&event->child_mutex);
3909 mutex_lock(&ctx->mutex);
3910 mutex_lock(&event->child_mutex);
3911
3912 /*
3913 * Now that we hold ctx::mutex and child_mutex, revalidate our
3914 * state, if child is still the first entry, it didn't get freed
3915 * and we can continue doing so.
3916 */
3917 tmp = list_first_entry_or_null(&event->child_list,
3918 struct perf_event, child_list);
3919 if (tmp == child) {
3920 perf_remove_from_context(child, DETACH_GROUP);
3921 list_del(&child->child_list);
3922 free_event(child);
3923 /*
3924 * This matches the refcount bump in inherit_event();
3925 * this can't be the last reference.
3926 */
3927 put_event(event);
3928 }
3929
3930 mutex_unlock(&event->child_mutex);
3931 mutex_unlock(&ctx->mutex);
3932 put_ctx(ctx);
3933 goto again;
3934 }
3935 mutex_unlock(&event->child_mutex);
3936
3937no_ctx:
3938 put_event(event); /* Must be the 'last' reference */
3939 return 0;
3940}
3941EXPORT_SYMBOL_GPL(perf_event_release_kernel);
3942
3943/*
3944 * Called when the last reference to the file is gone.
3945 */
3946static int perf_release(struct inode *inode, struct file *file)
3947{
3948 perf_event_release_kernel(file->private_data);
3949 return 0;
3950}
3951
3952u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
3953{
3954 struct perf_event *child;
3955 u64 total = 0;
3956
3957 *enabled = 0;
3958 *running = 0;
3959
3960 mutex_lock(&event->child_mutex);
3961
3962 (void)perf_event_read(event, false);
3963 total += perf_event_count(event);
3964
3965 *enabled += event->total_time_enabled +
3966 atomic64_read(&event->child_total_time_enabled);
3967 *running += event->total_time_running +
3968 atomic64_read(&event->child_total_time_running);
3969
3970 list_for_each_entry(child, &event->child_list, child_list) {
3971 (void)perf_event_read(child, false);
3972 total += perf_event_count(child);
3973 *enabled += child->total_time_enabled;
3974 *running += child->total_time_running;
3975 }
3976 mutex_unlock(&event->child_mutex);
3977
3978 return total;
3979}
3980EXPORT_SYMBOL_GPL(perf_event_read_value);
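/*
 * A hedged usage sketch for the export above; 'event' is a counter the
 * caller created earlier, and the scaling step is only needed when the
 * event was multiplexed (running < enabled):
 *
 *	u64 enabled, running, count;
 *
 *	count = perf_event_read_value(event, &enabled, &running);
 *	if (running && running < enabled)
 *		count = div64_u64(count * enabled, running);
 */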
3981
3982static int __perf_read_group_add(struct perf_event *leader,
3983 u64 read_format, u64 *values)
3984{
3985 struct perf_event *sub;
3986 int n = 1; /* skip @nr */
3987 int ret;
3988
3989 ret = perf_event_read(leader, true);
3990 if (ret)
3991 return ret;
3992
3993 /*
3994 * Since we co-schedule groups, {enabled,running} times of siblings
3995 * will be identical to those of the leader, so we only publish one
3996 * set.
3997 */
3998 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3999 values[n++] += leader->total_time_enabled +
4000 atomic64_read(&leader->child_total_time_enabled);
4001 }
4002
4003 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4004 values[n++] += leader->total_time_running +
4005 atomic64_read(&leader->child_total_time_running);
4006 }
4007
4008 /*
4009 * Write {count,id} tuples for every sibling.
4010 */
4011 values[n++] += perf_event_count(leader);
4012 if (read_format & PERF_FORMAT_ID)
4013 values[n++] = primary_event_id(leader);
4014
4015 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4016 values[n++] += perf_event_count(sub);
4017 if (read_format & PERF_FORMAT_ID)
4018 values[n++] = primary_event_id(sub);
4019 }
4020
4021 return 0;
4022}
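/*
 * For illustration, with read_format = PERF_FORMAT_GROUP |
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
 * PERF_FORMAT_ID, a leader with two siblings fills @values as:
 *
 *	{ nr = 3, time_enabled, time_running,
 *	  leader_count, leader_id,
 *	  sibling1_count, sibling1_id,
 *	  sibling2_count, sibling2_id }
 */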
4023
4024static int perf_read_group(struct perf_event *event,
4025 u64 read_format, char __user *buf)
4026{
4027 struct perf_event *leader = event->group_leader, *child;
4028 struct perf_event_context *ctx = leader->ctx;
4029 int ret;
4030 u64 *values;
4031
4032 lockdep_assert_held(&ctx->mutex);
4033
4034 values = kzalloc(event->read_size, GFP_KERNEL);
4035 if (!values)
4036 return -ENOMEM;
4037
4038 values[0] = 1 + leader->nr_siblings;
4039
4040 /*
4041 * By locking the child_mutex of the leader we effectively
4042 * lock the child list of all siblings. XXX: explain how.
4043 */
4044 mutex_lock(&leader->child_mutex);
4045
4046 ret = __perf_read_group_add(leader, read_format, values);
4047 if (ret)
4048 goto unlock;
4049
4050 list_for_each_entry(child, &leader->child_list, child_list) {
4051 ret = __perf_read_group_add(child, read_format, values);
4052 if (ret)
4053 goto unlock;
4054 }
4055
4056 mutex_unlock(&leader->child_mutex);
4057
4058 ret = event->read_size;
4059 if (copy_to_user(buf, values, event->read_size))
4060 ret = -EFAULT;
4061 goto out;
4062
4063unlock:
4064 mutex_unlock(&leader->child_mutex);
4065out:
4066 kfree(values);
4067 return ret;
4068}
4069
4070static int perf_read_one(struct perf_event *event,
4071 u64 read_format, char __user *buf)
4072{
4073 u64 enabled, running;
4074 u64 values[4];
4075 int n = 0;
4076
4077 values[n++] = perf_event_read_value(event, &enabled, &running);
4078 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4079 values[n++] = enabled;
4080 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4081 values[n++] = running;
4082 if (read_format & PERF_FORMAT_ID)
4083 values[n++] = primary_event_id(event);
4084
4085 if (copy_to_user(buf, values, n * sizeof(u64)))
4086 return -EFAULT;
4087
4088 return n * sizeof(u64);
4089}
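
/*
 * For reference, the layout written above is what userspace sees from
 * read(2) on a non-group event. A minimal consumer, assuming the event
 * was opened with all three format bits set (a sketch, error handling
 * omitted):
 *
 *	struct {
 *		__u64 value;
 *		__u64 time_enabled;	// PERF_FORMAT_TOTAL_TIME_ENABLED
 *		__u64 time_running;	// PERF_FORMAT_TOTAL_TIME_RUNNING
 *		__u64 id;		// PERF_FORMAT_ID
 *	} rf;
 *
 *	if (read(perf_fd, &rf, sizeof(rf)) == sizeof(rf))
 *		printf("count=%llu\n", (unsigned long long)rf.value);
 *
 * Fields only appear for the bits actually set in attr.read_format, in
 * the order tested above.
 */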
4090
4091static bool is_event_hup(struct perf_event *event)
4092{
4093 bool no_children;
4094
4095 if (event->state > PERF_EVENT_STATE_EXIT)
4096 return false;
4097
4098 mutex_lock(&event->child_mutex);
4099 no_children = list_empty(&event->child_list);
4100 mutex_unlock(&event->child_mutex);
4101 return no_children;
4102}
4103
4104/*
4105 * Read the performance event - simple non-blocking version for now
4106 */
4107static ssize_t
4108__perf_read(struct perf_event *event, char __user *buf, size_t count)
4109{
4110 u64 read_format = event->attr.read_format;
4111 int ret;
4112
4113 /*
4114	 * Return end-of-file for a read on an event that is in
4115 * error state (i.e. because it was pinned but it couldn't be
4116 * scheduled on to the CPU at some point).
4117 */
4118 if (event->state == PERF_EVENT_STATE_ERROR)
4119 return 0;
4120
4121 if (count < event->read_size)
4122 return -ENOSPC;
4123
4124 WARN_ON_ONCE(event->ctx->parent_ctx);
4125 if (read_format & PERF_FORMAT_GROUP)
4126 ret = perf_read_group(event, read_format, buf);
4127 else
4128 ret = perf_read_one(event, read_format, buf);
4129
4130 return ret;
4131}
4132
4133static ssize_t
4134perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
4135{
4136 struct perf_event *event = file->private_data;
4137 struct perf_event_context *ctx;
4138 int ret;
4139
4140 ctx = perf_event_ctx_lock(event);
4141 ret = __perf_read(event, buf, count);
4142 perf_event_ctx_unlock(event, ctx);
4143
4144 return ret;
4145}
4146
4147static unsigned int perf_poll(struct file *file, poll_table *wait)
4148{
4149 struct perf_event *event = file->private_data;
4150 struct ring_buffer *rb;
4151 unsigned int events = POLLHUP;
4152
4153 poll_wait(file, &event->waitq, wait);
4154
4155 if (is_event_hup(event))
4156 return events;
4157
4158 /*
4159 * Pin the event->rb by taking event->mmap_mutex; otherwise
4160 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
4161 */
4162 mutex_lock(&event->mmap_mutex);
4163 rb = event->rb;
4164 if (rb)
4165 events = atomic_xchg(&rb->poll, 0);
4166 mutex_unlock(&event->mmap_mutex);
4167 return events;
4168}
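
/*
 * Userspace typically blocks on the ring buffer with poll(2); a sketch:
 *
 *	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLHUP)
 *			;	// exited event with no children, see is_event_hup()
 *		if (pfd.revents & POLLIN)
 *			;	// rb->poll said there is new data to consume
 *	}
 */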
4169
4170static void _perf_event_reset(struct perf_event *event)
4171{
4172 (void)perf_event_read(event, false);
4173 local64_set(&event->count, 0);
4174 perf_event_update_userpage(event);
4175}
4176
4177/*
4178 * Holding the top-level event's child_mutex means that any
4179 * descendant process that has inherited this event will block
4180 * in perf_event_exit_event() if it goes to exit, thus satisfying the
4181 * task existence requirements of perf_event_enable/disable.
4182 */
4183static void perf_event_for_each_child(struct perf_event *event,
4184 void (*func)(struct perf_event *))
4185{
4186 struct perf_event *child;
4187
4188 WARN_ON_ONCE(event->ctx->parent_ctx);
4189
4190 mutex_lock(&event->child_mutex);
4191 func(event);
4192 list_for_each_entry(child, &event->child_list, child_list)
4193 func(child);
4194 mutex_unlock(&event->child_mutex);
4195}
4196
4197static void perf_event_for_each(struct perf_event *event,
4198 void (*func)(struct perf_event *))
4199{
4200 struct perf_event_context *ctx = event->ctx;
4201 struct perf_event *sibling;
4202
4203 lockdep_assert_held(&ctx->mutex);
4204
4205 event = event->group_leader;
4206
4207 perf_event_for_each_child(event, func);
4208 list_for_each_entry(sibling, &event->sibling_list, group_entry)
4209 perf_event_for_each_child(sibling, func);
4210}
4211
4212static void __perf_event_period(struct perf_event *event,
4213 struct perf_cpu_context *cpuctx,
4214 struct perf_event_context *ctx,
4215 void *info)
4216{
4217 u64 value = *((u64 *)info);
4218 bool active;
4219
4220 if (event->attr.freq) {
4221 event->attr.sample_freq = value;
4222 } else {
4223 event->attr.sample_period = value;
4224 event->hw.sample_period = value;
4225 }
4226
4227 active = (event->state == PERF_EVENT_STATE_ACTIVE);
4228 if (active) {
4229 perf_pmu_disable(ctx->pmu);
4230 /*
4231 * We could be throttled; unthrottle now to avoid the tick
4232 * trying to unthrottle while we already re-started the event.
4233 */
4234 if (event->hw.interrupts == MAX_INTERRUPTS) {
4235 event->hw.interrupts = 0;
4236 perf_log_throttle(event, 1);
4237 }
4238 event->pmu->stop(event, PERF_EF_UPDATE);
4239 }
4240
4241 local64_set(&event->hw.period_left, 0);
4242
4243 if (active) {
4244 event->pmu->start(event, PERF_EF_RELOAD);
4245 perf_pmu_enable(ctx->pmu);
4246 }
4247}
4248
4249static int perf_event_period(struct perf_event *event, u64 __user *arg)
4250{
4251 u64 value;
4252
4253 if (!is_sampling_event(event))
4254 return -EINVAL;
4255
4256 if (copy_from_user(&value, arg, sizeof(value)))
4257 return -EFAULT;
4258
4259 if (!value)
4260 return -EINVAL;
4261
4262 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4263 return -EINVAL;
4264
4265 event_function_call(event, __perf_event_period, &value);
4266
4267 return 0;
4268}
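
/*
 * From userspace the period update looks like the sketch below; for an
 * event opened with attr.freq set, the same ioctl reinterprets the value
 * as a sample frequency instead:
 *
 *	__u64 new_period = 100000;
 *
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &new_period))
 *		perror("PERF_EVENT_IOC_PERIOD");
 */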
4269
4270static const struct file_operations perf_fops;
4271
4272static inline int perf_fget_light(int fd, struct fd *p)
4273{
4274 struct fd f = fdget(fd);
4275 if (!f.file)
4276 return -EBADF;
4277
4278 if (f.file->f_op != &perf_fops) {
4279 fdput(f);
4280 return -EBADF;
4281 }
4282 *p = f;
4283 return 0;
4284}
4285
4286static int perf_event_set_output(struct perf_event *event,
4287 struct perf_event *output_event);
4288static int perf_event_set_filter(struct perf_event *event, void __user *arg);
4289static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
4290
4291static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
4292{
4293 void (*func)(struct perf_event *);
4294 u32 flags = arg;
4295
4296 switch (cmd) {
4297 case PERF_EVENT_IOC_ENABLE:
4298 func = _perf_event_enable;
4299 break;
4300 case PERF_EVENT_IOC_DISABLE:
4301 func = _perf_event_disable;
4302 break;
4303 case PERF_EVENT_IOC_RESET:
4304 func = _perf_event_reset;
4305 break;
4306
4307 case PERF_EVENT_IOC_REFRESH:
4308 return _perf_event_refresh(event, arg);
4309
4310 case PERF_EVENT_IOC_PERIOD:
4311 return perf_event_period(event, (u64 __user *)arg);
4312
4313 case PERF_EVENT_IOC_ID:
4314 {
4315 u64 id = primary_event_id(event);
4316
4317 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4318 return -EFAULT;
4319 return 0;
4320 }
4321
4322 case PERF_EVENT_IOC_SET_OUTPUT:
4323 {
4324 int ret;
4325 if (arg != -1) {
4326 struct perf_event *output_event;
4327 struct fd output;
4328 ret = perf_fget_light(arg, &output);
4329 if (ret)
4330 return ret;
4331 output_event = output.file->private_data;
4332 ret = perf_event_set_output(event, output_event);
4333 fdput(output);
4334 } else {
4335 ret = perf_event_set_output(event, NULL);
4336 }
4337 return ret;
4338 }
4339
4340 case PERF_EVENT_IOC_SET_FILTER:
4341 return perf_event_set_filter(event, (void __user *)arg);
4342
4343 case PERF_EVENT_IOC_SET_BPF:
4344 return perf_event_set_bpf_prog(event, arg);
4345
4346 default:
4347 return -ENOTTY;
4348 }
4349
4350 if (flags & PERF_IOC_FLAG_GROUP)
4351 perf_event_for_each(event, func);
4352 else
4353 perf_event_for_each_child(event, func);
4354
4355 return 0;
4356}
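
/*
 * The PERF_IOC_FLAG_GROUP handling above is what lets a single call act
 * on a whole group, e.g. (sketch):
 *
 *	ioctl(leader_fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
 *	ioctl(leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *
 * Without the flag, only the named event and its inherited children are
 * affected.
 */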
4357
4358static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4359{
4360 struct perf_event *event = file->private_data;
4361 struct perf_event_context *ctx;
4362 long ret;
4363
4364 ctx = perf_event_ctx_lock(event);
4365 ret = _perf_ioctl(event, cmd, arg);
4366 perf_event_ctx_unlock(event, ctx);
4367
4368 return ret;
4369}
4370
4371#ifdef CONFIG_COMPAT
4372static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4373 unsigned long arg)
4374{
4375 switch (_IOC_NR(cmd)) {
4376 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4377 case _IOC_NR(PERF_EVENT_IOC_ID):
4378		/* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
4379 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4380 cmd &= ~IOCSIZE_MASK;
4381 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4382 }
4383 break;
4384 }
4385 return perf_ioctl(file, cmd, arg);
4386}
4387#else
4388# define perf_compat_ioctl NULL
4389#endif
4390
4391int perf_event_task_enable(void)
4392{
4393 struct perf_event_context *ctx;
4394 struct perf_event *event;
4395
4396	mutex_lock(&current->perf_event_mutex);
4397	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4398 ctx = perf_event_ctx_lock(event);
4399 perf_event_for_each_child(event, _perf_event_enable);
4400 perf_event_ctx_unlock(event, ctx);
4401 }
4402	mutex_unlock(&current->perf_event_mutex);
4403
4404 return 0;
4405}
4406
4407int perf_event_task_disable(void)
4408{
4409 struct perf_event_context *ctx;
4410 struct perf_event *event;
4411
4412	mutex_lock(&current->perf_event_mutex);
4413	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4414 ctx = perf_event_ctx_lock(event);
4415 perf_event_for_each_child(event, _perf_event_disable);
4416 perf_event_ctx_unlock(event, ctx);
4417 }
4418	mutex_unlock(&current->perf_event_mutex);
4419
4420 return 0;
4421}
4422
4423static int perf_event_index(struct perf_event *event)
4424{
4425 if (event->hw.state & PERF_HES_STOPPED)
4426 return 0;
4427
4428 if (event->state != PERF_EVENT_STATE_ACTIVE)
4429 return 0;
4430
4431 return event->pmu->event_idx(event);
4432}
4433
4434static void calc_timer_values(struct perf_event *event,
4435 u64 *now,
4436 u64 *enabled,
4437 u64 *running)
4438{
4439 u64 ctx_time;
4440
4441 *now = perf_clock();
4442 ctx_time = event->shadow_ctx_time + *now;
4443 *enabled = ctx_time - event->tstamp_enabled;
4444 *running = ctx_time - event->tstamp_running;
4445}
4446
4447static void perf_event_init_userpage(struct perf_event *event)
4448{
4449 struct perf_event_mmap_page *userpg;
4450 struct ring_buffer *rb;
4451
4452 rcu_read_lock();
4453 rb = rcu_dereference(event->rb);
4454 if (!rb)
4455 goto unlock;
4456
4457 userpg = rb->user_page;
4458
4459 /* Allow new userspace to detect that bit 0 is deprecated */
4460 userpg->cap_bit0_is_deprecated = 1;
4461 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
4462 userpg->data_offset = PAGE_SIZE;
4463 userpg->data_size = perf_data_size(rb);
4464
4465unlock:
4466 rcu_read_unlock();
4467}
4468
4469void __weak arch_perf_update_userpage(
4470 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
4471{
4472}
4473
4474/*
4475 * Callers need to ensure there can be no nesting of this function, otherwise
4476 * the seqlock logic goes bad. We cannot serialize this because the arch
4477 * code calls this from NMI context.
4478 */
4479void perf_event_update_userpage(struct perf_event *event)
4480{
4481 struct perf_event_mmap_page *userpg;
4482 struct ring_buffer *rb;
4483 u64 enabled, running, now;
4484
4485 rcu_read_lock();
4486 rb = rcu_dereference(event->rb);
4487 if (!rb)
4488 goto unlock;
4489
4490 /*
4491 * compute total_time_enabled, total_time_running
4492 * based on snapshot values taken when the event
4493 * was last scheduled in.
4494 *
4495	 * we cannot simply call update_context_time()
4496	 * because of locking issues, as we can be called in
4497 * NMI context
4498 */
4499 calc_timer_values(event, &now, &enabled, &running);
4500
4501 userpg = rb->user_page;
4502 /*
4503 * Disable preemption so as to not let the corresponding user-space
4504 * spin too long if we get preempted.
4505 */
4506 preempt_disable();
4507 ++userpg->lock;
4508 barrier();
4509 userpg->index = perf_event_index(event);
4510 userpg->offset = perf_event_count(event);
4511 if (userpg->index)
4512 userpg->offset -= local64_read(&event->hw.prev_count);
4513
4514 userpg->time_enabled = enabled +
4515 atomic64_read(&event->child_total_time_enabled);
4516
4517 userpg->time_running = running +
4518 atomic64_read(&event->child_total_time_running);
4519
4520 arch_perf_update_userpage(event, userpg, now);
4521
4522 barrier();
4523 ++userpg->lock;
4524 preempt_enable();
4525unlock:
4526 rcu_read_unlock();
4527}
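
/*
 * The ->lock increments and barrier() pairs above form a seqcount;
 * userspace reading the self-monitoring page must retry whenever it
 * observes the sequence count change underneath it (a sketch; barrier()
 * stands in for a compiler barrier):
 *
 *	struct perf_event_mmap_page *pg = mapped_base;
 *	__u32 seq;
 *	__s64 count;
 *
 *	do {
 *		seq = pg->lock;
 *		barrier();
 *		count = pg->offset;	// plus the hw counter via rdpmc
 *					// when pg->index != 0
 *		barrier();
 *	} while (pg->lock != seq);
 */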
4528
4529static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4530{
4531 struct perf_event *event = vma->vm_file->private_data;
4532 struct ring_buffer *rb;
4533 int ret = VM_FAULT_SIGBUS;
4534
4535 if (vmf->flags & FAULT_FLAG_MKWRITE) {
4536 if (vmf->pgoff == 0)
4537 ret = 0;
4538 return ret;
4539 }
4540
4541 rcu_read_lock();
4542 rb = rcu_dereference(event->rb);
4543 if (!rb)
4544 goto unlock;
4545
4546 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
4547 goto unlock;
4548
4549 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
4550 if (!vmf->page)
4551 goto unlock;
4552
4553 get_page(vmf->page);
4554 vmf->page->mapping = vma->vm_file->f_mapping;
4555 vmf->page->index = vmf->pgoff;
4556
4557 ret = 0;
4558unlock:
4559 rcu_read_unlock();
4560
4561 return ret;
4562}
4563
4564static void ring_buffer_attach(struct perf_event *event,
4565 struct ring_buffer *rb)
4566{
4567 struct ring_buffer *old_rb = NULL;
4568 unsigned long flags;
4569
4570 if (event->rb) {
4571 /*
4572 * Should be impossible, we set this when removing
4573 * event->rb_entry and wait/clear when adding event->rb_entry.
4574 */
4575 WARN_ON_ONCE(event->rcu_pending);
4576
4577 old_rb = event->rb;
4578 spin_lock_irqsave(&old_rb->event_lock, flags);
4579 list_del_rcu(&event->rb_entry);
4580 spin_unlock_irqrestore(&old_rb->event_lock, flags);
4581
4582 event->rcu_batches = get_state_synchronize_rcu();
4583 event->rcu_pending = 1;
4584 }
4585
4586 if (rb) {
4587 if (event->rcu_pending) {
4588 cond_synchronize_rcu(event->rcu_batches);
4589 event->rcu_pending = 0;
4590 }
4591
4592 spin_lock_irqsave(&rb->event_lock, flags);
4593 list_add_rcu(&event->rb_entry, &rb->event_list);
4594 spin_unlock_irqrestore(&rb->event_lock, flags);
4595 }
4596
4597 rcu_assign_pointer(event->rb, rb);
4598
4599 if (old_rb) {
4600 ring_buffer_put(old_rb);
4601 /*
4602		 * Since we detached the old rb before installing the new one
4603		 * (so that we could attach the new rb), we could have missed a wakeup.
4604 * Provide it now.
4605 */
4606 wake_up_all(&event->waitq);
4607 }
4608}
4609
4610static void ring_buffer_wakeup(struct perf_event *event)
4611{
4612 struct ring_buffer *rb;
4613
4614 rcu_read_lock();
4615 rb = rcu_dereference(event->rb);
4616 if (rb) {
4617 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
4618 wake_up_all(&event->waitq);
4619 }
4620 rcu_read_unlock();
4621}
4622
4623struct ring_buffer *ring_buffer_get(struct perf_event *event)
4624{
4625 struct ring_buffer *rb;
4626
4627 rcu_read_lock();
4628 rb = rcu_dereference(event->rb);
4629 if (rb) {
4630 if (!atomic_inc_not_zero(&rb->refcount))
4631 rb = NULL;
4632 }
4633 rcu_read_unlock();
4634
4635 return rb;
4636}
4637
4638void ring_buffer_put(struct ring_buffer *rb)
4639{
4640 if (!atomic_dec_and_test(&rb->refcount))
4641 return;
4642
4643 WARN_ON_ONCE(!list_empty(&rb->event_list));
4644
4645 call_rcu(&rb->rcu_head, rb_free_rcu);
4646}
4647
4648static void perf_mmap_open(struct vm_area_struct *vma)
4649{
4650 struct perf_event *event = vma->vm_file->private_data;
4651
4652 atomic_inc(&event->mmap_count);
4653 atomic_inc(&event->rb->mmap_count);
4654
4655 if (vma->vm_pgoff)
4656 atomic_inc(&event->rb->aux_mmap_count);
4657
4658 if (event->pmu->event_mapped)
4659 event->pmu->event_mapped(event);
4660}
4661
4662/*
4663 * A buffer can be mmap()ed multiple times; either directly through the same
4664 * event, or through other events by use of perf_event_set_output().
4665 *
4666 * In order to undo the VM accounting done by perf_mmap() we need to destroy
4667 * the buffer here, where we still have a VM context. This means we need
4668 * to detach all events redirecting to us.
4669 */
4670static void perf_mmap_close(struct vm_area_struct *vma)
4671{
4672 struct perf_event *event = vma->vm_file->private_data;
4673
4674 struct ring_buffer *rb = ring_buffer_get(event);
4675 struct user_struct *mmap_user = rb->mmap_user;
4676 int mmap_locked = rb->mmap_locked;
4677 unsigned long size = perf_data_size(rb);
4678
4679 if (event->pmu->event_unmapped)
4680 event->pmu->event_unmapped(event);
4681
4682 /*
4683 * rb->aux_mmap_count will always drop before rb->mmap_count and
4684 * event->mmap_count, so it is ok to use event->mmap_mutex to
4685 * serialize with perf_mmap here.
4686 */
4687 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
4688 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
4689 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
4690 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
4691
4692 rb_free_aux(rb);
4693 mutex_unlock(&event->mmap_mutex);
4694 }
4695
4696 atomic_dec(&rb->mmap_count);
4697
4698 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
4699 goto out_put;
4700
4701 ring_buffer_attach(event, NULL);
4702 mutex_unlock(&event->mmap_mutex);
4703
4704 /* If there's still other mmap()s of this buffer, we're done. */
4705 if (atomic_read(&rb->mmap_count))
4706 goto out_put;
4707
4708 /*
4709 * No other mmap()s, detach from all other events that might redirect
4710 * into the now unreachable buffer. Somewhat complicated by the
4711 * fact that rb::event_lock otherwise nests inside mmap_mutex.
4712 */
4713again:
4714 rcu_read_lock();
4715 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
4716 if (!atomic_long_inc_not_zero(&event->refcount)) {
4717 /*
4718 * This event is en-route to free_event() which will
4719 * detach it and remove it from the list.
4720 */
4721 continue;
4722 }
4723 rcu_read_unlock();
4724
4725 mutex_lock(&event->mmap_mutex);
4726 /*
4727 * Check we didn't race with perf_event_set_output() which can
4728 * swizzle the rb from under us while we were waiting to
4729 * acquire mmap_mutex.
4730 *
4731		 * If we find a different rb, ignore this event; a later
4732		 * iteration will no longer find it on the list. We still
4733		 * have to restart the iteration to make sure we're not now
4734		 * iterating the wrong list.
4735 */
4736 if (event->rb == rb)
4737 ring_buffer_attach(event, NULL);
4738
4739 mutex_unlock(&event->mmap_mutex);
4740 put_event(event);
4741
4742 /*
4743 * Restart the iteration; either we're on the wrong list or
4744		 * we destroyed its integrity by doing a deletion.
4745 */
4746 goto again;
4747 }
4748 rcu_read_unlock();
4749
4750 /*
4751	 * There could still be a few 0-ref events on the list; they'll
4752 * get cleaned up by free_event() -- they'll also still have their
4753 * ref on the rb and will free it whenever they are done with it.
4754 *
4755 * Aside from that, this buffer is 'fully' detached and unmapped,
4756 * undo the VM accounting.
4757 */
4758
4759 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
4760 vma->vm_mm->pinned_vm -= mmap_locked;
4761 free_uid(mmap_user);
4762
4763out_put:
4764 ring_buffer_put(rb); /* could be last */
4765}
4766
4767static const struct vm_operations_struct perf_mmap_vmops = {
4768 .open = perf_mmap_open,
4769	.close		= perf_mmap_close, /* non-mergeable */
4770 .fault = perf_mmap_fault,
4771 .page_mkwrite = perf_mmap_fault,
4772};
4773
4774static int perf_mmap(struct file *file, struct vm_area_struct *vma)
4775{
4776 struct perf_event *event = file->private_data;
4777 unsigned long user_locked, user_lock_limit;
4778 struct user_struct *user = current_user();
4779 unsigned long locked, lock_limit;
4780 struct ring_buffer *rb = NULL;
4781 unsigned long vma_size;
4782 unsigned long nr_pages;
4783 long user_extra = 0, extra = 0;
4784 int ret = 0, flags = 0;
4785
4786 /*
4787 * Don't allow mmap() of inherited per-task counters. This would
4788 * create a performance issue due to all children writing to the
4789 * same rb.
4790 */
4791 if (event->cpu == -1 && event->attr.inherit)
4792 return -EINVAL;
4793
4794 if (!(vma->vm_flags & VM_SHARED))
4795 return -EINVAL;
4796
4797 vma_size = vma->vm_end - vma->vm_start;
4798
4799 if (vma->vm_pgoff == 0) {
4800 nr_pages = (vma_size / PAGE_SIZE) - 1;
4801 } else {
4802 /*
4803 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
4804		 * mapped; all subsequent mappings should have the same size
4805 * and offset. Must be above the normal perf buffer.
4806 */
4807 u64 aux_offset, aux_size;
4808
4809 if (!event->rb)
4810 return -EINVAL;
4811
4812 nr_pages = vma_size / PAGE_SIZE;
4813
4814 mutex_lock(&event->mmap_mutex);
4815 ret = -EINVAL;
4816
4817 rb = event->rb;
4818 if (!rb)
4819 goto aux_unlock;
4820
4821 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
4822 aux_size = ACCESS_ONCE(rb->user_page->aux_size);
4823
4824 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
4825 goto aux_unlock;
4826
4827 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
4828 goto aux_unlock;
4829
4830 /* already mapped with a different offset */
4831 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
4832 goto aux_unlock;
4833
4834 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
4835 goto aux_unlock;
4836
4837 /* already mapped with a different size */
4838 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
4839 goto aux_unlock;
4840
4841 if (!is_power_of_2(nr_pages))
4842 goto aux_unlock;
4843
4844 if (!atomic_inc_not_zero(&rb->mmap_count))
4845 goto aux_unlock;
4846
4847 if (rb_has_aux(rb)) {
4848 atomic_inc(&rb->aux_mmap_count);
4849 ret = 0;
4850 goto unlock;
4851 }
4852
4853 atomic_set(&rb->aux_mmap_count, 1);
4854 user_extra = nr_pages;
4855
4856 goto accounting;
4857 }
4858
4859 /*
4860	 * If we have rb pages, ensure they're a power-of-two number, so we
4861	 * can use bitmasks instead of modulo.
4862 */
4863 if (nr_pages != 0 && !is_power_of_2(nr_pages))
4864 return -EINVAL;
4865
4866 if (vma_size != PAGE_SIZE * (1 + nr_pages))
4867 return -EINVAL;
4868
4869 WARN_ON_ONCE(event->ctx->parent_ctx);
4870again:
4871 mutex_lock(&event->mmap_mutex);
4872 if (event->rb) {
4873 if (event->rb->nr_pages != nr_pages) {
4874 ret = -EINVAL;
4875 goto unlock;
4876 }
4877
4878 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
4879 /*
4880 * Raced against perf_mmap_close() through
4881 * perf_event_set_output(). Try again, hope for better
4882 * luck.
4883 */
4884 mutex_unlock(&event->mmap_mutex);
4885 goto again;
4886 }
4887
4888 goto unlock;
4889 }
4890
4891 user_extra = nr_pages + 1;
4892
4893accounting:
4894 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
4895
4896 /*
4897 * Increase the limit linearly with more CPUs:
4898 */
4899 user_lock_limit *= num_online_cpus();
4900
4901 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
4902
4903 if (user_locked > user_lock_limit)
4904 extra = user_locked - user_lock_limit;
4905
4906 lock_limit = rlimit(RLIMIT_MEMLOCK);
4907 lock_limit >>= PAGE_SHIFT;
4908 locked = vma->vm_mm->pinned_vm + extra;
4909
4910 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
4911 !capable(CAP_IPC_LOCK)) {
4912 ret = -EPERM;
4913 goto unlock;
4914 }
4915
4916 WARN_ON(!rb && event->rb);
4917
4918 if (vma->vm_flags & VM_WRITE)
4919 flags |= RING_BUFFER_WRITABLE;
4920
4921 if (!rb) {
4922 rb = rb_alloc(nr_pages,
4923 event->attr.watermark ? event->attr.wakeup_watermark : 0,
4924 event->cpu, flags);
4925
4926 if (!rb) {
4927 ret = -ENOMEM;
4928 goto unlock;
4929 }
4930
4931 atomic_set(&rb->mmap_count, 1);
4932 rb->mmap_user = get_current_user();
4933 rb->mmap_locked = extra;
4934
4935 ring_buffer_attach(event, rb);
4936
4937 perf_event_init_userpage(event);
4938 perf_event_update_userpage(event);
4939 } else {
4940 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
4941 event->attr.aux_watermark, flags);
4942 if (!ret)
4943 rb->aux_mmap_locked = extra;
4944 }
4945
4946unlock:
4947 if (!ret) {
4948 atomic_long_add(user_extra, &user->locked_vm);
4949 vma->vm_mm->pinned_vm += extra;
4950
4951 atomic_inc(&event->mmap_count);
4952 } else if (rb) {
4953 atomic_dec(&rb->mmap_count);
4954 }
4955aux_unlock:
4956 mutex_unlock(&event->mmap_mutex);
4957
4958 /*
4959 * Since pinned accounting is per vm we cannot allow fork() to copy our
4960 * vma.
4961 */
4962 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
4963 vma->vm_ops = &perf_mmap_vmops;
4964
4965 if (event->pmu->event_mapped)
4966 event->pmu->event_mapped(event);
4967
4968 return ret;
4969}
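
/*
 * Putting the layout together: offset 0 maps the control page plus 2^n
 * data pages; the AUX area is a second mmap() at the offset advertised
 * in the control page. A userspace sketch (sizes are examples, error
 * handling omitted):
 *
 *	size_t psz = sysconf(_SC_PAGESIZE);
 *	void *base = mmap(NULL, (1 + 8) * psz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, perf_fd, 0);
 *	struct perf_event_mmap_page *pg = base;
 *
 *	pg->aux_offset = (1 + 8) * psz;
 *	pg->aux_size   = 16 * psz;
 *	void *aux = mmap(NULL, pg->aux_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, perf_fd, pg->aux_offset);
 */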
4970
4971static int perf_fasync(int fd, struct file *filp, int on)
4972{
4973 struct inode *inode = file_inode(filp);
4974 struct perf_event *event = filp->private_data;
4975 int retval;
4976
4977 inode_lock(inode);
4978 retval = fasync_helper(fd, filp, on, &event->fasync);
4979 inode_unlock(inode);
4980
4981 if (retval < 0)
4982 return retval;
4983
4984 return 0;
4985}
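
/*
 * To actually get SIGIO on wakeups, userspace pairs this with the usual
 * fcntl() dance (sketch):
 *
 *	fcntl(perf_fd, F_SETOWN, getpid());
 *	fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL) | O_ASYNC);
 *
 * kill_fasync() in perf_event_wakeup() below then delivers the signal.
 */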
4986
4987static const struct file_operations perf_fops = {
4988 .llseek = no_llseek,
4989 .release = perf_release,
4990 .read = perf_read,
4991 .poll = perf_poll,
4992 .unlocked_ioctl = perf_ioctl,
4993 .compat_ioctl = perf_compat_ioctl,
4994 .mmap = perf_mmap,
4995 .fasync = perf_fasync,
4996};
4997
4998/*
4999 * Perf event wakeup
5000 *
5001 * If there's data, ensure we set the poll() state and publish everything
5002 * to user-space before waking everybody up.
5003 */
5004
5005static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
5006{
5007 /* only the parent has fasync state */
5008 if (event->parent)
5009 event = event->parent;
5010 return &event->fasync;
5011}
5012
5013void perf_event_wakeup(struct perf_event *event)
5014{
5015 ring_buffer_wakeup(event);
5016
5017 if (event->pending_kill) {
5018 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
5019 event->pending_kill = 0;
5020 }
5021}
5022
5023static void perf_pending_event(struct irq_work *entry)
5024{
5025 struct perf_event *event = container_of(entry,
5026 struct perf_event, pending);
5027 int rctx;
5028
5029 rctx = perf_swevent_get_recursion_context();
5030 /*
5031 * If we 'fail' here, that's OK, it means recursion is already disabled
5032 * and we won't recurse 'further'.
5033 */
5034
5035 if (event->pending_disable) {
5036 event->pending_disable = 0;
5037 perf_event_disable_local(event);
5038 }
5039
5040 if (event->pending_wakeup) {
5041 event->pending_wakeup = 0;
5042 perf_event_wakeup(event);
5043 }
5044
5045 if (rctx >= 0)
5046 perf_swevent_put_recursion_context(rctx);
5047}
5048
5049/*
5050 * We assume that KVM is the only user of these callbacks for now.
5051 * Later on, we might change this to a list if another virtualization
5052 * implementation also needs them.
5053 */
5054struct perf_guest_info_callbacks *perf_guest_cbs;
5055
5056int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5057{
5058 perf_guest_cbs = cbs;
5059 return 0;
5060}
5061EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
5062
5063int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5064{
5065 perf_guest_cbs = NULL;
5066 return 0;
5067}
5068EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
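
/*
 * A registration from a hypervisor looks roughly like this (a sketch
 * mirroring what KVM does; the callback functions are hypothetical
 * names):
 *
 *	static struct perf_guest_info_callbacks guest_cbs = {
 *		.is_in_guest	= my_is_in_guest,
 *		.is_user_mode	= my_is_user_mode,
 *		.get_guest_ip	= my_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&guest_cbs);
 */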
5069
5070static void
5071perf_output_sample_regs(struct perf_output_handle *handle,
5072 struct pt_regs *regs, u64 mask)
5073{
5074 int bit;
5075
5076 for_each_set_bit(bit, (const unsigned long *) &mask,
5077 sizeof(mask) * BITS_PER_BYTE) {
5078 u64 val;
5079
5080 val = perf_reg_value(regs, bit);
5081 perf_output_put(handle, val);
5082 }
5083}
5084
5085static void perf_sample_regs_user(struct perf_regs *regs_user,
5086 struct pt_regs *regs,
5087 struct pt_regs *regs_user_copy)
5088{
5089 if (user_mode(regs)) {
5090 regs_user->abi = perf_reg_abi(current);
5091 regs_user->regs = regs;
5092 } else if (current->mm) {
5093 perf_get_regs_user(regs_user, regs, regs_user_copy);
5094 } else {
5095 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
5096 regs_user->regs = NULL;
5097 }
5098}
5099
5100static void perf_sample_regs_intr(struct perf_regs *regs_intr,
5101 struct pt_regs *regs)
5102{
5103 regs_intr->regs = regs;
5104 regs_intr->abi = perf_reg_abi(current);
5105}
5106
5107
5108/*
5109 * Get remaining task size from user stack pointer.
5110 *
5111 * It'd be better to look up the stack vma and limit this more
5112 * precisely, but there's no way to do that safely under interrupt,
5113 * so use TASK_SIZE as the limit.
5114 */
5115static u64 perf_ustack_task_size(struct pt_regs *regs)
5116{
5117 unsigned long addr = perf_user_stack_pointer(regs);
5118
5119 if (!addr || addr >= TASK_SIZE)
5120 return 0;
5121
5122 return TASK_SIZE - addr;
5123}
5124
5125static u16
5126perf_sample_ustack_size(u16 stack_size, u16 header_size,
5127 struct pt_regs *regs)
5128{
5129 u64 task_size;
5130
5131 /* No regs, no stack pointer, no dump. */
5132 if (!regs)
5133 return 0;
5134
5135 /*
5136	 * Check whether the requested stack size fits within:
5137	 * - TASK_SIZE
5138	 *   If it doesn't, we limit the size to TASK_SIZE.
5139	 *
5140	 * - the remaining sample size
5141	 *   If it doesn't, we shrink the stack size to fit
5142	 *   into the remaining sample size.
5143 */
5144
5145 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
5146 stack_size = min(stack_size, (u16) task_size);
5147
5148 /* Current header size plus static size and dynamic size. */
5149 header_size += 2 * sizeof(u64);
5150
5151	/* Does header_size + stack_size overflow the u16 sample size? */
5152 if ((u16) (header_size + stack_size) < header_size) {
5153 /*
5154 * If we overflow the maximum size for the sample,
5155		 * we shrink the stack dump size to fit.
5156 */
5157 stack_size = USHRT_MAX - header_size - sizeof(u64);
5158 stack_size = round_up(stack_size, sizeof(u64));
5159 }
5160
5161 return stack_size;
5162}
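
/*
 * A worked example of the clamp above (numbers are hypothetical): with
 * header->size 64 on entry, header_size becomes 80 here. A request of
 * stack_size == 65528 makes (u16)(80 + 65528) wrap to 72 < 80, so the
 * clamp yields 65535 - 80 - 8 = 65447, rounded up to 65448; the final
 * record is 64 + 16 + 65448 = 65528 bytes, still within the u16
 * header->size.
 */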
5163
5164static void
5165perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
5166 struct pt_regs *regs)
5167{
5168 /* Case of a kernel thread, nothing to dump */
5169 if (!regs) {
5170 u64 size = 0;
5171 perf_output_put(handle, size);
5172 } else {
5173 unsigned long sp;
5174 unsigned int rem;
5175 u64 dyn_size;
5176
5177 /*
5178 * We dump:
5179 * static size
5180 * - the size requested by user or the best one we can fit
5181 * in to the sample max size
5182 * data
5183 * - user stack dump data
5184 * dynamic size
5185 * - the actual dumped size
5186 */
5187
5188 /* Static size. */
5189 perf_output_put(handle, dump_size);
5190
5191 /* Data. */
5192 sp = perf_user_stack_pointer(regs);
5193 rem = __output_copy_user(handle, (void *) sp, dump_size);
5194 dyn_size = dump_size - rem;
5195
5196 perf_output_skip(handle, rem);
5197
5198 /* Dynamic size. */
5199 perf_output_put(handle, dyn_size);
5200 }
5201}
5202
5203static void __perf_event_header__init_id(struct perf_event_header *header,
5204 struct perf_sample_data *data,
5205 struct perf_event *event)
5206{
5207 u64 sample_type = event->attr.sample_type;
5208
5209 data->type = sample_type;
5210 header->size += event->id_header_size;
5211
5212 if (sample_type & PERF_SAMPLE_TID) {
5213 /* namespace issues */
5214 data->tid_entry.pid = perf_event_pid(event, current);
5215 data->tid_entry.tid = perf_event_tid(event, current);
5216 }
5217
5218 if (sample_type & PERF_SAMPLE_TIME)
5219 data->time = perf_event_clock(event);
5220
5221 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
5222 data->id = primary_event_id(event);
5223
5224 if (sample_type & PERF_SAMPLE_STREAM_ID)
5225 data->stream_id = event->id;
5226
5227 if (sample_type & PERF_SAMPLE_CPU) {
5228 data->cpu_entry.cpu = raw_smp_processor_id();
5229 data->cpu_entry.reserved = 0;
5230 }
5231}
5232
5233void perf_event_header__init_id(struct perf_event_header *header,
5234 struct perf_sample_data *data,
5235 struct perf_event *event)
5236{
5237 if (event->attr.sample_id_all)
5238 __perf_event_header__init_id(header, data, event);
5239}
5240
5241static void __perf_event__output_id_sample(struct perf_output_handle *handle,
5242 struct perf_sample_data *data)
5243{
5244 u64 sample_type = data->type;
5245
5246 if (sample_type & PERF_SAMPLE_TID)
5247 perf_output_put(handle, data->tid_entry);
5248
5249 if (sample_type & PERF_SAMPLE_TIME)
5250 perf_output_put(handle, data->time);
5251
5252 if (sample_type & PERF_SAMPLE_ID)
5253 perf_output_put(handle, data->id);
5254
5255 if (sample_type & PERF_SAMPLE_STREAM_ID)
5256 perf_output_put(handle, data->stream_id);
5257
5258 if (sample_type & PERF_SAMPLE_CPU)
5259 perf_output_put(handle, data->cpu_entry);
5260
5261 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5262 perf_output_put(handle, data->id);
5263}
5264
5265void perf_event__output_id_sample(struct perf_event *event,
5266 struct perf_output_handle *handle,
5267 struct perf_sample_data *sample)
5268{
5269 if (event->attr.sample_id_all)
5270 __perf_event__output_id_sample(handle, sample);
5271}
5272
5273static void perf_output_read_one(struct perf_output_handle *handle,
5274 struct perf_event *event,
5275 u64 enabled, u64 running)
5276{
5277 u64 read_format = event->attr.read_format;
5278 u64 values[4];
5279 int n = 0;
5280
5281 values[n++] = perf_event_count(event);
5282 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
5283 values[n++] = enabled +
5284 atomic64_read(&event->child_total_time_enabled);
5285 }
5286 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
5287 values[n++] = running +
5288 atomic64_read(&event->child_total_time_running);
5289 }
5290 if (read_format & PERF_FORMAT_ID)
5291 values[n++] = primary_event_id(event);
5292
5293 __output_copy(handle, values, n * sizeof(u64));
5294}
5295
5296/*
5297 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
5298 */
5299static void perf_output_read_group(struct perf_output_handle *handle,
5300 struct perf_event *event,
5301 u64 enabled, u64 running)
5302{
5303 struct perf_event *leader = event->group_leader, *sub;
5304 u64 read_format = event->attr.read_format;
5305 u64 values[5];
5306 int n = 0;
5307
5308 values[n++] = 1 + leader->nr_siblings;
5309
5310 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
5311 values[n++] = enabled;
5312
5313 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
5314 values[n++] = running;
5315
5316 if (leader != event)
5317 leader->pmu->read(leader);
5318
5319 values[n++] = perf_event_count(leader);
5320 if (read_format & PERF_FORMAT_ID)
5321 values[n++] = primary_event_id(leader);
5322
5323 __output_copy(handle, values, n * sizeof(u64));
5324
5325 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
5326 n = 0;
5327
5328 if ((sub != event) &&
5329 (sub->state == PERF_EVENT_STATE_ACTIVE))
5330 sub->pmu->read(sub);
5331
5332 values[n++] = perf_event_count(sub);
5333 if (read_format & PERF_FORMAT_ID)
5334 values[n++] = primary_event_id(sub);
5335
5336 __output_copy(handle, values, n * sizeof(u64));
5337 }
5338}
5339
5340#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
5341 PERF_FORMAT_TOTAL_TIME_RUNNING)
5342
5343static void perf_output_read(struct perf_output_handle *handle,
5344 struct perf_event *event)
5345{
5346 u64 enabled = 0, running = 0, now;
5347 u64 read_format = event->attr.read_format;
5348
5349 /*
5350 * compute total_time_enabled, total_time_running
5351 * based on snapshot values taken when the event
5352 * was last scheduled in.
5353 *
5354	 * we cannot simply call update_context_time()
5355	 * because of locking issues, as we are called in
5356 * NMI context
5357 */
5358 if (read_format & PERF_FORMAT_TOTAL_TIMES)
5359 calc_timer_values(event, &now, &enabled, &running);
5360
5361 if (event->attr.read_format & PERF_FORMAT_GROUP)
5362 perf_output_read_group(handle, event, enabled, running);
5363 else
5364 perf_output_read_one(handle, event, enabled, running);
5365}
5366
5367void perf_output_sample(struct perf_output_handle *handle,
5368 struct perf_event_header *header,
5369 struct perf_sample_data *data,
5370 struct perf_event *event)
5371{
5372 u64 sample_type = data->type;
5373
5374 perf_output_put(handle, *header);
5375
5376 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5377 perf_output_put(handle, data->id);
5378
5379 if (sample_type & PERF_SAMPLE_IP)
5380 perf_output_put(handle, data->ip);
5381
5382 if (sample_type & PERF_SAMPLE_TID)
5383 perf_output_put(handle, data->tid_entry);
5384
5385 if (sample_type & PERF_SAMPLE_TIME)
5386 perf_output_put(handle, data->time);
5387
5388 if (sample_type & PERF_SAMPLE_ADDR)
5389 perf_output_put(handle, data->addr);
5390
5391 if (sample_type & PERF_SAMPLE_ID)
5392 perf_output_put(handle, data->id);
5393
5394 if (sample_type & PERF_SAMPLE_STREAM_ID)
5395 perf_output_put(handle, data->stream_id);
5396
5397 if (sample_type & PERF_SAMPLE_CPU)
5398 perf_output_put(handle, data->cpu_entry);
5399
5400 if (sample_type & PERF_SAMPLE_PERIOD)
5401 perf_output_put(handle, data->period);
5402
5403 if (sample_type & PERF_SAMPLE_READ)
5404 perf_output_read(handle, event);
5405
5406 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5407 if (data->callchain) {
5408 int size = 1;
5409
5410			size += data->callchain->nr;
5412
5413 size *= sizeof(u64);
5414
5415 __output_copy(handle, data->callchain, size);
5416 } else {
5417 u64 nr = 0;
5418 perf_output_put(handle, nr);
5419 }
5420 }
5421
5422 if (sample_type & PERF_SAMPLE_RAW) {
5423 if (data->raw) {
5424 u32 raw_size = data->raw->size;
5425 u32 real_size = round_up(raw_size + sizeof(u32),
5426 sizeof(u64)) - sizeof(u32);
5427 u64 zero = 0;
5428
5429 perf_output_put(handle, real_size);
5430 __output_copy(handle, data->raw->data, raw_size);
5431 if (real_size - raw_size)
5432 __output_copy(handle, &zero, real_size - raw_size);
5433 } else {
5434 struct {
5435 u32 size;
5436 u32 data;
5437 } raw = {
5438 .size = sizeof(u32),
5439 .data = 0,
5440 };
5441 perf_output_put(handle, raw);
5442 }
5443 }
5444
5445 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5446 if (data->br_stack) {
5447 size_t size;
5448
5449 size = data->br_stack->nr
5450 * sizeof(struct perf_branch_entry);
5451
5452 perf_output_put(handle, data->br_stack->nr);
5453 perf_output_copy(handle, data->br_stack->entries, size);
5454 } else {
5455 /*
5456 * we always store at least the value of nr
5457 */
5458 u64 nr = 0;
5459 perf_output_put(handle, nr);
5460 }
5461 }
5462
5463 if (sample_type & PERF_SAMPLE_REGS_USER) {
5464 u64 abi = data->regs_user.abi;
5465
5466 /*
5467 * If there are no regs to dump, notice it through
5468		 * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5469 */
5470 perf_output_put(handle, abi);
5471
5472 if (abi) {
5473 u64 mask = event->attr.sample_regs_user;
5474 perf_output_sample_regs(handle,
5475 data->regs_user.regs,
5476 mask);
5477 }
5478 }
5479
5480 if (sample_type & PERF_SAMPLE_STACK_USER) {
5481 perf_output_sample_ustack(handle,
5482 data->stack_user_size,
5483 data->regs_user.regs);
5484 }
5485
5486 if (sample_type & PERF_SAMPLE_WEIGHT)
5487 perf_output_put(handle, data->weight);
5488
5489 if (sample_type & PERF_SAMPLE_DATA_SRC)
5490 perf_output_put(handle, data->data_src.val);
5491
5492 if (sample_type & PERF_SAMPLE_TRANSACTION)
5493 perf_output_put(handle, data->txn);
5494
5495 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5496 u64 abi = data->regs_intr.abi;
5497 /*
5498 * If there are no regs to dump, notice it through
5499		 * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5500 */
5501 perf_output_put(handle, abi);
5502
5503 if (abi) {
5504 u64 mask = event->attr.sample_regs_intr;
5505
5506 perf_output_sample_regs(handle,
5507 data->regs_intr.regs,
5508 mask);
5509 }
5510 }
5511
5512 if (!event->attr.watermark) {
5513 int wakeup_events = event->attr.wakeup_events;
5514
5515 if (wakeup_events) {
5516 struct ring_buffer *rb = handle->rb;
5517 int events = local_inc_return(&rb->events);
5518
5519 if (events >= wakeup_events) {
5520 local_sub(wakeup_events, &rb->events);
5521 local_inc(&rb->wakeup);
5522 }
5523 }
5524 }
5525}
5526
5527void perf_prepare_sample(struct perf_event_header *header,
5528 struct perf_sample_data *data,
5529 struct perf_event *event,
5530 struct pt_regs *regs)
5531{
5532 u64 sample_type = event->attr.sample_type;
5533
5534 header->type = PERF_RECORD_SAMPLE;
5535 header->size = sizeof(*header) + event->header_size;
5536
5537 header->misc = 0;
5538 header->misc |= perf_misc_flags(regs);
5539
5540 __perf_event_header__init_id(header, data, event);
5541
5542 if (sample_type & PERF_SAMPLE_IP)
5543 data->ip = perf_instruction_pointer(regs);
5544
5545 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5546 int size = 1;
5547
5548 data->callchain = perf_callchain(event, regs);
5549
5550 if (data->callchain)
5551 size += data->callchain->nr;
5552
5553 header->size += size * sizeof(u64);
5554 }
5555
5556 if (sample_type & PERF_SAMPLE_RAW) {
5557 int size = sizeof(u32);
5558
5559 if (data->raw)
5560 size += data->raw->size;
5561 else
5562 size += sizeof(u32);
5563
5564 header->size += round_up(size, sizeof(u64));
5565 }
5566
5567 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5568 int size = sizeof(u64); /* nr */
5569 if (data->br_stack) {
5570 size += data->br_stack->nr
5571 * sizeof(struct perf_branch_entry);
5572 }
5573 header->size += size;
5574 }
5575
5576 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
5577 perf_sample_regs_user(&data->regs_user, regs,
5578 &data->regs_user_copy);
5579
5580 if (sample_type & PERF_SAMPLE_REGS_USER) {
5581 /* regs dump ABI info */
5582 int size = sizeof(u64);
5583
5584 if (data->regs_user.regs) {
5585 u64 mask = event->attr.sample_regs_user;
5586 size += hweight64(mask) * sizeof(u64);
5587 }
5588
5589 header->size += size;
5590 }
5591
5592 if (sample_type & PERF_SAMPLE_STACK_USER) {
5593 /*
5594		 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
5595		 * processed last, or an additional check must be added when a
5596		 * new sample type is introduced, because the stack dump could
5597		 * eat up the rest of the sample size.
5598 */
5599 u16 stack_size = event->attr.sample_stack_user;
5600 u16 size = sizeof(u64);
5601
5602 stack_size = perf_sample_ustack_size(stack_size, header->size,
5603 data->regs_user.regs);
5604
5605 /*
5606 * If there is something to dump, add space for the dump
5607 * itself and for the field that tells the dynamic size,
5608		 * which says how many bytes were actually dumped.
5609 */
5610 if (stack_size)
5611 size += sizeof(u64) + stack_size;
5612
5613 data->stack_user_size = stack_size;
5614 header->size += size;
5615 }
5616
5617 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5618 /* regs dump ABI info */
5619 int size = sizeof(u64);
5620
5621 perf_sample_regs_intr(&data->regs_intr, regs);
5622
5623 if (data->regs_intr.regs) {
5624 u64 mask = event->attr.sample_regs_intr;
5625
5626 size += hweight64(mask) * sizeof(u64);
5627 }
5628
5629 header->size += size;
5630 }
5631}
5632
5633void perf_event_output(struct perf_event *event,
5634 struct perf_sample_data *data,
5635 struct pt_regs *regs)
5636{
5637 struct perf_output_handle handle;
5638 struct perf_event_header header;
5639
5640 /* protect the callchain buffers */
5641 rcu_read_lock();
5642
5643 perf_prepare_sample(&header, data, event, regs);
5644
5645 if (perf_output_begin(&handle, event, header.size))
5646 goto exit;
5647
5648 perf_output_sample(&handle, &header, data, event);
5649
5650 perf_output_end(&handle);
5651
5652exit:
5653 rcu_read_unlock();
5654}
5655
5656/*
5657 * read event_id
5658 */
5659
5660struct perf_read_event {
5661 struct perf_event_header header;
5662
5663 u32 pid;
5664 u32 tid;
5665};
5666
5667static void
5668perf_event_read_event(struct perf_event *event,
5669 struct task_struct *task)
5670{
5671 struct perf_output_handle handle;
5672 struct perf_sample_data sample;
5673 struct perf_read_event read_event = {
5674 .header = {
5675 .type = PERF_RECORD_READ,
5676 .misc = 0,
5677 .size = sizeof(read_event) + event->read_size,
5678 },
5679 .pid = perf_event_pid(event, task),
5680 .tid = perf_event_tid(event, task),
5681 };
5682 int ret;
5683
5684 perf_event_header__init_id(&read_event.header, &sample, event);
5685 ret = perf_output_begin(&handle, event, read_event.header.size);
5686 if (ret)
5687 return;
5688
5689 perf_output_put(&handle, read_event);
5690 perf_output_read(&handle, event);
5691 perf_event__output_id_sample(event, &handle, &sample);
5692
5693 perf_output_end(&handle);
5694}
5695
5696typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
5697
5698static void
5699perf_event_aux_ctx(struct perf_event_context *ctx,
5700 perf_event_aux_output_cb output,
5701 void *data)
5702{
5703 struct perf_event *event;
5704
5705 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5706 if (event->state < PERF_EVENT_STATE_INACTIVE)
5707 continue;
5708 if (!event_filter_match(event))
5709 continue;
5710 output(event, data);
5711 }
5712}
5713
5714static void
5715perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
5716 struct perf_event_context *task_ctx)
5717{
5718 rcu_read_lock();
5719 preempt_disable();
5720 perf_event_aux_ctx(task_ctx, output, data);
5721 preempt_enable();
5722 rcu_read_unlock();
5723}
5724
5725static void
5726perf_event_aux(perf_event_aux_output_cb output, void *data,
5727 struct perf_event_context *task_ctx)
5728{
5729 struct perf_cpu_context *cpuctx;
5730 struct perf_event_context *ctx;
5731 struct pmu *pmu;
5732 int ctxn;
5733
5734 /*
5735 * If we have task_ctx != NULL we only notify
5736 * the task context itself. The task_ctx is set
5737 * only for EXIT events before releasing task
5738 * context.
5739 */
5740 if (task_ctx) {
5741 perf_event_aux_task_ctx(output, data, task_ctx);
5742 return;
5743 }
5744
5745 rcu_read_lock();
5746 list_for_each_entry_rcu(pmu, &pmus, entry) {
5747 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
5748 if (cpuctx->unique_pmu != pmu)
5749 goto next;
5750 perf_event_aux_ctx(&cpuctx->ctx, output, data);
5751 ctxn = pmu->task_ctx_nr;
5752 if (ctxn < 0)
5753 goto next;
5754 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
5755 if (ctx)
5756 perf_event_aux_ctx(ctx, output, data);
5757next:
5758 put_cpu_ptr(pmu->pmu_cpu_context);
5759 }
5760 rcu_read_unlock();
5761}
5762
5763/*
5764 * task tracking -- fork/exit
5765 *
5766 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
5767 */
5768
5769struct perf_task_event {
5770 struct task_struct *task;
5771 struct perf_event_context *task_ctx;
5772
5773 struct {
5774 struct perf_event_header header;
5775
5776 u32 pid;
5777 u32 ppid;
5778 u32 tid;
5779 u32 ptid;
5780 u64 time;
5781 } event_id;
5782};
5783
5784static int perf_event_task_match(struct perf_event *event)
5785{
5786 return event->attr.comm || event->attr.mmap ||
5787 event->attr.mmap2 || event->attr.mmap_data ||
5788 event->attr.task;
5789}
5790
5791static void perf_event_task_output(struct perf_event *event,
5792 void *data)
5793{
5794 struct perf_task_event *task_event = data;
5795 struct perf_output_handle handle;
5796 struct perf_sample_data sample;
5797 struct task_struct *task = task_event->task;
5798 int ret, size = task_event->event_id.header.size;
5799
5800 if (!perf_event_task_match(event))
5801 return;
5802
5803 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
5804
5805 ret = perf_output_begin(&handle, event,
5806 task_event->event_id.header.size);
5807 if (ret)
5808 goto out;
5809
5810 task_event->event_id.pid = perf_event_pid(event, task);
5811 task_event->event_id.ppid = perf_event_pid(event, current);
5812
5813 task_event->event_id.tid = perf_event_tid(event, task);
5814 task_event->event_id.ptid = perf_event_tid(event, current);
5815
5816 task_event->event_id.time = perf_event_clock(event);
5817
5818 perf_output_put(&handle, task_event->event_id);
5819
5820 perf_event__output_id_sample(event, &handle, &sample);
5821
5822 perf_output_end(&handle);
5823out:
5824 task_event->event_id.header.size = size;
5825}
5826
5827static void perf_event_task(struct task_struct *task,
5828 struct perf_event_context *task_ctx,
5829 int new)
5830{
5831 struct perf_task_event task_event;
5832
5833 if (!atomic_read(&nr_comm_events) &&
5834 !atomic_read(&nr_mmap_events) &&
5835 !atomic_read(&nr_task_events))
5836 return;
5837
5838 task_event = (struct perf_task_event){
5839 .task = task,
5840 .task_ctx = task_ctx,
5841 .event_id = {
5842 .header = {
5843 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
5844 .misc = 0,
5845 .size = sizeof(task_event.event_id),
5846 },
5847 /* .pid */
5848 /* .ppid */
5849 /* .tid */
5850 /* .ptid */
5851 /* .time */
5852 },
5853 };
5854
5855 perf_event_aux(perf_event_task_output,
5856 &task_event,
5857 task_ctx);
5858}
5859
5860void perf_event_fork(struct task_struct *task)
5861{
5862 perf_event_task(task, NULL, 1);
5863}
5864
5865/*
5866 * comm tracking
5867 */
5868
5869struct perf_comm_event {
5870 struct task_struct *task;
5871 char *comm;
5872 int comm_size;
5873
5874 struct {
5875 struct perf_event_header header;
5876
5877 u32 pid;
5878 u32 tid;
5879 } event_id;
5880};
5881
5882static int perf_event_comm_match(struct perf_event *event)
5883{
5884 return event->attr.comm;
5885}
5886
5887static void perf_event_comm_output(struct perf_event *event,
5888 void *data)
5889{
5890 struct perf_comm_event *comm_event = data;
5891 struct perf_output_handle handle;
5892 struct perf_sample_data sample;
5893 int size = comm_event->event_id.header.size;
5894 int ret;
5895
5896 if (!perf_event_comm_match(event))
5897 return;
5898
5899 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
5900 ret = perf_output_begin(&handle, event,
5901 comm_event->event_id.header.size);
5902
5903 if (ret)
5904 goto out;
5905
5906 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
5907 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
5908
5909 perf_output_put(&handle, comm_event->event_id);
5910 __output_copy(&handle, comm_event->comm,
5911 comm_event->comm_size);
5912
5913 perf_event__output_id_sample(event, &handle, &sample);
5914
5915 perf_output_end(&handle);
5916out:
5917 comm_event->event_id.header.size = size;
5918}
5919
5920static void perf_event_comm_event(struct perf_comm_event *comm_event)
5921{
5922 char comm[TASK_COMM_LEN];
5923 unsigned int size;
5924
5925 memset(comm, 0, sizeof(comm));
5926 strlcpy(comm, comm_event->task->comm, sizeof(comm));
5927 size = ALIGN(strlen(comm)+1, sizeof(u64));
5928
5929 comm_event->comm = comm;
5930 comm_event->comm_size = size;
5931
5932 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
5933
5934 perf_event_aux(perf_event_comm_output,
5935 comm_event,
5936 NULL);
5937}
5938
5939void perf_event_comm(struct task_struct *task, bool exec)
5940{
5941 struct perf_comm_event comm_event;
5942
5943 if (!atomic_read(&nr_comm_events))
5944 return;
5945
5946 comm_event = (struct perf_comm_event){
5947 .task = task,
5948 /* .comm */
5949 /* .comm_size */
5950 .event_id = {
5951 .header = {
5952 .type = PERF_RECORD_COMM,
5953 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
5954 /* .size */
5955 },
5956 /* .pid */
5957 /* .tid */
5958 },
5959 };
5960
5961 perf_event_comm_event(&comm_event);
5962}
5963
5964/*
5965 * mmap tracking
5966 */
5967
5968struct perf_mmap_event {
5969 struct vm_area_struct *vma;
5970
5971 const char *file_name;
5972 int file_size;
5973 int maj, min;
5974 u64 ino;
5975 u64 ino_generation;
5976 u32 prot, flags;
5977
5978 struct {
5979 struct perf_event_header header;
5980
5981 u32 pid;
5982 u32 tid;
5983 u64 start;
5984 u64 len;
5985 u64 pgoff;
5986 } event_id;
5987};
5988
5989static int perf_event_mmap_match(struct perf_event *event,
5990 void *data)
5991{
5992 struct perf_mmap_event *mmap_event = data;
5993 struct vm_area_struct *vma = mmap_event->vma;
5994 int executable = vma->vm_flags & VM_EXEC;
5995
5996 return (!executable && event->attr.mmap_data) ||
5997 (executable && (event->attr.mmap || event->attr.mmap2));
5998}
5999
6000static void perf_event_mmap_output(struct perf_event *event,
6001 void *data)
6002{
6003 struct perf_mmap_event *mmap_event = data;
6004 struct perf_output_handle handle;
6005 struct perf_sample_data sample;
6006 int size = mmap_event->event_id.header.size;
6007 int ret;
6008
6009 if (!perf_event_mmap_match(event, data))
6010 return;
6011
6012 if (event->attr.mmap2) {
6013 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
6014 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
6015 mmap_event->event_id.header.size += sizeof(mmap_event->min);
6016 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
6017 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
6018 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
6019 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
6020 }
6021
6022 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
6023 ret = perf_output_begin(&handle, event,
6024 mmap_event->event_id.header.size);
6025 if (ret)
6026 goto out;
6027
6028 mmap_event->event_id.pid = perf_event_pid(event, current);
6029 mmap_event->event_id.tid = perf_event_tid(event, current);
6030
6031 perf_output_put(&handle, mmap_event->event_id);
6032
6033 if (event->attr.mmap2) {
6034 perf_output_put(&handle, mmap_event->maj);
6035 perf_output_put(&handle, mmap_event->min);
6036 perf_output_put(&handle, mmap_event->ino);
6037 perf_output_put(&handle, mmap_event->ino_generation);
6038 perf_output_put(&handle, mmap_event->prot);
6039 perf_output_put(&handle, mmap_event->flags);
6040 }
6041
6042 __output_copy(&handle, mmap_event->file_name,
6043 mmap_event->file_size);
6044
6045 perf_event__output_id_sample(event, &handle, &sample);
6046
6047 perf_output_end(&handle);
6048out:
6049 mmap_event->event_id.header.size = size;
6050}
6051
6052static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
6053{
6054 struct vm_area_struct *vma = mmap_event->vma;
6055 struct file *file = vma->vm_file;
6056 int maj = 0, min = 0;
6057 u64 ino = 0, gen = 0;
6058 u32 prot = 0, flags = 0;
6059 unsigned int size;
6060 char tmp[16];
6061 char *buf = NULL;
6062 char *name;
6063
6064 if (file) {
6065 struct inode *inode;
6066 dev_t dev;
6067
6068 buf = kmalloc(PATH_MAX, GFP_KERNEL);
6069 if (!buf) {
6070 name = "//enomem";
6071 goto cpy_name;
6072 }
6073 /*
6074		 * d_path() works from the end of the buffer backwards, so we
6075 * need to add enough zero bytes after the string to handle
6076 * the 64bit alignment we do later.
6077 */
6078 name = file_path(file, buf, PATH_MAX - sizeof(u64));
6079 if (IS_ERR(name)) {
6080 name = "//toolong";
6081 goto cpy_name;
6082 }
6083 inode = file_inode(vma->vm_file);
6084 dev = inode->i_sb->s_dev;
6085 ino = inode->i_ino;
6086 gen = inode->i_generation;
6087 maj = MAJOR(dev);
6088 min = MINOR(dev);
6089
6090 if (vma->vm_flags & VM_READ)
6091 prot |= PROT_READ;
6092 if (vma->vm_flags & VM_WRITE)
6093 prot |= PROT_WRITE;
6094 if (vma->vm_flags & VM_EXEC)
6095 prot |= PROT_EXEC;
6096
6097 if (vma->vm_flags & VM_MAYSHARE)
6098 flags = MAP_SHARED;
6099 else
6100 flags = MAP_PRIVATE;
6101
6102 if (vma->vm_flags & VM_DENYWRITE)
6103 flags |= MAP_DENYWRITE;
6104 if (vma->vm_flags & VM_MAYEXEC)
6105 flags |= MAP_EXECUTABLE;
6106 if (vma->vm_flags & VM_LOCKED)
6107 flags |= MAP_LOCKED;
6108 if (vma->vm_flags & VM_HUGETLB)
6109 flags |= MAP_HUGETLB;
6110
6111 goto got_name;
6112 } else {
6113 if (vma->vm_ops && vma->vm_ops->name) {
6114 name = (char *) vma->vm_ops->name(vma);
6115 if (name)
6116 goto cpy_name;
6117 }
6118
6119 name = (char *)arch_vma_name(vma);
6120 if (name)
6121 goto cpy_name;
6122
6123 if (vma->vm_start <= vma->vm_mm->start_brk &&
6124 vma->vm_end >= vma->vm_mm->brk) {
6125 name = "[heap]";
6126 goto cpy_name;
6127 }
6128 if (vma->vm_start <= vma->vm_mm->start_stack &&
6129 vma->vm_end >= vma->vm_mm->start_stack) {
6130 name = "[stack]";
6131 goto cpy_name;
6132 }
6133
6134 name = "//anon";
6135 goto cpy_name;
6136 }
6137
6138cpy_name:
6139 strlcpy(tmp, name, sizeof(tmp));
6140 name = tmp;
6141got_name:
6142 /*
6143	 * Since our buffer works in 8-byte units we need to align our string
6144 * size to a multiple of 8. However, we must guarantee the tail end is
6145 * zero'd out to avoid leaking random bits to userspace.
6146 */
6147 size = strlen(name)+1;
6148 while (!IS_ALIGNED(size, sizeof(u64)))
6149 name[size++] = '\0';
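	/*
	 * E.g. "/bin/ls" has strlen()+1 == 8 and is already aligned, while
	 * "/lib/libc.so" has 13 and gets three NUL bytes of padding to
	 * reach 16; the explicit padding is what prevents leaking stale
	 * buffer contents to userspace.
	 */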
6150
6151 mmap_event->file_name = name;
6152 mmap_event->file_size = size;
6153 mmap_event->maj = maj;
6154 mmap_event->min = min;
6155 mmap_event->ino = ino;
6156 mmap_event->ino_generation = gen;
6157 mmap_event->prot = prot;
6158 mmap_event->flags = flags;
6159
6160 if (!(vma->vm_flags & VM_EXEC))
6161 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
6162
6163 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
6164
6165 perf_event_aux(perf_event_mmap_output,
6166 mmap_event,
6167 NULL);
6168
6169 kfree(buf);
6170}
6171
6172void perf_event_mmap(struct vm_area_struct *vma)
6173{
6174 struct perf_mmap_event mmap_event;
6175
6176 if (!atomic_read(&nr_mmap_events))
6177 return;
6178
6179 mmap_event = (struct perf_mmap_event){
6180 .vma = vma,
6181 /* .file_name */
6182 /* .file_size */
6183 .event_id = {
6184 .header = {
6185 .type = PERF_RECORD_MMAP,
6186 .misc = PERF_RECORD_MISC_USER,
6187 /* .size */
6188 },
6189 /* .pid */
6190 /* .tid */
6191 .start = vma->vm_start,
6192 .len = vma->vm_end - vma->vm_start,
6193 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
6194 },
6195 /* .maj (attr_mmap2 only) */
6196 /* .min (attr_mmap2 only) */
6197 /* .ino (attr_mmap2 only) */
6198 /* .ino_generation (attr_mmap2 only) */
6199 /* .prot (attr_mmap2 only) */
6200 /* .flags (attr_mmap2 only) */
6201 };
6202
6203 perf_event_mmap_event(&mmap_event);
6204}
6205
6206void perf_event_aux_event(struct perf_event *event, unsigned long head,
6207 unsigned long size, u64 flags)
6208{
6209 struct perf_output_handle handle;
6210 struct perf_sample_data sample;
6211 struct perf_aux_event {
6212 struct perf_event_header header;
6213 u64 offset;
6214 u64 size;
6215 u64 flags;
6216 } rec = {
6217 .header = {
6218 .type = PERF_RECORD_AUX,
6219 .misc = 0,
6220 .size = sizeof(rec),
6221 },
6222 .offset = head,
6223 .size = size,
6224 .flags = flags,
6225 };
6226 int ret;
6227
6228 perf_event_header__init_id(&rec.header, &sample, event);
6229 ret = perf_output_begin(&handle, event, rec.header.size);
6230
6231 if (ret)
6232 return;
6233
6234 perf_output_put(&handle, rec);
6235 perf_event__output_id_sample(event, &handle, &sample);
6236
6237 perf_output_end(&handle);
6238}
6239
6240/*
6241 * Lost/dropped samples logging
6242 */
6243void perf_log_lost_samples(struct perf_event *event, u64 lost)
6244{
6245 struct perf_output_handle handle;
6246 struct perf_sample_data sample;
6247 int ret;
6248
6249 struct {
6250 struct perf_event_header header;
6251 u64 lost;
6252 } lost_samples_event = {
6253 .header = {
6254 .type = PERF_RECORD_LOST_SAMPLES,
6255 .misc = 0,
6256 .size = sizeof(lost_samples_event),
6257 },
6258 .lost = lost,
6259 };
6260
6261 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
6262
6263 ret = perf_output_begin(&handle, event,
6264 lost_samples_event.header.size);
6265 if (ret)
6266 return;
6267
6268 perf_output_put(&handle, lost_samples_event);
6269 perf_event__output_id_sample(event, &handle, &sample);
6270 perf_output_end(&handle);
6271}
6272
6273/*
6274 * context_switch tracking
6275 */
6276
6277struct perf_switch_event {
6278 struct task_struct *task;
6279 struct task_struct *next_prev;
6280
6281 struct {
6282 struct perf_event_header header;
6283 u32 next_prev_pid;
6284 u32 next_prev_tid;
6285 } event_id;
6286};
6287
6288static int perf_event_switch_match(struct perf_event *event)
6289{
6290 return event->attr.context_switch;
6291}
6292
6293static void perf_event_switch_output(struct perf_event *event, void *data)
6294{
6295 struct perf_switch_event *se = data;
6296 struct perf_output_handle handle;
6297 struct perf_sample_data sample;
6298 int ret;
6299
6300 if (!perf_event_switch_match(event))
6301 return;
6302
6303 /* Only CPU-wide events are allowed to see next/prev pid/tid */
6304 if (event->ctx->task) {
6305 se->event_id.header.type = PERF_RECORD_SWITCH;
6306 se->event_id.header.size = sizeof(se->event_id.header);
6307 } else {
6308 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
6309 se->event_id.header.size = sizeof(se->event_id);
6310 se->event_id.next_prev_pid =
6311 perf_event_pid(event, se->next_prev);
6312 se->event_id.next_prev_tid =
6313 perf_event_tid(event, se->next_prev);
6314 }
6315
6316 perf_event_header__init_id(&se->event_id.header, &sample, event);
6317
6318 ret = perf_output_begin(&handle, event, se->event_id.header.size);
6319 if (ret)
6320 return;
6321
6322 if (event->ctx->task)
6323 perf_output_put(&handle, se->event_id.header);
6324 else
6325 perf_output_put(&handle, se->event_id);
6326
6327 perf_event__output_id_sample(event, &handle, &sample);
6328
6329 perf_output_end(&handle);
6330}
6331
6332static void perf_event_switch(struct task_struct *task,
6333 struct task_struct *next_prev, bool sched_in)
6334{
6335 struct perf_switch_event switch_event;
6336
6337 /* N.B. caller checks nr_switch_events != 0 */
6338
6339 switch_event = (struct perf_switch_event){
6340 .task = task,
6341 .next_prev = next_prev,
6342 .event_id = {
6343 .header = {
6344 /* .type */
6345 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
6346 /* .size */
6347 },
6348 /* .next_prev_pid */
6349 /* .next_prev_tid */
6350 },
6351 };
6352
6353 perf_event_aux(perf_event_switch_output,
6354 &switch_event,
6355 NULL);
6356}
6357
6358/*
6359 * IRQ throttle logging
6360 */
6361
6362static void perf_log_throttle(struct perf_event *event, int enable)
6363{
6364 struct perf_output_handle handle;
6365 struct perf_sample_data sample;
6366 int ret;
6367
6368 struct {
6369 struct perf_event_header header;
6370 u64 time;
6371 u64 id;
6372 u64 stream_id;
6373 } throttle_event = {
6374 .header = {
6375 .type = PERF_RECORD_THROTTLE,
6376 .misc = 0,
6377 .size = sizeof(throttle_event),
6378 },
6379 .time = perf_event_clock(event),
6380 .id = primary_event_id(event),
6381 .stream_id = event->id,
6382 };
6383
6384 if (enable)
6385 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
6386
6387 perf_event_header__init_id(&throttle_event.header, &sample, event);
6388
6389 ret = perf_output_begin(&handle, event,
6390 throttle_event.header.size);
6391 if (ret)
6392 return;
6393
6394 perf_output_put(&handle, throttle_event);
6395 perf_event__output_id_sample(event, &handle, &sample);
6396 perf_output_end(&handle);
6397}
6398
6399static void perf_log_itrace_start(struct perf_event *event)
6400{
6401 struct perf_output_handle handle;
6402 struct perf_sample_data sample;
6403 struct perf_aux_event {
6404 struct perf_event_header header;
6405 u32 pid;
6406 u32 tid;
6407 } rec;
6408 int ret;
6409
6410 if (event->parent)
6411 event = event->parent;
6412
6413 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
6414 event->hw.itrace_started)
6415 return;
6416
6417 rec.header.type = PERF_RECORD_ITRACE_START;
6418 rec.header.misc = 0;
6419 rec.header.size = sizeof(rec);
6420 rec.pid = perf_event_pid(event, current);
6421 rec.tid = perf_event_tid(event, current);
6422
6423 perf_event_header__init_id(&rec.header, &sample, event);
6424 ret = perf_output_begin(&handle, event, rec.header.size);
6425
6426 if (ret)
6427 return;
6428
6429 perf_output_put(&handle, rec);
6430 perf_event__output_id_sample(event, &handle, &sample);
6431
6432 perf_output_end(&handle);
6433}
6434
6435/*
6436 * Generic event overflow handling, sampling.
6437 */
6438
6439static int __perf_event_overflow(struct perf_event *event,
6440 int throttle, struct perf_sample_data *data,
6441 struct pt_regs *regs)
6442{
6443 int events = atomic_read(&event->event_limit);
6444 struct hw_perf_event *hwc = &event->hw;
6445 u64 seq;
6446 int ret = 0;
6447
6448 /*
6449 * Non-sampling counters might still use the PMI to fold short
6450 * hardware counters, ignore those.
6451 */
6452 if (unlikely(!is_sampling_event(event)))
6453 return 0;
6454
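	/*
	 * perf_throttled_seq advances once per timer tick; a new sequence
	 * number starts a fresh interrupt count for this tick, and crossing
	 * max_samples_per_tick throttles the event.
	 */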
6455 seq = __this_cpu_read(perf_throttled_seq);
6456 if (seq != hwc->interrupts_seq) {
6457 hwc->interrupts_seq = seq;
6458 hwc->interrupts = 1;
6459 } else {
6460 hwc->interrupts++;
6461 if (unlikely(throttle
6462 && hwc->interrupts >= max_samples_per_tick)) {
6463 __this_cpu_inc(perf_throttled_count);
6464 tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
6465 hwc->interrupts = MAX_INTERRUPTS;
6466 perf_log_throttle(event, 0);
6467 ret = 1;
6468 }
6469 }
6470
6471 if (event->attr.freq) {
6472 u64 now = perf_clock();
6473 s64 delta = now - hwc->freq_time_stamp;
6474
6475 hwc->freq_time_stamp = now;
6476
6477 if (delta > 0 && delta < 2*TICK_NSEC)
6478 perf_adjust_period(event, delta, hwc->last_period, true);
6479 }
6480
6481 /*
6482 * XXX event_limit might not quite work as expected on inherited
6483 * events
6484 */
6485
6486 event->pending_kill = POLL_IN;
6487 if (events && atomic_dec_and_test(&event->event_limit)) {
6488 ret = 1;
6489 event->pending_kill = POLL_HUP;
6490 event->pending_disable = 1;
6491 irq_work_queue(&event->pending);
6492 }
6493
6494 if (event->overflow_handler)
6495 event->overflow_handler(event, data, regs);
6496 else
6497 perf_event_output(event, data, regs);
6498
6499 if (*perf_event_fasync(event) && event->pending_kill) {
6500 event->pending_wakeup = 1;
6501 irq_work_queue(&event->pending);
6502 }
6503
6504 return ret;
6505}
6506
6507int perf_event_overflow(struct perf_event *event,
6508 struct perf_sample_data *data,
6509 struct pt_regs *regs)
6510{
6511 return __perf_event_overflow(event, 1, data, regs);
6512}
6513
6514/*
6515 * Generic software event infrastructure
6516 */
6517
6518struct swevent_htable {
6519 struct swevent_hlist *swevent_hlist;
6520 struct mutex hlist_mutex;
6521 int hlist_refcount;
6522
	/* Recursion avoidance in each context */
6524 int recursion[PERF_NR_CONTEXTS];
6525};
6526
6527static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
6528
/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period counter
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */
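/*
 * For example, with sample_period == 4, an increment that overshoots
 * period_left to 3 yields nr = (4 + 3) / 4 == 1 pending overflow and
 * rewinds period_left to 3 - 4 == -1.
 */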
6535
6536u64 perf_swevent_set_period(struct perf_event *event)
6537{
6538 struct hw_perf_event *hwc = &event->hw;
6539 u64 period = hwc->last_period;
6540 u64 nr, offset;
6541 s64 old, val;
6542
6543 hwc->last_period = hwc->sample_period;
6544
6545again:
6546 old = val = local64_read(&hwc->period_left);
6547 if (val < 0)
6548 return 0;
6549
6550 nr = div64_u64(period + val, period);
6551 offset = nr * period;
6552 val -= offset;
6553 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
6554 goto again;
6555
6556 return nr;
6557}
6558
6559static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
6560 struct perf_sample_data *data,
6561 struct pt_regs *regs)
6562{
6563 struct hw_perf_event *hwc = &event->hw;
6564 int throttle = 0;
6565
6566 if (!overflow)
6567 overflow = perf_swevent_set_period(event);
6568
6569 if (hwc->interrupts == MAX_INTERRUPTS)
6570 return;
6571
6572 for (; overflow; overflow--) {
6573 if (__perf_event_overflow(event, throttle,
6574 data, regs)) {
6575 /*
6576 * We inhibit the overflow from happening when
6577 * hwc->interrupts == MAX_INTERRUPTS.
6578 */
6579 break;
6580 }
6581 throttle = 1;
6582 }
6583}
6584
6585static void perf_swevent_event(struct perf_event *event, u64 nr,
6586 struct perf_sample_data *data,
6587 struct pt_regs *regs)
6588{
6589 struct hw_perf_event *hwc = &event->hw;
6590
6591 local64_add(nr, &event->count);
6592
6593 if (!regs)
6594 return;
6595
6596 if (!is_sampling_event(event))
6597 return;
6598
6599 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
6600 data->period = nr;
6601 return perf_swevent_overflow(event, 1, data, regs);
6602 } else
6603 data->period = event->hw.last_period;
6604
6605 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
6606 return perf_swevent_overflow(event, 1, data, regs);
6607
6608 if (local64_add_negative(nr, &hwc->period_left))
6609 return;
6610
6611 perf_swevent_overflow(event, 0, data, regs);
6612}
6613
6614static int perf_exclude_event(struct perf_event *event,
6615 struct pt_regs *regs)
6616{
6617 if (event->hw.state & PERF_HES_STOPPED)
6618 return 1;
6619
6620 if (regs) {
6621 if (event->attr.exclude_user && user_mode(regs))
6622 return 1;
6623
6624 if (event->attr.exclude_kernel && !user_mode(regs))
6625 return 1;
6626 }
6627
6628 return 0;
6629}
6630
6631static int perf_swevent_match(struct perf_event *event,
6632 enum perf_type_id type,
6633 u32 event_id,
6634 struct perf_sample_data *data,
6635 struct pt_regs *regs)
6636{
6637 if (event->attr.type != type)
6638 return 0;
6639
6640 if (event->attr.config != event_id)
6641 return 0;
6642
6643 if (perf_exclude_event(event, regs))
6644 return 0;
6645
6646 return 1;
6647}
6648
6649static inline u64 swevent_hash(u64 type, u32 event_id)
6650{
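	/* pack: event type in the high 32 bits, event id in the low 32 */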
6651 u64 val = event_id | (type << 32);
6652
6653 return hash_64(val, SWEVENT_HLIST_BITS);
6654}
6655
6656static inline struct hlist_head *
6657__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
6658{
6659 u64 hash = swevent_hash(type, event_id);
6660
6661 return &hlist->heads[hash];
6662}
6663
/* For the read side: look up the hlist when events trigger */
6665static inline struct hlist_head *
6666find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
6667{
6668 struct swevent_hlist *hlist;
6669
6670 hlist = rcu_dereference(swhash->swevent_hlist);
6671 if (!hlist)
6672 return NULL;
6673
6674 return __find_swevent_head(hlist, type, event_id);
6675}
6676
6677/* For the event head insertion and removal in the hlist */
6678static inline struct hlist_head *
6679find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
6680{
6681 struct swevent_hlist *hlist;
6682 u32 event_id = event->attr.config;
6683 u64 type = event->attr.type;
6684
	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release, which makes the protected version suitable here.
	 * The context lock guarantees that.
	 */
6690 hlist = rcu_dereference_protected(swhash->swevent_hlist,
6691 lockdep_is_held(&event->ctx->lock));
6692 if (!hlist)
6693 return NULL;
6694
6695 return __find_swevent_head(hlist, type, event_id);
6696}
6697
6698static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
6699 u64 nr,
6700 struct perf_sample_data *data,
6701 struct pt_regs *regs)
6702{
6703 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
6704 struct perf_event *event;
6705 struct hlist_head *head;
6706
6707 rcu_read_lock();
6708 head = find_swevent_head_rcu(swhash, type, event_id);
6709 if (!head)
6710 goto end;
6711
6712 hlist_for_each_entry_rcu(event, head, hlist_entry) {
6713 if (perf_swevent_match(event, type, event_id, data, regs))
6714 perf_swevent_event(event, nr, data, regs);
6715 }
6716end:
6717 rcu_read_unlock();
6718}
6719
6720DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
6721
6722int perf_swevent_get_recursion_context(void)
6723{
6724 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
6725
6726 return get_recursion_context(swhash->recursion);
6727}
6728EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
6729
6730inline void perf_swevent_put_recursion_context(int rctx)
6731{
6732 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
6733
6734 put_recursion_context(swhash->recursion, rctx);
6735}
6736
6737void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
6738{
6739 struct perf_sample_data data;
6740
6741 if (WARN_ON_ONCE(!regs))
6742 return;
6743
6744 perf_sample_data_init(&data, addr, 0);
6745 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
6746}
6747
6748void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
6749{
6750 int rctx;
6751
6752 preempt_disable_notrace();
6753 rctx = perf_swevent_get_recursion_context();
6754 if (unlikely(rctx < 0))
6755 goto fail;
6756
6757 ___perf_sw_event(event_id, nr, regs, addr);
6758
6759 perf_swevent_put_recursion_context(rctx);
6760fail:
6761 preempt_enable_notrace();
6762}
6763
6764static void perf_swevent_read(struct perf_event *event)
6765{
6766}
6767
6768static int perf_swevent_add(struct perf_event *event, int flags)
6769{
6770 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
6771 struct hw_perf_event *hwc = &event->hw;
6772 struct hlist_head *head;
6773
6774 if (is_sampling_event(event)) {
6775 hwc->last_period = hwc->sample_period;
6776 perf_swevent_set_period(event);
6777 }
6778
6779 hwc->state = !(flags & PERF_EF_START);
6780
6781 head = find_swevent_head(swhash, event);
6782 if (WARN_ON_ONCE(!head))
6783 return -EINVAL;
6784
6785 hlist_add_head_rcu(&event->hlist_entry, head);
6786 perf_event_update_userpage(event);
6787
6788 return 0;
6789}
6790
6791static void perf_swevent_del(struct perf_event *event, int flags)
6792{
6793 hlist_del_rcu(&event->hlist_entry);
6794}
6795
6796static void perf_swevent_start(struct perf_event *event, int flags)
6797{
6798 event->hw.state = 0;
6799}
6800
6801static void perf_swevent_stop(struct perf_event *event, int flags)
6802{
6803 event->hw.state = PERF_HES_STOPPED;
6804}
6805
6806/* Deref the hlist from the update side */
6807static inline struct swevent_hlist *
6808swevent_hlist_deref(struct swevent_htable *swhash)
6809{
6810 return rcu_dereference_protected(swhash->swevent_hlist,
6811 lockdep_is_held(&swhash->hlist_mutex));
6812}
6813
6814static void swevent_hlist_release(struct swevent_htable *swhash)
6815{
6816 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
6817
6818 if (!hlist)
6819 return;
6820
6821 RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
6822 kfree_rcu(hlist, rcu_head);
6823}
6824
6825static void swevent_hlist_put_cpu(int cpu)
6826{
6827 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6828
6829 mutex_lock(&swhash->hlist_mutex);
6830
6831 if (!--swhash->hlist_refcount)
6832 swevent_hlist_release(swhash);
6833
6834 mutex_unlock(&swhash->hlist_mutex);
6835}
6836
6837static void swevent_hlist_put(void)
6838{
6839 int cpu;
6840
6841 for_each_possible_cpu(cpu)
6842 swevent_hlist_put_cpu(cpu);
6843}
6844
6845static int swevent_hlist_get_cpu(int cpu)
6846{
6847 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6848 int err = 0;
6849
6850 mutex_lock(&swhash->hlist_mutex);
6851 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
6852 struct swevent_hlist *hlist;
6853
6854 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
6855 if (!hlist) {
6856 err = -ENOMEM;
6857 goto exit;
6858 }
6859 rcu_assign_pointer(swhash->swevent_hlist, hlist);
6860 }
6861 swhash->hlist_refcount++;
6862exit:
6863 mutex_unlock(&swhash->hlist_mutex);
6864
6865 return err;
6866}
6867
6868static int swevent_hlist_get(void)
6869{
6870 int err, cpu, failed_cpu;
6871
6872 get_online_cpus();
6873 for_each_possible_cpu(cpu) {
6874 err = swevent_hlist_get_cpu(cpu);
6875 if (err) {
6876 failed_cpu = cpu;
6877 goto fail;
6878 }
6879 }
6880 put_online_cpus();
6881
6882 return 0;
6883fail:
6884 for_each_possible_cpu(cpu) {
6885 if (cpu == failed_cpu)
6886 break;
6887 swevent_hlist_put_cpu(cpu);
6888 }
6889
6890 put_online_cpus();
6891 return err;
6892}
6893
6894struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
6895
6896static void sw_perf_event_destroy(struct perf_event *event)
6897{
6898 u64 event_id = event->attr.config;
6899
6900 WARN_ON(event->parent);
6901
6902 static_key_slow_dec(&perf_swevent_enabled[event_id]);
6903 swevent_hlist_put();
6904}
6905
6906static int perf_swevent_init(struct perf_event *event)
6907{
6908 u64 event_id = event->attr.config;
6909
6910 if (event->attr.type != PERF_TYPE_SOFTWARE)
6911 return -ENOENT;
6912
6913 /*
6914 * no branch sampling for software events
6915 */
6916 if (has_branch_stack(event))
6917 return -EOPNOTSUPP;
6918
6919 switch (event_id) {
6920 case PERF_COUNT_SW_CPU_CLOCK:
6921 case PERF_COUNT_SW_TASK_CLOCK:
6922 return -ENOENT;
6923
6924 default:
6925 break;
6926 }
6927
6928 if (event_id >= PERF_COUNT_SW_MAX)
6929 return -ENOENT;
6930
6931 if (!event->parent) {
6932 int err;
6933
6934 err = swevent_hlist_get();
6935 if (err)
6936 return err;
6937
6938 static_key_slow_inc(&perf_swevent_enabled[event_id]);
6939 event->destroy = sw_perf_event_destroy;
6940 }
6941
6942 return 0;
6943}
6944
6945static struct pmu perf_swevent = {
6946 .task_ctx_nr = perf_sw_context,
6947
6948 .capabilities = PERF_PMU_CAP_NO_NMI,
6949
6950 .event_init = perf_swevent_init,
6951 .add = perf_swevent_add,
6952 .del = perf_swevent_del,
6953 .start = perf_swevent_start,
6954 .stop = perf_swevent_stop,
6955 .read = perf_swevent_read,
6956};
6957
6958#ifdef CONFIG_EVENT_TRACING
6959
6960static int perf_tp_filter_match(struct perf_event *event,
6961 struct perf_sample_data *data)
6962{
6963 void *record = data->raw->data;
6964
6965 /* only top level events have filters set */
6966 if (event->parent)
6967 event = event->parent;
6968
6969 if (likely(!event->filter) || filter_match_preds(event->filter, record))
6970 return 1;
6971 return 0;
6972}
6973
6974static int perf_tp_event_match(struct perf_event *event,
6975 struct perf_sample_data *data,
6976 struct pt_regs *regs)
6977{
6978 if (event->hw.state & PERF_HES_STOPPED)
6979 return 0;
6980 /*
6981 * All tracepoints are from kernel-space.
6982 */
6983 if (event->attr.exclude_kernel)
6984 return 0;
6985
6986 if (!perf_tp_filter_match(event, data))
6987 return 0;
6988
6989 return 1;
6990}
6991
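/*
 * Deliver a tracepoint event to every matching perf event on @head and,
 * when @task is given, to tracepoint events in that task's context too.
 * Consumes @rctx, which the caller obtained from
 * perf_swevent_get_recursion_context().
 */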
6992void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
6993 struct pt_regs *regs, struct hlist_head *head, int rctx,
6994 struct task_struct *task)
6995{
6996 struct perf_sample_data data;
6997 struct perf_event *event;
6998
6999 struct perf_raw_record raw = {
7000 .size = entry_size,
7001 .data = record,
7002 };
7003
7004 perf_sample_data_init(&data, addr, 0);
7005 data.raw = &raw;
7006
7007 hlist_for_each_entry_rcu(event, head, hlist_entry) {
7008 if (perf_tp_event_match(event, &data, regs))
7009 perf_swevent_event(event, count, &data, regs);
7010 }
7011
	/*
	 * If we were given a target task, also iterate its context and
	 * deliver this event there.
	 */
7016 if (task && task != current) {
7017 struct perf_event_context *ctx;
7018 struct trace_entry *entry = record;
7019
7020 rcu_read_lock();
7021 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
7022 if (!ctx)
7023 goto unlock;
7024
7025 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
7026 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7027 continue;
7028 if (event->attr.config != entry->type)
7029 continue;
7030 if (perf_tp_event_match(event, &data, regs))
7031 perf_swevent_event(event, count, &data, regs);
7032 }
7033unlock:
7034 rcu_read_unlock();
7035 }
7036
7037 perf_swevent_put_recursion_context(rctx);
7038}
7039EXPORT_SYMBOL_GPL(perf_tp_event);
7040
7041static void tp_perf_event_destroy(struct perf_event *event)
7042{
7043 perf_trace_destroy(event);
7044}
7045
7046static int perf_tp_event_init(struct perf_event *event)
7047{
7048 int err;
7049
7050 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7051 return -ENOENT;
7052
7053 /*
7054 * no branch sampling for tracepoint events
7055 */
7056 if (has_branch_stack(event))
7057 return -EOPNOTSUPP;
7058
7059 err = perf_trace_init(event);
7060 if (err)
7061 return err;
7062
7063 event->destroy = tp_perf_event_destroy;
7064
7065 return 0;
7066}
7067
7068static struct pmu perf_tracepoint = {
7069 .task_ctx_nr = perf_sw_context,
7070
7071 .event_init = perf_tp_event_init,
7072 .add = perf_trace_add,
7073 .del = perf_trace_del,
7074 .start = perf_swevent_start,
7075 .stop = perf_swevent_stop,
7076 .read = perf_swevent_read,
7077};
7078
7079static inline void perf_tp_register(void)
7080{
7081 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
7082}
7083
7084static int perf_event_set_filter(struct perf_event *event, void __user *arg)
7085{
7086 char *filter_str;
7087 int ret;
7088
7089 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7090 return -EINVAL;
7091
7092 filter_str = strndup_user(arg, PAGE_SIZE);
7093 if (IS_ERR(filter_str))
7094 return PTR_ERR(filter_str);
7095
7096 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
7097
7098 kfree(filter_str);
7099 return ret;
7100}
7101
7102static void perf_event_free_filter(struct perf_event *event)
7103{
7104 ftrace_profile_free_filter(event);
7105}
7106
7107static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7108{
7109 struct bpf_prog *prog;
7110
7111 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7112 return -EINVAL;
7113
7114 if (event->tp_event->prog)
7115 return -EEXIST;
7116
7117 if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE))
7118 /* bpf programs can only be attached to u/kprobes */
7119 return -EINVAL;
7120
7121 prog = bpf_prog_get(prog_fd);
7122 if (IS_ERR(prog))
7123 return PTR_ERR(prog);
7124
7125 if (prog->type != BPF_PROG_TYPE_KPROBE) {
7126 /* valid fd, but invalid bpf program type */
7127 bpf_prog_put(prog);
7128 return -EINVAL;
7129 }
7130
7131 event->tp_event->prog = prog;
7132
7133 return 0;
7134}
7135
7136static void perf_event_free_bpf_prog(struct perf_event *event)
7137{
7138 struct bpf_prog *prog;
7139
7140 if (!event->tp_event)
7141 return;
7142
7143 prog = event->tp_event->prog;
7144 if (prog) {
7145 event->tp_event->prog = NULL;
7146 bpf_prog_put(prog);
7147 }
7148}
7149
7150#else
7151
7152static inline void perf_tp_register(void)
7153{
7154}
7155
7156static int perf_event_set_filter(struct perf_event *event, void __user *arg)
7157{
7158 return -ENOENT;
7159}
7160
7161static void perf_event_free_filter(struct perf_event *event)
7162{
7163}
7164
7165static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7166{
7167 return -ENOENT;
7168}
7169
7170static void perf_event_free_bpf_prog(struct perf_event *event)
7171{
7172}
7173#endif /* CONFIG_EVENT_TRACING */
7174
7175#ifdef CONFIG_HAVE_HW_BREAKPOINT
7176void perf_bp_event(struct perf_event *bp, void *data)
7177{
7178 struct perf_sample_data sample;
7179 struct pt_regs *regs = data;
7180
7181 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
7182
7183 if (!bp->hw.state && !perf_exclude_event(bp, regs))
7184 perf_swevent_event(bp, 1, &sample, regs);
7185}
7186#endif
7187
7188/*
7189 * hrtimer based swevent callback
7190 */
7191
7192static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
7193{
7194 enum hrtimer_restart ret = HRTIMER_RESTART;
7195 struct perf_sample_data data;
7196 struct pt_regs *regs;
7197 struct perf_event *event;
7198 u64 period;
7199
7200 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
7201
7202 if (event->state != PERF_EVENT_STATE_ACTIVE)
7203 return HRTIMER_NORESTART;
7204
7205 event->pmu->read(event);
7206
7207 perf_sample_data_init(&data, 0, event->hw.last_period);
7208 regs = get_irq_regs();
7209
7210 if (regs && !perf_exclude_event(event, regs)) {
7211 if (!(event->attr.exclude_idle && is_idle_task(current)))
7212 if (__perf_event_overflow(event, 1, &data, regs))
7213 ret = HRTIMER_NORESTART;
7214 }
7215
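	/* Re-arm the timer; clamp the period to at least 10us (10000ns). */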
7216 period = max_t(u64, 10000, event->hw.sample_period);
7217 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
7218
7219 return ret;
7220}
7221
7222static void perf_swevent_start_hrtimer(struct perf_event *event)
7223{
7224 struct hw_perf_event *hwc = &event->hw;
7225 s64 period;
7226
7227 if (!is_sampling_event(event))
7228 return;
7229
7230 period = local64_read(&hwc->period_left);
7231 if (period) {
7232 if (period < 0)
7233 period = 10000;
7234
7235 local64_set(&hwc->period_left, 0);
7236 } else {
7237 period = max_t(u64, 10000, hwc->sample_period);
7238 }
7239 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
7240 HRTIMER_MODE_REL_PINNED);
7241}
7242
7243static void perf_swevent_cancel_hrtimer(struct perf_event *event)
7244{
7245 struct hw_perf_event *hwc = &event->hw;
7246
7247 if (is_sampling_event(event)) {
7248 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
7249 local64_set(&hwc->period_left, ktime_to_ns(remaining));
7250
7251 hrtimer_cancel(&hwc->hrtimer);
7252 }
7253}
7254
7255static void perf_swevent_init_hrtimer(struct perf_event *event)
7256{
7257 struct hw_perf_event *hwc = &event->hw;
7258
7259 if (!is_sampling_event(event))
7260 return;
7261
7262 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7263 hwc->hrtimer.function = perf_swevent_hrtimer;
7264
7265 /*
7266 * Since hrtimers have a fixed rate, we can do a static freq->period
7267 * mapping and avoid the whole period adjust feedback stuff.
7268 */
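	/*
	 * For example, sample_freq == 1000 (1kHz) becomes a fixed
	 * sample_period of NSEC_PER_SEC / 1000 == 1000000ns.
	 */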
7269 if (event->attr.freq) {
7270 long freq = event->attr.sample_freq;
7271
7272 event->attr.sample_period = NSEC_PER_SEC / freq;
7273 hwc->sample_period = event->attr.sample_period;
7274 local64_set(&hwc->period_left, hwc->sample_period);
7275 hwc->last_period = hwc->sample_period;
7276 event->attr.freq = 0;
7277 }
7278}
7279
7280/*
7281 * Software event: cpu wall time clock
7282 */
7283
7284static void cpu_clock_event_update(struct perf_event *event)
7285{
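	/* accumulate the local clock delta since the previous update */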
7286 s64 prev;
7287 u64 now;
7288
7289 now = local_clock();
7290 prev = local64_xchg(&event->hw.prev_count, now);
7291 local64_add(now - prev, &event->count);
7292}
7293
7294static void cpu_clock_event_start(struct perf_event *event, int flags)
7295{
7296 local64_set(&event->hw.prev_count, local_clock());
7297 perf_swevent_start_hrtimer(event);
7298}
7299
7300static void cpu_clock_event_stop(struct perf_event *event, int flags)
7301{
7302 perf_swevent_cancel_hrtimer(event);
7303 cpu_clock_event_update(event);
7304}
7305
7306static int cpu_clock_event_add(struct perf_event *event, int flags)
7307{
7308 if (flags & PERF_EF_START)
7309 cpu_clock_event_start(event, flags);
7310 perf_event_update_userpage(event);
7311
7312 return 0;
7313}
7314
7315static void cpu_clock_event_del(struct perf_event *event, int flags)
7316{
7317 cpu_clock_event_stop(event, flags);
7318}
7319
7320static void cpu_clock_event_read(struct perf_event *event)
7321{
7322 cpu_clock_event_update(event);
7323}
7324
7325static int cpu_clock_event_init(struct perf_event *event)
7326{
7327 if (event->attr.type != PERF_TYPE_SOFTWARE)
7328 return -ENOENT;
7329
7330 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
7331 return -ENOENT;
7332
7333 /*
7334 * no branch sampling for software events
7335 */
7336 if (has_branch_stack(event))
7337 return -EOPNOTSUPP;
7338
7339 perf_swevent_init_hrtimer(event);
7340
7341 return 0;
7342}
7343
7344static struct pmu perf_cpu_clock = {
7345 .task_ctx_nr = perf_sw_context,
7346
7347 .capabilities = PERF_PMU_CAP_NO_NMI,
7348
7349 .event_init = cpu_clock_event_init,
7350 .add = cpu_clock_event_add,
7351 .del = cpu_clock_event_del,
7352 .start = cpu_clock_event_start,
7353 .stop = cpu_clock_event_stop,
7354 .read = cpu_clock_event_read,
7355};
7356
7357/*
7358 * Software event: task time clock
7359 */
7360
7361static void task_clock_event_update(struct perf_event *event, u64 now)
7362{
7363 u64 prev;
7364 s64 delta;
7365
7366 prev = local64_xchg(&event->hw.prev_count, now);
7367 delta = now - prev;
7368 local64_add(delta, &event->count);
7369}
7370
7371static void task_clock_event_start(struct perf_event *event, int flags)
7372{
7373 local64_set(&event->hw.prev_count, event->ctx->time);
7374 perf_swevent_start_hrtimer(event);
7375}
7376
7377static void task_clock_event_stop(struct perf_event *event, int flags)
7378{
7379 perf_swevent_cancel_hrtimer(event);
7380 task_clock_event_update(event, event->ctx->time);
7381}
7382
7383static int task_clock_event_add(struct perf_event *event, int flags)
7384{
7385 if (flags & PERF_EF_START)
7386 task_clock_event_start(event, flags);
7387 perf_event_update_userpage(event);
7388
7389 return 0;
7390}
7391
7392static void task_clock_event_del(struct perf_event *event, int flags)
7393{
7394 task_clock_event_stop(event, PERF_EF_UPDATE);
7395}
7396
7397static void task_clock_event_read(struct perf_event *event)
7398{
7399 u64 now = perf_clock();
7400 u64 delta = now - event->ctx->timestamp;
7401 u64 time = event->ctx->time + delta;
7402
7403 task_clock_event_update(event, time);
7404}
7405
7406static int task_clock_event_init(struct perf_event *event)
7407{
7408 if (event->attr.type != PERF_TYPE_SOFTWARE)
7409 return -ENOENT;
7410
7411 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
7412 return -ENOENT;
7413
7414 /*
7415 * no branch sampling for software events
7416 */
7417 if (has_branch_stack(event))
7418 return -EOPNOTSUPP;
7419
7420 perf_swevent_init_hrtimer(event);
7421
7422 return 0;
7423}
7424
7425static struct pmu perf_task_clock = {
7426 .task_ctx_nr = perf_sw_context,
7427
7428 .capabilities = PERF_PMU_CAP_NO_NMI,
7429
7430 .event_init = task_clock_event_init,
7431 .add = task_clock_event_add,
7432 .del = task_clock_event_del,
7433 .start = task_clock_event_start,
7434 .stop = task_clock_event_stop,
7435 .read = task_clock_event_read,
7436};
7437
7438static void perf_pmu_nop_void(struct pmu *pmu)
7439{
7440}
7441
7442static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
7443{
7444}
7445
7446static int perf_pmu_nop_int(struct pmu *pmu)
7447{
7448 return 0;
7449}
7450
7451static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
7452
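/*
 * Default transaction helpers: only a PERF_PMU_TXN_ADD transaction
 * brackets the group in perf_pmu_disable()/perf_pmu_enable(); any other
 * transaction type falls straight through.
 */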
7453static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
7454{
7455 __this_cpu_write(nop_txn_flags, flags);
7456
7457 if (flags & ~PERF_PMU_TXN_ADD)
7458 return;
7459
7460 perf_pmu_disable(pmu);
7461}
7462
7463static int perf_pmu_commit_txn(struct pmu *pmu)
7464{
7465 unsigned int flags = __this_cpu_read(nop_txn_flags);
7466
7467 __this_cpu_write(nop_txn_flags, 0);
7468
7469 if (flags & ~PERF_PMU_TXN_ADD)
7470 return 0;
7471
7472 perf_pmu_enable(pmu);
7473 return 0;
7474}
7475
7476static void perf_pmu_cancel_txn(struct pmu *pmu)
7477{
7478 unsigned int flags = __this_cpu_read(nop_txn_flags);
7479
7480 __this_cpu_write(nop_txn_flags, 0);
7481
7482 if (flags & ~PERF_PMU_TXN_ADD)
7483 return;
7484
7485 perf_pmu_enable(pmu);
7486}
7487
7488static int perf_event_idx_default(struct perf_event *event)
7489{
7490 return 0;
7491}
7492
7493/*
7494 * Ensures all contexts with the same task_ctx_nr have the same
7495 * pmu_cpu_context too.
7496 */
7497static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
7498{
7499 struct pmu *pmu;
7500
7501 if (ctxn < 0)
7502 return NULL;
7503
7504 list_for_each_entry(pmu, &pmus, entry) {
7505 if (pmu->task_ctx_nr == ctxn)
7506 return pmu->pmu_cpu_context;
7507 }
7508
7509 return NULL;
7510}
7511
7512static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
7513{
7514 int cpu;
7515
7516 for_each_possible_cpu(cpu) {
7517 struct perf_cpu_context *cpuctx;
7518
7519 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
7520
7521 if (cpuctx->unique_pmu == old_pmu)
7522 cpuctx->unique_pmu = pmu;
7523 }
7524}
7525
7526static void free_pmu_context(struct pmu *pmu)
7527{
7528 struct pmu *i;
7529
7530 mutex_lock(&pmus_lock);
7531 /*
7532 * Like a real lame refcount.
7533 */
7534 list_for_each_entry(i, &pmus, entry) {
7535 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
7536 update_pmu_context(i, pmu);
7537 goto out;
7538 }
7539 }
7540
7541 free_percpu(pmu->pmu_cpu_context);
7542out:
7543 mutex_unlock(&pmus_lock);
7544}
7545static struct idr pmu_idr;
7546
7547static ssize_t
7548type_show(struct device *dev, struct device_attribute *attr, char *page)
7549{
7550 struct pmu *pmu = dev_get_drvdata(dev);
7551
7552 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
7553}
7554static DEVICE_ATTR_RO(type);
7555
7556static ssize_t
7557perf_event_mux_interval_ms_show(struct device *dev,
7558 struct device_attribute *attr,
7559 char *page)
7560{
7561 struct pmu *pmu = dev_get_drvdata(dev);
7562
7563 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
7564}
7565
7566static DEFINE_MUTEX(mux_interval_mutex);
7567
7568static ssize_t
7569perf_event_mux_interval_ms_store(struct device *dev,
7570 struct device_attribute *attr,
7571 const char *buf, size_t count)
7572{
7573 struct pmu *pmu = dev_get_drvdata(dev);
7574 int timer, cpu, ret;
7575
7576 ret = kstrtoint(buf, 0, &timer);
7577 if (ret)
7578 return ret;
7579
7580 if (timer < 1)
7581 return -EINVAL;
7582
	/* same value, nothing to do */
7584 if (timer == pmu->hrtimer_interval_ms)
7585 return count;
7586
7587 mutex_lock(&mux_interval_mutex);
7588 pmu->hrtimer_interval_ms = timer;
7589
7590 /* update all cpuctx for this PMU */
7591 get_online_cpus();
7592 for_each_online_cpu(cpu) {
7593 struct perf_cpu_context *cpuctx;
7594 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
7595 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
7596
7597 cpu_function_call(cpu,
7598 (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
7599 }
7600 put_online_cpus();
7601 mutex_unlock(&mux_interval_mutex);
7602
7603 return count;
7604}
7605static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
7606
7607static struct attribute *pmu_dev_attrs[] = {
7608 &dev_attr_type.attr,
7609 &dev_attr_perf_event_mux_interval_ms.attr,
7610 NULL,
7611};
7612ATTRIBUTE_GROUPS(pmu_dev);
7613
7614static int pmu_bus_running;
7615static struct bus_type pmu_bus = {
7616 .name = "event_source",
7617 .dev_groups = pmu_dev_groups,
7618};
7619
7620static void pmu_dev_release(struct device *dev)
7621{
7622 kfree(dev);
7623}
7624
7625static int pmu_dev_alloc(struct pmu *pmu)
7626{
7627 int ret = -ENOMEM;
7628
7629 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
7630 if (!pmu->dev)
7631 goto out;
7632
7633 pmu->dev->groups = pmu->attr_groups;
7634 device_initialize(pmu->dev);
7635 ret = dev_set_name(pmu->dev, "%s", pmu->name);
7636 if (ret)
7637 goto free_dev;
7638
7639 dev_set_drvdata(pmu->dev, pmu);
7640 pmu->dev->bus = &pmu_bus;
7641 pmu->dev->release = pmu_dev_release;
7642 ret = device_add(pmu->dev);
7643 if (ret)
7644 goto free_dev;
7645
7646out:
7647 return ret;
7648
7649free_dev:
7650 put_device(pmu->dev);
7651 goto out;
7652}
7653
7654static struct lock_class_key cpuctx_mutex;
7655static struct lock_class_key cpuctx_lock;
7656
7657int perf_pmu_register(struct pmu *pmu, const char *name, int type)
7658{
7659 int cpu, ret;
7660
7661 mutex_lock(&pmus_lock);
7662 ret = -ENOMEM;
7663 pmu->pmu_disable_count = alloc_percpu(int);
7664 if (!pmu->pmu_disable_count)
7665 goto unlock;
7666
7667 pmu->type = -1;
7668 if (!name)
7669 goto skip_type;
7670 pmu->name = name;
7671
7672 if (type < 0) {
7673 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
7674 if (type < 0) {
7675 ret = type;
7676 goto free_pdc;
7677 }
7678 }
7679 pmu->type = type;
7680
7681 if (pmu_bus_running) {
7682 ret = pmu_dev_alloc(pmu);
7683 if (ret)
7684 goto free_idr;
7685 }
7686
7687skip_type:
7688 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
7689 if (pmu->pmu_cpu_context)
7690 goto got_cpu_context;
7691
7692 ret = -ENOMEM;
7693 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
7694 if (!pmu->pmu_cpu_context)
7695 goto free_dev;
7696
7697 for_each_possible_cpu(cpu) {
7698 struct perf_cpu_context *cpuctx;
7699
7700 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
7701 __perf_event_init_context(&cpuctx->ctx);
7702 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
7703 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
7704 cpuctx->ctx.pmu = pmu;
7705
7706 __perf_mux_hrtimer_init(cpuctx, cpu);
7707
7708 cpuctx->unique_pmu = pmu;
7709 }
7710
7711got_cpu_context:
7712 if (!pmu->start_txn) {
7713 if (pmu->pmu_enable) {
7714 /*
7715 * If we have pmu_enable/pmu_disable calls, install
7716 * transaction stubs that use that to try and batch
7717 * hardware accesses.
7718 */
7719 pmu->start_txn = perf_pmu_start_txn;
7720 pmu->commit_txn = perf_pmu_commit_txn;
7721 pmu->cancel_txn = perf_pmu_cancel_txn;
7722 } else {
7723 pmu->start_txn = perf_pmu_nop_txn;
7724 pmu->commit_txn = perf_pmu_nop_int;
7725 pmu->cancel_txn = perf_pmu_nop_void;
7726 }
7727 }
7728
7729 if (!pmu->pmu_enable) {
7730 pmu->pmu_enable = perf_pmu_nop_void;
7731 pmu->pmu_disable = perf_pmu_nop_void;
7732 }
7733
7734 if (!pmu->event_idx)
7735 pmu->event_idx = perf_event_idx_default;
7736
7737 list_add_rcu(&pmu->entry, &pmus);
7738 atomic_set(&pmu->exclusive_cnt, 0);
7739 ret = 0;
7740unlock:
7741 mutex_unlock(&pmus_lock);
7742
7743 return ret;
7744
7745free_dev:
7746 device_del(pmu->dev);
7747 put_device(pmu->dev);
7748
7749free_idr:
7750 if (pmu->type >= PERF_TYPE_MAX)
7751 idr_remove(&pmu_idr, pmu->type);
7752
7753free_pdc:
7754 free_percpu(pmu->pmu_disable_count);
7755 goto unlock;
7756}
7757EXPORT_SYMBOL_GPL(perf_pmu_register);
7758
7759void perf_pmu_unregister(struct pmu *pmu)
7760{
7761 mutex_lock(&pmus_lock);
7762 list_del_rcu(&pmu->entry);
7763 mutex_unlock(&pmus_lock);
7764
7765 /*
7766 * We dereference the pmu list under both SRCU and regular RCU, so
7767 * synchronize against both of those.
7768 */
7769 synchronize_srcu(&pmus_srcu);
7770 synchronize_rcu();
7771
7772 free_percpu(pmu->pmu_disable_count);
7773 if (pmu->type >= PERF_TYPE_MAX)
7774 idr_remove(&pmu_idr, pmu->type);
7775 device_del(pmu->dev);
7776 put_device(pmu->dev);
7777 free_pmu_context(pmu);
7778}
7779EXPORT_SYMBOL_GPL(perf_pmu_unregister);
7780
7781static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
7782{
7783 struct perf_event_context *ctx = NULL;
7784 int ret;
7785
7786 if (!try_module_get(pmu->module))
7787 return -ENODEV;
7788
7789 if (event->group_leader != event) {
7790 /*
7791 * This ctx->mutex can nest when we're called through
7792 * inheritance. See the perf_event_ctx_lock_nested() comment.
7793 */
7794 ctx = perf_event_ctx_lock_nested(event->group_leader,
7795 SINGLE_DEPTH_NESTING);
7796 BUG_ON(!ctx);
7797 }
7798
7799 event->pmu = pmu;
7800 ret = pmu->event_init(event);
7801
7802 if (ctx)
7803 perf_event_ctx_unlock(event->group_leader, ctx);
7804
7805 if (ret)
7806 module_put(pmu->module);
7807
7808 return ret;
7809}
7810
7811static struct pmu *perf_init_event(struct perf_event *event)
7812{
7813 struct pmu *pmu = NULL;
7814 int idx;
7815 int ret;
7816
7817 idx = srcu_read_lock(&pmus_srcu);
7818
7819 rcu_read_lock();
7820 pmu = idr_find(&pmu_idr, event->attr.type);
7821 rcu_read_unlock();
7822 if (pmu) {
7823 ret = perf_try_init_event(pmu, event);
7824 if (ret)
7825 pmu = ERR_PTR(ret);
7826 goto unlock;
7827 }
7828
7829 list_for_each_entry_rcu(pmu, &pmus, entry) {
7830 ret = perf_try_init_event(pmu, event);
7831 if (!ret)
7832 goto unlock;
7833
7834 if (ret != -ENOENT) {
7835 pmu = ERR_PTR(ret);
7836 goto unlock;
7837 }
7838 }
7839 pmu = ERR_PTR(-ENOENT);
7840unlock:
7841 srcu_read_unlock(&pmus_srcu, idx);
7842
7843 return pmu;
7844}
7845
7846static void account_event_cpu(struct perf_event *event, int cpu)
7847{
7848 if (event->parent)
7849 return;
7850
7851 if (is_cgroup_event(event))
7852 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
7853}
7854
7855/* Freq events need the tick to stay alive (see perf_event_task_tick). */
7856static void account_freq_event_nohz(void)
7857{
7858#ifdef CONFIG_NO_HZ_FULL
7859 /* Lock so we don't race with concurrent unaccount */
7860 spin_lock(&nr_freq_lock);
7861 if (atomic_inc_return(&nr_freq_events) == 1)
7862 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
7863 spin_unlock(&nr_freq_lock);
7864#endif
7865}
7866
7867static void account_freq_event(void)
7868{
7869 if (tick_nohz_full_enabled())
7870 account_freq_event_nohz();
7871 else
7872 atomic_inc(&nr_freq_events);
7873}
7874
7876static void account_event(struct perf_event *event)
7877{
7878 bool inc = false;
7879
7880 if (event->parent)
7881 return;
7882
7883 if (event->attach_state & PERF_ATTACH_TASK)
7884 inc = true;
7885 if (event->attr.mmap || event->attr.mmap_data)
7886 atomic_inc(&nr_mmap_events);
7887 if (event->attr.comm)
7888 atomic_inc(&nr_comm_events);
7889 if (event->attr.task)
7890 atomic_inc(&nr_task_events);
7891 if (event->attr.freq)
7892 account_freq_event();
7893 if (event->attr.context_switch) {
7894 atomic_inc(&nr_switch_events);
7895 inc = true;
7896 }
7897 if (has_branch_stack(event))
7898 inc = true;
7899 if (is_cgroup_event(event))
7900 inc = true;
7901
7902 if (inc) {
7903 if (atomic_inc_not_zero(&perf_sched_count))
7904 goto enabled;
7905
7906 mutex_lock(&perf_sched_mutex);
7907 if (!atomic_read(&perf_sched_count)) {
7908 static_branch_enable(&perf_sched_events);
			/*
			 * Guarantee that all CPUs observe the key change and
			 * call the perf scheduling hooks before proceeding to
			 * install events that need them.
			 */
7914 synchronize_sched();
7915 }
		/*
		 * Now that we have waited for the sync_sched(), allow further
		 * increments to bypass the mutex.
		 */
7920 atomic_inc(&perf_sched_count);
7921 mutex_unlock(&perf_sched_mutex);
7922 }
7923enabled:
7924
7925 account_event_cpu(event, event->cpu);
7926}
7927
/*
 * Allocate and initialize an event structure
 */
7931static struct perf_event *
7932perf_event_alloc(struct perf_event_attr *attr, int cpu,
7933 struct task_struct *task,
7934 struct perf_event *group_leader,
7935 struct perf_event *parent_event,
7936 perf_overflow_handler_t overflow_handler,
7937 void *context, int cgroup_fd)
7938{
7939 struct pmu *pmu;
7940 struct perf_event *event;
7941 struct hw_perf_event *hwc;
7942 long err = -EINVAL;
7943
7944 if ((unsigned)cpu >= nr_cpu_ids) {
7945 if (!task || cpu != -1)
7946 return ERR_PTR(-EINVAL);
7947 }
7948
7949 event = kzalloc(sizeof(*event), GFP_KERNEL);
7950 if (!event)
7951 return ERR_PTR(-ENOMEM);
7952
7953 /*
7954 * Single events are their own group leaders, with an
7955 * empty sibling list:
7956 */
7957 if (!group_leader)
7958 group_leader = event;
7959
7960 mutex_init(&event->child_mutex);
7961 INIT_LIST_HEAD(&event->child_list);
7962
7963 INIT_LIST_HEAD(&event->group_entry);
7964 INIT_LIST_HEAD(&event->event_entry);
7965 INIT_LIST_HEAD(&event->sibling_list);
7966 INIT_LIST_HEAD(&event->rb_entry);
7967 INIT_LIST_HEAD(&event->active_entry);
7968 INIT_HLIST_NODE(&event->hlist_entry);
7969
7971 init_waitqueue_head(&event->waitq);
7972 init_irq_work(&event->pending, perf_pending_event);
7973
7974 mutex_init(&event->mmap_mutex);
7975
7976 atomic_long_set(&event->refcount, 1);
7977 event->cpu = cpu;
7978 event->attr = *attr;
7979 event->group_leader = group_leader;
7980 event->pmu = NULL;
7981 event->oncpu = -1;
7982
7983 event->parent = parent_event;
7984
7985 event->ns = get_pid_ns(task_active_pid_ns(current));
7986 event->id = atomic64_inc_return(&perf_event_id);
7987
7988 event->state = PERF_EVENT_STATE_INACTIVE;
7989
7990 if (task) {
7991 event->attach_state = PERF_ATTACH_TASK;
7992 /*
7993 * XXX pmu::event_init needs to know what task to account to
7994 * and we cannot use the ctx information because we need the
7995 * pmu before we get a ctx.
7996 */
7997 event->hw.target = task;
7998 }
7999
8000 event->clock = &local_clock;
8001 if (parent_event)
8002 event->clock = parent_event->clock;
8003
8004 if (!overflow_handler && parent_event) {
8005 overflow_handler = parent_event->overflow_handler;
8006 context = parent_event->overflow_handler_context;
8007 }
8008
8009 event->overflow_handler = overflow_handler;
8010 event->overflow_handler_context = context;
8011
8012 perf_event__state_init(event);
8013
8014 pmu = NULL;
8015
8016 hwc = &event->hw;
8017 hwc->sample_period = attr->sample_period;
8018 if (attr->freq && attr->sample_freq)
8019 hwc->sample_period = 1;
8020 hwc->last_period = hwc->sample_period;
8021
8022 local64_set(&hwc->period_left, hwc->sample_period);
8023
8024 /*
8025 * we currently do not support PERF_FORMAT_GROUP on inherited events
8026 */
8027 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
8028 goto err_ns;
8029
8030 if (!has_branch_stack(event))
8031 event->attr.branch_sample_type = 0;
8032
8033 if (cgroup_fd != -1) {
8034 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
8035 if (err)
8036 goto err_ns;
8037 }
8038
8039 pmu = perf_init_event(event);
8040 if (!pmu)
8041 goto err_ns;
8042 else if (IS_ERR(pmu)) {
8043 err = PTR_ERR(pmu);
8044 goto err_ns;
8045 }
8046
8047 err = exclusive_event_init(event);
8048 if (err)
8049 goto err_pmu;
8050
8051 if (!event->parent) {
8052 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
8053 err = get_callchain_buffers();
8054 if (err)
8055 goto err_per_task;
8056 }
8057 }
8058
8059 /* symmetric to unaccount_event() in _free_event() */
8060 account_event(event);
8061
8062 return event;
8063
8064err_per_task:
8065 exclusive_event_destroy(event);
8066
8067err_pmu:
8068 if (event->destroy)
8069 event->destroy(event);
8070 module_put(pmu->module);
8071err_ns:
8072 if (is_cgroup_event(event))
8073 perf_detach_cgroup(event);
8074 if (event->ns)
8075 put_pid_ns(event->ns);
8076 kfree(event);
8077
8078 return ERR_PTR(err);
8079}
8080
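/*
 * Copy in a user-supplied perf_event_attr, bridging ABI versions: an old
 * binary may pass a shorter struct (the tail stays zeroed), and a newer
 * one may pass a larger struct, which is accepted only if every byte past
 * what we know about is zero; otherwise we write back the size we do
 * support and fail with -E2BIG.
 */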
8081static int perf_copy_attr(struct perf_event_attr __user *uattr,
8082 struct perf_event_attr *attr)
8083{
8084 u32 size;
8085 int ret;
8086
8087 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
8088 return -EFAULT;
8089
	/*
	 * Zero the full structure first, so that a short copy leaves the
	 * tail zeroed.
	 */
8093 memset(attr, 0, sizeof(*attr));
8094
8095 ret = get_user(size, &uattr->size);
8096 if (ret)
8097 return ret;
8098
8099 if (size > PAGE_SIZE) /* silly large */
8100 goto err_size;
8101
8102 if (!size) /* abi compat */
8103 size = PERF_ATTR_SIZE_VER0;
8104
8105 if (size < PERF_ATTR_SIZE_VER0)
8106 goto err_size;
8107
	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
8114 if (size > sizeof(*attr)) {
8115 unsigned char __user *addr;
8116 unsigned char __user *end;
8117 unsigned char val;
8118
8119 addr = (void __user *)uattr + sizeof(*attr);
8120 end = (void __user *)uattr + size;
8121
8122 for (; addr < end; addr++) {
8123 ret = get_user(val, addr);
8124 if (ret)
8125 return ret;
8126 if (val)
8127 goto err_size;
8128 }
8129 size = sizeof(*attr);
8130 }
8131
8132 ret = copy_from_user(attr, uattr, size);
8133 if (ret)
8134 return -EFAULT;
8135
8136 if (attr->__reserved_1)
8137 return -EINVAL;
8138
8139 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
8140 return -EINVAL;
8141
8142 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
8143 return -EINVAL;
8144
8145 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
8146 u64 mask = attr->branch_sample_type;
8147
8148 /* only using defined bits */
8149 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
8150 return -EINVAL;
8151
8152 /* at least one branch bit must be set */
8153 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
8154 return -EINVAL;
8155
8156 /* propagate priv level, when not set for branch */
8157 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
8158
8159 /* exclude_kernel checked on syscall entry */
8160 if (!attr->exclude_kernel)
8161 mask |= PERF_SAMPLE_BRANCH_KERNEL;
8162
8163 if (!attr->exclude_user)
8164 mask |= PERF_SAMPLE_BRANCH_USER;
8165
8166 if (!attr->exclude_hv)
8167 mask |= PERF_SAMPLE_BRANCH_HV;
8168 /*
8169 * adjust user setting (for HW filter setup)
8170 */
8171 attr->branch_sample_type = mask;
8172 }
8173 /* privileged levels capture (kernel, hv): check permissions */
8174 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
8175 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
8176 return -EACCES;
8177 }
8178
8179 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
8180 ret = perf_reg_validate(attr->sample_regs_user);
8181 if (ret)
8182 return ret;
8183 }
8184
8185 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
8186 if (!arch_perf_have_user_stack_dump())
8187 return -ENOSYS;
8188
8189 /*
8190 * We have __u32 type for the size, but so far
8191 * we can only use __u16 as maximum due to the
8192 * __u16 sample size limit.
8193 */
8194 if (attr->sample_stack_user >= USHRT_MAX)
8195 ret = -EINVAL;
8196 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
8197 ret = -EINVAL;
8198 }
8199
8200 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
8201 ret = perf_reg_validate(attr->sample_regs_intr);
8202out:
8203 return ret;
8204
8205err_size:
8206 put_user(sizeof(*attr), &uattr->size);
8207 ret = -E2BIG;
8208 goto out;
8209}
8210
8211static int
8212perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
8213{
8214 struct ring_buffer *rb = NULL;
8215 int ret = -EINVAL;
8216
8217 if (!output_event)
8218 goto set;
8219
8220 /* don't allow circular references */
8221 if (event == output_event)
8222 goto out;
8223
8224 /*
8225 * Don't allow cross-cpu buffers
8226 */
8227 if (output_event->cpu != event->cpu)
8228 goto out;
8229
	/*
	 * If it's not a per-cpu rb, it must be the same task.
	 */
8233 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
8234 goto out;
8235
8236 /*
8237 * Mixing clocks in the same buffer is trouble you don't need.
8238 */
8239 if (output_event->clock != event->clock)
8240 goto out;
8241
8242 /*
8243 * If both events generate aux data, they must be on the same PMU
8244 */
8245 if (has_aux(event) && has_aux(output_event) &&
8246 event->pmu != output_event->pmu)
8247 goto out;
8248
8249set:
8250 mutex_lock(&event->mmap_mutex);
8251 /* Can't redirect output if we've got an active mmap() */
8252 if (atomic_read(&event->mmap_count))
8253 goto unlock;
8254
8255 if (output_event) {
8256 /* get the rb we want to redirect to */
8257 rb = ring_buffer_get(output_event);
8258 if (!rb)
8259 goto unlock;
8260 }
8261
8262 ring_buffer_attach(event, rb);
8263
8264 ret = 0;
8265unlock:
8266 mutex_unlock(&event->mmap_mutex);
8267
8268out:
8269 return ret;
8270}
8271
8272static void mutex_lock_double(struct mutex *a, struct mutex *b)
8273{
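	/*
	 * Lock in address order so that concurrent callers agree on the
	 * ordering and cannot ABBA deadlock.
	 */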
8274 if (b < a)
8275 swap(a, b);
8276
8277 mutex_lock(a);
8278 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
8279}
8280
8281static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
8282{
8283 bool nmi_safe = false;
8284
8285 switch (clk_id) {
8286 case CLOCK_MONOTONIC:
8287 event->clock = &ktime_get_mono_fast_ns;
8288 nmi_safe = true;
8289 break;
8290
8291 case CLOCK_MONOTONIC_RAW:
8292 event->clock = &ktime_get_raw_fast_ns;
8293 nmi_safe = true;
8294 break;
8295
8296 case CLOCK_REALTIME:
8297 event->clock = &ktime_get_real_ns;
8298 break;
8299
8300 case CLOCK_BOOTTIME:
8301 event->clock = &ktime_get_boot_ns;
8302 break;
8303
8304 case CLOCK_TAI:
8305 event->clock = &ktime_get_tai_ns;
8306 break;
8307
8308 default:
8309 return -EINVAL;
8310 }
8311
8312 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
8313 return -EINVAL;
8314
8315 return 0;
8316}
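/*
 * For example, userspace selects the monotonic clock for every timestamp
 * in this event's records with:
 *
 *	attr.use_clockid = 1;
 *	attr.clockid = CLOCK_MONOTONIC;
 *
 * Clocks without an NMI-safe accessor are refused for PMUs that can
 * deliver events from NMI context.
 */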
8317
/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr: event_id type attributes for monitoring/sampling
 * @pid: target pid
 * @cpu: target cpu
 * @group_fd: group leader event fd
 * @flags: PERF_FLAG_* option bits
 */
8326SYSCALL_DEFINE5(perf_event_open,
8327 struct perf_event_attr __user *, attr_uptr,
8328 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
8329{
8330 struct perf_event *group_leader = NULL, *output_event = NULL;
8331 struct perf_event *event, *sibling;
8332 struct perf_event_attr attr;
8333 struct perf_event_context *ctx, *uninitialized_var(gctx);
8334 struct file *event_file = NULL;
8335 struct fd group = {NULL, 0};
8336 struct task_struct *task = NULL;
8337 struct pmu *pmu;
8338 int event_fd;
8339 int move_group = 0;
8340 int err;
8341 int f_flags = O_RDWR;
8342 int cgroup_fd = -1;
8343
8344 /* for future expandability... */
8345 if (flags & ~PERF_FLAG_ALL)
8346 return -EINVAL;
8347
8348 err = perf_copy_attr(attr_uptr, &attr);
8349 if (err)
8350 return err;
8351
8352 if (!attr.exclude_kernel) {
8353 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
8354 return -EACCES;
8355 }
8356
8357 if (attr.freq) {
8358 if (attr.sample_freq > sysctl_perf_event_sample_rate)
8359 return -EINVAL;
8360 } else {
8361 if (attr.sample_period & (1ULL << 63))
8362 return -EINVAL;
8363 }
8364
8365 /*
8366 * In cgroup mode, the pid argument is used to pass the fd
8367 * opened to the cgroup directory in cgroupfs. The cpu argument
8368 * designates the cpu on which to monitor threads from that
8369 * cgroup.
8370 */
8371 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
8372 return -EINVAL;
8373
8374 if (flags & PERF_FLAG_FD_CLOEXEC)
8375 f_flags |= O_CLOEXEC;
8376
8377 event_fd = get_unused_fd_flags(f_flags);
8378 if (event_fd < 0)
8379 return event_fd;
8380
8381 if (group_fd != -1) {
8382 err = perf_fget_light(group_fd, &group);
8383 if (err)
8384 goto err_fd;
8385 group_leader = group.file->private_data;
8386 if (flags & PERF_FLAG_FD_OUTPUT)
8387 output_event = group_leader;
8388 if (flags & PERF_FLAG_FD_NO_GROUP)
8389 group_leader = NULL;
8390 }
8391
8392 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
8393 task = find_lively_task_by_vpid(pid);
8394 if (IS_ERR(task)) {
8395 err = PTR_ERR(task);
8396 goto err_group_fd;
8397 }
8398 }
8399
8400 if (task && group_leader &&
8401 group_leader->attr.inherit != attr.inherit) {
8402 err = -EINVAL;
8403 goto err_task;
8404 }
8405
8406 get_online_cpus();
8407
8408 if (task) {
8409 err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
8410 if (err)
8411 goto err_cpus;
8412
8413 /*
8414 * Reuse ptrace permission checks for now.
8415 *
8416 * We must hold cred_guard_mutex across this and any potential
8417 * perf_install_in_context() call for this new event to
8418 * serialize against exec() altering our credentials (and the
8419 * perf_event_exit_task() that could imply).
8420 */
8421 err = -EACCES;
8422 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
8423 goto err_cred;
8424 }
8425
8426 if (flags & PERF_FLAG_PID_CGROUP)
8427 cgroup_fd = pid;
8428
8429 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
8430 NULL, NULL, cgroup_fd);
8431 if (IS_ERR(event)) {
8432 err = PTR_ERR(event);
8433 goto err_cred;
8434 }
8435
8436 if (is_sampling_event(event)) {
8437 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
8438 err = -ENOTSUPP;
8439 goto err_alloc;
8440 }
8441 }
8442
8443 /*
8444 * Special case software events and allow them to be part of
8445 * any hardware group.
8446 */
8447 pmu = event->pmu;
8448
8449 if (attr.use_clockid) {
8450 err = perf_event_set_clock(event, attr.clockid);
8451 if (err)
8452 goto err_alloc;
8453 }
8454
8455 if (group_leader &&
8456 (is_software_event(event) != is_software_event(group_leader))) {
8457 if (is_software_event(event)) {
8458 /*
8459 * If event and group_leader are not both a software
8460 * event, and event is, then group leader is not.
8461 *
8462 * Allow the addition of software events to !software
8463 * groups, this is safe because software events never
8464 * fail to schedule.
8465 */
8466 pmu = group_leader->pmu;
8467 } else if (is_software_event(group_leader) &&
8468 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
8469 /*
8470 * In case the group is a pure software group, and we
8471 * try to add a hardware event, move the whole group to
8472 * the hardware context.
8473 */
8474 move_group = 1;
8475 }
8476 }
8477
8478 /*
8479 * Get the target context (task or percpu):
8480 */
8481 ctx = find_get_context(pmu, task, event);
8482 if (IS_ERR(ctx)) {
8483 err = PTR_ERR(ctx);
8484 goto err_alloc;
8485 }
8486
8487 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
8488 err = -EBUSY;
8489 goto err_context;
8490 }
8491
8492 /*
8493 * Look up the group leader (we will attach this event to it):
8494 */
8495 if (group_leader) {
8496 err = -EINVAL;
8497
8498 /*
8499 * Do not allow a recursive hierarchy (this new sibling
8500 * becoming part of another group-sibling):
8501 */
8502 if (group_leader->group_leader != group_leader)
8503 goto err_context;
8504
8505 /* All events in a group should have the same clock */
8506 if (group_leader->clock != event->clock)
8507 goto err_context;
8508
8509 /*
8510 * Do not allow to attach to a group in a different
8511 * task or CPU context:
8512 */
8513 if (move_group) {
8514 /*
8515 * Make sure we're both on the same task, or both
8516 * per-cpu events.
8517 */
8518 if (group_leader->ctx->task != ctx->task)
8519 goto err_context;
8520
8521 /*
8522 * Make sure we're both events for the same CPU;
8523 * grouping events for different CPUs is broken; since
8524 * you can never concurrently schedule them anyhow.
8525 */
8526 if (group_leader->cpu != event->cpu)
8527 goto err_context;
8528 } else {
8529 if (group_leader->ctx != ctx)
8530 goto err_context;
8531 }
8532
8533 /*
8534 * Only a group leader can be exclusive or pinned
8535 */
8536 if (attr.exclusive || attr.pinned)
8537 goto err_context;
8538 }
8539
8540 if (output_event) {
8541 err = perf_event_set_output(event, output_event);
8542 if (err)
8543 goto err_context;
8544 }
8545
8546 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
8547 f_flags);
8548 if (IS_ERR(event_file)) {
8549 err = PTR_ERR(event_file);
8550 event_file = NULL;
8551 goto err_context;
8552 }
8553
8554 if (move_group) {
8555 gctx = group_leader->ctx;
8556 mutex_lock_double(&gctx->mutex, &ctx->mutex);
8557 if (gctx->task == TASK_TOMBSTONE) {
8558 err = -ESRCH;
8559 goto err_locked;
8560 }
8561 } else {
8562 mutex_lock(&ctx->mutex);
8563 }
8564
8565 if (ctx->task == TASK_TOMBSTONE) {
8566 err = -ESRCH;
8567 goto err_locked;
8568 }
8569
8570 if (!perf_event_validate_size(event)) {
8571 err = -E2BIG;
8572 goto err_locked;
8573 }
8574
8575 /*
8576 * Must be under the same ctx::mutex as perf_install_in_context(),
8577 * because we need to serialize with concurrent event creation.
8578 */
8579 if (!exclusive_event_installable(event, ctx)) {
8580 /* exclusive and group stuff are assumed mutually exclusive */
8581 WARN_ON_ONCE(move_group);
8582
8583 err = -EBUSY;
8584 goto err_locked;
8585 }
8586
8587 WARN_ON_ONCE(ctx->parent_ctx);
8588
8589 /*
8590 * This is the point on no return; we cannot fail hereafter. This is
8591 * where we start modifying current state.
8592 */
8593
8594 if (move_group) {
8595 /*
8596 * See perf_event_ctx_lock() for comments on the details
8597 * of swizzling perf_event::ctx.
8598 */
8599 perf_remove_from_context(group_leader, 0);
8600
8601 list_for_each_entry(sibling, &group_leader->sibling_list,
8602 group_entry) {
8603 perf_remove_from_context(sibling, 0);
8604 put_ctx(gctx);
8605 }
8606
8607 /*
8608 * Wait for everybody to stop referencing the events through
8609 * the old lists, before installing it on new lists.
8610 */
8611 synchronize_rcu();
8612
8613 /*
8614 * Install the group siblings before the group leader.
8615 *
8616 * Because a group leader will try and install the entire group
8617 * (through the sibling list, which is still in-tact), we can
8618 * end up with siblings installed in the wrong context.
8619 *
8620 * By installing siblings first we NO-OP because they're not
8621 * reachable through the group lists.
8622 */
8623 list_for_each_entry(sibling, &group_leader->sibling_list,
8624 group_entry) {
8625 perf_event__state_init(sibling);
8626 perf_install_in_context(ctx, sibling, sibling->cpu);
8627 get_ctx(ctx);
8628 }
8629
8630 /*
8631 * Removing from the context ends up with disabled
8632 * event. What we want here is event in the initial
8633 * startup state, ready to be add into new context.
8634 */
8635 perf_event__state_init(group_leader);
8636 perf_install_in_context(ctx, group_leader, group_leader->cpu);
8637 get_ctx(ctx);
8638
8639 /*
8640 * Now that all events are installed in @ctx, nothing
8641 * references @gctx anymore, so drop the last reference we have
8642 * on it.
8643 */
8644 put_ctx(gctx);
8645 }
8646
8647 /*
8648 * Precalculate sample_data sizes; do while holding ctx::mutex such
8649 * that we're serialized against further additions and before
8650 * perf_install_in_context() which is the point the event is active and
8651 * can use these values.
8652 */
8653 perf_event__header_size(event);
8654 perf_event__id_header_size(event);
8655
8656 event->owner = current;
8657
8658 perf_install_in_context(ctx, event, event->cpu);
8659 perf_unpin_context(ctx);
8660
8661 if (move_group)
8662 mutex_unlock(&gctx->mutex);
8663 mutex_unlock(&ctx->mutex);
8664
8665 if (task) {
8666 mutex_unlock(&task->signal->cred_guard_mutex);
8667 put_task_struct(task);
8668 }
8669
8670 put_online_cpus();
8671
8672 mutex_lock(¤t->perf_event_mutex);
8673 list_add_tail(&event->owner_entry, ¤t->perf_event_list);
8674 mutex_unlock(¤t->perf_event_mutex);
8675
8676 /*
8677 * Drop the reference on the group_event after placing the
8678 * new event on the sibling_list. This ensures destruction
8679 * of the group leader will find the pointer to itself in
8680 * perf_group_detach().
8681 */
8682 fdput(group);
8683 fd_install(event_fd, event_file);
8684 return event_fd;
8685
8686err_locked:
8687 if (move_group)
8688 mutex_unlock(&gctx->mutex);
8689 mutex_unlock(&ctx->mutex);
8690/* err_file: */
8691 fput(event_file);
8692err_context:
8693 perf_unpin_context(ctx);
8694 put_ctx(ctx);
8695err_alloc:
8696 /*
8697 * If event_file is set, the fput() above will have called ->release()
8698 * and that will take care of freeing the event.
8699 */
8700 if (!event_file)
8701 free_event(event);
8702err_cred:
8703 if (task)
8704 mutex_unlock(&task->signal->cred_guard_mutex);
8705err_cpus:
8706 put_online_cpus();
8707err_task:
8708 if (task)
8709 put_task_struct(task);
8710err_group_fd:
8711 fdput(group);
8712err_fd:
8713 put_unused_fd(event_fd);
8714 return err;
8715}
8716
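/*
 * Illustrative userspace sketch (not kernel code): glibc has no wrapper
 * for this syscall, so callers typically invoke it via syscall(2). Error
 * handling is abbreviated; the event counts instructions in the calling
 * task.
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr = {
 *		.type           = PERF_TYPE_HARDWARE,
 *		.config         = PERF_COUNT_HW_INSTRUCTIONS,
 *		.size           = sizeof(attr),
 *		.disabled       = 1,
 *		.exclude_kernel = 1,
 *	};
 *	long long count;
 *
 *	int fd = syscall(__NR_perf_event_open, &attr,
 *			 0,	// pid: this task
 *			 -1,	// cpu: any
 *			 -1,	// group_fd: no group leader
 *			 PERF_FLAG_FD_CLOEXEC);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... workload ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */
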
/**
 * perf_event_create_kernel_counter - create and install a kernel counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu on which the counter is bound
 * @task: task to profile (NULL for percpu)
 * @overflow_handler: callback invoked when the counter overflows
 * @context: data passed through to the overflow handler
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler,
				 void *context)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	int err;

	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
				 overflow_handler, context, -1);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err;
	}

	/* Mark owner so we could distinguish it from user events. */
	event->owner = TASK_TOMBSTONE;

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(event->pmu, task, event);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_free;
	}

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	if (ctx->task == TASK_TOMBSTONE) {
		err = -ESRCH;
		goto err_unlock;
	}

	if (!exclusive_event_installable(event, ctx)) {
		err = -EBUSY;
		goto err_unlock;
	}

	perf_install_in_context(ctx, event, cpu);
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	return event;

err_unlock:
	mutex_unlock(&ctx->mutex);
	perf_unpin_context(ctx);
	put_ctx(ctx);
err_free:
	free_event(event);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);

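/*
 * Illustrative in-kernel usage sketch (hypothetical caller; error
 * handling abbreviated): count cycles on one CPU without any userspace
 * fd, then tear the event down with perf_event_release_kernel().
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_HARDWARE,
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *		.size   = sizeof(attr),
 *	};
 *	struct perf_event *ev;
 *
 *	ev = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
 *	if (IS_ERR(ev))
 *		return PTR_ERR(ev);
 *	// ... later ...
 *	perf_event_release_kernel(ev);
 *
 * The NULL overflow_handler/context pair is valid for a pure counting
 * event; sampling users pass a callback instead.
 */
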
void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{
	struct perf_event_context *src_ctx;
	struct perf_event_context *dst_ctx;
	struct perf_event *event, *tmp;
	LIST_HEAD(events);

	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;

	/*
	 * See perf_event_ctx_lock() for comments on the details
	 * of swizzling perf_event::ctx.
	 */
	mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
				 event_entry) {
		perf_remove_from_context(event, 0);
		unaccount_event_cpu(event, src_cpu);
		put_ctx(src_ctx);
		list_add(&event->migrate_entry, &events);
	}

	/*
	 * Wait for the events to quiesce before re-instating them.
	 */
	synchronize_rcu();

	/*
	 * Re-instate events in 2 passes.
	 *
	 * Skip over group leaders and only install siblings on this first
	 * pass; siblings will not get enabled without a leader, but a
	 * leader will enable its siblings, even if those are still on the
	 * old context.
	 */
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		if (event->group_leader == event)
			continue;

		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		account_event_cpu(event, dst_cpu);
		perf_install_in_context(dst_ctx, event, dst_cpu);
		get_ctx(dst_ctx);
	}

	/*
	 * Once all the siblings are set up properly, install the group
	 * leaders to make it go.
	 */
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		account_event_cpu(event, dst_cpu);
		perf_install_in_context(dst_ctx, event, dst_cpu);
		get_ctx(dst_ctx);
	}
	mutex_unlock(&dst_ctx->mutex);
	mutex_unlock(&src_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);

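/*
 * Illustrative sketch (hypothetical PMU driver code): an uncore-style
 * PMU whose events must follow a package's designated reader CPU could
 * call this from its hotplug-down path. "my_pmu" and "target" are
 * placeholders, not names used in this file.
 *
 *	if (target >= 0)
 *		perf_pmu_migrate_context(&my_pmu, dying_cpu, target);
 */
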
static void sync_child_event(struct perf_event *child_event,
			     struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);
}

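/*
 * Worked example of the aggregation above: a parent event that counted
 * 1000 events itself and had two exited children contribute 200 and 300
 * reports 1000 + 200 + 300 = 1500, since perf_event_count() sums the
 * event's own count with the accumulated child_count.
 */
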
static void
perf_event_exit_event(struct perf_event *child_event,
		      struct perf_event_context *child_ctx,
		      struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;

	/*
	 * Do not destroy the 'original' grouping; because of the context
	 * switch optimization the original events could've ended up in a
	 * random child task.
	 *
	 * If we were to destroy the original group, all group related
	 * operations would cease to function properly after this random
	 * child dies.
	 *
	 * Do destroy all inherited groups; we don't care about those
	 * and being thorough is better.
	 */
	raw_spin_lock_irq(&child_ctx->lock);
	WARN_ON_ONCE(child_ctx->is_active);

	if (parent_event)
		perf_group_detach(child_event);
	list_del_event(child_event, child_ctx);
	child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
	raw_spin_unlock_irq(&child_ctx->lock);

	/*
	 * Parent events are governed by their file descriptors; retain them.
	 */
	if (!parent_event) {
		perf_event_wakeup(child_event);
		return;
	}
	/*
	 * Child events can be cleaned up.
	 */

	sync_child_event(child_event, child);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Kick perf_poll() for is_event_hup().
	 */
	perf_event_wakeup(parent_event);
	free_event(child_event);
	put_event(parent_event);
}

static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *clone_ctx = NULL;
	struct perf_event *child_event, *next;

	WARN_ON_ONCE(child != current);

	child_ctx = perf_pin_task_context(child, ctxn);
	if (!child_ctx)
		return;

	/*
	 * In order to reduce the amount of trickery in ctx tear-down, we hold
	 * ctx::mutex over the entire thing. This serializes against almost
	 * everything that wants to access the ctx.
	 *
	 * The exception is sys_perf_event_open() /
	 * perf_event_create_kernel_counter() which does find_get_context()
	 * without ctx::mutex (it cannot because of the move_group double mutex
	 * lock thing). See the comments in perf_install_in_context().
	 */
	mutex_lock(&child_ctx->mutex);

	/*
	 * In a single ctx::lock section, de-schedule the events and detach the
	 * context from the task such that we cannot ever get it scheduled back
	 * in.
	 */
	raw_spin_lock_irq(&child_ctx->lock);
	task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx);

	/*
	 * Now that the context is inactive, destroy the task <-> ctx relation
	 * and mark the context dead.
	 */
	RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
	put_ctx(child_ctx); /* cannot be last */
	WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
	put_task_struct(current); /* cannot be last */

	clone_ctx = unclone_ctx(child_ctx);
	raw_spin_unlock_irq(&child_ctx->lock);

	if (clone_ctx)
		put_ctx(clone_ctx);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
		perf_event_exit_event(child_event, child_ctx, child);

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}

/*
 * When a child task exits, feed back event values to parent events.
 *
 * Can be called with cred_guard_mutex held when called from
 * install_exec_creds().
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner; this closes a race against perf_release()
		 * where we need to serialize on the owner->perf_event_mutex.
		 */
		smp_store_release(&event->owner, NULL);
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);

	/*
	 * perf_event_exit_task_context() calls perf_event_task() with the
	 * child's task_ctx, which generates EXIT events for child contexts
	 * and sets child->perf_event_ctxp[] to NULL. At this point we need
	 * to send EXIT events to cpu contexts.
	 */
	perf_event_task(child, NULL, 0);
}

static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	put_event(parent);

	raw_spin_lock_irq(&ctx->lock);
	perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
	free_event(event);
}

/*
 * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task() below, used by fork() in case of failure.
 *
 * Not all locks are strictly required, but take them anyway to be nice and
 * help out with the lockdep assertions.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);

		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}

void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}

struct file *perf_event_get(unsigned int fd)
{
	struct file *file;

	file = fget_raw(fd);
	if (!file)
		return ERR_PTR(-EBADF);

	if (file->f_op != &perf_fops) {
		fput(file);
		return ERR_PTR(-EBADF);
	}

	return file;
}

const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	if (!event)
		return ERR_PTR(-EINVAL);

	return &event->attr;
}

/*
 * Inherit an event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	enum perf_event_active_state parent_state = parent_event->state;
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
					   parent_event->cpu,
					   child,
					   group_leader, parent_event,
					   NULL, NULL, -1);
	if (IS_ERR(child_event))
		return child_event;

	/*
	 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
	 * must be under the same lock in order to serialize against
	 * perf_event_release_kernel(), such that either we must observe
	 * is_orphaned_event() or they will observe us on the child_list.
	 */
	mutex_lock(&parent_event->child_mutex);
	if (is_orphaned_event(parent_event) ||
	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
		mutex_unlock(&parent_event->child_mutex);
		free_event(child_event);
		return NULL;
	}

	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Link this into the parent event's child list
	 */
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}

static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}

static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
static int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);
	if (!parent_ctx)
		return 0;

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_groups list
	 * due to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, holding
		 * parent_ctx->lock keeps it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}

/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret) {
			perf_event_free_task(child);
			return ret;
		}
	}

	return 0;
}

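/*
 * Illustrative call path (abbreviated): fork() reaches the inheritance
 * code above via
 *
 *	copy_process()
 *	  perf_event_init_task(child)
 *	    perf_event_init_context(child, ctxn)	// for each ctxn
 *	      inherit_task_group()
 *	        inherit_group()
 *	          inherit_event()
 */
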
static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
	}
}

static void perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}

#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	list_for_each_entry(event, &ctx->event_list, event_entry)
		__perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
	raw_spin_unlock(&ctx->lock);
}

static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}

static void perf_event_exit_cpu(int cpu)
{
	perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif

static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}

/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};

static int
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
		/*
		 * This must be done before the CPU comes alive, because the
		 * moment we can run tasks we can encounter (software) events.
		 *
		 * Specifically, someone can have inherited events on kthreadd
		 * or a pre-existing worker thread that gets re-bound.
		 */
		perf_event_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
		/*
		 * This must be done before the CPU dies because after that an
		 * active event might want to IPI the CPU and that'll not work
		 * so great for dead CPUs.
		 *
		 * XXX: smp_call_function_single() returns -ENXIO without a
		 * warning, so we could possibly deal with this.
		 *
		 * This is safe against new events arriving because
		 * sys_perf_event_open() serializes against hotplug using
		 * get_online_cpus().
		 */
		perf_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location. IOW, validate that we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	if (pmu_attr->event_str)
		return sprintf(page, "%s\n", pmu_attr->event_str);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_sysfs_show);

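/*
 * Illustrative result (an example, not taken from this file): a PMU
 * driver's registered event string is what this show method prints
 * under sysfs, e.g.
 *
 *	$ cat /sys/bus/event_source/devices/cpu/events/cache-misses
 *	event=0x2e,umask=0x41
 *
 * The exact string is whatever the driver stored in ->event_str.
 */
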
static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);

#ifdef CONFIG_CGROUP_PERF
static struct cgroup_subsys_state *
perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}

static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);

	free_percpu(jc->info);
	kfree(jc);
}

static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;

	rcu_read_lock();
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	rcu_read_unlock();

	return 0;
}

static void perf_cgroup_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *css;

	cgroup_taskset_for_each(task, css, tset)
		task_function_call(task, __perf_cgroup_move, task);
}

struct cgroup_subsys perf_event_cgrp_subsys = {
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.attach		= perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */
1/*
2 * Performance events core code:
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 *
9 * For licensing details see kernel-base/COPYING
10 */
11
12#include <linux/fs.h>
13#include <linux/mm.h>
14#include <linux/cpu.h>
15#include <linux/smp.h>
16#include <linux/idr.h>
17#include <linux/file.h>
18#include <linux/poll.h>
19#include <linux/slab.h>
20#include <linux/hash.h>
21#include <linux/sysfs.h>
22#include <linux/dcache.h>
23#include <linux/percpu.h>
24#include <linux/ptrace.h>
25#include <linux/reboot.h>
26#include <linux/vmstat.h>
27#include <linux/device.h>
28#include <linux/export.h>
29#include <linux/vmalloc.h>
30#include <linux/hardirq.h>
31#include <linux/rculist.h>
32#include <linux/uaccess.h>
33#include <linux/syscalls.h>
34#include <linux/anon_inodes.h>
35#include <linux/kernel_stat.h>
36#include <linux/perf_event.h>
37#include <linux/ftrace_event.h>
38#include <linux/hw_breakpoint.h>
39
40#include "internal.h"
41
42#include <asm/irq_regs.h>
43
44struct remote_function_call {
45 struct task_struct *p;
46 int (*func)(void *info);
47 void *info;
48 int ret;
49};
50
51static void remote_function(void *data)
52{
53 struct remote_function_call *tfc = data;
54 struct task_struct *p = tfc->p;
55
56 if (p) {
57 tfc->ret = -EAGAIN;
58 if (task_cpu(p) != smp_processor_id() || !task_curr(p))
59 return;
60 }
61
62 tfc->ret = tfc->func(tfc->info);
63}
64
65/**
66 * task_function_call - call a function on the cpu on which a task runs
67 * @p: the task to evaluate
68 * @func: the function to be called
69 * @info: the function call argument
70 *
71 * Calls the function @func when the task is currently running. This might
72 * be on the current CPU, which just calls the function directly
73 *
74 * returns: @func return value, or
75 * -ESRCH - when the process isn't running
76 * -EAGAIN - when the process moved away
77 */
78static int
79task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
80{
81 struct remote_function_call data = {
82 .p = p,
83 .func = func,
84 .info = info,
85 .ret = -ESRCH, /* No such (running) process */
86 };
87
88 if (task_curr(p))
89 smp_call_function_single(task_cpu(p), remote_function, &data, 1);
90
91 return data.ret;
92}
93
94/**
95 * cpu_function_call - call a function on the cpu
96 * @func: the function to be called
97 * @info: the function call argument
98 *
99 * Calls the function @func on the remote cpu.
100 *
101 * returns: @func return value or -ENXIO when the cpu is offline
102 */
103static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
104{
105 struct remote_function_call data = {
106 .p = NULL,
107 .func = func,
108 .info = info,
109 .ret = -ENXIO, /* No such CPU */
110 };
111
112 smp_call_function_single(cpu, remote_function, &data, 1);
113
114 return data.ret;
115}
116
117#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
118 PERF_FLAG_FD_OUTPUT |\
119 PERF_FLAG_PID_CGROUP)
120
121/*
122 * branch priv levels that need permission checks
123 */
124#define PERF_SAMPLE_BRANCH_PERM_PLM \
125 (PERF_SAMPLE_BRANCH_KERNEL |\
126 PERF_SAMPLE_BRANCH_HV)
127
128enum event_type_t {
129 EVENT_FLEXIBLE = 0x1,
130 EVENT_PINNED = 0x2,
131 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
132};
133
134/*
135 * perf_sched_events : >0 events exist
136 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
137 */
138struct static_key_deferred perf_sched_events __read_mostly;
139static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
140static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
141
142static atomic_t nr_mmap_events __read_mostly;
143static atomic_t nr_comm_events __read_mostly;
144static atomic_t nr_task_events __read_mostly;
145
146static LIST_HEAD(pmus);
147static DEFINE_MUTEX(pmus_lock);
148static struct srcu_struct pmus_srcu;
149
150/*
151 * perf event paranoia level:
152 * -1 - not paranoid at all
153 * 0 - disallow raw tracepoint access for unpriv
154 * 1 - disallow cpu events for unpriv
155 * 2 - disallow kernel profiling for unpriv
156 */
157int sysctl_perf_event_paranoid __read_mostly = 1;
158
159/* Minimum for 512 kiB + 1 user control page */
160int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
161
162/*
163 * max perf event sample rate
164 */
165#define DEFAULT_MAX_SAMPLE_RATE 100000
166int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
167static int max_samples_per_tick __read_mostly =
168 DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
169
170int perf_proc_update_handler(struct ctl_table *table, int write,
171 void __user *buffer, size_t *lenp,
172 loff_t *ppos)
173{
174 int ret = proc_dointvec(table, write, buffer, lenp, ppos);
175
176 if (ret || !write)
177 return ret;
178
179 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
180
181 return 0;
182}
183
184static atomic64_t perf_event_id;
185
186static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
187 enum event_type_t event_type);
188
189static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
190 enum event_type_t event_type,
191 struct task_struct *task);
192
193static void update_context_time(struct perf_event_context *ctx);
194static u64 perf_event_time(struct perf_event *event);
195
196static void ring_buffer_attach(struct perf_event *event,
197 struct ring_buffer *rb);
198
199void __weak perf_event_print_debug(void) { }
200
201extern __weak const char *perf_pmu_name(void)
202{
203 return "pmu";
204}
205
206static inline u64 perf_clock(void)
207{
208 return local_clock();
209}
210
211static inline struct perf_cpu_context *
212__get_cpu_context(struct perf_event_context *ctx)
213{
214 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
215}
216
217static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
218 struct perf_event_context *ctx)
219{
220 raw_spin_lock(&cpuctx->ctx.lock);
221 if (ctx)
222 raw_spin_lock(&ctx->lock);
223}
224
225static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
226 struct perf_event_context *ctx)
227{
228 if (ctx)
229 raw_spin_unlock(&ctx->lock);
230 raw_spin_unlock(&cpuctx->ctx.lock);
231}
232
233#ifdef CONFIG_CGROUP_PERF
234
235/*
236 * Must ensure cgroup is pinned (css_get) before calling
237 * this function. In other words, we cannot call this function
238 * if there is no cgroup event for the current CPU context.
239 */
240static inline struct perf_cgroup *
241perf_cgroup_from_task(struct task_struct *task)
242{
243 return container_of(task_subsys_state(task, perf_subsys_id),
244 struct perf_cgroup, css);
245}
246
247static inline bool
248perf_cgroup_match(struct perf_event *event)
249{
250 struct perf_event_context *ctx = event->ctx;
251 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
252
253 return !event->cgrp || event->cgrp == cpuctx->cgrp;
254}
255
256static inline bool perf_tryget_cgroup(struct perf_event *event)
257{
258 return css_tryget(&event->cgrp->css);
259}
260
261static inline void perf_put_cgroup(struct perf_event *event)
262{
263 css_put(&event->cgrp->css);
264}
265
266static inline void perf_detach_cgroup(struct perf_event *event)
267{
268 perf_put_cgroup(event);
269 event->cgrp = NULL;
270}
271
272static inline int is_cgroup_event(struct perf_event *event)
273{
274 return event->cgrp != NULL;
275}
276
277static inline u64 perf_cgroup_event_time(struct perf_event *event)
278{
279 struct perf_cgroup_info *t;
280
281 t = per_cpu_ptr(event->cgrp->info, event->cpu);
282 return t->time;
283}
284
285static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
286{
287 struct perf_cgroup_info *info;
288 u64 now;
289
290 now = perf_clock();
291
292 info = this_cpu_ptr(cgrp->info);
293
294 info->time += now - info->timestamp;
295 info->timestamp = now;
296}
297
298static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
299{
300 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
301 if (cgrp_out)
302 __update_cgrp_time(cgrp_out);
303}
304
305static inline void update_cgrp_time_from_event(struct perf_event *event)
306{
307 struct perf_cgroup *cgrp;
308
309 /*
310 * ensure we access cgroup data only when needed and
311 * when we know the cgroup is pinned (css_get)
312 */
313 if (!is_cgroup_event(event))
314 return;
315
316 cgrp = perf_cgroup_from_task(current);
317 /*
318 * Do not update time when cgroup is not active
319 */
320 if (cgrp == event->cgrp)
321 __update_cgrp_time(event->cgrp);
322}
323
324static inline void
325perf_cgroup_set_timestamp(struct task_struct *task,
326 struct perf_event_context *ctx)
327{
328 struct perf_cgroup *cgrp;
329 struct perf_cgroup_info *info;
330
331 /*
332 * ctx->lock held by caller
333 * ensure we do not access cgroup data
334 * unless we have the cgroup pinned (css_get)
335 */
336 if (!task || !ctx->nr_cgroups)
337 return;
338
339 cgrp = perf_cgroup_from_task(task);
340 info = this_cpu_ptr(cgrp->info);
341 info->timestamp = ctx->timestamp;
342}
343
344#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
345#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
346
347/*
348 * reschedule events based on the cgroup constraint of task.
349 *
350 * mode SWOUT : schedule out everything
351 * mode SWIN : schedule in based on cgroup for next
352 */
353void perf_cgroup_switch(struct task_struct *task, int mode)
354{
355 struct perf_cpu_context *cpuctx;
356 struct pmu *pmu;
357 unsigned long flags;
358
359 /*
360 * disable interrupts to avoid geting nr_cgroup
361 * changes via __perf_event_disable(). Also
362 * avoids preemption.
363 */
364 local_irq_save(flags);
365
366 /*
367 * we reschedule only in the presence of cgroup
368 * constrained events.
369 */
370 rcu_read_lock();
371
372 list_for_each_entry_rcu(pmu, &pmus, entry) {
373 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
374
375 /*
376 * perf_cgroup_events says at least one
377 * context on this CPU has cgroup events.
378 *
379 * ctx->nr_cgroups reports the number of cgroup
380 * events for a context.
381 */
382 if (cpuctx->ctx.nr_cgroups > 0) {
383 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
384 perf_pmu_disable(cpuctx->ctx.pmu);
385
386 if (mode & PERF_CGROUP_SWOUT) {
387 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
388 /*
389 * must not be done before ctxswout due
390 * to event_filter_match() in event_sched_out()
391 */
392 cpuctx->cgrp = NULL;
393 }
394
395 if (mode & PERF_CGROUP_SWIN) {
396 WARN_ON_ONCE(cpuctx->cgrp);
397 /* set cgrp before ctxsw in to
398 * allow event_filter_match() to not
399 * have to pass task around
400 */
401 cpuctx->cgrp = perf_cgroup_from_task(task);
402 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
403 }
404 perf_pmu_enable(cpuctx->ctx.pmu);
405 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
406 }
407 }
408
409 rcu_read_unlock();
410
411 local_irq_restore(flags);
412}
413
414static inline void perf_cgroup_sched_out(struct task_struct *task,
415 struct task_struct *next)
416{
417 struct perf_cgroup *cgrp1;
418 struct perf_cgroup *cgrp2 = NULL;
419
420 /*
421 * we come here when we know perf_cgroup_events > 0
422 */
423 cgrp1 = perf_cgroup_from_task(task);
424
425 /*
426 * next is NULL when called from perf_event_enable_on_exec()
427 * that will systematically cause a cgroup_switch()
428 */
429 if (next)
430 cgrp2 = perf_cgroup_from_task(next);
431
432 /*
433 * only schedule out current cgroup events if we know
434 * that we are switching to a different cgroup. Otherwise,
435 * do no touch the cgroup events.
436 */
437 if (cgrp1 != cgrp2)
438 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
439}
440
441static inline void perf_cgroup_sched_in(struct task_struct *prev,
442 struct task_struct *task)
443{
444 struct perf_cgroup *cgrp1;
445 struct perf_cgroup *cgrp2 = NULL;
446
447 /*
448 * we come here when we know perf_cgroup_events > 0
449 */
450 cgrp1 = perf_cgroup_from_task(task);
451
452 /* prev can never be NULL */
453 cgrp2 = perf_cgroup_from_task(prev);
454
455 /*
456 * only need to schedule in cgroup events if we are changing
457 * cgroup during ctxsw. Cgroup events were not scheduled
458 * out of ctxsw out if that was not the case.
459 */
460 if (cgrp1 != cgrp2)
461 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
462}
463
464static inline int perf_cgroup_connect(int fd, struct perf_event *event,
465 struct perf_event_attr *attr,
466 struct perf_event *group_leader)
467{
468 struct perf_cgroup *cgrp;
469 struct cgroup_subsys_state *css;
470 struct file *file;
471 int ret = 0, fput_needed;
472
473 file = fget_light(fd, &fput_needed);
474 if (!file)
475 return -EBADF;
476
477 css = cgroup_css_from_dir(file, perf_subsys_id);
478 if (IS_ERR(css)) {
479 ret = PTR_ERR(css);
480 goto out;
481 }
482
483 cgrp = container_of(css, struct perf_cgroup, css);
484 event->cgrp = cgrp;
485
486 /* must be done before we fput() the file */
487 if (!perf_tryget_cgroup(event)) {
488 event->cgrp = NULL;
489 ret = -ENOENT;
490 goto out;
491 }
492
493 /*
494 * all events in a group must monitor
495 * the same cgroup because a task belongs
496 * to only one perf cgroup at a time
497 */
498 if (group_leader && group_leader->cgrp != cgrp) {
499 perf_detach_cgroup(event);
500 ret = -EINVAL;
501 }
502out:
503 fput_light(file, fput_needed);
504 return ret;
505}
506
507static inline void
508perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
509{
510 struct perf_cgroup_info *t;
511 t = per_cpu_ptr(event->cgrp->info, event->cpu);
512 event->shadow_ctx_time = now - t->timestamp;
513}
514
515static inline void
516perf_cgroup_defer_enabled(struct perf_event *event)
517{
518 /*
519 * when the current task's perf cgroup does not match
520 * the event's, we need to remember to call the
521 * perf_mark_enable() function the first time a task with
522 * a matching perf cgroup is scheduled in.
523 */
524 if (is_cgroup_event(event) && !perf_cgroup_match(event))
525 event->cgrp_defer_enabled = 1;
526}
527
528static inline void
529perf_cgroup_mark_enabled(struct perf_event *event,
530 struct perf_event_context *ctx)
531{
532 struct perf_event *sub;
533 u64 tstamp = perf_event_time(event);
534
535 if (!event->cgrp_defer_enabled)
536 return;
537
538 event->cgrp_defer_enabled = 0;
539
540 event->tstamp_enabled = tstamp - event->total_time_enabled;
541 list_for_each_entry(sub, &event->sibling_list, group_entry) {
542 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
543 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
544 sub->cgrp_defer_enabled = 0;
545 }
546 }
547}
548#else /* !CONFIG_CGROUP_PERF */
549
550static inline bool
551perf_cgroup_match(struct perf_event *event)
552{
553 return true;
554}
555
556static inline void perf_detach_cgroup(struct perf_event *event)
557{}
558
559static inline int is_cgroup_event(struct perf_event *event)
560{
561 return 0;
562}
563
564static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
565{
566 return 0;
567}
568
569static inline void update_cgrp_time_from_event(struct perf_event *event)
570{
571}
572
573static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
574{
575}
576
577static inline void perf_cgroup_sched_out(struct task_struct *task,
578 struct task_struct *next)
579{
580}
581
582static inline void perf_cgroup_sched_in(struct task_struct *prev,
583 struct task_struct *task)
584{
585}
586
587static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
588 struct perf_event_attr *attr,
589 struct perf_event *group_leader)
590{
591 return -EINVAL;
592}
593
594static inline void
595perf_cgroup_set_timestamp(struct task_struct *task,
596 struct perf_event_context *ctx)
597{
598}
599
600void
601perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
602{
603}
604
605static inline void
606perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
607{
608}
609
610static inline u64 perf_cgroup_event_time(struct perf_event *event)
611{
612 return 0;
613}
614
615static inline void
616perf_cgroup_defer_enabled(struct perf_event *event)
617{
618}
619
620static inline void
621perf_cgroup_mark_enabled(struct perf_event *event,
622 struct perf_event_context *ctx)
623{
624}
625#endif
626
627void perf_pmu_disable(struct pmu *pmu)
628{
629 int *count = this_cpu_ptr(pmu->pmu_disable_count);
630 if (!(*count)++)
631 pmu->pmu_disable(pmu);
632}
633
634void perf_pmu_enable(struct pmu *pmu)
635{
636 int *count = this_cpu_ptr(pmu->pmu_disable_count);
637 if (!--(*count))
638 pmu->pmu_enable(pmu);
639}
640
641static DEFINE_PER_CPU(struct list_head, rotation_list);
642
643/*
644 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
645 * because they're strictly cpu affine and rotate_start is called with IRQs
646 * disabled, while rotate_context is called from IRQ context.
647 */
648static void perf_pmu_rotate_start(struct pmu *pmu)
649{
650 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
651 struct list_head *head = &__get_cpu_var(rotation_list);
652
653 WARN_ON(!irqs_disabled());
654
655 if (list_empty(&cpuctx->rotation_list))
656 list_add(&cpuctx->rotation_list, head);
657}
658
659static void get_ctx(struct perf_event_context *ctx)
660{
661 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
662}
663
664static void put_ctx(struct perf_event_context *ctx)
665{
666 if (atomic_dec_and_test(&ctx->refcount)) {
667 if (ctx->parent_ctx)
668 put_ctx(ctx->parent_ctx);
669 if (ctx->task)
670 put_task_struct(ctx->task);
671 kfree_rcu(ctx, rcu_head);
672 }
673}
674
675static void unclone_ctx(struct perf_event_context *ctx)
676{
677 if (ctx->parent_ctx) {
678 put_ctx(ctx->parent_ctx);
679 ctx->parent_ctx = NULL;
680 }
681}
682
683static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
684{
685 /*
686 * only top level events have the pid namespace they were created in
687 */
688 if (event->parent)
689 event = event->parent;
690
691 return task_tgid_nr_ns(p, event->ns);
692}
693
694static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
695{
696 /*
697 * only top level events have the pid namespace they were created in
698 */
699 if (event->parent)
700 event = event->parent;
701
702 return task_pid_nr_ns(p, event->ns);
703}
704
705/*
706 * If we inherit events we want to return the parent event id
707 * to userspace.
708 */
709static u64 primary_event_id(struct perf_event *event)
710{
711 u64 id = event->id;
712
713 if (event->parent)
714 id = event->parent->id;
715
716 return id;
717}
718
719/*
720 * Get the perf_event_context for a task and lock it.
721 * This has to cope with with the fact that until it is locked,
722 * the context could get moved to another task.
723 */
724static struct perf_event_context *
725perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
726{
727 struct perf_event_context *ctx;
728
729 rcu_read_lock();
730retry:
731 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
732 if (ctx) {
733 /*
734 * If this context is a clone of another, it might
735 * get swapped for another underneath us by
736 * perf_event_task_sched_out, though the
737 * rcu_read_lock() protects us from any context
738 * getting freed. Lock the context and check if it
739 * got swapped before we could get the lock, and retry
740 * if so. If we locked the right context, then it
741 * can't get swapped on us any more.
742 */
743 raw_spin_lock_irqsave(&ctx->lock, *flags);
744 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
745 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
746 goto retry;
747 }
748
749 if (!atomic_inc_not_zero(&ctx->refcount)) {
750 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
751 ctx = NULL;
752 }
753 }
754 rcu_read_unlock();
755 return ctx;
756}
757
758/*
759 * Get the context for a task and increment its pin_count so it
760 * can't get swapped to another task. This also increments its
761 * reference count so that the context can't get freed.
762 */
763static struct perf_event_context *
764perf_pin_task_context(struct task_struct *task, int ctxn)
765{
766 struct perf_event_context *ctx;
767 unsigned long flags;
768
769 ctx = perf_lock_task_context(task, ctxn, &flags);
770 if (ctx) {
771 ++ctx->pin_count;
772 raw_spin_unlock_irqrestore(&ctx->lock, flags);
773 }
774 return ctx;
775}
776
777static void perf_unpin_context(struct perf_event_context *ctx)
778{
779 unsigned long flags;
780
781 raw_spin_lock_irqsave(&ctx->lock, flags);
782 --ctx->pin_count;
783 raw_spin_unlock_irqrestore(&ctx->lock, flags);
784}
785
786/*
787 * Update the record of the current time in a context.
788 */
789static void update_context_time(struct perf_event_context *ctx)
790{
791 u64 now = perf_clock();
792
793 ctx->time += now - ctx->timestamp;
794 ctx->timestamp = now;
795}
796
797static u64 perf_event_time(struct perf_event *event)
798{
799 struct perf_event_context *ctx = event->ctx;
800
801 if (is_cgroup_event(event))
802 return perf_cgroup_event_time(event);
803
804 return ctx ? ctx->time : 0;
805}
806
807/*
808 * Update the total_time_enabled and total_time_running fields for a event.
809 * The caller of this function needs to hold the ctx->lock.
810 */
811static void update_event_times(struct perf_event *event)
812{
813 struct perf_event_context *ctx = event->ctx;
814 u64 run_end;
815
816 if (event->state < PERF_EVENT_STATE_INACTIVE ||
817 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
818 return;
819 /*
820 * in cgroup mode, time_enabled represents
821 * the time the event was enabled AND active
822 * tasks were in the monitored cgroup. This is
823 * independent of the activity of the context as
824 * there may be a mix of cgroup and non-cgroup events.
825 *
826 * That is why we treat cgroup events differently
827 * here.
828 */
829 if (is_cgroup_event(event))
830 run_end = perf_cgroup_event_time(event);
831 else if (ctx->is_active)
832 run_end = ctx->time;
833 else
834 run_end = event->tstamp_stopped;
835
836 event->total_time_enabled = run_end - event->tstamp_enabled;
837
838 if (event->state == PERF_EVENT_STATE_INACTIVE)
839 run_end = event->tstamp_stopped;
840 else
841 run_end = perf_event_time(event);
842
843 event->total_time_running = run_end - event->tstamp_running;
844
845}
846
847/*
848 * Update total_time_enabled and total_time_running for all events in a group.
849 */
850static void update_group_times(struct perf_event *leader)
851{
852 struct perf_event *event;
853
854 update_event_times(leader);
855 list_for_each_entry(event, &leader->sibling_list, group_entry)
856 update_event_times(event);
857}
858
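/*
 * Pinned events go on the pinned list; everything else goes on the
 * flexible list, which is subject to round-robin rotation.
 */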
859static struct list_head *
860ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
861{
862 if (event->attr.pinned)
863 return &ctx->pinned_groups;
864 else
865 return &ctx->flexible_groups;
866}
867
868/*
869 * Add an event to the lists for its context.
870 * Must be called with ctx->mutex and ctx->lock held.
871 */
872static void
873list_add_event(struct perf_event *event, struct perf_event_context *ctx)
874{
875 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
876 event->attach_state |= PERF_ATTACH_CONTEXT;
877
878 /*
879 * If we're a stand-alone event or group leader, we go on the context
880 * list; group events are kept attached to the group so that
881 * perf_group_detach can, at all times, locate all siblings.
882 */
883 if (event->group_leader == event) {
884 struct list_head *list;
885
886 if (is_software_event(event))
887 event->group_flags |= PERF_GROUP_SOFTWARE;
888
889 list = ctx_group_list(event, ctx);
890 list_add_tail(&event->group_entry, list);
891 }
892
893 if (is_cgroup_event(event))
894 ctx->nr_cgroups++;
895
896 if (has_branch_stack(event))
897 ctx->nr_branch_stack++;
898
899 list_add_rcu(&event->event_entry, &ctx->event_list);
900 if (!ctx->nr_events)
901 perf_pmu_rotate_start(ctx->pmu);
902 ctx->nr_events++;
903 if (event->attr.inherit_stat)
904 ctx->nr_stat++;
905}
906
907/*
908 * Called at perf_event creation and when events are attached/detached from a
909 * group.
910 */
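/*
 * A read() returns one u64 per value (plus an optional ID per value),
 * preceded by the optional time_enabled/time_running fields and, for
 * PERF_FORMAT_GROUP, a leading count of the records that follow.
 */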
911static void perf_event__read_size(struct perf_event *event)
912{
913 int entry = sizeof(u64); /* value */
914 int size = 0;
915 int nr = 1;
916
917 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
918 size += sizeof(u64);
919
920 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
921 size += sizeof(u64);
922
923 if (event->attr.read_format & PERF_FORMAT_ID)
924 entry += sizeof(u64);
925
926 if (event->attr.read_format & PERF_FORMAT_GROUP) {
927 nr += event->group_leader->nr_siblings;
928 size += sizeof(u64);
929 }
930
931 size += entry * nr;
932 event->read_size = size;
933}
934
935static void perf_event__header_size(struct perf_event *event)
936{
937 struct perf_sample_data *data;
938 u64 sample_type = event->attr.sample_type;
939 u16 size = 0;
940
941 perf_event__read_size(event);
942
943 if (sample_type & PERF_SAMPLE_IP)
944 size += sizeof(data->ip);
945
946 if (sample_type & PERF_SAMPLE_ADDR)
947 size += sizeof(data->addr);
948
949 if (sample_type & PERF_SAMPLE_PERIOD)
950 size += sizeof(data->period);
951
952 if (sample_type & PERF_SAMPLE_READ)
953 size += event->read_size;
954
955 event->header_size = size;
956}
957
958static void perf_event__id_header_size(struct perf_event *event)
959{
960 struct perf_sample_data *data;
961 u64 sample_type = event->attr.sample_type;
962 u16 size = 0;
963
964 if (sample_type & PERF_SAMPLE_TID)
965 size += sizeof(data->tid_entry);
966
967 if (sample_type & PERF_SAMPLE_TIME)
968 size += sizeof(data->time);
969
970 if (sample_type & PERF_SAMPLE_ID)
971 size += sizeof(data->id);
972
973 if (sample_type & PERF_SAMPLE_STREAM_ID)
974 size += sizeof(data->stream_id);
975
976 if (sample_type & PERF_SAMPLE_CPU)
977 size += sizeof(data->cpu_entry);
978
979 event->id_header_size = size;
980}
981
982static void perf_group_attach(struct perf_event *event)
983{
984 struct perf_event *group_leader = event->group_leader, *pos;
985
986 /*
987 * We can have double attach due to group movement in perf_event_open.
988 */
989 if (event->attach_state & PERF_ATTACH_GROUP)
990 return;
991
992 event->attach_state |= PERF_ATTACH_GROUP;
993
994 if (group_leader == event)
995 return;
996
997 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
998 !is_software_event(event))
999 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1000
1001 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1002 group_leader->nr_siblings++;
1003
1004 perf_event__header_size(group_leader);
1005
1006 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1007 perf_event__header_size(pos);
1008}
1009
1010/*
1011 * Remove an event from the lists for its context.
1012 * Must be called with ctx->mutex and ctx->lock held.
1013 */
1014static void
1015list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1016{
1017 struct perf_cpu_context *cpuctx;
1018 /*
1019 * We can have double detach due to exit/hot-unplug + close.
1020 */
1021 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1022 return;
1023
1024 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1025
1026 if (is_cgroup_event(event)) {
1027 ctx->nr_cgroups--;
1028 cpuctx = __get_cpu_context(ctx);
1029 /*
1030 * If there are no more cgroup events,
1031 * then clear cgrp to avoid a stale pointer
1032 * in update_cgrp_time_from_cpuctx().
1033 */
1034 if (!ctx->nr_cgroups)
1035 cpuctx->cgrp = NULL;
1036 }
1037
1038 if (has_branch_stack(event))
1039 ctx->nr_branch_stack--;
1040
1041 ctx->nr_events--;
1042 if (event->attr.inherit_stat)
1043 ctx->nr_stat--;
1044
1045 list_del_rcu(&event->event_entry);
1046
1047 if (event->group_leader == event)
1048 list_del_init(&event->group_entry);
1049
1050 update_group_times(event);
1051
1052 /*
1053 * If event was in error state, then keep it
1054 * that way, otherwise bogus counts will be
1055 * returned on read(). The only way to get out
1056 * of error state is by explicit re-enabling
1057 * of the event
1058 */
1059 if (event->state > PERF_EVENT_STATE_OFF)
1060 event->state = PERF_EVENT_STATE_OFF;
1061}
1062
1063static void perf_group_detach(struct perf_event *event)
1064{
1065 struct perf_event *sibling, *tmp;
1066 struct list_head *list = NULL;
1067
1068 /*
1069 * We can have double detach due to exit/hot-unplug + close.
1070 */
1071 if (!(event->attach_state & PERF_ATTACH_GROUP))
1072 return;
1073
1074 event->attach_state &= ~PERF_ATTACH_GROUP;
1075
1076 /*
1077 * If this is a sibling, remove it from its group.
1078 */
1079 if (event->group_leader != event) {
1080 list_del_init(&event->group_entry);
1081 event->group_leader->nr_siblings--;
1082 goto out;
1083 }
1084
1085 if (!list_empty(&event->group_entry))
1086 list = &event->group_entry;
1087
1088 /*
1089 * If this was a group event with sibling events then
1090 * upgrade the siblings to singleton events by adding them
1091 * to whatever list we are on.
1092 */
1093 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1094 if (list)
1095 list_move_tail(&sibling->group_entry, list);
1096 sibling->group_leader = sibling;
1097
1098 /* Inherit group flags from the previous leader */
1099 sibling->group_flags = event->group_flags;
1100 }
1101
1102out:
1103 perf_event__header_size(event->group_leader);
1104
1105 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1106 perf_event__header_size(tmp);
1107}
1108
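/*
 * An event is eligible to run here if it is bound to this CPU (or to
 * no CPU in particular) and its cgroup constraint matches.
 */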
1109static inline int
1110event_filter_match(struct perf_event *event)
1111{
1112 return (event->cpu == -1 || event->cpu == smp_processor_id())
1113 && perf_cgroup_match(event);
1114}
1115
1116static void
1117event_sched_out(struct perf_event *event,
1118 struct perf_cpu_context *cpuctx,
1119 struct perf_event_context *ctx)
1120{
1121 u64 tstamp = perf_event_time(event);
1122 u64 delta;
1123 /*
1124 * An event which could not be activated because of
1125 * filter mismatch still needs to have its timings
1126 * maintained, otherwise bogus information is returned
1127 * via read() for time_enabled, time_running:
1128 */
1129 if (event->state == PERF_EVENT_STATE_INACTIVE
1130 && !event_filter_match(event)) {
1131 delta = tstamp - event->tstamp_stopped;
1132 event->tstamp_running += delta;
1133 event->tstamp_stopped = tstamp;
1134 }
1135
1136 if (event->state != PERF_EVENT_STATE_ACTIVE)
1137 return;
1138
1139 event->state = PERF_EVENT_STATE_INACTIVE;
1140 if (event->pending_disable) {
1141 event->pending_disable = 0;
1142 event->state = PERF_EVENT_STATE_OFF;
1143 }
1144 event->tstamp_stopped = tstamp;
1145 event->pmu->del(event, 0);
1146 event->oncpu = -1;
1147
1148 if (!is_software_event(event))
1149 cpuctx->active_oncpu--;
1150 ctx->nr_active--;
1151 if (event->attr.freq && event->attr.sample_freq)
1152 ctx->nr_freq--;
1153 if (event->attr.exclusive || !cpuctx->active_oncpu)
1154 cpuctx->exclusive = 0;
1155}
1156
1157static void
1158group_sched_out(struct perf_event *group_event,
1159 struct perf_cpu_context *cpuctx,
1160 struct perf_event_context *ctx)
1161{
1162 struct perf_event *event;
1163 int state = group_event->state;
1164
1165 event_sched_out(group_event, cpuctx, ctx);
1166
1167 /*
1168 * Schedule out siblings (if any):
1169 */
1170 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1171 event_sched_out(event, cpuctx, ctx);
1172
1173 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1174 cpuctx->exclusive = 0;
1175}
1176
1177/*
1178 * Cross CPU call to remove a performance event
1179 *
1180 * We disable the event on the hardware level first. After that we
1181 * remove it from the context list.
1182 */
1183static int __perf_remove_from_context(void *info)
1184{
1185 struct perf_event *event = info;
1186 struct perf_event_context *ctx = event->ctx;
1187 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1188
1189 raw_spin_lock(&ctx->lock);
1190 event_sched_out(event, cpuctx, ctx);
1191 list_del_event(event, ctx);
1192 if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1193 ctx->is_active = 0;
1194 cpuctx->task_ctx = NULL;
1195 }
1196 raw_spin_unlock(&ctx->lock);
1197
1198 return 0;
1199}
1200
1201
1202/*
1203 * Remove the event from a task's (or a CPU's) list of events.
1204 *
1205 * CPU events are removed with an smp call. For task events we only
1206 * call when the task is on a CPU.
1207 *
1208 * If event->ctx is a cloned context, callers must make sure that
1209 * every task struct that event->ctx->task could possibly point to
1210 * remains valid. This is OK when called from perf_release since
1211 * that only calls us on the top-level context, which can't be a clone.
1212 * When called from perf_event_exit_task, it's OK because the
1213 * context has been detached from its task.
1214 */
1215static void perf_remove_from_context(struct perf_event *event)
1216{
1217 struct perf_event_context *ctx = event->ctx;
1218 struct task_struct *task = ctx->task;
1219
1220 lockdep_assert_held(&ctx->mutex);
1221
1222 if (!task) {
1223 /*
1224 * Per cpu events are removed via an smp call and
1225 * the removal is always successful.
1226 */
1227 cpu_function_call(event->cpu, __perf_remove_from_context, event);
1228 return;
1229 }
1230
1231retry:
1232 if (!task_function_call(task, __perf_remove_from_context, event))
1233 return;
1234
1235 raw_spin_lock_irq(&ctx->lock);
1236 /*
1237 * If we failed to find a running task, but find the context active now
1238 * that we've acquired the ctx->lock, retry.
1239 */
1240 if (ctx->is_active) {
1241 raw_spin_unlock_irq(&ctx->lock);
1242 goto retry;
1243 }
1244
1245 /*
1246 * Since the task isn't running, it's safe to remove the event; our
1247 * holding of ctx->lock ensures the task won't get scheduled in.
1248 */
1249 list_del_event(event, ctx);
1250 raw_spin_unlock_irq(&ctx->lock);
1251}
1252
1253/*
1254 * Cross CPU call to disable a performance event
1255 */
1256static int __perf_event_disable(void *info)
1257{
1258 struct perf_event *event = info;
1259 struct perf_event_context *ctx = event->ctx;
1260 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1261
1262 /*
1263 * If this is a per-task event, need to check whether this
1264 * event's task is the current task on this cpu.
1265 *
1266 * Can trigger due to concurrent perf_event_context_sched_out()
1267 * flipping contexts around.
1268 */
1269 if (ctx->task && cpuctx->task_ctx != ctx)
1270 return -EINVAL;
1271
1272 raw_spin_lock(&ctx->lock);
1273
1274 /*
1275 * If the event is on, turn it off.
1276 * If it is in error state, leave it in error state.
1277 */
1278 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
1279 update_context_time(ctx);
1280 update_cgrp_time_from_event(event);
1281 update_group_times(event);
1282 if (event == event->group_leader)
1283 group_sched_out(event, cpuctx, ctx);
1284 else
1285 event_sched_out(event, cpuctx, ctx);
1286 event->state = PERF_EVENT_STATE_OFF;
1287 }
1288
1289 raw_spin_unlock(&ctx->lock);
1290
1291 return 0;
1292}
1293
1294/*
1295 * Disable an event.
1296 *
1297 * If event->ctx is a cloned context, callers must make sure that
1298 * every task struct that event->ctx->task could possibly point to
1299 * remains valid. This condition is satisfied when called through
1300 * perf_event_for_each_child or perf_event_for_each because they
1301 * hold the top-level event's child_mutex, so any descendant that
1302 * goes to exit will block in sync_child_event.
1303 * When called from perf_pending_event it's OK because event->ctx
1304 * is the current context on this CPU and preemption is disabled,
1305 * hence we can't get into perf_event_task_sched_out for this context.
1306 */
1307void perf_event_disable(struct perf_event *event)
1308{
1309 struct perf_event_context *ctx = event->ctx;
1310 struct task_struct *task = ctx->task;
1311
1312 if (!task) {
1313 /*
1314 * Disable the event on the cpu that it's on
1315 */
1316 cpu_function_call(event->cpu, __perf_event_disable, event);
1317 return;
1318 }
1319
1320retry:
1321 if (!task_function_call(task, __perf_event_disable, event))
1322 return;
1323
1324 raw_spin_lock_irq(&ctx->lock);
1325 /*
1326 * If the event is still active, we need to retry the cross-call.
1327 */
1328 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1329 raw_spin_unlock_irq(&ctx->lock);
1330 /*
1331 * Reload the task pointer, it might have been changed by
1332 * a concurrent perf_event_context_sched_out().
1333 */
1334 task = ctx->task;
1335 goto retry;
1336 }
1337
1338 /*
1339 * Since we have the lock this context can't be scheduled
1340 * in, so we can change the state safely.
1341 */
1342 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1343 update_group_times(event);
1344 event->state = PERF_EVENT_STATE_OFF;
1345 }
1346 raw_spin_unlock_irq(&ctx->lock);
1347}
1348EXPORT_SYMBOL_GPL(perf_event_disable);
1349
1350static void perf_set_shadow_time(struct perf_event *event,
1351 struct perf_event_context *ctx,
1352 u64 tstamp)
1353{
1354 /*
1355 * use the correct time source for the time snapshot
1356 *
1357 * We could get by without this by leveraging the
1358 * fact that to get to this function, the caller
1359 * has most likely already called update_context_time()
1360 * and update_cgrp_time_xx() and thus both timestamps
1361 * are identical (or very close). Given that tstamp is
1362 * already adjusted for cgroup, we could say that:
1363 * tstamp - ctx->timestamp
1364 * is equivalent to
1365 * tstamp - cgrp->timestamp.
1366 *
1367 * Then, in perf_output_read(), the calculation would
1368 * work with no changes because:
1369 * - event is guaranteed scheduled in
1370 * - no scheduled out in between
1371 * - thus the timestamp would be the same
1372 *
1373 * But this is a bit hairy.
1374 *
1375 * So instead, we have an explicit cgroup call to remain
1376 * within the same time source all along. We believe it
1377 * is cleaner and simpler to understand.
1378 */
1379 if (is_cgroup_event(event))
1380 perf_cgroup_set_shadow_time(event, tstamp);
1381 else
1382 event->shadow_ctx_time = tstamp - ctx->timestamp;
1383}
1384
1385#define MAX_INTERRUPTS (~0ULL)
1386
1387static void perf_log_throttle(struct perf_event *event, int enable);
1388
1389static int
1390event_sched_in(struct perf_event *event,
1391 struct perf_cpu_context *cpuctx,
1392 struct perf_event_context *ctx)
1393{
1394 u64 tstamp = perf_event_time(event);
1395
1396 if (event->state <= PERF_EVENT_STATE_OFF)
1397 return 0;
1398
1399 event->state = PERF_EVENT_STATE_ACTIVE;
1400 event->oncpu = smp_processor_id();
1401
1402 /*
1403 * Unthrottle events: since we just scheduled in, we might have missed
1404 * several ticks already, and for a heavily scheduling task there is
1405 * little guarantee it'll get a tick in a timely manner.
1406 */
1407 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1408 perf_log_throttle(event, 1);
1409 event->hw.interrupts = 0;
1410 }
1411
1412 /*
1413 * The new state must be visible before we turn it on in the hardware:
1414 */
1415 smp_wmb();
1416
1417 if (event->pmu->add(event, PERF_EF_START)) {
1418 event->state = PERF_EVENT_STATE_INACTIVE;
1419 event->oncpu = -1;
1420 return -EAGAIN;
1421 }
1422
1423 event->tstamp_running += tstamp - event->tstamp_stopped;
1424
1425 perf_set_shadow_time(event, ctx, tstamp);
1426
1427 if (!is_software_event(event))
1428 cpuctx->active_oncpu++;
1429 ctx->nr_active++;
1430 if (event->attr.freq && event->attr.sample_freq)
1431 ctx->nr_freq++;
1432
1433 if (event->attr.exclusive)
1434 cpuctx->exclusive = 1;
1435
1436 return 0;
1437}
1438
1439static int
1440group_sched_in(struct perf_event *group_event,
1441 struct perf_cpu_context *cpuctx,
1442 struct perf_event_context *ctx)
1443{
1444 struct perf_event *event, *partial_group = NULL;
1445 struct pmu *pmu = group_event->pmu;
1446 u64 now = ctx->time;
1447 bool simulate = false;
1448
1449 if (group_event->state == PERF_EVENT_STATE_OFF)
1450 return 0;
1451
1452 pmu->start_txn(pmu);
1453
1454 if (event_sched_in(group_event, cpuctx, ctx)) {
1455 pmu->cancel_txn(pmu);
1456 return -EAGAIN;
1457 }
1458
1459 /*
1460 * Schedule in siblings as one group (if any):
1461 */
1462 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1463 if (event_sched_in(event, cpuctx, ctx)) {
1464 partial_group = event;
1465 goto group_error;
1466 }
1467 }
1468
1469 if (!pmu->commit_txn(pmu))
1470 return 0;
1471
1472group_error:
1473 /*
1474 * Groups can be scheduled in as one unit only, so undo any
1475 * partial group before returning:
1476 * The events up to the failed event are scheduled out normally,
1477 * tstamp_stopped will be updated.
1478 *
1479 * The failed events and the remaining siblings need to have
1480 * their timings updated as if they had gone through event_sched_in()
1481 * and event_sched_out(). This is required to get consistent timings
1482 * across the group. This also takes care of the case where the group
1483 * could never be scheduled by ensuring tstamp_stopped is set to mark
1484 * the time the event was actually stopped, such that time delta
1485 * calculation in update_event_times() is correct.
1486 */
1487 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1488 if (event == partial_group)
1489 simulate = true;
1490
1491 if (simulate) {
1492 event->tstamp_running += now - event->tstamp_stopped;
1493 event->tstamp_stopped = now;
1494 } else {
1495 event_sched_out(event, cpuctx, ctx);
1496 }
1497 }
1498 event_sched_out(group_event, cpuctx, ctx);
1499
1500 pmu->cancel_txn(pmu);
1501
1502 return -EAGAIN;
1503}
1504
1505/*
1506 * Work out whether we can put this event group on the CPU now.
1507 */
1508static int group_can_go_on(struct perf_event *event,
1509 struct perf_cpu_context *cpuctx,
1510 int can_add_hw)
1511{
1512 /*
1513 * Groups consisting entirely of software events can always go on.
1514 */
1515 if (event->group_flags & PERF_GROUP_SOFTWARE)
1516 return 1;
1517 /*
1518 * If an exclusive group is already on, no other hardware
1519 * events can go on.
1520 */
1521 if (cpuctx->exclusive)
1522 return 0;
1523 /*
1524 * If this group is exclusive and there are already
1525 * events on the CPU, it can't go on.
1526 */
1527 if (event->attr.exclusive && cpuctx->active_oncpu)
1528 return 0;
1529 /*
1530 * Otherwise, try to add it if all previous groups were able
1531 * to go on.
1532 */
1533 return can_add_hw;
1534}
1535
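/*
 * Attach the event to its context's lists and stamp its
 * enabled/running/stopped times with the current event time.
 */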
1536static void add_event_to_ctx(struct perf_event *event,
1537 struct perf_event_context *ctx)
1538{
1539 u64 tstamp = perf_event_time(event);
1540
1541 list_add_event(event, ctx);
1542 perf_group_attach(event);
1543 event->tstamp_enabled = tstamp;
1544 event->tstamp_running = tstamp;
1545 event->tstamp_stopped = tstamp;
1546}
1547
1548static void task_ctx_sched_out(struct perf_event_context *ctx);
1549static void
1550ctx_sched_in(struct perf_event_context *ctx,
1551 struct perf_cpu_context *cpuctx,
1552 enum event_type_t event_type,
1553 struct task_struct *task);
1554
1555static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1556 struct perf_event_context *ctx,
1557 struct task_struct *task)
1558{
1559 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1560 if (ctx)
1561 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1562 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1563 if (ctx)
1564 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1565}
1566
1567/*
1568 * Cross CPU call to install and enable a performance event
1569 *
1570 * Must be called with ctx->mutex held
1571 */
1572static int __perf_install_in_context(void *info)
1573{
1574 struct perf_event *event = info;
1575 struct perf_event_context *ctx = event->ctx;
1576 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1577 struct perf_event_context *task_ctx = cpuctx->task_ctx;
1578 struct task_struct *task = current;
1579
1580 perf_ctx_lock(cpuctx, task_ctx);
1581 perf_pmu_disable(cpuctx->ctx.pmu);
1582
1583 /*
1584 * If there was an active task_ctx, schedule it out.
1585 */
1586 if (task_ctx)
1587 task_ctx_sched_out(task_ctx);
1588
1589 /*
1590 * If the context we're installing events in is not the
1591 * active task_ctx, flip them.
1592 */
1593 if (ctx->task && task_ctx != ctx) {
1594 if (task_ctx)
1595 raw_spin_unlock(&task_ctx->lock);
1596 raw_spin_lock(&ctx->lock);
1597 task_ctx = ctx;
1598 }
1599
1600 if (task_ctx) {
1601 cpuctx->task_ctx = task_ctx;
1602 task = task_ctx->task;
1603 }
1604
1605 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
1606
1607 update_context_time(ctx);
1608 /*
1609 * update cgrp time only if current cgrp
1610 * matches event->cgrp. Must be done before
1611 * calling add_event_to_ctx()
1612 */
1613 update_cgrp_time_from_event(event);
1614
1615 add_event_to_ctx(event, ctx);
1616
1617 /*
1618 * Schedule everything back in
1619 */
1620 perf_event_sched_in(cpuctx, task_ctx, task);
1621
1622 perf_pmu_enable(cpuctx->ctx.pmu);
1623 perf_ctx_unlock(cpuctx, task_ctx);
1624
1625 return 0;
1626}
1627
1628/*
1629 * Attach a performance event to a context
1630 *
1631 * First we add the event to the list with the hardware enable bit
1632 * in event->hw_config cleared.
1633 *
1634 * If the event is attached to a task which is on a CPU we use an smp
1635 * call to enable it in the task context. The task might have been
1636 * scheduled away, but we check this in the smp call again.
1637 */
1638static void
1639perf_install_in_context(struct perf_event_context *ctx,
1640 struct perf_event *event,
1641 int cpu)
1642{
1643 struct task_struct *task = ctx->task;
1644
1645 lockdep_assert_held(&ctx->mutex);
1646
1647 event->ctx = ctx;
1648
1649 if (!task) {
1650 /*
1651 * Per cpu events are installed via an smp call and
1652 * the install is always successful.
1653 */
1654 cpu_function_call(cpu, __perf_install_in_context, event);
1655 return;
1656 }
1657
1658retry:
1659 if (!task_function_call(task, __perf_install_in_context, event))
1660 return;
1661
1662 raw_spin_lock_irq(&ctx->lock);
1663 /*
1664 * If we failed to find a running task, but find the context active now
1665 * that we've acquired the ctx->lock, retry.
1666 */
1667 if (ctx->is_active) {
1668 raw_spin_unlock_irq(&ctx->lock);
1669 goto retry;
1670 }
1671
1672 /*
1673 * Since the task isn't running, its safe to add the event, us holding
1674 * the ctx->lock ensures the task won't get scheduled in.
1675 */
1676 add_event_to_ctx(event, ctx);
1677 raw_spin_unlock_irq(&ctx->lock);
1678}
1679
1680/*
1681 * Put an event into inactive state and update its time fields.
1682 * Enabling the leader of a group effectively enables all
1683 * the group members that aren't explicitly disabled, so we
1684 * have to update their ->tstamp_enabled also.
1685 * Note: this works for group members as well as group leaders
1686 * since the non-leader members' sibling_lists will be empty.
1687 */
1688static void __perf_event_mark_enabled(struct perf_event *event)
1689{
1690 struct perf_event *sub;
1691 u64 tstamp = perf_event_time(event);
1692
1693 event->state = PERF_EVENT_STATE_INACTIVE;
1694 event->tstamp_enabled = tstamp - event->total_time_enabled;
1695 list_for_each_entry(sub, &event->sibling_list, group_entry) {
1696 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1697 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
1698 }
1699}
1700
1701/*
1702 * Cross CPU call to enable a performance event
1703 */
1704static int __perf_event_enable(void *info)
1705{
1706 struct perf_event *event = info;
1707 struct perf_event_context *ctx = event->ctx;
1708 struct perf_event *leader = event->group_leader;
1709 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1710 int err;
1711
1712 if (WARN_ON_ONCE(!ctx->is_active))
1713 return -EINVAL;
1714
1715 raw_spin_lock(&ctx->lock);
1716 update_context_time(ctx);
1717
1718 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1719 goto unlock;
1720
1721 /*
1722 * set current task's cgroup time reference point
1723 */
1724 perf_cgroup_set_timestamp(current, ctx);
1725
1726 __perf_event_mark_enabled(event);
1727
1728 if (!event_filter_match(event)) {
1729 if (is_cgroup_event(event))
1730 perf_cgroup_defer_enabled(event);
1731 goto unlock;
1732 }
1733
1734 /*
1735 * If the event is in a group and isn't the group leader,
1736 * then don't put it on unless the group is on.
1737 */
1738 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
1739 goto unlock;
1740
1741 if (!group_can_go_on(event, cpuctx, 1)) {
1742 err = -EEXIST;
1743 } else {
1744 if (event == leader)
1745 err = group_sched_in(event, cpuctx, ctx);
1746 else
1747 err = event_sched_in(event, cpuctx, ctx);
1748 }
1749
1750 if (err) {
1751 /*
1752 * If this event can't go on and it's part of a
1753 * group, then the whole group has to come off.
1754 */
1755 if (leader != event)
1756 group_sched_out(leader, cpuctx, ctx);
1757 if (leader->attr.pinned) {
1758 update_group_times(leader);
1759 leader->state = PERF_EVENT_STATE_ERROR;
1760 }
1761 }
1762
1763unlock:
1764 raw_spin_unlock(&ctx->lock);
1765
1766 return 0;
1767}
1768
1769/*
1770 * Enable an event.
1771 *
1772 * If event->ctx is a cloned context, callers must make sure that
1773 * every task struct that event->ctx->task could possibly point to
1774 * remains valid. This condition is satisfied when called through
1775 * perf_event_for_each_child or perf_event_for_each as described
1776 * for perf_event_disable.
1777 */
1778void perf_event_enable(struct perf_event *event)
1779{
1780 struct perf_event_context *ctx = event->ctx;
1781 struct task_struct *task = ctx->task;
1782
1783 if (!task) {
1784 /*
1785 * Enable the event on the cpu that it's on
1786 */
1787 cpu_function_call(event->cpu, __perf_event_enable, event);
1788 return;
1789 }
1790
1791 raw_spin_lock_irq(&ctx->lock);
1792 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1793 goto out;
1794
1795 /*
1796 * If the event is in error state, clear that first.
1797 * That way, if we see the event in error state below, we
1798 * know that it has gone back into error state, as distinct
1799 * from the task having been scheduled away before the
1800 * cross-call arrived.
1801 */
1802 if (event->state == PERF_EVENT_STATE_ERROR)
1803 event->state = PERF_EVENT_STATE_OFF;
1804
1805retry:
1806 if (!ctx->is_active) {
1807 __perf_event_mark_enabled(event);
1808 goto out;
1809 }
1810
1811 raw_spin_unlock_irq(&ctx->lock);
1812
1813 if (!task_function_call(task, __perf_event_enable, event))
1814 return;
1815
1816 raw_spin_lock_irq(&ctx->lock);
1817
1818 /*
1819 * If the context is active and the event is still off,
1820 * we need to retry the cross-call.
1821 */
1822 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
1823 /*
1824 * task could have been flipped by a concurrent
1825 * perf_event_context_sched_out()
1826 */
1827 task = ctx->task;
1828 goto retry;
1829 }
1830
1831out:
1832 raw_spin_unlock_irq(&ctx->lock);
1833}
1834EXPORT_SYMBOL_GPL(perf_event_enable);
1835
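/*
 * Allow the event @refresh more overflows before it gets disabled
 * again, then (re)enable it; this is what backs the
 * PERF_EVENT_IOC_REFRESH ioctl.
 */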
1836int perf_event_refresh(struct perf_event *event, int refresh)
1837{
1838 /*
1839 * not supported on inherited events
1840 */
1841 if (event->attr.inherit || !is_sampling_event(event))
1842 return -EINVAL;
1843
1844 atomic_add(refresh, &event->event_limit);
1845 perf_event_enable(event);
1846
1847 return 0;
1848}
1849EXPORT_SYMBOL_GPL(perf_event_refresh);
1850
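/*
 * Schedule out the requested class(es) of events (pinned and/or
 * flexible) from @ctx, after updating the context and cgroup clocks.
 */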
1851static void ctx_sched_out(struct perf_event_context *ctx,
1852 struct perf_cpu_context *cpuctx,
1853 enum event_type_t event_type)
1854{
1855 struct perf_event *event;
1856 int is_active = ctx->is_active;
1857
1858 ctx->is_active &= ~event_type;
1859 if (likely(!ctx->nr_events))
1860 return;
1861
1862 update_context_time(ctx);
1863 update_cgrp_time_from_cpuctx(cpuctx);
1864 if (!ctx->nr_active)
1865 return;
1866
1867 perf_pmu_disable(ctx->pmu);
1868 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
1869 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1870 group_sched_out(event, cpuctx, ctx);
1871 }
1872
1873 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
1874 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1875 group_sched_out(event, cpuctx, ctx);
1876 }
1877 perf_pmu_enable(ctx->pmu);
1878}
1879
1880/*
1881 * Test whether two contexts are equivalent, i.e. whether they
1882 * have both been cloned from the same version of the same context
1883 * and they both have the same number of enabled events.
1884 * If the number of enabled events is the same, then the set
1885 * of enabled events should be the same, because these are both
1886 * inherited contexts, therefore we can't access individual events
1887 * in them directly with an fd; we can only enable/disable all
1888 * events via prctl, or enable/disable all events in a family
1889 * via ioctl, which will have the same effect on both contexts.
1890 */
1891static int context_equiv(struct perf_event_context *ctx1,
1892 struct perf_event_context *ctx2)
1893{
1894 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1895 && ctx1->parent_gen == ctx2->parent_gen
1896 && !ctx1->pin_count && !ctx2->pin_count;
1897}
1898
1899static void __perf_event_sync_stat(struct perf_event *event,
1900 struct perf_event *next_event)
1901{
1902 u64 value;
1903
1904 if (!event->attr.inherit_stat)
1905 return;
1906
1907 /*
1908 * Update the event value; we cannot use perf_event_read()
1909 * because we're in the middle of a context switch and have IRQs
1910 * disabled, which upsets smp_call_function_single(). However,
1911 * we know the event must be on the current CPU, therefore we
1912 * don't need to use it.
1913 */
1914 switch (event->state) {
1915 case PERF_EVENT_STATE_ACTIVE:
1916 event->pmu->read(event);
1917 /* fall-through */
1918
1919 case PERF_EVENT_STATE_INACTIVE:
1920 update_event_times(event);
1921 break;
1922
1923 default:
1924 break;
1925 }
1926
1927 /*
1928 * In order to keep per-task stats reliable we need to flip the event
1929 * values when we flip the contexts.
1930 */
1931 value = local64_read(&next_event->count);
1932 value = local64_xchg(&event->count, value);
1933 local64_set(&next_event->count, value);
1934
1935 swap(event->total_time_enabled, next_event->total_time_enabled);
1936 swap(event->total_time_running, next_event->total_time_running);
1937
1938 /*
1939 * Since we swizzled the values, update the user-visible data too.
1940 */
1941 perf_event_update_userpage(event);
1942 perf_event_update_userpage(next_event);
1943}
1944
1945#define list_next_entry(pos, member) \
1946 list_entry(pos->member.next, typeof(*pos), member)
1947
1948static void perf_event_sync_stat(struct perf_event_context *ctx,
1949 struct perf_event_context *next_ctx)
1950{
1951 struct perf_event *event, *next_event;
1952
1953 if (!ctx->nr_stat)
1954 return;
1955
1956 update_context_time(ctx);
1957
1958 event = list_first_entry(&ctx->event_list,
1959 struct perf_event, event_entry);
1960
1961 next_event = list_first_entry(&next_ctx->event_list,
1962 struct perf_event, event_entry);
1963
1964 while (&event->event_entry != &ctx->event_list &&
1965 &next_event->event_entry != &next_ctx->event_list) {
1966
1967 __perf_event_sync_stat(event, next_event);
1968
1969 event = list_next_entry(event, event_entry);
1970 next_event = list_next_entry(next_event, event_entry);
1971 }
1972}
1973
1974static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
1975 struct task_struct *next)
1976{
1977 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
1978 struct perf_event_context *next_ctx;
1979 struct perf_event_context *parent;
1980 struct perf_cpu_context *cpuctx;
1981 int do_switch = 1;
1982
1983 if (likely(!ctx))
1984 return;
1985
1986 cpuctx = __get_cpu_context(ctx);
1987 if (!cpuctx->task_ctx)
1988 return;
1989
1990 rcu_read_lock();
1991 parent = rcu_dereference(ctx->parent_ctx);
1992 next_ctx = next->perf_event_ctxp[ctxn];
1993 if (parent && next_ctx &&
1994 rcu_dereference(next_ctx->parent_ctx) == parent) {
1995 /*
1996 * Looks like the two contexts are clones, so we might be
1997 * able to optimize the context switch. We lock both
1998 * contexts and check that they are clones under the
1999 * lock (including re-checking that neither has been
2000 * uncloned in the meantime). It doesn't matter which
2001 * order we take the locks because no other cpu could
2002 * be trying to lock both of these tasks.
2003 */
2004 raw_spin_lock(&ctx->lock);
2005 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2006 if (context_equiv(ctx, next_ctx)) {
2007 /*
2008 * XXX do we need a memory barrier of sorts
2009 * wrt to rcu_dereference() of perf_event_ctxp
2010 */
2011 task->perf_event_ctxp[ctxn] = next_ctx;
2012 next->perf_event_ctxp[ctxn] = ctx;
2013 ctx->task = next;
2014 next_ctx->task = task;
2015 do_switch = 0;
2016
2017 perf_event_sync_stat(ctx, next_ctx);
2018 }
2019 raw_spin_unlock(&next_ctx->lock);
2020 raw_spin_unlock(&ctx->lock);
2021 }
2022 rcu_read_unlock();
2023
2024 if (do_switch) {
2025 raw_spin_lock(&ctx->lock);
2026 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2027 cpuctx->task_ctx = NULL;
2028 raw_spin_unlock(&ctx->lock);
2029 }
2030}
2031
2032#define for_each_task_context_nr(ctxn) \
2033 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2034
2035/*
2036 * Called from scheduler to remove the events of the current task,
2037 * with interrupts disabled.
2038 *
2039 * We stop each event and update the event value in event->count.
2040 *
2041 * This does not protect us against NMI, but disable()
2042 * sets the disabled bit in the control field of event _before_
2043 * accessing the event control register. If a NMI hits, then it will
2044 * not restart the event.
2045 */
2046void __perf_event_task_sched_out(struct task_struct *task,
2047 struct task_struct *next)
2048{
2049 int ctxn;
2050
2051 for_each_task_context_nr(ctxn)
2052 perf_event_context_sched_out(task, ctxn, next);
2053
2054 /*
2055 * If cgroup events exist on this CPU, then we need
2056 * to check if we have to switch out PMU state;
2057 * cgroup events are system-wide mode only.
2058 */
2059 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2060 perf_cgroup_sched_out(task, next);
2061}
2062
2063static void task_ctx_sched_out(struct perf_event_context *ctx)
2064{
2065 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2066
2067 if (!cpuctx->task_ctx)
2068 return;
2069
2070 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2071 return;
2072
2073 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2074 cpuctx->task_ctx = NULL;
2075}
2076
2077/*
2078 * Called with IRQs disabled
2079 */
2080static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2081 enum event_type_t event_type)
2082{
2083 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2084}
2085
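/*
 * Schedule in all pinned groups; a pinned group that cannot get onto
 * the PMU is put into ERROR state.
 */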
2086static void
2087ctx_pinned_sched_in(struct perf_event_context *ctx,
2088 struct perf_cpu_context *cpuctx)
2089{
2090 struct perf_event *event;
2091
2092 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2093 if (event->state <= PERF_EVENT_STATE_OFF)
2094 continue;
2095 if (!event_filter_match(event))
2096 continue;
2097
2098 /* may need to reset tstamp_enabled */
2099 if (is_cgroup_event(event))
2100 perf_cgroup_mark_enabled(event, ctx);
2101
2102 if (group_can_go_on(event, cpuctx, 1))
2103 group_sched_in(event, cpuctx, ctx);
2104
2105 /*
2106 * If this pinned group hasn't been scheduled,
2107 * put it in error state.
2108 */
2109 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2110 update_group_times(event);
2111 event->state = PERF_EVENT_STATE_ERROR;
2112 }
2113 }
2114}
2115
2116static void
2117ctx_flexible_sched_in(struct perf_event_context *ctx,
2118 struct perf_cpu_context *cpuctx)
2119{
2120 struct perf_event *event;
2121 int can_add_hw = 1;
2122
2123 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2124 /* Ignore events in OFF or ERROR state */
2125 if (event->state <= PERF_EVENT_STATE_OFF)
2126 continue;
2127 /*
2128 * Listen to the 'cpu' scheduling filter constraint
2129 * of events:
2130 */
2131 if (!event_filter_match(event))
2132 continue;
2133
2134 /* may need to reset tstamp_enabled */
2135 if (is_cgroup_event(event))
2136 perf_cgroup_mark_enabled(event, ctx);
2137
2138 if (group_can_go_on(event, cpuctx, can_add_hw)) {
2139 if (group_sched_in(event, cpuctx, ctx))
2140 can_add_hw = 0;
2141 }
2142 }
2143}
2144
2145static void
2146ctx_sched_in(struct perf_event_context *ctx,
2147 struct perf_cpu_context *cpuctx,
2148 enum event_type_t event_type,
2149 struct task_struct *task)
2150{
2151 u64 now;
2152 int is_active = ctx->is_active;
2153
2154 ctx->is_active |= event_type;
2155 if (likely(!ctx->nr_events))
2156 return;
2157
2158 now = perf_clock();
2159 ctx->timestamp = now;
2160 perf_cgroup_set_timestamp(task, ctx);
2161 /*
2162 * First go through the list and put on any pinned groups
2163 * in order to give them the best chance of going on.
2164 */
2165 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
2166 ctx_pinned_sched_in(ctx, cpuctx);
2167
2168 /* Then walk through the lower prio flexible groups */
2169 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
2170 ctx_flexible_sched_in(ctx, cpuctx);
2171}
2172
2173static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
2174 enum event_type_t event_type,
2175 struct task_struct *task)
2176{
2177 struct perf_event_context *ctx = &cpuctx->ctx;
2178
2179 ctx_sched_in(ctx, cpuctx, event_type, task);
2180}
2181
2182static void perf_event_context_sched_in(struct perf_event_context *ctx,
2183 struct task_struct *task)
2184{
2185 struct perf_cpu_context *cpuctx;
2186
2187 cpuctx = __get_cpu_context(ctx);
2188 if (cpuctx->task_ctx == ctx)
2189 return;
2190
2191 perf_ctx_lock(cpuctx, ctx);
2192 perf_pmu_disable(ctx->pmu);
2193 /*
2194 * We want to keep the following priority order:
2195 * cpu pinned (which don't need to move), task pinned,
2196 * cpu flexible, task flexible.
2197 */
2198 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2199
2200 if (ctx->nr_events)
2201 cpuctx->task_ctx = ctx;
2202
2203 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2204
2205 perf_pmu_enable(ctx->pmu);
2206 perf_ctx_unlock(cpuctx, ctx);
2207
2208 /*
2209 * Since these rotations are per-cpu, we need to ensure the
2210 * cpu-context we got scheduled on is actually rotating.
2211 */
2212 perf_pmu_rotate_start(ctx->pmu);
2213}
2214
2215/*
2216 * When sampling the branch stack in system-wide mode, it may be necessary
2217 * to flush the stack on context switch. This happens when the branch
2218 * stack does not tag its entries with the pid of the current task.
2219 * Otherwise it becomes impossible to associate a branch entry with a
2220 * task. This ambiguity is more likely to appear when the branch stack
2221 * supports priv level filtering and the user sets it to monitor only
2222 * at the user level (which could be a useful measurement in system-wide
2223 * mode). In that case, the risk is high of having a branch stack with
2224 * branches from multiple tasks. Flushing may mean dropping the existing
2225 * entries or stashing them somewhere in the PMU specific code layer.
2226 *
2227 * This function provides the context switch callback to the lower code
2228 * layer. It is invoked ONLY when there is at least one system-wide context
2229 * with at least one active event using taken branch sampling.
2230 */
2231static void perf_branch_stack_sched_in(struct task_struct *prev,
2232 struct task_struct *task)
2233{
2234 struct perf_cpu_context *cpuctx;
2235 struct pmu *pmu;
2236 unsigned long flags;
2237
2238 /* no need to flush branch stack if not changing task */
2239 if (prev == task)
2240 return;
2241
2242 local_irq_save(flags);
2243
2244 rcu_read_lock();
2245
2246 list_for_each_entry_rcu(pmu, &pmus, entry) {
2247 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2248
2249 /*
2250 * check if the context has at least one
2251 * event using PERF_SAMPLE_BRANCH_STACK
2252 */
2253 if (cpuctx->ctx.nr_branch_stack > 0
2254 && pmu->flush_branch_stack) {
2255
2256 pmu = cpuctx->ctx.pmu;
2257
2258 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2259
2260 perf_pmu_disable(pmu);
2261
2262 pmu->flush_branch_stack();
2263
2264 perf_pmu_enable(pmu);
2265
2266 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2267 }
2268 }
2269
2270 rcu_read_unlock();
2271
2272 local_irq_restore(flags);
2273}
2274
2275/*
2276 * Called from scheduler to add the events of the current task
2277 * with interrupts disabled.
2278 *
2279 * We restore the event value and then enable it.
2280 *
2281 * This does not protect us against NMI, but enable()
2282 * sets the enabled bit in the control field of event _before_
2283 * accessing the event control register. If a NMI hits, then it will
2284 * keep the event running.
2285 */
2286void __perf_event_task_sched_in(struct task_struct *prev,
2287 struct task_struct *task)
2288{
2289 struct perf_event_context *ctx;
2290 int ctxn;
2291
2292 for_each_task_context_nr(ctxn) {
2293 ctx = task->perf_event_ctxp[ctxn];
2294 if (likely(!ctx))
2295 continue;
2296
2297 perf_event_context_sched_in(ctx, task);
2298 }
2299 /*
2300 * If cgroup events exist on this CPU, then we need
2301 * to check if we have to switch in PMU state;
2302 * cgroup events are system-wide mode only.
2303 */
2304 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2305 perf_cgroup_sched_in(prev, task);
2306
2307 /* check for system-wide branch_stack events */
2308 if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
2309 perf_branch_stack_sched_in(prev, task);
2310}
2311
2312static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2313{
2314 u64 frequency = event->attr.sample_freq;
2315 u64 sec = NSEC_PER_SEC;
2316 u64 divisor, dividend;
2317
2318 int count_fls, nsec_fls, frequency_fls, sec_fls;
2319
2320 count_fls = fls64(count);
2321 nsec_fls = fls64(nsec);
2322 frequency_fls = fls64(frequency);
2323 sec_fls = 30;
2324
2325 /*
2326 * We got @count in @nsec, with a target of sample_freq HZ
2327 * the target period becomes:
2328 *
2329 * @count * 10^9
2330 * period = -------------------
2331 * @nsec * sample_freq
2332 *
2333 */
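 /*
 * For example: @count = 1000 events observed over @nsec = 10^7 ns at
 * sample_freq = 1000 HZ gives 1000 * 10^9 / (10^7 * 1000) = 100,
 * i.e. sample once every 100 events.
 */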
2334
2335 /*
2336 * Reduce accuracy by one bit such that @a and @b converge
2337 * to a similar magnitude.
2338 */
2339#define REDUCE_FLS(a, b) \
2340do { \
2341 if (a##_fls > b##_fls) { \
2342 a >>= 1; \
2343 a##_fls--; \
2344 } else { \
2345 b >>= 1; \
2346 b##_fls--; \
2347 } \
2348} while (0)
2349
2350 /*
2351 * Reduce accuracy until either term fits in a u64, then proceed with
2352 * the other, so that finally we can do a u64/u64 division.
2353 */
2354 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2355 REDUCE_FLS(nsec, frequency);
2356 REDUCE_FLS(sec, count);
2357 }
2358
2359 if (count_fls + sec_fls > 64) {
2360 divisor = nsec * frequency;
2361
2362 while (count_fls + sec_fls > 64) {
2363 REDUCE_FLS(count, sec);
2364 divisor >>= 1;
2365 }
2366
2367 dividend = count * sec;
2368 } else {
2369 dividend = count * sec;
2370
2371 while (nsec_fls + frequency_fls > 64) {
2372 REDUCE_FLS(nsec, frequency);
2373 dividend >>= 1;
2374 }
2375
2376 divisor = nsec * frequency;
2377 }
2378
2379 if (!divisor)
2380 return dividend;
2381
2382 return div64_u64(dividend, divisor);
2383}
2384
2385static DEFINE_PER_CPU(int, perf_throttled_count);
2386static DEFINE_PER_CPU(u64, perf_throttled_seq);
2387
2388static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
2389{
2390 struct hw_perf_event *hwc = &event->hw;
2391 s64 period, sample_period;
2392 s64 delta;
2393
2394 period = perf_calculate_period(event, nsec, count);
2395
2396 delta = (s64)(period - hwc->sample_period);
2397 delta = (delta + 7) / 8; /* low pass filter */
2398
2399 sample_period = hwc->sample_period + delta;
2400
2401 if (!sample_period)
2402 sample_period = 1;
2403
2404 hwc->sample_period = sample_period;
2405
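 /*
 * If we have drifted more than eight full periods behind, drop the
 * accumulated lag and start afresh rather than trying to catch up
 * (stopping the event around the update when @disable says we must).
 */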
2406 if (local64_read(&hwc->period_left) > 8*sample_period) {
2407 if (disable)
2408 event->pmu->stop(event, PERF_EF_UPDATE);
2409
2410 local64_set(&hwc->period_left, 0);
2411
2412 if (disable)
2413 event->pmu->start(event, PERF_EF_RELOAD);
2414 }
2415}
2416
2417/*
2418 * combine freq adjustment with unthrottling to avoid two passes over the
2419 * events. At the same time, make sure having freq events does not change
2420 * the rate of unthrottling as that would introduce bias.
2421 */
2422static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2423 int needs_unthr)
2424{
2425 struct perf_event *event;
2426 struct hw_perf_event *hwc;
2427 u64 now, period = TICK_NSEC;
2428 s64 delta;
2429
2430 /*
2431 * We only need to iterate over all events if:
2432 * - the context has events in frequency mode (needs freq adjust), or
2433 * - there are events to unthrottle on this cpu
2434 */
2435 if (!(ctx->nr_freq || needs_unthr))
2436 return;
2437
2438 raw_spin_lock(&ctx->lock);
2439 perf_pmu_disable(ctx->pmu);
2440
2441 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2442 if (event->state != PERF_EVENT_STATE_ACTIVE)
2443 continue;
2444
2445 if (!event_filter_match(event))
2446 continue;
2447
2448 hwc = &event->hw;
2449
2450 if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
2451 hwc->interrupts = 0;
2452 perf_log_throttle(event, 1);
2453 event->pmu->start(event, 0);
2454 }
2455
2456 if (!event->attr.freq || !event->attr.sample_freq)
2457 continue;
2458
2459 /*
2460 * stop the event and update event->count
2461 */
2462 event->pmu->stop(event, PERF_EF_UPDATE);
2463
2464 now = local64_read(&event->count);
2465 delta = now - hwc->freq_count_stamp;
2466 hwc->freq_count_stamp = now;
2467
2468 /*
2469 * Restart the event:
2470 * reload only if the value has changed.
2471 * We have stopped the event, so tell that
2472 * to perf_adjust_period() to avoid stopping it
2473 * twice.
2474 */
2475 if (delta > 0)
2476 perf_adjust_period(event, period, delta, false);
2477
2478 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
2479 }
2480
2481 perf_pmu_enable(ctx->pmu);
2482 raw_spin_unlock(&ctx->lock);
2483}
2484
2485/*
2486 * Round-robin a context's events:
2487 */
2488static void rotate_ctx(struct perf_event_context *ctx)
2489{
2490 /*
2491 * Rotate the first entry of the non-pinned groups to the end. Rotation might be
2492 * disabled by the inheritance code.
2493 */
2494 if (!ctx->rotate_disable)
2495 list_rotate_left(&ctx->flexible_groups);
2496}
2497
2498/*
2499 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2500 * because they're strictly cpu affine and rotate_start is called with IRQs
2501 * disabled, while rotate_context is called from IRQ context.
2502 */
2503static void perf_rotate_context(struct perf_cpu_context *cpuctx)
2504{
2505 struct perf_event_context *ctx = NULL;
2506 int rotate = 0, remove = 1;
2507
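 /*
 * Rotation is only useful if some events could not get on the PMU
 * (nr_events != nr_active); if neither context has events left, drop
 * this cpuctx off the rotation list instead.
 */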
2508 if (cpuctx->ctx.nr_events) {
2509 remove = 0;
2510 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2511 rotate = 1;
2512 }
2513
2514 ctx = cpuctx->task_ctx;
2515 if (ctx && ctx->nr_events) {
2516 remove = 0;
2517 if (ctx->nr_events != ctx->nr_active)
2518 rotate = 1;
2519 }
2520
2521 if (!rotate)
2522 goto done;
2523
2524 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2525 perf_pmu_disable(cpuctx->ctx.pmu);
2526
2527 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2528 if (ctx)
2529 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
2530
2531 rotate_ctx(&cpuctx->ctx);
2532 if (ctx)
2533 rotate_ctx(ctx);
2534
2535 perf_event_sched_in(cpuctx, ctx, current);
2536
2537 perf_pmu_enable(cpuctx->ctx.pmu);
2538 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2539done:
2540 if (remove)
2541 list_del_init(&cpuctx->rotation_list);
2542}
2543
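/*
 * Per-CPU scheduler tick: unthrottle events, re-adjust the period of
 * frequency-based events, and rotate those contexts that are due at
 * this tick.
 */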
2544void perf_event_task_tick(void)
2545{
2546 struct list_head *head = &__get_cpu_var(rotation_list);
2547 struct perf_cpu_context *cpuctx, *tmp;
2548 struct perf_event_context *ctx;
2549 int throttled;
2550
2551 WARN_ON(!irqs_disabled());
2552
2553 __this_cpu_inc(perf_throttled_seq);
2554 throttled = __this_cpu_xchg(perf_throttled_count, 0);
2555
2556 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
2557 ctx = &cpuctx->ctx;
2558 perf_adjust_freq_unthr_context(ctx, throttled);
2559
2560 ctx = cpuctx->task_ctx;
2561 if (ctx)
2562 perf_adjust_freq_unthr_context(ctx, throttled);
2563
2564 if (cpuctx->jiffies_interval == 1 ||
2565 !(jiffies % cpuctx->jiffies_interval))
2566 perf_rotate_context(cpuctx);
2567 }
2568}
2569
2570static int event_enable_on_exec(struct perf_event *event,
2571 struct perf_event_context *ctx)
2572{
2573 if (!event->attr.enable_on_exec)
2574 return 0;
2575
2576 event->attr.enable_on_exec = 0;
2577 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2578 return 0;
2579
2580 __perf_event_mark_enabled(event);
2581
2582 return 1;
2583}
2584
2585/*
2586 * Enable all of a task's events that have been marked enable-on-exec.
2587 * This expects task == current.
2588 */
2589static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2590{
2591 struct perf_event *event;
2592 unsigned long flags;
2593 int enabled = 0;
2594 int ret;
2595
2596 local_irq_save(flags);
2597 if (!ctx || !ctx->nr_events)
2598 goto out;
2599
2600 /*
2601 * We must context-switch out cgroup events to avoid a conflict
2602 * when invoking perf_task_event_sched_in() later on
2603 * in this function. Otherwise we end up trying to
2604 * switch in cgroup events that are already scheduled
2605 * in.
2606 */
2607 perf_cgroup_sched_out(current, NULL);
2608
2609 raw_spin_lock(&ctx->lock);
2610 task_ctx_sched_out(ctx);
2611
2612 list_for_each_entry(event, &ctx->event_list, event_entry) {
2613 ret = event_enable_on_exec(event, ctx);
2614 if (ret)
2615 enabled = 1;
2616 }
2617
2618 /*
2619 * Unclone this context if we enabled any event.
2620 */
2621 if (enabled)
2622 unclone_ctx(ctx);
2623
2624 raw_spin_unlock(&ctx->lock);
2625
2626 /*
2627 * Also switches cgroup events back in, if any:
2628 */
2629 perf_event_context_sched_in(ctx, ctx->task);
2630out:
2631 local_irq_restore(flags);
2632}
2633
2634/*
2635 * Cross CPU call to read the hardware event
2636 */
2637static void __perf_event_read(void *info)
2638{
2639 struct perf_event *event = info;
2640 struct perf_event_context *ctx = event->ctx;
2641 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2642
2643 /*
2644 * If this is a task context, we need to check whether it is
2645 * the current task context of this cpu. If not it has been
2646 * scheduled out before the smp call arrived. In that case
2647 * event->count would have been updated to a recent sample
2648 * when the event was scheduled out.
2649 */
2650 if (ctx->task && cpuctx->task_ctx != ctx)
2651 return;
2652
2653 raw_spin_lock(&ctx->lock);
2654 if (ctx->is_active) {
2655 update_context_time(ctx);
2656 update_cgrp_time_from_event(event);
2657 }
2658 update_event_times(event);
2659 if (event->state == PERF_EVENT_STATE_ACTIVE)
2660 event->pmu->read(event);
2661 raw_spin_unlock(&ctx->lock);
2662}
2663
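/*
 * Total event count: the locally accumulated count plus whatever
 * exited child events have folded back in.
 */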
2664static inline u64 perf_event_count(struct perf_event *event)
2665{
2666 return local64_read(&event->count) + atomic64_read(&event->child_count);
2667}
2668
2669static u64 perf_event_read(struct perf_event *event)
2670{
2671 /*
2672 * If event is enabled and currently active on a CPU, update the
2673 * value in the event structure:
2674 */
2675 if (event->state == PERF_EVENT_STATE_ACTIVE) {
2676 smp_call_function_single(event->oncpu,
2677 __perf_event_read, event, 1);
2678 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2679 struct perf_event_context *ctx = event->ctx;
2680 unsigned long flags;
2681
2682 raw_spin_lock_irqsave(&ctx->lock, flags);
2683 /*
2684 * We may read while the context is not active
2685 * (e.g., the thread is blocked); in that case
2686 * we cannot update the context time.
2687 */
2688 if (ctx->is_active) {
2689 update_context_time(ctx);
2690 update_cgrp_time_from_event(event);
2691 }
2692 update_event_times(event);
2693 raw_spin_unlock_irqrestore(&ctx->lock, flags);
2694 }
2695
2696 return perf_event_count(event);
2697}
2698
2699/*
2700 * Initialize a freshly allocated perf_event context:
2701 */
2702static void __perf_event_init_context(struct perf_event_context *ctx)
2703{
2704 raw_spin_lock_init(&ctx->lock);
2705 mutex_init(&ctx->mutex);
2706 INIT_LIST_HEAD(&ctx->pinned_groups);
2707 INIT_LIST_HEAD(&ctx->flexible_groups);
2708 INIT_LIST_HEAD(&ctx->event_list);
2709 atomic_set(&ctx->refcount, 1);
2710}
2711
2712static struct perf_event_context *
2713alloc_perf_context(struct pmu *pmu, struct task_struct *task)
2714{
2715 struct perf_event_context *ctx;
2716
2717 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2718 if (!ctx)
2719 return NULL;
2720
2721 __perf_event_init_context(ctx);
2722 if (task) {
2723 ctx->task = task;
2724 get_task_struct(task);
2725 }
2726 ctx->pmu = pmu;
2727
2728 return ctx;
2729}
2730
2731static struct task_struct *
2732find_lively_task_by_vpid(pid_t vpid)
2733{
2734 struct task_struct *task;
2735 int err;
2736
2737 rcu_read_lock();
2738 if (!vpid)
2739 task = current;
2740 else
2741 task = find_task_by_vpid(vpid);
2742 if (task)
2743 get_task_struct(task);
2744 rcu_read_unlock();
2745
2746 if (!task)
2747 return ERR_PTR(-ESRCH);
2748
2749 /* Reuse ptrace permission checks for now. */
2750 err = -EACCES;
2751 if (!ptrace_may_access(task, PTRACE_MODE_READ))
2752 goto errout;
2753
2754 return task;
2755errout:
2756 put_task_struct(task);
2757 return ERR_PTR(err);
2758
2759}
2760
2761/*
2762 * Returns a matching context with refcount and pincount.
2763 */
2764static struct perf_event_context *
2765find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
2766{
2767 struct perf_event_context *ctx;
2768 struct perf_cpu_context *cpuctx;
2769 unsigned long flags;
2770 int ctxn, err;
2771
2772 if (!task) {
2773 /* Must be root to operate on a CPU event: */
2774 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
2775 return ERR_PTR(-EACCES);
2776
2777 /*
2778 * We could be clever and allow attaching an event to an
2779 * offline CPU and activate it when the CPU comes up, but
2780 * that's for later.
2781 */
2782 if (!cpu_online(cpu))
2783 return ERR_PTR(-ENODEV);
2784
2785 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
2786 ctx = &cpuctx->ctx;
2787 get_ctx(ctx);
2788 ++ctx->pin_count;
2789
2790 return ctx;
2791 }
2792
2793 err = -EINVAL;
2794 ctxn = pmu->task_ctx_nr;
2795 if (ctxn < 0)
2796 goto errout;
2797
2798retry:
2799 ctx = perf_lock_task_context(task, ctxn, &flags);
2800 if (ctx) {
2801 unclone_ctx(ctx);
2802 ++ctx->pin_count;
2803 raw_spin_unlock_irqrestore(&ctx->lock, flags);
2804 } else {
2805 ctx = alloc_perf_context(pmu, task);
2806 err = -ENOMEM;
2807 if (!ctx)
2808 goto errout;
2809
2810 err = 0;
2811 mutex_lock(&task->perf_event_mutex);
2812 /*
2813 * If it has already passed perf_event_exit_task(),
2814 * we must see PF_EXITING; it takes this mutex too.
2815 */
2816 if (task->flags & PF_EXITING)
2817 err = -ESRCH;
2818 else if (task->perf_event_ctxp[ctxn])
2819 err = -EAGAIN;
2820 else {
2821 get_ctx(ctx);
2822 ++ctx->pin_count;
2823 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
2824 }
2825 mutex_unlock(&task->perf_event_mutex);
2826
2827 if (unlikely(err)) {
2828 put_ctx(ctx);
2829
2830 if (err == -EAGAIN)
2831 goto retry;
2832 goto errout;
2833 }
2834 }
2835
2836 return ctx;
2837
2838errout:
2839 return ERR_PTR(err);
2840}
2841
2842static void perf_event_free_filter(struct perf_event *event);
2843
2844static void free_event_rcu(struct rcu_head *head)
2845{
2846 struct perf_event *event;
2847
2848 event = container_of(head, struct perf_event, rcu_head);
2849 if (event->ns)
2850 put_pid_ns(event->ns);
2851 perf_event_free_filter(event);
2852 kfree(event);
2853}
2854
2855static void ring_buffer_put(struct ring_buffer *rb);
2856
2857static void free_event(struct perf_event *event)
2858{
2859 irq_work_sync(&event->pending);
2860
2861 if (!event->parent) {
2862 if (event->attach_state & PERF_ATTACH_TASK)
2863 static_key_slow_dec_deferred(&perf_sched_events);
2864 if (event->attr.mmap || event->attr.mmap_data)
2865 atomic_dec(&nr_mmap_events);
2866 if (event->attr.comm)
2867 atomic_dec(&nr_comm_events);
2868 if (event->attr.task)
2869 atomic_dec(&nr_task_events);
2870 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2871 put_callchain_buffers();
2872 if (is_cgroup_event(event)) {
2873 atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
2874 static_key_slow_dec_deferred(&perf_sched_events);
2875 }
2876
2877 if (has_branch_stack(event)) {
2878 static_key_slow_dec_deferred(&perf_sched_events);
2879 /* is system-wide event */
2880 if (!(event->attach_state & PERF_ATTACH_TASK))
2881 atomic_dec(&per_cpu(perf_branch_stack_events,
2882 event->cpu));
2883 }
2884 }
2885
2886 if (event->rb) {
2887 ring_buffer_put(event->rb);
2888 event->rb = NULL;
2889 }
2890
2891 if (is_cgroup_event(event))
2892 perf_detach_cgroup(event);
2893
2894 if (event->destroy)
2895 event->destroy(event);
2896
2897 if (event->ctx)
2898 put_ctx(event->ctx);
2899
2900 call_rcu(&event->rcu_head, free_event_rcu);
2901}
2902
2903int perf_event_release_kernel(struct perf_event *event)
2904{
2905 struct perf_event_context *ctx = event->ctx;
2906
2907 WARN_ON_ONCE(ctx->parent_ctx);
2908 /*
2909 * There are two ways this annotation is useful:
2910 *
2911 * 1) there is a lock recursion from perf_event_exit_task;
2912 *    see the comment there.
2913 *
2914 * 2) there is a lock-inversion with mmap_sem through
2915 *    perf_event_read_group(), which takes faults while
2916 *    holding ctx->mutex; however, this is called after
2917 *    the last file descriptor is gone, so there is no
2918 *    possibility to trigger the AB-BA case.
2919 */
2920 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
2921 raw_spin_lock_irq(&ctx->lock);
2922 perf_group_detach(event);
2923 raw_spin_unlock_irq(&ctx->lock);
2924 perf_remove_from_context(event);
2925 mutex_unlock(&ctx->mutex);
2926
2927 free_event(event);
2928
2929 return 0;
2930}
2931EXPORT_SYMBOL_GPL(perf_event_release_kernel);
2932
2933/*
2934 * Called when the last reference to the file is gone.
2935 */
2936static void put_event(struct perf_event *event)
2937{
2938 struct task_struct *owner;
2939
2940 if (!atomic_long_dec_and_test(&event->refcount))
2941 return;
2942
2943 rcu_read_lock();
2944 owner = ACCESS_ONCE(event->owner);
2945 /*
2946 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
2947 * !owner it means the list deletion is complete and we can indeed
2948 * free this event, otherwise we need to serialize on
2949 * owner->perf_event_mutex.
2950 */
2951 smp_read_barrier_depends();
2952 if (owner) {
2953 /*
2954 * Since delayed_put_task_struct() also drops the last
2955 * task reference we can safely take a new reference
2956 * while holding the rcu_read_lock().
2957 */
2958 get_task_struct(owner);
2959 }
2960 rcu_read_unlock();
2961
2962 if (owner) {
2963 mutex_lock(&owner->perf_event_mutex);
2964 /*
2965 * We have to re-check the event->owner field: if it is cleared
2966 * we raced with perf_event_exit_task(). Acquiring the mutex
2967 * ensures they're done, and we can proceed with freeing the
2968 * event.
2969 */
2970 if (event->owner)
2971 list_del_init(&event->owner_entry);
2972 mutex_unlock(&owner->perf_event_mutex);
2973 put_task_struct(owner);
2974 }
2975
2976 perf_event_release_kernel(event);
2977}
2978
2979static int perf_release(struct inode *inode, struct file *file)
2980{
2981 put_event(file->private_data);
2982 return 0;
2983}
2984
2985u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
2986{
2987 struct perf_event *child;
2988 u64 total = 0;
2989
2990 *enabled = 0;
2991 *running = 0;
2992
2993 mutex_lock(&event->child_mutex);
2994 total += perf_event_read(event);
2995 *enabled += event->total_time_enabled +
2996 atomic64_read(&event->child_total_time_enabled);
2997 *running += event->total_time_running +
2998 atomic64_read(&event->child_total_time_running);
2999
3000 list_for_each_entry(child, &event->child_list, child_list) {
3001 total += perf_event_read(child);
3002 *enabled += child->total_time_enabled;
3003 *running += child->total_time_running;
3004 }
3005 mutex_unlock(&event->child_mutex);
3006
3007 return total;
3008}
3009EXPORT_SYMBOL_GPL(perf_event_read_value);
3010
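/*
 * Layout sketch for the PERF_FORMAT_GROUP read implemented below,
 * following the read_format documentation; braces tagged with && are
 * present only when the corresponding format bit is set:
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value;
 *	    { u64 id;         } && PERF_FORMAT_ID
 *	  } cntr[nr];
 *	}
 */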
3011static int perf_event_read_group(struct perf_event *event,
3012 u64 read_format, char __user *buf)
3013{
3014 struct perf_event *leader = event->group_leader, *sub;
3015 int n = 0, size = 0, ret = -EFAULT;
3016 struct perf_event_context *ctx = leader->ctx;
3017 u64 values[5];
3018 u64 count, enabled, running;
3019
3020 mutex_lock(&ctx->mutex);
3021 count = perf_event_read_value(leader, &enabled, &running);
3022
3023 values[n++] = 1 + leader->nr_siblings;
3024 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3025 values[n++] = enabled;
3026 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3027 values[n++] = running;
3028 values[n++] = count;
3029 if (read_format & PERF_FORMAT_ID)
3030 values[n++] = primary_event_id(leader);
3031
3032 size = n * sizeof(u64);
3033
3034 if (copy_to_user(buf, values, size))
3035 goto unlock;
3036
3037 ret = size;
3038
3039 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3040 n = 0;
3041
3042 values[n++] = perf_event_read_value(sub, &enabled, &running);
3043 if (read_format & PERF_FORMAT_ID)
3044 values[n++] = primary_event_id(sub);
3045
3046 size = n * sizeof(u64);
3047
3048 if (copy_to_user(buf + ret, values, size)) {
3049 ret = -EFAULT;
3050 goto unlock;
3051 }
3052
3053 ret += size;
3054 }
3055unlock:
3056 mutex_unlock(&ctx->mutex);
3057
3058 return ret;
3059}
3060
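/*
 * Layout sketch for the non-group read implemented below, in the same
 * notation as above:
 *
 *	{ u64 value;
 *	  { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 id;           } && PERF_FORMAT_ID
 *	}
 */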
3061static int perf_event_read_one(struct perf_event *event,
3062 u64 read_format, char __user *buf)
3063{
3064 u64 enabled, running;
3065 u64 values[4];
3066 int n = 0;
3067
3068 values[n++] = perf_event_read_value(event, &enabled, &running);
3069 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3070 values[n++] = enabled;
3071 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3072 values[n++] = running;
3073 if (read_format & PERF_FORMAT_ID)
3074 values[n++] = primary_event_id(event);
3075
3076 if (copy_to_user(buf, values, n * sizeof(u64)))
3077 return -EFAULT;
3078
3079 return n * sizeof(u64);
3080}
3081
3082/*
3083 * Read the performance event - simple non-blocking version for now
3084 */
3085static ssize_t
3086perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
3087{
3088 u64 read_format = event->attr.read_format;
3089 int ret;
3090
3091 /*
3092 * Return end-of-file for a read on an event that is in
3093 * error state (i.e. because it was pinned but it couldn't be
3094 * scheduled on to the CPU at some point).
3095 */
3096 if (event->state == PERF_EVENT_STATE_ERROR)
3097 return 0;
3098
3099 if (count < event->read_size)
3100 return -ENOSPC;
3101
3102 WARN_ON_ONCE(event->ctx->parent_ctx);
3103 if (read_format & PERF_FORMAT_GROUP)
3104 ret = perf_event_read_group(event, read_format, buf);
3105 else
3106 ret = perf_event_read_one(event, read_format, buf);
3107
3108 return ret;
3109}
3110
3111static ssize_t
3112perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3113{
3114 struct perf_event *event = file->private_data;
3115
3116 return perf_read_hw(event, buf, count);
3117}
3118
3119static unsigned int perf_poll(struct file *file, poll_table *wait)
3120{
3121 struct perf_event *event = file->private_data;
3122 struct ring_buffer *rb;
3123 unsigned int events = POLLHUP;
3124
3125 /*
3126 * Race between perf_event_set_output() and perf_poll(): perf_poll()
3127 * grabs the rb reference but perf_event_set_output() overrides it.
3128 * Here is the timeline for two threads T1, T2:
3129 * t0: T1, rb = rcu_dereference(event->rb)
3130 * t1: T2, old_rb = event->rb
3131 * t2: T2, event->rb = new rb
3132 * t3: T2, ring_buffer_detach(old_rb)
3133 * t4: T1, ring_buffer_attach(rb)
3134 * t5: T1, poll_wait(event->waitq)
3135 *
3136 * To avoid this problem, we grab mmap_mutex in perf_poll(),
3137 * thereby ensuring that the assignment of the new ring buffer
3138 * and the detachment of the old buffer appear atomic to perf_poll().
3139 */
3140 mutex_lock(&event->mmap_mutex);
3141
3142 rcu_read_lock();
3143 rb = rcu_dereference(event->rb);
3144 if (rb) {
3145 ring_buffer_attach(event, rb);
3146 events = atomic_xchg(&rb->poll, 0);
3147 }
3148 rcu_read_unlock();
3149
3150 mutex_unlock(&event->mmap_mutex);
3151
3152 poll_wait(file, &event->waitq, wait);
3153
3154 return events;
3155}
3156
3157static void perf_event_reset(struct perf_event *event)
3158{
3159 (void)perf_event_read(event);
3160 local64_set(&event->count, 0);
3161 perf_event_update_userpage(event);
3162}
3163
3164/*
3165 * Holding the top-level event's child_mutex means that any
3166 * descendant process that has inherited this event will block
3167 * in sync_child_event if it goes to exit, thus satisfying the
3168 * task existence requirements of perf_event_enable/disable.
3169 */
3170static void perf_event_for_each_child(struct perf_event *event,
3171 void (*func)(struct perf_event *))
3172{
3173 struct perf_event *child;
3174
3175 WARN_ON_ONCE(event->ctx->parent_ctx);
3176 mutex_lock(&event->child_mutex);
3177 func(event);
3178 list_for_each_entry(child, &event->child_list, child_list)
3179 func(child);
3180 mutex_unlock(&event->child_mutex);
3181}
3182
3183static void perf_event_for_each(struct perf_event *event,
3184 void (*func)(struct perf_event *))
3185{
3186 struct perf_event_context *ctx = event->ctx;
3187 struct perf_event *sibling;
3188
3189 WARN_ON_ONCE(ctx->parent_ctx);
3190 mutex_lock(&ctx->mutex);
3191 event = event->group_leader;
3192
3193 perf_event_for_each_child(event, func);
3194 list_for_each_entry(sibling, &event->sibling_list, group_entry)
3195 perf_event_for_each_child(sibling, func);
3196 mutex_unlock(&ctx->mutex);
3197}
3198
3199static int perf_event_period(struct perf_event *event, u64 __user *arg)
3200{
3201 struct perf_event_context *ctx = event->ctx;
3202 int ret = 0;
3203 u64 value;
3204
3205 if (!is_sampling_event(event))
3206 return -EINVAL;
3207
3208 if (copy_from_user(&value, arg, sizeof(value)))
3209 return -EFAULT;
3210
3211 if (!value)
3212 return -EINVAL;
3213
3214 raw_spin_lock_irq(&ctx->lock);
3215 if (event->attr.freq) {
3216 if (value > sysctl_perf_event_sample_rate) {
3217 ret = -EINVAL;
3218 goto unlock;
3219 }
3220
3221 event->attr.sample_freq = value;
3222 } else {
3223 event->attr.sample_period = value;
3224 event->hw.sample_period = value;
3225 }
3226unlock:
3227 raw_spin_unlock_irq(&ctx->lock);
3228
3229 return ret;
3230}
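
/*
 * User-space side of the ioctl handled above, as a minimal sketch
 * (assuming 'fd' is an open perf event file descriptor):
 *
 *	__u64 period = 100000;
 *
 *	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period) < 0)
 *		perror("PERF_EVENT_IOC_PERIOD");
 */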
3231
3232static const struct file_operations perf_fops;
3233
3234static struct file *perf_fget_light(int fd, int *fput_needed)
3235{
3236 struct file *file;
3237
3238 file = fget_light(fd, fput_needed);
3239 if (!file)
3240 return ERR_PTR(-EBADF);
3241
3242 if (file->f_op != &perf_fops) {
3243 fput_light(file, *fput_needed);
3244 *fput_needed = 0;
3245 return ERR_PTR(-EBADF);
3246 }
3247
3248 return file;
3249}
3250
3251static int perf_event_set_output(struct perf_event *event,
3252 struct perf_event *output_event);
3253static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3254
3255static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3256{
3257 struct perf_event *event = file->private_data;
3258 void (*func)(struct perf_event *);
3259 u32 flags = arg;
3260
3261 switch (cmd) {
3262 case PERF_EVENT_IOC_ENABLE:
3263 func = perf_event_enable;
3264 break;
3265 case PERF_EVENT_IOC_DISABLE:
3266 func = perf_event_disable;
3267 break;
3268 case PERF_EVENT_IOC_RESET:
3269 func = perf_event_reset;
3270 break;
3271
3272 case PERF_EVENT_IOC_REFRESH:
3273 return perf_event_refresh(event, arg);
3274
3275 case PERF_EVENT_IOC_PERIOD:
3276 return perf_event_period(event, (u64 __user *)arg);
3277
3278 case PERF_EVENT_IOC_SET_OUTPUT:
3279 {
3280 struct file *output_file = NULL;
3281 struct perf_event *output_event = NULL;
3282 int fput_needed = 0;
3283 int ret;
3284
3285 if (arg != -1) {
3286 output_file = perf_fget_light(arg, &fput_needed);
3287 if (IS_ERR(output_file))
3288 return PTR_ERR(output_file);
3289 output_event = output_file->private_data;
3290 }
3291
3292 ret = perf_event_set_output(event, output_event);
3293 if (output_event)
3294 fput_light(output_file, fput_needed);
3295
3296 return ret;
3297 }
3298
3299 case PERF_EVENT_IOC_SET_FILTER:
3300 return perf_event_set_filter(event, (void __user *)arg);
3301
3302 default:
3303 return -ENOTTY;
3304 }
3305
3306 if (flags & PERF_IOC_FLAG_GROUP)
3307 perf_event_for_each(event, func);
3308 else
3309 perf_event_for_each_child(event, func);
3310
3311 return 0;
3312}
3313
3314int perf_event_task_enable(void)
3315{
3316 struct perf_event *event;
3317
3318 mutex_lock(&current->perf_event_mutex);
3319 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3320 perf_event_for_each_child(event, perf_event_enable);
3321 mutex_unlock(&current->perf_event_mutex);
3322
3323 return 0;
3324}
3325
3326int perf_event_task_disable(void)
3327{
3328 struct perf_event *event;
3329
3330 mutex_lock(&current->perf_event_mutex);
3331 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3332 perf_event_for_each_child(event, perf_event_disable);
3333 mutex_unlock(&current->perf_event_mutex);
3334
3335 return 0;
3336}
3337
3338static int perf_event_index(struct perf_event *event)
3339{
3340 if (event->hw.state & PERF_HES_STOPPED)
3341 return 0;
3342
3343 if (event->state != PERF_EVENT_STATE_ACTIVE)
3344 return 0;
3345
3346 return event->pmu->event_idx(event);
3347}
3348
3349static void calc_timer_values(struct perf_event *event,
3350 u64 *now,
3351 u64 *enabled,
3352 u64 *running)
3353{
3354 u64 ctx_time;
3355
3356 *now = perf_clock();
3357 ctx_time = event->shadow_ctx_time + *now;
3358 *enabled = ctx_time - event->tstamp_enabled;
3359 *running = ctx_time - event->tstamp_running;
3360}
3361
3362void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3363{
3364}
3365
3366/*
3367 * Callers need to ensure there can be no nesting of this function, otherwise
3368 * the seqlock logic goes bad. We cannot serialize this because the arch
3369 * code calls this from NMI context.
3370 */
3371void perf_event_update_userpage(struct perf_event *event)
3372{
3373 struct perf_event_mmap_page *userpg;
3374 struct ring_buffer *rb;
3375 u64 enabled, running, now;
3376
3377 rcu_read_lock();
3378 /*
3379 * compute total_time_enabled, total_time_running
3380 * based on snapshot values taken when the event
3381 * was last scheduled in.
3382 *
3383 * we cannot simply call update_context_time(),
3384 * because of locking issues, as we can be called in
3385 * NMI context
3386 */
3387 calc_timer_values(event, &now, &enabled, &running);
3388 rb = rcu_dereference(event->rb);
3389 if (!rb)
3390 goto unlock;
3391
3392 userpg = rb->user_page;
3393
3394 /*
3395 * Disable preemption so as not to let the corresponding user-space
3396 * spin too long if we get preempted.
3397 */
3398 preempt_disable();
3399 ++userpg->lock;
3400 barrier();
3401 userpg->index = perf_event_index(event);
3402 userpg->offset = perf_event_count(event);
3403 if (userpg->index)
3404 userpg->offset -= local64_read(&event->hw.prev_count);
3405
3406 userpg->time_enabled = enabled +
3407 atomic64_read(&event->child_total_time_enabled);
3408
3409 userpg->time_running = running +
3410 atomic64_read(&event->child_total_time_running);
3411
3412 arch_perf_update_userpage(userpg, now);
3413
3414 barrier();
3415 ++userpg->lock;
3416 preempt_enable();
3417unlock:
3418 rcu_read_unlock();
3419}
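
/*
 * The user-space read side pairing with the ++userpg->lock/barrier()
 * sequence above looks roughly like the sketch below, with 'pc'
 * pointing at the mapped struct perf_event_mmap_page, barrier() a
 * compiler barrier, and rdpmc() standing in for an architecture
 * counter read (e.g. x86 RDPMC):
 *
 *	u32 seq, idx;
 *	u64 count;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (idx)
 *			count += rdpmc(idx - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * A changed sequence count means the reader raced with an update in
 * progress and must retry.
 */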
3420
3421static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3422{
3423 struct perf_event *event = vma->vm_file->private_data;
3424 struct ring_buffer *rb;
3425 int ret = VM_FAULT_SIGBUS;
3426
3427 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3428 if (vmf->pgoff == 0)
3429 ret = 0;
3430 return ret;
3431 }
3432
3433 rcu_read_lock();
3434 rb = rcu_dereference(event->rb);
3435 if (!rb)
3436 goto unlock;
3437
3438 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3439 goto unlock;
3440
3441 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
3442 if (!vmf->page)
3443 goto unlock;
3444
3445 get_page(vmf->page);
3446 vmf->page->mapping = vma->vm_file->f_mapping;
3447 vmf->page->index = vmf->pgoff;
3448
3449 ret = 0;
3450unlock:
3451 rcu_read_unlock();
3452
3453 return ret;
3454}
3455
3456static void ring_buffer_attach(struct perf_event *event,
3457 struct ring_buffer *rb)
3458{
3459 unsigned long flags;
3460
3461 if (!list_empty(&event->rb_entry))
3462 return;
3463
3464 spin_lock_irqsave(&rb->event_lock, flags);
3465 if (!list_empty(&event->rb_entry))
3466 goto unlock;
3467
3468 list_add(&event->rb_entry, &rb->event_list);
3469unlock:
3470 spin_unlock_irqrestore(&rb->event_lock, flags);
3471}
3472
3473static void ring_buffer_detach(struct perf_event *event,
3474 struct ring_buffer *rb)
3475{
3476 unsigned long flags;
3477
3478 if (list_empty(&event->rb_entry))
3479 return;
3480
3481 spin_lock_irqsave(&rb->event_lock, flags);
3482 list_del_init(&event->rb_entry);
3483 wake_up_all(&event->waitq);
3484 spin_unlock_irqrestore(&rb->event_lock, flags);
3485}
3486
3487static void ring_buffer_wakeup(struct perf_event *event)
3488{
3489 struct ring_buffer *rb;
3490
3491 rcu_read_lock();
3492 rb = rcu_dereference(event->rb);
3493 if (!rb)
3494 goto unlock;
3495
3496 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3497 wake_up_all(&event->waitq);
3498
3499unlock:
3500 rcu_read_unlock();
3501}
3502
3503static void rb_free_rcu(struct rcu_head *rcu_head)
3504{
3505 struct ring_buffer *rb;
3506
3507 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3508 rb_free(rb);
3509}
3510
3511static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3512{
3513 struct ring_buffer *rb;
3514
3515 rcu_read_lock();
3516 rb = rcu_dereference(event->rb);
3517 if (rb) {
3518 if (!atomic_inc_not_zero(&rb->refcount))
3519 rb = NULL;
3520 }
3521 rcu_read_unlock();
3522
3523 return rb;
3524}
3525
3526static void ring_buffer_put(struct ring_buffer *rb)
3527{
3528 struct perf_event *event, *n;
3529 unsigned long flags;
3530
3531 if (!atomic_dec_and_test(&rb->refcount))
3532 return;
3533
3534 spin_lock_irqsave(&rb->event_lock, flags);
3535 list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
3536 list_del_init(&event->rb_entry);
3537 wake_up_all(&event->waitq);
3538 }
3539 spin_unlock_irqrestore(&rb->event_lock, flags);
3540
3541 call_rcu(&rb->rcu_head, rb_free_rcu);
3542}
3543
3544static void perf_mmap_open(struct vm_area_struct *vma)
3545{
3546 struct perf_event *event = vma->vm_file->private_data;
3547
3548 atomic_inc(&event->mmap_count);
3549}
3550
3551static void perf_mmap_close(struct vm_area_struct *vma)
3552{
3553 struct perf_event *event = vma->vm_file->private_data;
3554
3555 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
3556 unsigned long size = perf_data_size(event->rb);
3557 struct user_struct *user = event->mmap_user;
3558 struct ring_buffer *rb = event->rb;
3559
3560 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
3561 vma->vm_mm->pinned_vm -= event->mmap_locked;
3562 rcu_assign_pointer(event->rb, NULL);
3563 ring_buffer_detach(event, rb);
3564 mutex_unlock(&event->mmap_mutex);
3565
3566 ring_buffer_put(rb);
3567 free_uid(user);
3568 }
3569}
3570
3571static const struct vm_operations_struct perf_mmap_vmops = {
3572 .open = perf_mmap_open,
3573 .close = perf_mmap_close,
3574 .fault = perf_mmap_fault,
3575 .page_mkwrite = perf_mmap_fault,
3576};
3577
3578static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3579{
3580 struct perf_event *event = file->private_data;
3581 unsigned long user_locked, user_lock_limit;
3582 struct user_struct *user = current_user();
3583 unsigned long locked, lock_limit;
3584 struct ring_buffer *rb;
3585 unsigned long vma_size;
3586 unsigned long nr_pages;
3587 long user_extra, extra;
3588 int ret = 0, flags = 0;
3589
3590 /*
3591 * Don't allow mmap() of inherited per-task counters. This would
3592 * create a performance issue due to all children writing to the
3593 * same rb.
3594 */
3595 if (event->cpu == -1 && event->attr.inherit)
3596 return -EINVAL;
3597
3598 if (!(vma->vm_flags & VM_SHARED))
3599 return -EINVAL;
3600
3601 vma_size = vma->vm_end - vma->vm_start;
3602 nr_pages = (vma_size / PAGE_SIZE) - 1;
3603
3604 /*
3605 * If we have rb pages ensure they're a power-of-two number, so we
3606 * can do bitmasks instead of modulo.
3607 */
3608 if (nr_pages != 0 && !is_power_of_2(nr_pages))
3609 return -EINVAL;
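
/*
 * Illustration: with a power-of-two data area, an offset wraps with a
 * mask instead of a division, i.e.
 *
 *	offset & ((nr_pages << PAGE_SHIFT) - 1)
 *
 * rather than offset % (nr_pages << PAGE_SHIFT).
 */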
3610
3611 if (vma_size != PAGE_SIZE * (1 + nr_pages))
3612 return -EINVAL;
3613
3614 if (vma->vm_pgoff != 0)
3615 return -EINVAL;
3616
3617 WARN_ON_ONCE(event->ctx->parent_ctx);
3618 mutex_lock(&event->mmap_mutex);
3619 if (event->rb) {
3620 if (event->rb->nr_pages == nr_pages)
3621 atomic_inc(&event->rb->refcount);
3622 else
3623 ret = -EINVAL;
3624 goto unlock;
3625 }
3626
3627 user_extra = nr_pages + 1;
3628 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
3629
3630 /*
3631 * Increase the limit linearly with more CPUs:
3632 */
3633 user_lock_limit *= num_online_cpus();
3634
3635 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3636
3637 extra = 0;
3638 if (user_locked > user_lock_limit)
3639 extra = user_locked - user_lock_limit;
3640
3641 lock_limit = rlimit(RLIMIT_MEMLOCK);
3642 lock_limit >>= PAGE_SHIFT;
3643 locked = vma->vm_mm->pinned_vm + extra;
3644
3645 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3646 !capable(CAP_IPC_LOCK)) {
3647 ret = -EPERM;
3648 goto unlock;
3649 }
3650
3651 WARN_ON(event->rb);
3652
3653 if (vma->vm_flags & VM_WRITE)
3654 flags |= RING_BUFFER_WRITABLE;
3655
3656 rb = rb_alloc(nr_pages,
3657 event->attr.watermark ? event->attr.wakeup_watermark : 0,
3658 event->cpu, flags);
3659
3660 if (!rb) {
3661 ret = -ENOMEM;
3662 goto unlock;
3663 }
3664 rcu_assign_pointer(event->rb, rb);
3665
3666 atomic_long_add(user_extra, &user->locked_vm);
3667 event->mmap_locked = extra;
3668 event->mmap_user = get_current_user();
3669 vma->vm_mm->pinned_vm += event->mmap_locked;
3670
3671 perf_event_update_userpage(event);
3672
3673unlock:
3674 if (!ret)
3675 atomic_inc(&event->mmap_count);
3676 mutex_unlock(&event->mmap_mutex);
3677
3678 vma->vm_flags |= VM_RESERVED;
3679 vma->vm_ops = &perf_mmap_vmops;
3680
3681 return ret;
3682}
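
/*
 * User-space counterpart of perf_mmap() above, as a minimal sketch:
 * the mapping must be MAP_SHARED at file offset 0 and span one
 * metadata page plus a power-of-two number of data pages:
 *
 *	long page_size = sysconf(_SC_PAGESIZE);
 *	int nr_pages = 8;
 *	void *base = mmap(NULL, (1 + nr_pages) * page_size,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * 'base' then points at the struct perf_event_mmap_page header.
 */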
3683
3684static int perf_fasync(int fd, struct file *filp, int on)
3685{
3686 struct inode *inode = filp->f_path.dentry->d_inode;
3687 struct perf_event *event = filp->private_data;
3688 int retval;
3689
3690 mutex_lock(&inode->i_mutex);
3691 retval = fasync_helper(fd, filp, on, &event->fasync);
3692 mutex_unlock(&inode->i_mutex);
3693
3694 if (retval < 0)
3695 return retval;
3696
3697 return 0;
3698}
3699
3700static const struct file_operations perf_fops = {
3701 .llseek = no_llseek,
3702 .release = perf_release,
3703 .read = perf_read,
3704 .poll = perf_poll,
3705 .unlocked_ioctl = perf_ioctl,
3706 .compat_ioctl = perf_ioctl,
3707 .mmap = perf_mmap,
3708 .fasync = perf_fasync,
3709};
3710
3711/*
3712 * Perf event wakeup
3713 *
3714 * If there's data, ensure we set the poll() state and publish everything
3715 * to user-space before waking everybody up.
3716 */
3717
3718void perf_event_wakeup(struct perf_event *event)
3719{
3720 ring_buffer_wakeup(event);
3721
3722 if (event->pending_kill) {
3723 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3724 event->pending_kill = 0;
3725 }
3726}
3727
3728static void perf_pending_event(struct irq_work *entry)
3729{
3730 struct perf_event *event = container_of(entry,
3731 struct perf_event, pending);
3732
3733 if (event->pending_disable) {
3734 event->pending_disable = 0;
3735 __perf_event_disable(event);
3736 }
3737
3738 if (event->pending_wakeup) {
3739 event->pending_wakeup = 0;
3740 perf_event_wakeup(event);
3741 }
3742}
3743
3744/*
3745 * We assume there is only KVM supporting the callbacks.
3746 * Later on, we might change it to a list if there is
3747 * another virtualization implementation supporting the callbacks.
3748 */
3749struct perf_guest_info_callbacks *perf_guest_cbs;
3750
3751int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3752{
3753 perf_guest_cbs = cbs;
3754 return 0;
3755}
3756EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3757
3758int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3759{
3760 perf_guest_cbs = NULL;
3761 return 0;
3762}
3763EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
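
/*
 * Registration sketch (illustrative; the callback names are
 * hypothetical, mirroring how a hypervisor such as KVM hooks in):
 *
 *	static struct perf_guest_info_callbacks my_guest_cbs = {
 *		.is_in_guest	= my_is_in_guest,
 *		.is_user_mode	= my_is_user_mode,
 *		.get_guest_ip	= my_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&my_guest_cbs);
 *	...
 *	perf_unregister_guest_info_callbacks(&my_guest_cbs);
 */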
3764
3765static void __perf_event_header__init_id(struct perf_event_header *header,
3766 struct perf_sample_data *data,
3767 struct perf_event *event)
3768{
3769 u64 sample_type = event->attr.sample_type;
3770
3771 data->type = sample_type;
3772 header->size += event->id_header_size;
3773
3774 if (sample_type & PERF_SAMPLE_TID) {
3775 /* namespace issues */
3776 data->tid_entry.pid = perf_event_pid(event, current);
3777 data->tid_entry.tid = perf_event_tid(event, current);
3778 }
3779
3780 if (sample_type & PERF_SAMPLE_TIME)
3781 data->time = perf_clock();
3782
3783 if (sample_type & PERF_SAMPLE_ID)
3784 data->id = primary_event_id(event);
3785
3786 if (sample_type & PERF_SAMPLE_STREAM_ID)
3787 data->stream_id = event->id;
3788
3789 if (sample_type & PERF_SAMPLE_CPU) {
3790 data->cpu_entry.cpu = raw_smp_processor_id();
3791 data->cpu_entry.reserved = 0;
3792 }
3793}
3794
3795void perf_event_header__init_id(struct perf_event_header *header,
3796 struct perf_sample_data *data,
3797 struct perf_event *event)
3798{
3799 if (event->attr.sample_id_all)
3800 __perf_event_header__init_id(header, data, event);
3801}
3802
3803static void __perf_event__output_id_sample(struct perf_output_handle *handle,
3804 struct perf_sample_data *data)
3805{
3806 u64 sample_type = data->type;
3807
3808 if (sample_type & PERF_SAMPLE_TID)
3809 perf_output_put(handle, data->tid_entry);
3810
3811 if (sample_type & PERF_SAMPLE_TIME)
3812 perf_output_put(handle, data->time);
3813
3814 if (sample_type & PERF_SAMPLE_ID)
3815 perf_output_put(handle, data->id);
3816
3817 if (sample_type & PERF_SAMPLE_STREAM_ID)
3818 perf_output_put(handle, data->stream_id);
3819
3820 if (sample_type & PERF_SAMPLE_CPU)
3821 perf_output_put(handle, data->cpu_entry);
3822}
3823
3824void perf_event__output_id_sample(struct perf_event *event,
3825 struct perf_output_handle *handle,
3826 struct perf_sample_data *sample)
3827{
3828 if (event->attr.sample_id_all)
3829 __perf_event__output_id_sample(handle, sample);
3830}
3831
3832static void perf_output_read_one(struct perf_output_handle *handle,
3833 struct perf_event *event,
3834 u64 enabled, u64 running)
3835{
3836 u64 read_format = event->attr.read_format;
3837 u64 values[4];
3838 int n = 0;
3839
3840 values[n++] = perf_event_count(event);
3841 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3842 values[n++] = enabled +
3843 atomic64_read(&event->child_total_time_enabled);
3844 }
3845 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3846 values[n++] = running +
3847 atomic64_read(&event->child_total_time_running);
3848 }
3849 if (read_format & PERF_FORMAT_ID)
3850 values[n++] = primary_event_id(event);
3851
3852 __output_copy(handle, values, n * sizeof(u64));
3853}
3854
3855/*
3856 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3857 */
3858static void perf_output_read_group(struct perf_output_handle *handle,
3859 struct perf_event *event,
3860 u64 enabled, u64 running)
3861{
3862 struct perf_event *leader = event->group_leader, *sub;
3863 u64 read_format = event->attr.read_format;
3864 u64 values[5];
3865 int n = 0;
3866
3867 values[n++] = 1 + leader->nr_siblings;
3868
3869 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3870 values[n++] = enabled;
3871
3872 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3873 values[n++] = running;
3874
3875 if (leader != event)
3876 leader->pmu->read(leader);
3877
3878 values[n++] = perf_event_count(leader);
3879 if (read_format & PERF_FORMAT_ID)
3880 values[n++] = primary_event_id(leader);
3881
3882 __output_copy(handle, values, n * sizeof(u64));
3883
3884 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3885 n = 0;
3886
3887 if (sub != event)
3888 sub->pmu->read(sub);
3889
3890 values[n++] = perf_event_count(sub);
3891 if (read_format & PERF_FORMAT_ID)
3892 values[n++] = primary_event_id(sub);
3893
3894 __output_copy(handle, values, n * sizeof(u64));
3895 }
3896}
3897
3898#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
3899 PERF_FORMAT_TOTAL_TIME_RUNNING)
3900
3901static void perf_output_read(struct perf_output_handle *handle,
3902 struct perf_event *event)
3903{
3904 u64 enabled = 0, running = 0, now;
3905 u64 read_format = event->attr.read_format;
3906
3907 /*
3908 * compute total_time_enabled, total_time_running
3909 * based on snapshot values taken when the event
3910 * was last scheduled in.
3911 *
3912 * we cannot simply call update_context_time(),
3913 * because of locking issues, as we are called in
3914 * NMI context
3915 */
3916 if (read_format & PERF_FORMAT_TOTAL_TIMES)
3917 calc_timer_values(event, &now, &enabled, &running);
3918
3919 if (event->attr.read_format & PERF_FORMAT_GROUP)
3920 perf_output_read_group(handle, event, enabled, running);
3921 else
3922 perf_output_read_one(handle, event, enabled, running);
3923}
3924
3925void perf_output_sample(struct perf_output_handle *handle,
3926 struct perf_event_header *header,
3927 struct perf_sample_data *data,
3928 struct perf_event *event)
3929{
3930 u64 sample_type = data->type;
3931
3932 perf_output_put(handle, *header);
3933
3934 if (sample_type & PERF_SAMPLE_IP)
3935 perf_output_put(handle, data->ip);
3936
3937 if (sample_type & PERF_SAMPLE_TID)
3938 perf_output_put(handle, data->tid_entry);
3939
3940 if (sample_type & PERF_SAMPLE_TIME)
3941 perf_output_put(handle, data->time);
3942
3943 if (sample_type & PERF_SAMPLE_ADDR)
3944 perf_output_put(handle, data->addr);
3945
3946 if (sample_type & PERF_SAMPLE_ID)
3947 perf_output_put(handle, data->id);
3948
3949 if (sample_type & PERF_SAMPLE_STREAM_ID)
3950 perf_output_put(handle, data->stream_id);
3951
3952 if (sample_type & PERF_SAMPLE_CPU)
3953 perf_output_put(handle, data->cpu_entry);
3954
3955 if (sample_type & PERF_SAMPLE_PERIOD)
3956 perf_output_put(handle, data->period);
3957
3958 if (sample_type & PERF_SAMPLE_READ)
3959 perf_output_read(handle, event);
3960
3961 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3962 if (data->callchain) {
3963 int size = 1;
3964
3966 size += data->callchain->nr;
3967
3968 size *= sizeof(u64);
3969
3970 __output_copy(handle, data->callchain, size);
3971 } else {
3972 u64 nr = 0;
3973 perf_output_put(handle, nr);
3974 }
3975 }
3976
3977 if (sample_type & PERF_SAMPLE_RAW) {
3978 if (data->raw) {
3979 perf_output_put(handle, data->raw->size);
3980 __output_copy(handle, data->raw->data,
3981 data->raw->size);
3982 } else {
3983 struct {
3984 u32 size;
3985 u32 data;
3986 } raw = {
3987 .size = sizeof(u32),
3988 .data = 0,
3989 };
3990 perf_output_put(handle, raw);
3991 }
3992 }
3993
3994 if (!event->attr.watermark) {
3995 int wakeup_events = event->attr.wakeup_events;
3996
3997 if (wakeup_events) {
3998 struct ring_buffer *rb = handle->rb;
3999 int events = local_inc_return(&rb->events);
4000
4001 if (events >= wakeup_events) {
4002 local_sub(wakeup_events, &rb->events);
4003 local_inc(&rb->wakeup);
4004 }
4005 }
4006 }
4007
4008 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4009 if (data->br_stack) {
4010 size_t size;
4011
4012 size = data->br_stack->nr
4013 * sizeof(struct perf_branch_entry);
4014
4015 perf_output_put(handle, data->br_stack->nr);
4016 perf_output_copy(handle, data->br_stack->entries, size);
4017 } else {
4018 /*
4019 * we always store at least the value of nr
4020 */
4021 u64 nr = 0;
4022 perf_output_put(handle, nr);
4023 }
4024 }
4025}
4026
4027void perf_prepare_sample(struct perf_event_header *header,
4028 struct perf_sample_data *data,
4029 struct perf_event *event,
4030 struct pt_regs *regs)
4031{
4032 u64 sample_type = event->attr.sample_type;
4033
4034 header->type = PERF_RECORD_SAMPLE;
4035 header->size = sizeof(*header) + event->header_size;
4036
4037 header->misc = 0;
4038 header->misc |= perf_misc_flags(regs);
4039
4040 __perf_event_header__init_id(header, data, event);
4041
4042 if (sample_type & PERF_SAMPLE_IP)
4043 data->ip = perf_instruction_pointer(regs);
4044
4045 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4046 int size = 1;
4047
4048 data->callchain = perf_callchain(regs);
4049
4050 if (data->callchain)
4051 size += data->callchain->nr;
4052
4053 header->size += size * sizeof(u64);
4054 }
4055
4056 if (sample_type & PERF_SAMPLE_RAW) {
4057 int size = sizeof(u32);
4058
4059 if (data->raw)
4060 size += data->raw->size;
4061 else
4062 size += sizeof(u32);
4063
4064 WARN_ON_ONCE(size & (sizeof(u64)-1));
4065 header->size += size;
4066 }
4067
4068 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4069 int size = sizeof(u64); /* nr */
4070 if (data->br_stack) {
4071 size += data->br_stack->nr
4072 * sizeof(struct perf_branch_entry);
4073 }
4074 header->size += size;
4075 }
4076}
4077
4078static void perf_event_output(struct perf_event *event,
4079 struct perf_sample_data *data,
4080 struct pt_regs *regs)
4081{
4082 struct perf_output_handle handle;
4083 struct perf_event_header header;
4084
4085 /* protect the callchain buffers */
4086 rcu_read_lock();
4087
4088 perf_prepare_sample(&header, data, event, regs);
4089
4090 if (perf_output_begin(&handle, event, header.size))
4091 goto exit;
4092
4093 perf_output_sample(&handle, &header, data, event);
4094
4095 perf_output_end(&handle);
4096
4097exit:
4098 rcu_read_unlock();
4099}
4100
4101/*
4102 * read event_id
4103 */
4104
4105struct perf_read_event {
4106 struct perf_event_header header;
4107
4108 u32 pid;
4109 u32 tid;
4110};
4111
4112static void
4113perf_event_read_event(struct perf_event *event,
4114 struct task_struct *task)
4115{
4116 struct perf_output_handle handle;
4117 struct perf_sample_data sample;
4118 struct perf_read_event read_event = {
4119 .header = {
4120 .type = PERF_RECORD_READ,
4121 .misc = 0,
4122 .size = sizeof(read_event) + event->read_size,
4123 },
4124 .pid = perf_event_pid(event, task),
4125 .tid = perf_event_tid(event, task),
4126 };
4127 int ret;
4128
4129 perf_event_header__init_id(&read_event.header, &sample, event);
4130 ret = perf_output_begin(&handle, event, read_event.header.size);
4131 if (ret)
4132 return;
4133
4134 perf_output_put(&handle, read_event);
4135 perf_output_read(&handle, event);
4136 perf_event__output_id_sample(event, &handle, &sample);
4137
4138 perf_output_end(&handle);
4139}
4140
4141/*
4142 * task tracking -- fork/exit
4143 *
4144 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
4145 */
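
/*
 * Attribute sketch: user-space opts into these records by setting the
 * corresponding perf_event_attr bits, e.g.
 *
 *	attr.task = 1;		- PERF_RECORD_FORK / PERF_RECORD_EXIT
 *	attr.comm = 1;		- PERF_RECORD_COMM
 *	attr.mmap = 1;		- PERF_RECORD_MMAP, executable mappings
 *	attr.mmap_data = 1;	- PERF_RECORD_MMAP, data mappings
 */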
4146
4147struct perf_task_event {
4148 struct task_struct *task;
4149 struct perf_event_context *task_ctx;
4150
4151 struct {
4152 struct perf_event_header header;
4153
4154 u32 pid;
4155 u32 ppid;
4156 u32 tid;
4157 u32 ptid;
4158 u64 time;
4159 } event_id;
4160};
4161
4162static void perf_event_task_output(struct perf_event *event,
4163 struct perf_task_event *task_event)
4164{
4165 struct perf_output_handle handle;
4166 struct perf_sample_data sample;
4167 struct task_struct *task = task_event->task;
4168 int ret, size = task_event->event_id.header.size;
4169
4170 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
4171
4172 ret = perf_output_begin(&handle, event,
4173 task_event->event_id.header.size);
4174 if (ret)
4175 goto out;
4176
4177 task_event->event_id.pid = perf_event_pid(event, task);
4178 task_event->event_id.ppid = perf_event_pid(event, current);
4179
4180 task_event->event_id.tid = perf_event_tid(event, task);
4181 task_event->event_id.ptid = perf_event_tid(event, current);
4182
4183 perf_output_put(&handle, task_event->event_id);
4184
4185 perf_event__output_id_sample(event, &handle, &sample);
4186
4187 perf_output_end(&handle);
4188out:
4189 task_event->event_id.header.size = size;
4190}
4191
4192static int perf_event_task_match(struct perf_event *event)
4193{
4194 if (event->state < PERF_EVENT_STATE_INACTIVE)
4195 return 0;
4196
4197 if (!event_filter_match(event))
4198 return 0;
4199
4200 if (event->attr.comm || event->attr.mmap ||
4201 event->attr.mmap_data || event->attr.task)
4202 return 1;
4203
4204 return 0;
4205}
4206
4207static void perf_event_task_ctx(struct perf_event_context *ctx,
4208 struct perf_task_event *task_event)
4209{
4210 struct perf_event *event;
4211
4212 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4213 if (perf_event_task_match(event))
4214 perf_event_task_output(event, task_event);
4215 }
4216}
4217
4218static void perf_event_task_event(struct perf_task_event *task_event)
4219{
4220 struct perf_cpu_context *cpuctx;
4221 struct perf_event_context *ctx;
4222 struct pmu *pmu;
4223 int ctxn;
4224
4225 rcu_read_lock();
4226 list_for_each_entry_rcu(pmu, &pmus, entry) {
4227 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4228 if (cpuctx->active_pmu != pmu)
4229 goto next;
4230 perf_event_task_ctx(&cpuctx->ctx, task_event);
4231
4232 ctx = task_event->task_ctx;
4233 if (!ctx) {
4234 ctxn = pmu->task_ctx_nr;
4235 if (ctxn < 0)
4236 goto next;
4237 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4238 }
4239 if (ctx)
4240 perf_event_task_ctx(ctx, task_event);
4241next:
4242 put_cpu_ptr(pmu->pmu_cpu_context);
4243 }
4244 rcu_read_unlock();
4245}
4246
4247static void perf_event_task(struct task_struct *task,
4248 struct perf_event_context *task_ctx,
4249 int new)
4250{
4251 struct perf_task_event task_event;
4252
4253 if (!atomic_read(&nr_comm_events) &&
4254 !atomic_read(&nr_mmap_events) &&
4255 !atomic_read(&nr_task_events))
4256 return;
4257
4258 task_event = (struct perf_task_event){
4259 .task = task,
4260 .task_ctx = task_ctx,
4261 .event_id = {
4262 .header = {
4263 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
4264 .misc = 0,
4265 .size = sizeof(task_event.event_id),
4266 },
4267 /* .pid */
4268 /* .ppid */
4269 /* .tid */
4270 /* .ptid */
4271 .time = perf_clock(),
4272 },
4273 };
4274
4275 perf_event_task_event(&task_event);
4276}
4277
4278void perf_event_fork(struct task_struct *task)
4279{
4280 perf_event_task(task, NULL, 1);
4281}
4282
4283/*
4284 * comm tracking
4285 */
4286
4287struct perf_comm_event {
4288 struct task_struct *task;
4289 char *comm;
4290 int comm_size;
4291
4292 struct {
4293 struct perf_event_header header;
4294
4295 u32 pid;
4296 u32 tid;
4297 } event_id;
4298};
4299
4300static void perf_event_comm_output(struct perf_event *event,
4301 struct perf_comm_event *comm_event)
4302{
4303 struct perf_output_handle handle;
4304 struct perf_sample_data sample;
4305 int size = comm_event->event_id.header.size;
4306 int ret;
4307
4308 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4309 ret = perf_output_begin(&handle, event,
4310 comm_event->event_id.header.size);
4311
4312 if (ret)
4313 goto out;
4314
4315 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4316 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4317
4318 perf_output_put(&handle, comm_event->event_id);
4319 __output_copy(&handle, comm_event->comm,
4320 comm_event->comm_size);
4321
4322 perf_event__output_id_sample(event, &handle, &sample);
4323
4324 perf_output_end(&handle);
4325out:
4326 comm_event->event_id.header.size = size;
4327}
4328
4329static int perf_event_comm_match(struct perf_event *event)
4330{
4331 if (event->state < PERF_EVENT_STATE_INACTIVE)
4332 return 0;
4333
4334 if (!event_filter_match(event))
4335 return 0;
4336
4337 if (event->attr.comm)
4338 return 1;
4339
4340 return 0;
4341}
4342
4343static void perf_event_comm_ctx(struct perf_event_context *ctx,
4344 struct perf_comm_event *comm_event)
4345{
4346 struct perf_event *event;
4347
4348 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4349 if (perf_event_comm_match(event))
4350 perf_event_comm_output(event, comm_event);
4351 }
4352}
4353
4354static void perf_event_comm_event(struct perf_comm_event *comm_event)
4355{
4356 struct perf_cpu_context *cpuctx;
4357 struct perf_event_context *ctx;
4358 char comm[TASK_COMM_LEN];
4359 unsigned int size;
4360 struct pmu *pmu;
4361 int ctxn;
4362
4363 memset(comm, 0, sizeof(comm));
4364 strlcpy(comm, comm_event->task->comm, sizeof(comm));
4365 size = ALIGN(strlen(comm)+1, sizeof(u64));
4366
4367 comm_event->comm = comm;
4368 comm_event->comm_size = size;
4369
4370 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
4371 rcu_read_lock();
4372 list_for_each_entry_rcu(pmu, &pmus, entry) {
4373 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4374 if (cpuctx->active_pmu != pmu)
4375 goto next;
4376 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
4377
4378 ctxn = pmu->task_ctx_nr;
4379 if (ctxn < 0)
4380 goto next;
4381
4382 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4383 if (ctx)
4384 perf_event_comm_ctx(ctx, comm_event);
4385next:
4386 put_cpu_ptr(pmu->pmu_cpu_context);
4387 }
4388 rcu_read_unlock();
4389}
4390
4391void perf_event_comm(struct task_struct *task)
4392{
4393 struct perf_comm_event comm_event;
4394 struct perf_event_context *ctx;
4395 int ctxn;
4396
4397 for_each_task_context_nr(ctxn) {
4398 ctx = task->perf_event_ctxp[ctxn];
4399 if (!ctx)
4400 continue;
4401
4402 perf_event_enable_on_exec(ctx);
4403 }
4404
4405 if (!atomic_read(&nr_comm_events))
4406 return;
4407
4408 comm_event = (struct perf_comm_event){
4409 .task = task,
4410 /* .comm */
4411 /* .comm_size */
4412 .event_id = {
4413 .header = {
4414 .type = PERF_RECORD_COMM,
4415 .misc = 0,
4416 /* .size */
4417 },
4418 /* .pid */
4419 /* .tid */
4420 },
4421 };
4422
4423 perf_event_comm_event(&comm_event);
4424}
4425
4426/*
4427 * mmap tracking
4428 */
4429
4430struct perf_mmap_event {
4431 struct vm_area_struct *vma;
4432
4433 const char *file_name;
4434 int file_size;
4435
4436 struct {
4437 struct perf_event_header header;
4438
4439 u32 pid;
4440 u32 tid;
4441 u64 start;
4442 u64 len;
4443 u64 pgoff;
4444 } event_id;
4445};
4446
4447static void perf_event_mmap_output(struct perf_event *event,
4448 struct perf_mmap_event *mmap_event)
4449{
4450 struct perf_output_handle handle;
4451 struct perf_sample_data sample;
4452 int size = mmap_event->event_id.header.size;
4453 int ret;
4454
4455 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
4456 ret = perf_output_begin(&handle, event,
4457 mmap_event->event_id.header.size);
4458 if (ret)
4459 goto out;
4460
4461 mmap_event->event_id.pid = perf_event_pid(event, current);
4462 mmap_event->event_id.tid = perf_event_tid(event, current);
4463
4464 perf_output_put(&handle, mmap_event->event_id);
4465 __output_copy(&handle, mmap_event->file_name,
4466 mmap_event->file_size);
4467
4468 perf_event__output_id_sample(event, &handle, &sample);
4469
4470 perf_output_end(&handle);
4471out:
4472 mmap_event->event_id.header.size = size;
4473}
4474
4475static int perf_event_mmap_match(struct perf_event *event,
4476 struct perf_mmap_event *mmap_event,
4477 int executable)
4478{
4479 if (event->state < PERF_EVENT_STATE_INACTIVE)
4480 return 0;
4481
4482 if (!event_filter_match(event))
4483 return 0;
4484
4485 if ((!executable && event->attr.mmap_data) ||
4486 (executable && event->attr.mmap))
4487 return 1;
4488
4489 return 0;
4490}
4491
4492static void perf_event_mmap_ctx(struct perf_event_context *ctx,
4493 struct perf_mmap_event *mmap_event,
4494 int executable)
4495{
4496 struct perf_event *event;
4497
4498 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4499 if (perf_event_mmap_match(event, mmap_event, executable))
4500 perf_event_mmap_output(event, mmap_event);
4501 }
4502}
4503
4504static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4505{
4506 struct perf_cpu_context *cpuctx;
4507 struct perf_event_context *ctx;
4508 struct vm_area_struct *vma = mmap_event->vma;
4509 struct file *file = vma->vm_file;
4510 unsigned int size;
4511 char tmp[16];
4512 char *buf = NULL;
4513 const char *name;
4514 struct pmu *pmu;
4515 int ctxn;
4516
4517 memset(tmp, 0, sizeof(tmp));
4518
4519 if (file) {
4520 /*
4521 * d_path() works from the end of the buffer backwards, so we
4522 * need to add enough zero bytes after the string to handle
4523 * the 64bit alignment we do later.
4524 */
4525 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
4526 if (!buf) {
4527 name = strncpy(tmp, "//enomem", sizeof(tmp));
4528 goto got_name;
4529 }
4530 name = d_path(&file->f_path, buf, PATH_MAX);
4531 if (IS_ERR(name)) {
4532 name = strncpy(tmp, "//toolong", sizeof(tmp));
4533 goto got_name;
4534 }
4535 } else {
4536 if (arch_vma_name(mmap_event->vma)) {
4537 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4538 sizeof(tmp));
4539 goto got_name;
4540 }
4541
4542 if (!vma->vm_mm) {
4543 name = strncpy(tmp, "[vdso]", sizeof(tmp));
4544 goto got_name;
4545 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
4546 vma->vm_end >= vma->vm_mm->brk) {
4547 name = strncpy(tmp, "[heap]", sizeof(tmp));
4548 goto got_name;
4549 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
4550 vma->vm_end >= vma->vm_mm->start_stack) {
4551 name = strncpy(tmp, "[stack]", sizeof(tmp));
4552 goto got_name;
4553 }
4554
4555 name = strncpy(tmp, "//anon", sizeof(tmp));
4556 goto got_name;
4557 }
4558
4559got_name:
4560 size = ALIGN(strlen(name)+1, sizeof(u64));
4561
4562 mmap_event->file_name = name;
4563 mmap_event->file_size = size;
4564
4565 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4566
4567 rcu_read_lock();
4568 list_for_each_entry_rcu(pmu, &pmus, entry) {
4569 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4570 if (cpuctx->active_pmu != pmu)
4571 goto next;
4572 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4573 vma->vm_flags & VM_EXEC);
4574
4575 ctxn = pmu->task_ctx_nr;
4576 if (ctxn < 0)
4577 goto next;
4578
4579 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4580 if (ctx) {
4581 perf_event_mmap_ctx(ctx, mmap_event,
4582 vma->vm_flags & VM_EXEC);
4583 }
4584next:
4585 put_cpu_ptr(pmu->pmu_cpu_context);
4586 }
4587 rcu_read_unlock();
4588
4589 kfree(buf);
4590}
4591
4592void perf_event_mmap(struct vm_area_struct *vma)
4593{
4594 struct perf_mmap_event mmap_event;
4595
4596 if (!atomic_read(&nr_mmap_events))
4597 return;
4598
4599 mmap_event = (struct perf_mmap_event){
4600 .vma = vma,
4601 /* .file_name */
4602 /* .file_size */
4603 .event_id = {
4604 .header = {
4605 .type = PERF_RECORD_MMAP,
4606 .misc = PERF_RECORD_MISC_USER,
4607 /* .size */
4608 },
4609 /* .pid */
4610 /* .tid */
4611 .start = vma->vm_start,
4612 .len = vma->vm_end - vma->vm_start,
4613 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
4614 },
4615 };
4616
4617 perf_event_mmap_event(&mmap_event);
4618}
4619
4620/*
4621 * IRQ throttle logging
4622 */
4623
4624static void perf_log_throttle(struct perf_event *event, int enable)
4625{
4626 struct perf_output_handle handle;
4627 struct perf_sample_data sample;
4628 int ret;
4629
4630 struct {
4631 struct perf_event_header header;
4632 u64 time;
4633 u64 id;
4634 u64 stream_id;
4635 } throttle_event = {
4636 .header = {
4637 .type = PERF_RECORD_THROTTLE,
4638 .misc = 0,
4639 .size = sizeof(throttle_event),
4640 },
4641 .time = perf_clock(),
4642 .id = primary_event_id(event),
4643 .stream_id = event->id,
4644 };
4645
4646 if (enable)
4647 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4648
4649 perf_event_header__init_id(&throttle_event.header, &sample, event);
4650
4651 ret = perf_output_begin(&handle, event,
4652 throttle_event.header.size);
4653 if (ret)
4654 return;
4655
4656 perf_output_put(&handle, throttle_event);
4657 perf_event__output_id_sample(event, &handle, &sample);
4658 perf_output_end(&handle);
4659}
4660
4661/*
4662 * Generic event overflow handling, sampling.
4663 */
4664
4665static int __perf_event_overflow(struct perf_event *event,
4666 int throttle, struct perf_sample_data *data,
4667 struct pt_regs *regs)
4668{
4669 int events = atomic_read(&event->event_limit);
4670 struct hw_perf_event *hwc = &event->hw;
4671 u64 seq;
4672 int ret = 0;
4673
4674 /*
4675 * Non-sampling counters might still use the PMI to fold short
4676 * hardware counters, ignore those.
4677 */
4678 if (unlikely(!is_sampling_event(event)))
4679 return 0;
4680
4681 seq = __this_cpu_read(perf_throttled_seq);
4682 if (seq != hwc->interrupts_seq) {
4683 hwc->interrupts_seq = seq;
4684 hwc->interrupts = 1;
4685 } else {
4686 hwc->interrupts++;
4687 if (unlikely(throttle
4688 && hwc->interrupts >= max_samples_per_tick)) {
4689 __this_cpu_inc(perf_throttled_count);
4690 hwc->interrupts = MAX_INTERRUPTS;
4691 perf_log_throttle(event, 0);
4692 ret = 1;
4693 }
4694 }
4695
4696 if (event->attr.freq) {
4697 u64 now = perf_clock();
4698 s64 delta = now - hwc->freq_time_stamp;
4699
4700 hwc->freq_time_stamp = now;
4701
4702 if (delta > 0 && delta < 2*TICK_NSEC)
4703 perf_adjust_period(event, delta, hwc->last_period, true);
4704 }
4705
4706 /*
4707 * XXX event_limit might not quite work as expected on inherited
4708 * events
4709 */
4710
4711 event->pending_kill = POLL_IN;
4712 if (events && atomic_dec_and_test(&event->event_limit)) {
4713 ret = 1;
4714 event->pending_kill = POLL_HUP;
4715 event->pending_disable = 1;
4716 irq_work_queue(&event->pending);
4717 }
4718
4719 if (event->overflow_handler)
4720 event->overflow_handler(event, data, regs);
4721 else
4722 perf_event_output(event, data, regs);
4723
4724 if (event->fasync && event->pending_kill) {
4725 event->pending_wakeup = 1;
4726 irq_work_queue(&event->pending);
4727 }
4728
4729 return ret;
4730}
4731
4732int perf_event_overflow(struct perf_event *event,
4733 struct perf_sample_data *data,
4734 struct pt_regs *regs)
4735{
4736 return __perf_event_overflow(event, 1, data, regs);
4737}
4738
4739/*
4740 * Generic software event infrastructure
4741 */
4742
4743struct swevent_htable {
4744 struct swevent_hlist *swevent_hlist;
4745 struct mutex hlist_mutex;
4746 int hlist_refcount;
4747
4748 /* Recursion avoidance in each contexts */
4749 int recursion[PERF_NR_CONTEXTS];
4750};
4751
4752static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
4753
4754/*
4755 * We directly increment event->count and keep a second value in
4756 * event->hw.period_left to count intervals. This period value
4757 * is kept in the range [-sample_period, 0] so that we can use the
4758 * sign as trigger.
4759 */
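
/*
 * Worked illustration: with sample_period = 4, perf_swevent_set_period()
 * below first rewinds period_left to -4. Three events of nr = 1 bring it
 * up to -1 and it stays negative (no overflow); the fourth brings it to
 * 0, the add turns non-negative, and set_period() reports one overflow
 * while rewinding period_left towards -4 again.
 */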
4760
4761static u64 perf_swevent_set_period(struct perf_event *event)
4762{
4763 struct hw_perf_event *hwc = &event->hw;
4764 u64 period = hwc->last_period;
4765 u64 nr, offset;
4766 s64 old, val;
4767
4768 hwc->last_period = hwc->sample_period;
4769
4770again:
4771 old = val = local64_read(&hwc->period_left);
4772 if (val < 0)
4773 return 0;
4774
4775 nr = div64_u64(period + val, period);
4776 offset = nr * period;
4777 val -= offset;
4778 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
4779 goto again;
4780
4781 return nr;
4782}
4783
4784static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
4785 struct perf_sample_data *data,
4786 struct pt_regs *regs)
4787{
4788 struct hw_perf_event *hwc = &event->hw;
4789 int throttle = 0;
4790
4791 if (!overflow)
4792 overflow = perf_swevent_set_period(event);
4793
4794 if (hwc->interrupts == MAX_INTERRUPTS)
4795 return;
4796
4797 for (; overflow; overflow--) {
4798 if (__perf_event_overflow(event, throttle,
4799 data, regs)) {
4800 /*
4801 * We inhibit the overflow from happening when
4802 * hwc->interrupts == MAX_INTERRUPTS.
4803 */
4804 break;
4805 }
4806 throttle = 1;
4807 }
4808}
4809
4810static void perf_swevent_event(struct perf_event *event, u64 nr,
4811 struct perf_sample_data *data,
4812 struct pt_regs *regs)
4813{
4814 struct hw_perf_event *hwc = &event->hw;
4815
4816 local64_add(nr, &event->count);
4817
4818 if (!regs)
4819 return;
4820
4821 if (!is_sampling_event(event))
4822 return;
4823
4824 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
4825 data->period = nr;
4826 return perf_swevent_overflow(event, 1, data, regs);
4827 } else
4828 data->period = event->hw.last_period;
4829
4830 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4831 return perf_swevent_overflow(event, 1, data, regs);
4832
4833 if (local64_add_negative(nr, &hwc->period_left))
4834 return;
4835
4836 perf_swevent_overflow(event, 0, data, regs);
4837}
4838
4839static int perf_exclude_event(struct perf_event *event,
4840 struct pt_regs *regs)
4841{
4842 if (event->hw.state & PERF_HES_STOPPED)
4843 return 1;
4844
4845 if (regs) {
4846 if (event->attr.exclude_user && user_mode(regs))
4847 return 1;
4848
4849 if (event->attr.exclude_kernel && !user_mode(regs))
4850 return 1;
4851 }
4852
4853 return 0;
4854}
4855
4856static int perf_swevent_match(struct perf_event *event,
4857 enum perf_type_id type,
4858 u32 event_id,
4859 struct perf_sample_data *data,
4860 struct pt_regs *regs)
4861{
4862 if (event->attr.type != type)
4863 return 0;
4864
4865 if (event->attr.config != event_id)
4866 return 0;
4867
4868 if (perf_exclude_event(event, regs))
4869 return 0;
4870
4871 return 1;
4872}
4873
4874static inline u64 swevent_hash(u64 type, u32 event_id)
4875{
4876 u64 val = event_id | (type << 32);
4877
4878 return hash_64(val, SWEVENT_HLIST_BITS);
4879}
4880
4881static inline struct hlist_head *
4882__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
4883{
4884 u64 hash = swevent_hash(type, event_id);
4885
4886 return &hlist->heads[hash];
4887}
4888
4889/* For the read side: events when they trigger */
4890static inline struct hlist_head *
4891find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
4892{
4893 struct swevent_hlist *hlist;
4894
4895 hlist = rcu_dereference(swhash->swevent_hlist);
4896 if (!hlist)
4897 return NULL;
4898
4899 return __find_swevent_head(hlist, type, event_id);
4900}
4901
4902/* For the event head insertion and removal in the hlist */
4903static inline struct hlist_head *
4904find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
4905{
4906 struct swevent_hlist *hlist;
4907 u32 event_id = event->attr.config;
4908 u64 type = event->attr.type;
4909
4910 /*
4911 * Event scheduling is always serialized against hlist allocation
4912 * and release, which makes the protected version suitable here.
4913 * The context lock guarantees that.
4914 */
4915 hlist = rcu_dereference_protected(swhash->swevent_hlist,
4916 lockdep_is_held(&event->ctx->lock));
4917 if (!hlist)
4918 return NULL;
4919
4920 return __find_swevent_head(hlist, type, event_id);
4921}
4922
4923static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4924 u64 nr,
4925 struct perf_sample_data *data,
4926 struct pt_regs *regs)
4927{
4928 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4929 struct perf_event *event;
4930 struct hlist_node *node;
4931 struct hlist_head *head;
4932
4933 rcu_read_lock();
4934 head = find_swevent_head_rcu(swhash, type, event_id);
4935 if (!head)
4936 goto end;
4937
4938 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4939 if (perf_swevent_match(event, type, event_id, data, regs))
4940 perf_swevent_event(event, nr, data, regs);
4941 }
4942end:
4943 rcu_read_unlock();
4944}
4945
4946int perf_swevent_get_recursion_context(void)
4947{
4948 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4949
4950 return get_recursion_context(swhash->recursion);
4951}
4952EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
4953
4954inline void perf_swevent_put_recursion_context(int rctx)
4955{
4956 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4957
4958 put_recursion_context(swhash->recursion, rctx);
4959}
4960
4961void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
4962{
4963 struct perf_sample_data data;
4964 int rctx;
4965
4966 preempt_disable_notrace();
4967 rctx = perf_swevent_get_recursion_context();
4968 if (rctx < 0)
4969 return;
4970
4971 perf_sample_data_init(&data, addr, 0);
4972
4973 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
4974
4975 perf_swevent_put_recursion_context(rctx);
4976 preempt_enable_notrace();
4977}
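
/*
 * Callers normally go through the perf_sw_event() wrapper declared in
 * <linux/perf_event.h>; e.g. the architecture fault handlers emit,
 * roughly:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */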
4978
4979static void perf_swevent_read(struct perf_event *event)
4980{
4981}
4982
4983static int perf_swevent_add(struct perf_event *event, int flags)
4984{
4985 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4986 struct hw_perf_event *hwc = &event->hw;
4987 struct hlist_head *head;
4988
4989 if (is_sampling_event(event)) {
4990 hwc->last_period = hwc->sample_period;
4991 perf_swevent_set_period(event);
4992 }
4993
4994 hwc->state = !(flags & PERF_EF_START); /* PERF_HES_STOPPED (== 1) unless started now */
4995
4996 head = find_swevent_head(swhash, event);
4997 if (WARN_ON_ONCE(!head))
4998 return -EINVAL;
4999
5000 hlist_add_head_rcu(&event->hlist_entry, head);
5001
5002 return 0;
5003}
5004
5005static void perf_swevent_del(struct perf_event *event, int flags)
5006{
5007 hlist_del_rcu(&event->hlist_entry);
5008}
5009
5010static void perf_swevent_start(struct perf_event *event, int flags)
5011{
5012 event->hw.state = 0;
5013}
5014
5015static void perf_swevent_stop(struct perf_event *event, int flags)
5016{
5017 event->hw.state = PERF_HES_STOPPED;
5018}
5019
5020/* Deref the hlist from the update side */
5021static inline struct swevent_hlist *
5022swevent_hlist_deref(struct swevent_htable *swhash)
5023{
5024 return rcu_dereference_protected(swhash->swevent_hlist,
5025 lockdep_is_held(&swhash->hlist_mutex));
5026}
5027
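/*
 * Publish NULL first so new lookups miss, then free the old hlist only
 * after a grace period: find_swevent_head_rcu() may still be walking it
 * on another CPU.
 */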
5028static void swevent_hlist_release(struct swevent_htable *swhash)
5029{
5030 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
5031
5032 if (!hlist)
5033 return;
5034
5035 rcu_assign_pointer(swhash->swevent_hlist, NULL);
5036 kfree_rcu(hlist, rcu_head);
5037}
5038
5039static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
5040{
5041 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5042
5043 mutex_lock(&swhash->hlist_mutex);
5044
5045 if (!--swhash->hlist_refcount)
5046 swevent_hlist_release(swhash);
5047
5048 mutex_unlock(&swhash->hlist_mutex);
5049}
5050
5051static void swevent_hlist_put(struct perf_event *event)
5052{
5053 int cpu;
5054
5055 if (event->cpu != -1) {
5056 swevent_hlist_put_cpu(event, event->cpu);
5057 return;
5058 }
5059
5060 for_each_possible_cpu(cpu)
5061 swevent_hlist_put_cpu(event, cpu);
5062}
5063
5064static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
5065{
5066 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5067 int err = 0;
5068
5069 mutex_lock(&swhash->hlist_mutex);
5070
5071 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
5072 struct swevent_hlist *hlist;
5073
5074 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5075 if (!hlist) {
5076 err = -ENOMEM;
5077 goto exit;
5078 }
5079 rcu_assign_pointer(swhash->swevent_hlist, hlist);
5080 }
5081 swhash->hlist_refcount++;
5082exit:
5083 mutex_unlock(&swhash->hlist_mutex);
5084
5085 return err;
5086}
5087
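/*
 * A task-bound event (event->cpu == -1) can fire on any CPU, so it has
 * to hold a reference on every possible CPU's hlist; on allocation
 * failure the references taken so far are dropped again below.
 */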
5088static int swevent_hlist_get(struct perf_event *event)
5089{
5090 int err;
5091 int cpu, failed_cpu;
5092
5093 if (event->cpu != -1)
5094 return swevent_hlist_get_cpu(event, event->cpu);
5095
5096 get_online_cpus();
5097 for_each_possible_cpu(cpu) {
5098 err = swevent_hlist_get_cpu(event, cpu);
5099 if (err) {
5100 failed_cpu = cpu;
5101 goto fail;
5102 }
5103 }
5104 put_online_cpus();
5105
5106 return 0;
5107fail:
5108 for_each_possible_cpu(cpu) {
5109 if (cpu == failed_cpu)
5110 break;
5111 swevent_hlist_put_cpu(event, cpu);
5112 }
5113
5114 put_online_cpus();
5115 return err;
5116}
5117
5118struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
5119
5120static void sw_perf_event_destroy(struct perf_event *event)
5121{
5122 u64 event_id = event->attr.config;
5123
5124 WARN_ON(event->parent);
5125
5126 static_key_slow_dec(&perf_swevent_enabled[event_id]);
5127 swevent_hlist_put(event);
5128}
5129
5130static int perf_swevent_init(struct perf_event *event)
5131{
5132 u64 event_id = event->attr.config; /* u64: a signed int would let negative configs bypass the PERF_COUNT_SW_MAX check below */
5133
5134 if (event->attr.type != PERF_TYPE_SOFTWARE)
5135 return -ENOENT;
5136
5137 /*
5138 * no branch sampling for software events
5139 */
5140 if (has_branch_stack(event))
5141 return -EOPNOTSUPP;
5142
5143 switch (event_id) {
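/*
 * The cpu and task clocks are served by their own hrtimer-based
 * PMUs (perf_cpu_clock / perf_task_clock further down), not by
 * the generic software pmu.
 */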
5144 case PERF_COUNT_SW_CPU_CLOCK:
5145 case PERF_COUNT_SW_TASK_CLOCK:
5146 return -ENOENT;
5147
5148 default:
5149 break;
5150 }
5151
5152 if (event_id >= PERF_COUNT_SW_MAX)
5153 return -ENOENT;
5154
5155 if (!event->parent) {
5156 int err;
5157
5158 err = swevent_hlist_get(event);
5159 if (err)
5160 return err;
5161
5162 static_key_slow_inc(&perf_swevent_enabled[event_id]);
5163 event->destroy = sw_perf_event_destroy;
5164 }
5165
5166 return 0;
5167}
5168
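/* Software events expose no user-readable counter; 0 means "none". */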
5169static int perf_swevent_event_idx(struct perf_event *event)
5170{
5171 return 0;
5172}
5173
5174static struct pmu perf_swevent = {
5175 .task_ctx_nr = perf_sw_context,
5176
5177 .event_init = perf_swevent_init,
5178 .add = perf_swevent_add,
5179 .del = perf_swevent_del,
5180 .start = perf_swevent_start,
5181 .stop = perf_swevent_stop,
5182 .read = perf_swevent_read,
5183
5184 .event_idx = perf_swevent_event_idx,
5185};
5186
5187#ifdef CONFIG_EVENT_TRACING
5188
5189static int perf_tp_filter_match(struct perf_event *event,
5190 struct perf_sample_data *data)
5191{
5192 void *record = data->raw->data;
5193
5194 if (likely(!event->filter) || filter_match_preds(event->filter, record))
5195 return 1;
5196 return 0;
5197}
5198
5199static int perf_tp_event_match(struct perf_event *event,
5200 struct perf_sample_data *data,
5201 struct pt_regs *regs)
5202{
5203 if (event->hw.state & PERF_HES_STOPPED)
5204 return 0;
5205 /*
5206 * All tracepoints are from kernel-space.
5207 */
5208 if (event->attr.exclude_kernel)
5209 return 0;
5210
5211 if (!perf_tp_filter_match(event, data))
5212 return 0;
5213
5214 return 1;
5215}
5216
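/*
 * Called from the tracepoint code with the recursion context (@rctx)
 * already taken; deliver to every matching event on @head, then drop
 * the recursion context on the way out.
 */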
5217void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5218 struct pt_regs *regs, struct hlist_head *head, int rctx)
5219{
5220 struct perf_sample_data data;
5221 struct perf_event *event;
5222 struct hlist_node *node;
5223
5224 struct perf_raw_record raw = {
5225 .size = entry_size,
5226 .data = record,
5227 };
5228
5229 perf_sample_data_init(&data, addr, 0);
5230 data.raw = &raw;
5231
5232 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
5233 if (perf_tp_event_match(event, &data, regs))
5234 perf_swevent_event(event, count, &data, regs);
5235 }
5236
5237 perf_swevent_put_recursion_context(rctx);
5238}
5239EXPORT_SYMBOL_GPL(perf_tp_event);
5240
5241static void tp_perf_event_destroy(struct perf_event *event)
5242{
5243 perf_trace_destroy(event);
5244}
5245
5246static int perf_tp_event_init(struct perf_event *event)
5247{
5248 int err;
5249
5250 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5251 return -ENOENT;
5252
5253 /*
5254 * no branch sampling for tracepoint events
5255 */
5256 if (has_branch_stack(event))
5257 return -EOPNOTSUPP;
5258
5259 err = perf_trace_init(event);
5260 if (err)
5261 return err;
5262
5263 event->destroy = tp_perf_event_destroy;
5264
5265 return 0;
5266}
5267
5268static struct pmu perf_tracepoint = {
5269 .task_ctx_nr = perf_sw_context,
5270
5271 .event_init = perf_tp_event_init,
5272 .add = perf_trace_add,
5273 .del = perf_trace_del,
5274 .start = perf_swevent_start,
5275 .stop = perf_swevent_stop,
5276 .read = perf_swevent_read,
5277
5278 .event_idx = perf_swevent_event_idx,
5279};
5280
5281static inline void perf_tp_register(void)
5282{
5283 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
5284}
5285
5286static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5287{
5288 char *filter_str;
5289 int ret;
5290
5291 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5292 return -EINVAL;
5293
5294 filter_str = strndup_user(arg, PAGE_SIZE);
5295 if (IS_ERR(filter_str))
5296 return PTR_ERR(filter_str);
5297
5298 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5299
5300 kfree(filter_str);
5301 return ret;
5302}
5303
5304static void perf_event_free_filter(struct perf_event *event)
5305{
5306 ftrace_profile_free_filter(event);
5307}
5308
5309#else
5310
5311static inline void perf_tp_register(void)
5312{
5313}
5314
5315static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5316{
5317 return -ENOENT;
5318}
5319
5320static void perf_event_free_filter(struct perf_event *event)
5321{
5322}
5323
5324#endif /* CONFIG_EVENT_TRACING */
5325
5326#ifdef CONFIG_HAVE_HW_BREAKPOINT
5327void perf_bp_event(struct perf_event *bp, void *data)
5328{
5329 struct perf_sample_data sample;
5330 struct pt_regs *regs = data;
5331
5332 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
5333
5334 if (!bp->hw.state && !perf_exclude_event(bp, regs))
5335 perf_swevent_event(bp, 1, &sample, regs);
5336}
5337#endif
5338
5339/*
5340 * hrtimer based swevent callback
5341 */
5342
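/*
 * Each expiry reads the event, emits one sample (unless excluded, or
 * the task is idle and exclude_idle is set) and re-arms the timer one
 * sample period ahead; periods are clamped to at least 10us.
 */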
5343static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
5344{
5345 enum hrtimer_restart ret = HRTIMER_RESTART;
5346 struct perf_sample_data data;
5347 struct pt_regs *regs;
5348 struct perf_event *event;
5349 u64 period;
5350
5351 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
5352
5353 if (event->state != PERF_EVENT_STATE_ACTIVE)
5354 return HRTIMER_NORESTART;
5355
5356 event->pmu->read(event);
5357
5358 perf_sample_data_init(&data, 0, event->hw.last_period);
5359 regs = get_irq_regs();
5360
5361 if (regs && !perf_exclude_event(event, regs)) {
5362 if (!(event->attr.exclude_idle && is_idle_task(current)))
5363 if (__perf_event_overflow(event, 1, &data, regs))
5364 ret = HRTIMER_NORESTART;
5365 }
5366
5367 period = max_t(u64, 10000, event->hw.sample_period);
5368 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
5369
5370 return ret;
5371}
5372
5373static void perf_swevent_start_hrtimer(struct perf_event *event)
5374{
5375 struct hw_perf_event *hwc = &event->hw;
5376 s64 period;
5377
5378 if (!is_sampling_event(event))
5379 return;
5380
5381 period = local64_read(&hwc->period_left);
5382 if (period) {
5383 if (period < 0)
5384 period = 10000;
5385
5386 local64_set(&hwc->period_left, 0);
5387 } else {
5388 period = max_t(u64, 10000, hwc->sample_period);
5389 }
5390 __hrtimer_start_range_ns(&hwc->hrtimer,
5391 ns_to_ktime(period), 0,
5392 HRTIMER_MODE_REL_PINNED, 0);
5393}
5394
5395static void perf_swevent_cancel_hrtimer(struct perf_event *event)
5396{
5397 struct hw_perf_event *hwc = &event->hw;
5398
5399 if (is_sampling_event(event)) {
5400 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
5401 local64_set(&hwc->period_left, ktime_to_ns(remaining));
5402
5403 hrtimer_cancel(&hwc->hrtimer);
5404 }
5405}
5406
5407static void perf_swevent_init_hrtimer(struct perf_event *event)
5408{
5409 struct hw_perf_event *hwc = &event->hw;
5410
5411 if (!is_sampling_event(event))
5412 return;
5413
5414 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5415 hwc->hrtimer.function = perf_swevent_hrtimer;
5416
5417 /*
5418 * hrtimers have a fixed rate, so a static freq->period mapping works
5419 * (e.g. sample_freq = 1000 yields a fixed 1ms period); no period
5420 * adjust feedback machinery is needed. */
5421 if (event->attr.freq) {
5422 long freq = event->attr.sample_freq;
5423
5424 event->attr.sample_period = NSEC_PER_SEC / freq;
5425 hwc->sample_period = event->attr.sample_period;
5426 local64_set(&hwc->period_left, hwc->sample_period);
5427 event->attr.freq = 0;
5428 }
5429}
5430
5431/*
5432 * Software event: cpu wall time clock
5433 */
5434
5435static void cpu_clock_event_update(struct perf_event *event)
5436{
5437 s64 prev;
5438 u64 now;
5439
5440 now = local_clock();
5441 prev = local64_xchg(&event->hw.prev_count, now);
5442 local64_add(now - prev, &event->count);
5443}
5444
5445static void cpu_clock_event_start(struct perf_event *event, int flags)
5446{
5447 local64_set(&event->hw.prev_count, local_clock());
5448 perf_swevent_start_hrtimer(event);
5449}
5450
5451static void cpu_clock_event_stop(struct perf_event *event, int flags)
5452{
5453 perf_swevent_cancel_hrtimer(event);
5454 cpu_clock_event_update(event);
5455}
5456
5457static int cpu_clock_event_add(struct perf_event *event, int flags)
5458{
5459 if (flags & PERF_EF_START)
5460 cpu_clock_event_start(event, flags);
5461
5462 return 0;
5463}
5464
5465static void cpu_clock_event_del(struct perf_event *event, int flags)
5466{
5467 cpu_clock_event_stop(event, flags);
5468}
5469
5470static void cpu_clock_event_read(struct perf_event *event)
5471{
5472 cpu_clock_event_update(event);
5473}
5474
5475static int cpu_clock_event_init(struct perf_event *event)
5476{
5477 if (event->attr.type != PERF_TYPE_SOFTWARE)
5478 return -ENOENT;
5479
5480 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5481 return -ENOENT;
5482
5483 /*
5484 * no branch sampling for software events
5485 */
5486 if (has_branch_stack(event))
5487 return -EOPNOTSUPP;
5488
5489 perf_swevent_init_hrtimer(event);
5490
5491 return 0;
5492}
5493
5494static struct pmu perf_cpu_clock = {
5495 .task_ctx_nr = perf_sw_context,
5496
5497 .event_init = cpu_clock_event_init,
5498 .add = cpu_clock_event_add,
5499 .del = cpu_clock_event_del,
5500 .start = cpu_clock_event_start,
5501 .stop = cpu_clock_event_stop,
5502 .read = cpu_clock_event_read,
5503
5504 .event_idx = perf_swevent_event_idx,
5505};
5506
5507/*
5508 * Software event: task time clock
5509 */
5510
5511static void task_clock_event_update(struct perf_event *event, u64 now)
5512{
5513 u64 prev;
5514 s64 delta;
5515
5516 prev = local64_xchg(&event->hw.prev_count, now);
5517 delta = now - prev;
5518 local64_add(delta, &event->count);
5519}
5520
5521static void task_clock_event_start(struct perf_event *event, int flags)
5522{
5523 local64_set(&event->hw.prev_count, event->ctx->time);
5524 perf_swevent_start_hrtimer(event);
5525}
5526
5527static void task_clock_event_stop(struct perf_event *event, int flags)
5528{
5529 perf_swevent_cancel_hrtimer(event);
5530 task_clock_event_update(event, event->ctx->time);
5531}
5532
5533static int task_clock_event_add(struct perf_event *event, int flags)
5534{
5535 if (flags & PERF_EF_START)
5536 task_clock_event_start(event, flags);
5537
5538 return 0;
5539}
5540
5541static void task_clock_event_del(struct perf_event *event, int flags)
5542{
5543 task_clock_event_stop(event, PERF_EF_UPDATE);
5544}
5545
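/*
 * ctx->time only advances up to ctx->timestamp while the context is
 * scheduled in; extrapolate from there to "now" for a current reading.
 */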
5546static void task_clock_event_read(struct perf_event *event)
5547{
5548 u64 now = perf_clock();
5549 u64 delta = now - event->ctx->timestamp;
5550 u64 time = event->ctx->time + delta;
5551
5552 task_clock_event_update(event, time);
5553}
5554
5555static int task_clock_event_init(struct perf_event *event)
5556{
5557 if (event->attr.type != PERF_TYPE_SOFTWARE)
5558 return -ENOENT;
5559
5560 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
5561 return -ENOENT;
5562
5563 /*
5564 * no branch sampling for software events
5565 */
5566 if (has_branch_stack(event))
5567 return -EOPNOTSUPP;
5568
5569 perf_swevent_init_hrtimer(event);
5570
5571 return 0;
5572}
5573
5574static struct pmu perf_task_clock = {
5575 .task_ctx_nr = perf_sw_context,
5576
5577 .event_init = task_clock_event_init,
5578 .add = task_clock_event_add,
5579 .del = task_clock_event_del,
5580 .start = task_clock_event_start,
5581 .stop = task_clock_event_stop,
5582 .read = task_clock_event_read,
5583
5584 .event_idx = perf_swevent_event_idx,
5585};
5586
5587static void perf_pmu_nop_void(struct pmu *pmu)
5588{
5589}
5590
5591static int perf_pmu_nop_int(struct pmu *pmu)
5592{
5593 return 0;
5594}
5595
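/*
 * Default transaction helpers for PMUs that supply pmu_enable() /
 * pmu_disable() but no transaction methods: bracket the group's ->add()
 * calls so the hardware writes can be batched into one reprogram.
 */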
5596static void perf_pmu_start_txn(struct pmu *pmu)
5597{
5598 perf_pmu_disable(pmu);
5599}
5600
5601static int perf_pmu_commit_txn(struct pmu *pmu)
5602{
5603 perf_pmu_enable(pmu);
5604 return 0;
5605}
5606
5607static void perf_pmu_cancel_txn(struct pmu *pmu)
5608{
5609 perf_pmu_enable(pmu);
5610}
5611
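/*
 * Index reported to userspace in the mmap control page; 0 is reserved
 * to mean "no counter", hence the +1 on the hardware index.
 */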
5612static int perf_event_idx_default(struct perf_event *event)
5613{
5614 return event->hw.idx + 1;
5615}
5616
5617/*
5618 * Ensures all contexts with the same task_ctx_nr have the same
5619 * pmu_cpu_context too.
5620 */
5621static void *find_pmu_context(int ctxn)
5622{
5623 struct pmu *pmu;
5624
5625 if (ctxn < 0)
5626 return NULL;
5627
5628 list_for_each_entry(pmu, &pmus, entry) {
5629 if (pmu->task_ctx_nr == ctxn)
5630 return pmu->pmu_cpu_context;
5631 }
5632
5633 return NULL;
5634}
5635
5636static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
5637{
5638 int cpu;
5639
5640 for_each_possible_cpu(cpu) {
5641 struct perf_cpu_context *cpuctx;
5642
5643 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5644
5645 if (cpuctx->active_pmu == old_pmu)
5646 cpuctx->active_pmu = pmu;
5647 }
5648}
5649
5650static void free_pmu_context(struct pmu *pmu)
5651{
5652 struct pmu *i;
5653
5654 mutex_lock(&pmus_lock);
5655 /*
5656 * Like a real lame refcount: only free once no other pmu still shares it.
5657 */
5658 list_for_each_entry(i, &pmus, entry) {
5659 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
5660 update_pmu_context(i, pmu);
5661 goto out;
5662 }
5663 }
5664
5665 free_percpu(pmu->pmu_cpu_context);
5666out:
5667 mutex_unlock(&pmus_lock);
5668}
5669static struct idr pmu_idr;
5670
5671static ssize_t
5672type_show(struct device *dev, struct device_attribute *attr, char *page)
5673{
5674 struct pmu *pmu = dev_get_drvdata(dev);
5675
5676 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
5677}
5678
5679static struct device_attribute pmu_dev_attrs[] = {
5680 __ATTR_RO(type),
5681 __ATTR_NULL,
5682};
5683
5684static int pmu_bus_running;
5685static struct bus_type pmu_bus = {
5686 .name = "event_source",
5687 .dev_attrs = pmu_dev_attrs,
5688};
5689
5690static void pmu_dev_release(struct device *dev)
5691{
5692 kfree(dev);
5693}
5694
5695static int pmu_dev_alloc(struct pmu *pmu)
5696{
5697 int ret = -ENOMEM;
5698
5699 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
5700 if (!pmu->dev)
5701 goto out;
5702
5703 pmu->dev->groups = pmu->attr_groups;
5704 device_initialize(pmu->dev);
5705 ret = dev_set_name(pmu->dev, "%s", pmu->name);
5706 if (ret)
5707 goto free_dev;
5708
5709 dev_set_drvdata(pmu->dev, pmu);
5710 pmu->dev->bus = &pmu_bus;
5711 pmu->dev->release = pmu_dev_release;
5712 ret = device_add(pmu->dev);
5713 if (ret)
5714 goto free_dev;
5715
5716out:
5717 return ret;
5718
5719free_dev:
5720 put_device(pmu->dev);
5721 goto out;
5722}
5723
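/*
 * Per-cpu contexts get their own lock classes; their mutex/lock can
 * nest inside a task context's, which would otherwise look like
 * recursion to lockdep.
 */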
5724static struct lock_class_key cpuctx_mutex;
5725static struct lock_class_key cpuctx_lock;
5726
5727int perf_pmu_register(struct pmu *pmu, char *name, int type)
5728{
5729 int cpu, ret;
5730
5731 mutex_lock(&pmus_lock);
5732 ret = -ENOMEM;
5733 pmu->pmu_disable_count = alloc_percpu(int);
5734 if (!pmu->pmu_disable_count)
5735 goto unlock;
5736
5737 pmu->type = -1;
5738 if (!name)
5739 goto skip_type;
5740 pmu->name = name;
5741
5742 if (type < 0) {
5743 int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
5744 if (!err)
5745 goto free_pdc;
5746
5747 err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
5748 if (err) {
5749 ret = err;
5750 goto free_pdc;
5751 }
5752 }
5753 pmu->type = type;
5754
5755 if (pmu_bus_running) {
5756 ret = pmu_dev_alloc(pmu);
5757 if (ret)
5758 goto free_idr;
5759 }
5760
5761skip_type:
5762 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
5763 if (pmu->pmu_cpu_context)
5764 goto got_cpu_context;
5765
5766 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5767 if (!pmu->pmu_cpu_context)
5768 goto free_dev;
5769
5770 for_each_possible_cpu(cpu) {
5771 struct perf_cpu_context *cpuctx;
5772
5773 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5774 __perf_event_init_context(&cpuctx->ctx);
5775 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
5776 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
5777 cpuctx->ctx.type = cpu_context;
5778 cpuctx->ctx.pmu = pmu;
5779 cpuctx->jiffies_interval = 1;
5780 INIT_LIST_HEAD(&cpuctx->rotation_list);
5781 cpuctx->active_pmu = pmu;
5782 }
5783
5784got_cpu_context:
5785 if (!pmu->start_txn) {
5786 if (pmu->pmu_enable) {
5787 /*
5788 * If we have pmu_enable/pmu_disable calls, install
5789 * transaction stubs that use that to try and batch
5790 * hardware accesses.
5791 */
5792 pmu->start_txn = perf_pmu_start_txn;
5793 pmu->commit_txn = perf_pmu_commit_txn;
5794 pmu->cancel_txn = perf_pmu_cancel_txn;
5795 } else {
5796 pmu->start_txn = perf_pmu_nop_void;
5797 pmu->commit_txn = perf_pmu_nop_int;
5798 pmu->cancel_txn = perf_pmu_nop_void;
5799 }
5800 }
5801
5802 if (!pmu->pmu_enable) {
5803 pmu->pmu_enable = perf_pmu_nop_void;
5804 pmu->pmu_disable = perf_pmu_nop_void;
5805 }
5806
5807 if (!pmu->event_idx)
5808 pmu->event_idx = perf_event_idx_default;
5809
5810 list_add_rcu(&pmu->entry, &pmus);
5811 ret = 0;
5812unlock:
5813 mutex_unlock(&pmus_lock);
5814
5815 return ret;
5816
5817free_dev:
5818 device_del(pmu->dev);
5819 put_device(pmu->dev);
5820
5821free_idr:
5822 if (pmu->type >= PERF_TYPE_MAX)
5823 idr_remove(&pmu_idr, pmu->type);
5824
5825free_pdc:
5826 free_percpu(pmu->pmu_disable_count);
5827 goto unlock;
5828}
5829
5830void perf_pmu_unregister(struct pmu *pmu)
5831{
5832 mutex_lock(&pmus_lock);
5833 list_del_rcu(&pmu->entry);
5834 mutex_unlock(&pmus_lock);
5835
5836 /*
5837 * We dereference the pmu list under both SRCU and regular RCU, so
5838 * synchronize against both of those.
5839 */
5840 synchronize_srcu(&pmus_srcu);
5841 synchronize_rcu();
5842
5843 free_percpu(pmu->pmu_disable_count);
5844 if (pmu->type >= PERF_TYPE_MAX)
5845 idr_remove(&pmu_idr, pmu->type);
5846 device_del(pmu->dev);
5847 put_device(pmu->dev);
5848 free_pmu_context(pmu);
5849}
5850
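/*
 * Fixed event types resolve through pmu_idr; anything else is offered
 * to each registered pmu in turn, -ENOENT meaning "not mine".
 */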
5851struct pmu *perf_init_event(struct perf_event *event)
5852{
5853 struct pmu *pmu = NULL;
5854 int idx;
5855 int ret;
5856
5857 idx = srcu_read_lock(&pmus_srcu);
5858
5859 rcu_read_lock();
5860 pmu = idr_find(&pmu_idr, event->attr.type);
5861 rcu_read_unlock();
5862 if (pmu) {
5863 event->pmu = pmu;
5864 ret = pmu->event_init(event);
5865 if (ret)
5866 pmu = ERR_PTR(ret);
5867 goto unlock;
5868 }
5869
5870 list_for_each_entry_rcu(pmu, &pmus, entry) {
5871 event->pmu = pmu;
5872 ret = pmu->event_init(event);
5873 if (!ret)
5874 goto unlock;
5875
5876 if (ret != -ENOENT) {
5877 pmu = ERR_PTR(ret);
5878 goto unlock;
5879 }
5880 }
5881 pmu = ERR_PTR(-ENOENT);
5882unlock:
5883 srcu_read_unlock(&pmus_srcu, idx);
5884
5885 return pmu;
5886}
5887
5888/*
5889 * Allocate and initialize an event structure
5890 */
5891static struct perf_event *
5892perf_event_alloc(struct perf_event_attr *attr, int cpu,
5893 struct task_struct *task,
5894 struct perf_event *group_leader,
5895 struct perf_event *parent_event,
5896 perf_overflow_handler_t overflow_handler,
5897 void *context)
5898{
5899 struct pmu *pmu;
5900 struct perf_event *event;
5901 struct hw_perf_event *hwc;
5902 long err;
5903
5904 if ((unsigned)cpu >= nr_cpu_ids) {
5905 if (!task || cpu != -1)
5906 return ERR_PTR(-EINVAL);
5907 }
5908
5909 event = kzalloc(sizeof(*event), GFP_KERNEL);
5910 if (!event)
5911 return ERR_PTR(-ENOMEM);
5912
5913 /*
5914 * Single events are their own group leaders, with an
5915 * empty sibling list:
5916 */
5917 if (!group_leader)
5918 group_leader = event;
5919
5920 mutex_init(&event->child_mutex);
5921 INIT_LIST_HEAD(&event->child_list);
5922
5923 INIT_LIST_HEAD(&event->group_entry);
5924 INIT_LIST_HEAD(&event->event_entry);
5925 INIT_LIST_HEAD(&event->sibling_list);
5926 INIT_LIST_HEAD(&event->rb_entry);
5927
5928 init_waitqueue_head(&event->waitq);
5929 init_irq_work(&event->pending, perf_pending_event);
5930
5931 mutex_init(&event->mmap_mutex);
5932
5933 atomic_long_set(&event->refcount, 1);
5934 event->cpu = cpu;
5935 event->attr = *attr;
5936 event->group_leader = group_leader;
5937 event->pmu = NULL;
5938 event->oncpu = -1;
5939
5940 event->parent = parent_event;
5941
5942 event->ns = get_pid_ns(current->nsproxy->pid_ns);
5943 event->id = atomic64_inc_return(&perf_event_id);
5944
5945 event->state = PERF_EVENT_STATE_INACTIVE;
5946
5947 if (task) {
5948 event->attach_state = PERF_ATTACH_TASK;
5949#ifdef CONFIG_HAVE_HW_BREAKPOINT
5950 /*
5951 * hw_breakpoint is a bit awkward here: the target task must be recorded before event_init() runs.
5952 */
5953 if (attr->type == PERF_TYPE_BREAKPOINT)
5954 event->hw.bp_target = task;
5955#endif
5956 }
5957
5958 if (!overflow_handler && parent_event) {
5959 overflow_handler = parent_event->overflow_handler;
5960 context = parent_event->overflow_handler_context;
5961 }
5962
5963 event->overflow_handler = overflow_handler;
5964 event->overflow_handler_context = context;
5965
5966 if (attr->disabled)
5967 event->state = PERF_EVENT_STATE_OFF;
5968
5969 pmu = NULL;
5970
5971 hwc = &event->hw;
5972 hwc->sample_period = attr->sample_period;
5973 if (attr->freq && attr->sample_freq)
5974 hwc->sample_period = 1;
5975 hwc->last_period = hwc->sample_period;
5976
5977 local64_set(&hwc->period_left, hwc->sample_period);
5978
5979 /*
5980 * we currently do not support PERF_FORMAT_GROUP on inherited events
5981 */
5982 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5983 goto done;
5984
5985 pmu = perf_init_event(event);
5986
5987done:
5988 err = 0;
5989 if (!pmu)
5990 err = -EINVAL;
5991 else if (IS_ERR(pmu))
5992 err = PTR_ERR(pmu);
5993
5994 if (err) {
5995 if (event->ns)
5996 put_pid_ns(event->ns);
5997 kfree(event);
5998 return ERR_PTR(err);
5999 }
6000
6001 if (!event->parent) {
6002 if (event->attach_state & PERF_ATTACH_TASK)
6003 static_key_slow_inc(&perf_sched_events.key);
6004 if (event->attr.mmap || event->attr.mmap_data)
6005 atomic_inc(&nr_mmap_events);
6006 if (event->attr.comm)
6007 atomic_inc(&nr_comm_events);
6008 if (event->attr.task)
6009 atomic_inc(&nr_task_events);
6010 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6011 err = get_callchain_buffers();
6012 if (err) {
6013 free_event(event);
6014 return ERR_PTR(err);
6015 }
6016 }
6017 if (has_branch_stack(event)) {
6018 static_key_slow_inc(&perf_sched_events.key);
6019 if (!(event->attach_state & PERF_ATTACH_TASK))
6020 atomic_inc(&per_cpu(perf_branch_stack_events,
6021 event->cpu));
6022 }
6023 }
6024
6025 return event;
6026}
6027
6028static int perf_copy_attr(struct perf_event_attr __user *uattr,
6029 struct perf_event_attr *attr)
6030{
6031 u32 size;
6032 int ret;
6033
6034 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
6035 return -EFAULT;
6036
6037 /*
6038 * zero the full structure, so that a short copy will be nice.
6039 */
6040 memset(attr, 0, sizeof(*attr));
6041
6042 ret = get_user(size, &uattr->size);
6043 if (ret)
6044 return ret;
6045
6046 if (size > PAGE_SIZE) /* silly large */
6047 goto err_size;
6048
6049 if (!size) /* abi compat */
6050 size = PERF_ATTR_SIZE_VER0;
6051
6052 if (size < PERF_ATTR_SIZE_VER0)
6053 goto err_size;
6054
6055 /*
6056 * If we're handed a bigger struct than we know of,
6057 * ensure all the unknown bits are 0 - i.e. new
6058 * user-space does not rely on any kernel feature
6059 * extensions we don't know about yet.
6060 */
6061 if (size > sizeof(*attr)) {
6062 unsigned char __user *addr;
6063 unsigned char __user *end;
6064 unsigned char val;
6065
6066 addr = (void __user *)uattr + sizeof(*attr);
6067 end = (void __user *)uattr + size;
6068
6069 for (; addr < end; addr++) {
6070 ret = get_user(val, addr);
6071 if (ret)
6072 return ret;
6073 if (val)
6074 goto err_size;
6075 }
6076 size = sizeof(*attr);
6077 }
6078
6079 ret = copy_from_user(attr, uattr, size);
6080 if (ret)
6081 return -EFAULT;
6082
6083 if (attr->__reserved_1)
6084 return -EINVAL;
6085
6086 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
6087 return -EINVAL;
6088
6089 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
6090 return -EINVAL;
6091
6092 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
6093 u64 mask = attr->branch_sample_type;
6094
6095 /* only using defined bits */
6096 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
6097 return -EINVAL;
6098
6099 /* at least one branch bit must be set */
6100 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
6101 return -EINVAL;
6102
6103 /* kernel level capture: check permissions */
6104 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
6105 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6106 return -EACCES;
6107
6108 /* propagate priv level, when not set for branch */
6109 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
6110
6111 /* exclude_kernel checked on syscall entry */
6112 if (!attr->exclude_kernel)
6113 mask |= PERF_SAMPLE_BRANCH_KERNEL;
6114
6115 if (!attr->exclude_user)
6116 mask |= PERF_SAMPLE_BRANCH_USER;
6117
6118 if (!attr->exclude_hv)
6119 mask |= PERF_SAMPLE_BRANCH_HV;
6120 /*
6121 * adjust user setting (for HW filter setup)
6122 */
6123 attr->branch_sample_type = mask;
6124 }
6125 }
6126out:
6127 return ret;
6128
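/*
 * On a size we can't digest, report the size the kernel understands
 * back through uattr->size so userspace can retry with that.
 */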
6129err_size:
6130 put_user(sizeof(*attr), &uattr->size);
6131 ret = -E2BIG;
6132 goto out;
6133}
6134
6135static int
6136perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
6137{
6138 struct ring_buffer *rb = NULL, *old_rb = NULL;
6139 int ret = -EINVAL;
6140
6141 if (!output_event)
6142 goto set;
6143
6144 /* don't allow circular references */
6145 if (event == output_event)
6146 goto out;
6147
6148 /*
6149 * Don't allow cross-cpu buffers
6150 */
6151 if (output_event->cpu != event->cpu)
6152 goto out;
6153
6154 /*
6155 * If it's not a per-cpu rb, it must be the same task.
6156 */
6157 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
6158 goto out;
6159
6160set:
6161 mutex_lock(&event->mmap_mutex);
6162 /* Can't redirect output if we've got an active mmap() */
6163 if (atomic_read(&event->mmap_count))
6164 goto unlock;
6165
6166 if (output_event) {
6167 /* get the rb we want to redirect to */
6168 rb = ring_buffer_get(output_event);
6169 if (!rb)
6170 goto unlock;
6171 }
6172
6173 old_rb = event->rb;
6174 rcu_assign_pointer(event->rb, rb);
6175 if (old_rb)
6176 ring_buffer_detach(event, old_rb);
6177 ret = 0;
6178unlock:
6179 mutex_unlock(&event->mmap_mutex);
6180
6181 if (old_rb)
6182 ring_buffer_put(old_rb);
6183out:
6184 return ret;
6185}
6186
6187/**
6188 * sys_perf_event_open - open a performance event, associate it to a task/cpu
6189 *
6190 * @attr_uptr: event_id type attributes for monitoring/sampling
6191 * @pid: target pid
6192 * @cpu: target cpu
6193 * @group_fd: group leader event fd
6194 */
6195SYSCALL_DEFINE5(perf_event_open,
6196 struct perf_event_attr __user *, attr_uptr,
6197 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
6198{
6199 struct perf_event *group_leader = NULL, *output_event = NULL;
6200 struct perf_event *event, *sibling;
6201 struct perf_event_attr attr;
6202 struct perf_event_context *ctx;
6203 struct file *event_file = NULL;
6204 struct file *group_file = NULL;
6205 struct task_struct *task = NULL;
6206 struct pmu *pmu;
6207 int event_fd;
6208 int move_group = 0;
6209 int fput_needed = 0;
6210 int err;
6211
6212 /* for future expandability... */
6213 if (flags & ~PERF_FLAG_ALL)
6214 return -EINVAL;
6215
6216 err = perf_copy_attr(attr_uptr, &attr);
6217 if (err)
6218 return err;
6219
6220 if (!attr.exclude_kernel) {
6221 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6222 return -EACCES;
6223 }
6224
6225 if (attr.freq) {
6226 if (attr.sample_freq > sysctl_perf_event_sample_rate)
6227 return -EINVAL;
6228 }
6229
6230 /*
6231 * In cgroup mode, the pid argument is used to pass the fd
6232 * opened to the cgroup directory in cgroupfs. The cpu argument
6233 * designates the cpu on which to monitor threads from that
6234 * cgroup.
6235 */
6236 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
6237 return -EINVAL;
6238
6239 event_fd = get_unused_fd_flags(O_RDWR);
6240 if (event_fd < 0)
6241 return event_fd;
6242
6243 if (group_fd != -1) {
6244 group_file = perf_fget_light(group_fd, &fput_needed);
6245 if (IS_ERR(group_file)) {
6246 err = PTR_ERR(group_file);
6247 goto err_fd;
6248 }
6249 group_leader = group_file->private_data;
6250 if (flags & PERF_FLAG_FD_OUTPUT)
6251 output_event = group_leader;
6252 if (flags & PERF_FLAG_FD_NO_GROUP)
6253 group_leader = NULL;
6254 }
6255
6256 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
6257 task = find_lively_task_by_vpid(pid);
6258 if (IS_ERR(task)) {
6259 err = PTR_ERR(task);
6260 goto err_group_fd;
6261 }
6262 }
6263
6264 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
6265 NULL, NULL);
6266 if (IS_ERR(event)) {
6267 err = PTR_ERR(event);
6268 goto err_task;
6269 }
6270
6271 if (flags & PERF_FLAG_PID_CGROUP) {
6272 err = perf_cgroup_connect(pid, event, &attr, group_leader);
6273 if (err)
6274 goto err_alloc;
6275 /*
6276 * one more event:
6277 * - that has cgroup constraint on event->cpu
6278 * - that may need work on context switch
6279 */
6280 atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
6281 static_key_slow_inc(&perf_sched_events.key);
6282 }
6283
6284 /*
6285 * Special case software events and allow them to be part of
6286 * any hardware group.
6287 */
6288 pmu = event->pmu;
6289
6290 if (group_leader &&
6291 (is_software_event(event) != is_software_event(group_leader))) {
6292 if (is_software_event(event)) {
6293 /*
6294 * If event and group_leader are not both a software
6295 * event, and event is, then group leader is not.
6296 *
6297 * Allow the addition of software events to !software
6298 * groups, this is safe because software events never
6299 * fail to schedule.
6300 */
6301 pmu = group_leader->pmu;
6302 } else if (is_software_event(group_leader) &&
6303 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
6304 /*
6305 * In case the group is a pure software group, and we
6306 * try to add a hardware event, move the whole group to
6307 * the hardware context.
6308 */
6309 move_group = 1;
6310 }
6311 }
6312
6313 /*
6314 * Get the target context (task or percpu):
6315 */
6316 ctx = find_get_context(pmu, task, cpu);
6317 if (IS_ERR(ctx)) {
6318 err = PTR_ERR(ctx);
6319 goto err_alloc;
6320 }
6321
6322 if (task) {
6323 put_task_struct(task);
6324 task = NULL;
6325 }
6326
6327 /*
6328 * Look up the group leader (we will attach this event to it):
6329 */
6330 if (group_leader) {
6331 err = -EINVAL;
6332
6333 /*
6334 * Do not allow a recursive hierarchy (this new sibling
6335 * becoming part of another group-sibling):
6336 */
6337 if (group_leader->group_leader != group_leader)
6338 goto err_context;
6339 /*
6340 * Do not allow to attach to a group in a different
6341 * task or CPU context:
6342 */
6343 if (move_group) {
6344 if (group_leader->ctx->type != ctx->type)
6345 goto err_context;
6346 } else {
6347 if (group_leader->ctx != ctx)
6348 goto err_context;
6349 }
6350
6351 /*
6352 * Only a group leader can be exclusive or pinned
6353 */
6354 if (attr.exclusive || attr.pinned)
6355 goto err_context;
6356 }
6357
6358 if (output_event) {
6359 err = perf_event_set_output(event, output_event);
6360 if (err)
6361 goto err_context;
6362 }
6363
6364 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
6365 if (IS_ERR(event_file)) {
6366 err = PTR_ERR(event_file);
6367 goto err_context;
6368 }
6369
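/*
 * Moving a pure-software group to a hardware context is a two-step
 * dance: detach the leader and all siblings from the old context under
 * its mutex here, then reinstall them into the new context below once
 * ctx->mutex is held.
 */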
6370 if (move_group) {
6371 struct perf_event_context *gctx = group_leader->ctx;
6372
6373 mutex_lock(&gctx->mutex);
6374 perf_remove_from_context(group_leader);
6375 list_for_each_entry(sibling, &group_leader->sibling_list,
6376 group_entry) {
6377 perf_remove_from_context(sibling);
6378 put_ctx(gctx);
6379 }
6380 mutex_unlock(&gctx->mutex);
6381 put_ctx(gctx);
6382 }
6383
6384 WARN_ON_ONCE(ctx->parent_ctx);
6385 mutex_lock(&ctx->mutex);
6386
6387 if (move_group) {
6388 perf_install_in_context(ctx, group_leader, cpu);
6389 get_ctx(ctx);
6390 list_for_each_entry(sibling, &group_leader->sibling_list,
6391 group_entry) {
6392 perf_install_in_context(ctx, sibling, cpu);
6393 get_ctx(ctx);
6394 }
6395 }
6396
6397 perf_install_in_context(ctx, event, cpu);
6398 ++ctx->generation;
6399 perf_unpin_context(ctx);
6400 mutex_unlock(&ctx->mutex);
6401
6402 event->owner = current;
6403
6404 mutex_lock(¤t->perf_event_mutex);
6405 list_add_tail(&event->owner_entry, ¤t->perf_event_list);
6406 mutex_unlock(¤t->perf_event_mutex);
6407
6408 /*
6409 * Precalculate sample_data sizes
6410 */
6411 perf_event__header_size(event);
6412 perf_event__id_header_size(event);
6413
6414 /*
6415 * Drop the reference on the group leader's file after placing the
6416 * new event on the sibling_list. This ensures destruction
6417 * of the group leader will find the pointer to itself in
6418 * perf_group_detach().
6419 */
6420 fput_light(group_file, fput_needed);
6421 fd_install(event_fd, event_file);
6422 return event_fd;
6423
6424err_context:
6425 perf_unpin_context(ctx);
6426 put_ctx(ctx);
6427err_alloc:
6428 free_event(event);
6429err_task:
6430 if (task)
6431 put_task_struct(task);
6432err_group_fd:
6433 fput_light(group_file, fput_needed);
6434err_fd:
6435 put_unused_fd(event_fd);
6436 return err;
6437}
6438
6439/**
6440 * perf_event_create_kernel_counter
6441 *
6442 * @attr: attributes of the counter to create
6443 * @cpu: cpu to which the counter is bound
6444 * @task: task to profile (NULL for percpu)
6445 */
6446struct perf_event *
6447perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
6448 struct task_struct *task,
6449 perf_overflow_handler_t overflow_handler,
6450 void *context)
6451{
6452 struct perf_event_context *ctx;
6453 struct perf_event *event;
6454 int err;
6455
6456 /*
6457 * Allocate the event; the target context (task or percpu) lookup follows below.
6458 */
6459
6460 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
6461 overflow_handler, context);
6462 if (IS_ERR(event)) {
6463 err = PTR_ERR(event);
6464 goto err;
6465 }
6466
6467 ctx = find_get_context(event->pmu, task, cpu);
6468 if (IS_ERR(ctx)) {
6469 err = PTR_ERR(ctx);
6470 goto err_free;
6471 }
6472
6473 WARN_ON_ONCE(ctx->parent_ctx);
6474 mutex_lock(&ctx->mutex);
6475 perf_install_in_context(ctx, event, cpu);
6476 ++ctx->generation;
6477 perf_unpin_context(ctx);
6478 mutex_unlock(&ctx->mutex);
6479
6480 return event;
6481
6482err_free:
6483 free_event(event);
6484err:
6485 return ERR_PTR(err);
6486}
6487EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
6488
6489static void sync_child_event(struct perf_event *child_event,
6490 struct task_struct *child)
6491{
6492 struct perf_event *parent_event = child_event->parent;
6493 u64 child_val;
6494
6495 if (child_event->attr.inherit_stat)
6496 perf_event_read_event(child_event, child);
6497
6498 child_val = perf_event_count(child_event);
6499
6500 /*
6501 * Add back the child's count to the parent's count:
6502 */
6503 atomic64_add(child_val, &parent_event->child_count);
6504 atomic64_add(child_event->total_time_enabled,
6505 &parent_event->child_total_time_enabled);
6506 atomic64_add(child_event->total_time_running,
6507 &parent_event->child_total_time_running);
6508
6509 /*
6510 * Remove this event from the parent's list
6511 */
6512 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6513 mutex_lock(&parent_event->child_mutex);
6514 list_del_init(&child_event->child_list);
6515 mutex_unlock(&parent_event->child_mutex);
6516
6517 /*
6518 * Release the parent event, if this was the last
6519 * reference to it.
6520 */
6521 put_event(parent_event);
6522}
6523
6524static void
6525__perf_event_exit_task(struct perf_event *child_event,
6526 struct perf_event_context *child_ctx,
6527 struct task_struct *child)
6528{
6529 if (child_event->parent) {
6530 raw_spin_lock_irq(&child_ctx->lock);
6531 perf_group_detach(child_event);
6532 raw_spin_unlock_irq(&child_ctx->lock);
6533 }
6534
6535 perf_remove_from_context(child_event);
6536
6537 /*
6538 * It can happen that the parent exits first, and has events
6539 * that are still around due to the child reference. These
6540 * events need to be zapped.
6541 */
6542 if (child_event->parent) {
6543 sync_child_event(child_event, child);
6544 free_event(child_event);
6545 }
6546}
6547
6548static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
6549{
6550 struct perf_event *child_event, *tmp;
6551 struct perf_event_context *child_ctx;
6552 unsigned long flags;
6553
6554 if (likely(!child->perf_event_ctxp[ctxn])) {
6555 perf_event_task(child, NULL, 0);
6556 return;
6557 }
6558
6559 local_irq_save(flags);
6560 /*
6561 * We can't reschedule here because interrupts are disabled,
6562 * and either child is current or it is a task that can't be
6563 * scheduled, so we are now safe from rescheduling changing
6564 * our context.
6565 */
6566 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
6567
6568 /*
6569 * Take the context lock here so that if find_get_context is
6570 * reading child->perf_event_ctxp, we wait until it has
6571 * incremented the context's refcount before we do put_ctx below.
6572 */
6573 raw_spin_lock(&child_ctx->lock);
6574 task_ctx_sched_out(child_ctx);
6575 child->perf_event_ctxp[ctxn] = NULL;
6576 /*
6577 * If this context is a clone; unclone it so it can't get
6578 * swapped to another process while we're removing all
6579 * the events from it.
6580 */
6581 unclone_ctx(child_ctx);
6582 update_context_time(child_ctx);
6583 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6584
6585 /*
6586 * Report the task dead after unscheduling the events so that we
6587 * won't get any samples after PERF_RECORD_EXIT. We can however still
6588 * get a few PERF_RECORD_READ events.
6589 */
6590 perf_event_task(child, child_ctx, 0);
6591
6592 /*
6593 * We can recurse on the same lock type through:
6594 *
6595 * __perf_event_exit_task()
6596 * sync_child_event()
6597 * put_event()
6598 * mutex_lock(&ctx->mutex)
6599 *
6600 * But since it's the parent context, it won't be the same instance.
6601 */
6602 mutex_lock(&child_ctx->mutex);
6603
6604again:
6605 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
6606 group_entry)
6607 __perf_event_exit_task(child_event, child_ctx, child);
6608
6609 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
6610 group_entry)
6611 __perf_event_exit_task(child_event, child_ctx, child);
6612
6613 /*
6614 * If the last event was a group event, it will have appended all
6615 * its siblings to the list, but we obtained 'tmp' before that which
6616 * will still point to the list head terminating the iteration.
6617 */
6618 if (!list_empty(&child_ctx->pinned_groups) ||
6619 !list_empty(&child_ctx->flexible_groups))
6620 goto again;
6621
6622 mutex_unlock(&child_ctx->mutex);
6623
6624 put_ctx(child_ctx);
6625}
6626
6627/*
6628 * When a child task exits, feed back event values to parent events.
6629 */
6630void perf_event_exit_task(struct task_struct *child)
6631{
6632 struct perf_event *event, *tmp;
6633 int ctxn;
6634
6635 mutex_lock(&child->perf_event_mutex);
6636 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
6637 owner_entry) {
6638 list_del_init(&event->owner_entry);
6639
6640 /*
6641 * Ensure the list deletion is visible before we clear
6642 * the owner, closes a race against perf_release() where
6643 * we need to serialize on the owner->perf_event_mutex.
6644 */
6645 smp_wmb();
6646 event->owner = NULL;
6647 }
6648 mutex_unlock(&child->perf_event_mutex);
6649
6650 for_each_task_context_nr(ctxn)
6651 perf_event_exit_task_context(child, ctxn);
6652}
6653
6654static void perf_free_event(struct perf_event *event,
6655 struct perf_event_context *ctx)
6656{
6657 struct perf_event *parent = event->parent;
6658
6659 if (WARN_ON_ONCE(!parent))
6660 return;
6661
6662 mutex_lock(&parent->child_mutex);
6663 list_del_init(&event->child_list);
6664 mutex_unlock(&parent->child_mutex);
6665
6666 put_event(parent);
6667
6668 perf_group_detach(event);
6669 list_del_event(event, ctx);
6670 free_event(event);
6671}
6672
6673/*
6674 * free an unexposed, unused context as created by inheritance by
6675 * perf_event_init_task below, used by fork() in case of fail.
6676 */
6677void perf_event_free_task(struct task_struct *task)
6678{
6679 struct perf_event_context *ctx;
6680 struct perf_event *event, *tmp;
6681 int ctxn;
6682
6683 for_each_task_context_nr(ctxn) {
6684 ctx = task->perf_event_ctxp[ctxn];
6685 if (!ctx)
6686 continue;
6687
6688 mutex_lock(&ctx->mutex);
6689again:
6690 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
6691 group_entry)
6692 perf_free_event(event, ctx);
6693
6694 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
6695 group_entry)
6696 perf_free_event(event, ctx);
6697
6698 if (!list_empty(&ctx->pinned_groups) ||
6699 !list_empty(&ctx->flexible_groups))
6700 goto again;
6701
6702 mutex_unlock(&ctx->mutex);
6703
6704 put_ctx(ctx);
6705 }
6706}
6707
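/*
 * Runs when the task_struct is finally released; every context should
 * already have been torn down by perf_event_exit_task() or
 * perf_event_free_task(), so scream if one survived.
 */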
6708void perf_event_delayed_put(struct task_struct *task)
6709{
6710 int ctxn;
6711
6712 for_each_task_context_nr(ctxn)
6713 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
6714}
6715
6716/*
6717 * inherit an event from parent task to child task:
6718 */
6719static struct perf_event *
6720inherit_event(struct perf_event *parent_event,
6721 struct task_struct *parent,
6722 struct perf_event_context *parent_ctx,
6723 struct task_struct *child,
6724 struct perf_event *group_leader,
6725 struct perf_event_context *child_ctx)
6726{
6727 struct perf_event *child_event;
6728 unsigned long flags;
6729
6730 /*
6731 * Instead of creating recursive hierarchies of events,
6732 * we link inherited events back to the original parent,
6733 * which has a filp for sure, which we use as the reference
6734 * count:
6735 */
6736 if (parent_event->parent)
6737 parent_event = parent_event->parent;
6738
6739 child_event = perf_event_alloc(&parent_event->attr,
6740 parent_event->cpu,
6741 child,
6742 group_leader, parent_event,
6743 NULL, NULL);
6744 if (IS_ERR(child_event))
6745 return child_event;
6746
6747 if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
6748 free_event(child_event);
6749 return NULL;
6750 }
6751
6752 get_ctx(child_ctx);
6753
6754 /*
6755 * Make the child state follow the state of the parent event,
6756 * not its attr.disabled bit. We hold the parent's mutex,
6757 * so we won't race with perf_event_{en, dis}able_family.
6758 */
6759 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
6760 child_event->state = PERF_EVENT_STATE_INACTIVE;
6761 else
6762 child_event->state = PERF_EVENT_STATE_OFF;
6763
6764 if (parent_event->attr.freq) {
6765 u64 sample_period = parent_event->hw.sample_period;
6766 struct hw_perf_event *hwc = &child_event->hw;
6767
6768 hwc->sample_period = sample_period;
6769 hwc->last_period = sample_period;
6770
6771 local64_set(&hwc->period_left, sample_period);
6772 }
6773
6774 child_event->ctx = child_ctx;
6775 child_event->overflow_handler = parent_event->overflow_handler;
6776 child_event->overflow_handler_context
6777 = parent_event->overflow_handler_context;
6778
6779 /*
6780 * Precalculate sample_data sizes
6781 */
6782 perf_event__header_size(child_event);
6783 perf_event__id_header_size(child_event);
6784
6785 /*
6786 * Link it up in the child's context:
6787 */
6788 raw_spin_lock_irqsave(&child_ctx->lock, flags);
6789 add_event_to_ctx(child_event, child_ctx);
6790 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6791
6792 /*
6793 * Link this into the parent event's child list
6794 */
6795 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6796 mutex_lock(&parent_event->child_mutex);
6797 list_add_tail(&child_event->child_list, &parent_event->child_list);
6798 mutex_unlock(&parent_event->child_mutex);
6799
6800 return child_event;
6801}
6802
6803static int inherit_group(struct perf_event *parent_event,
6804 struct task_struct *parent,
6805 struct perf_event_context *parent_ctx,
6806 struct task_struct *child,
6807 struct perf_event_context *child_ctx)
6808{
6809 struct perf_event *leader;
6810 struct perf_event *sub;
6811 struct perf_event *child_ctr;
6812
6813 leader = inherit_event(parent_event, parent, parent_ctx,
6814 child, NULL, child_ctx);
6815 if (IS_ERR(leader))
6816 return PTR_ERR(leader);
6817 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
6818 child_ctr = inherit_event(sub, parent, parent_ctx,
6819 child, leader, child_ctx);
6820 if (IS_ERR(child_ctr))
6821 return PTR_ERR(child_ctr);
6822 }
6823 return 0;
6824}
6825
6826static int
6827inherit_task_group(struct perf_event *event, struct task_struct *parent,
6828 struct perf_event_context *parent_ctx,
6829 struct task_struct *child, int ctxn,
6830 int *inherited_all)
6831{
6832 int ret;
6833 struct perf_event_context *child_ctx;
6834
6835 if (!event->attr.inherit) {
6836 *inherited_all = 0;
6837 return 0;
6838 }
6839
6840 child_ctx = child->perf_event_ctxp[ctxn];
6841 if (!child_ctx) {
6842 /*
6843 * This is executed from the parent task context, so
6844 * inherit events that have been marked for cloning.
6845 * First allocate and initialize a context for the
6846 * child.
6847 */
6848
6849 child_ctx = alloc_perf_context(event->pmu, child);
6850 if (!child_ctx)
6851 return -ENOMEM;
6852
6853 child->perf_event_ctxp[ctxn] = child_ctx;
6854 }
6855
6856 ret = inherit_group(event, parent, parent_ctx,
6857 child, child_ctx);
6858
6859 if (ret)
6860 *inherited_all = 0;
6861
6862 return ret;
6863}
6864
6865/*
6866 * Initialize the perf_event context in task_struct
6867 */
6868int perf_event_init_context(struct task_struct *child, int ctxn)
6869{
6870 struct perf_event_context *child_ctx, *parent_ctx;
6871 struct perf_event_context *cloned_ctx;
6872 struct perf_event *event;
6873 struct task_struct *parent = current;
6874 int inherited_all = 1;
6875 unsigned long flags;
6876 int ret = 0;
6877
6878 if (likely(!parent->perf_event_ctxp[ctxn]))
6879 return 0;
6880
6881 /*
6882 * If the parent's context is a clone, pin it so it won't get
6883 * swapped under us.
6884 */
6885 parent_ctx = perf_pin_task_context(parent, ctxn);
6886
6887 /*
6888 * No need to check if parent_ctx != NULL here; since we saw
6889 * it non-NULL earlier, the only reason for it to become NULL
6890 * is if we exit, and since we're currently in the middle of
6891 * a fork we can't be exiting at the same time.
6892 */
6893
6894 /*
6895 * Lock the parent list. No need to lock the child - not PID
6896 * hashed yet and not running, so nobody can access it.
6897 */
6898 mutex_lock(&parent_ctx->mutex);
6899
6900 /*
6901 * We don't have to disable NMIs - we are only looking at
6902 * the list, not manipulating it:
6903 */
6904 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
6905 ret = inherit_task_group(event, parent, parent_ctx,
6906 child, ctxn, &inherited_all);
6907 if (ret)
6908 break;
6909 }
6910
6911 /*
6912 * We can't hold ctx->lock when iterating the ->flexible_group list due
6913 * to allocations, but we need to prevent rotation because
6914 * rotate_ctx() will change the list from interrupt context.
6915 */
6916 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6917 parent_ctx->rotate_disable = 1;
6918 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6919
6920 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
6921 ret = inherit_task_group(event, parent, parent_ctx,
6922 child, ctxn, &inherited_all);
6923 if (ret)
6924 break;
6925 }
6926
6927 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6928 parent_ctx->rotate_disable = 0;
6929
6930 child_ctx = child->perf_event_ctxp[ctxn];
6931
6932 if (child_ctx && inherited_all) {
6933 /*
6934 * Mark the child context as a clone of the parent
6935 * context, or of whatever the parent is a clone of.
6936 *
6937 * Note that if the parent is a clone, the holding of
6938 * parent_ctx->lock avoids it from being uncloned.
6939 */
6940 cloned_ctx = parent_ctx->parent_ctx;
6941 if (cloned_ctx) {
6942 child_ctx->parent_ctx = cloned_ctx;
6943 child_ctx->parent_gen = parent_ctx->parent_gen;
6944 } else {
6945 child_ctx->parent_ctx = parent_ctx;
6946 child_ctx->parent_gen = parent_ctx->generation;
6947 }
6948 get_ctx(child_ctx->parent_ctx);
6949 }
6950
6951 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6952 mutex_unlock(&parent_ctx->mutex);
6953
6954 perf_unpin_context(parent_ctx);
6955 put_ctx(parent_ctx);
6956
6957 return ret;
6958}
6959
6960/*
6961 * Initialize the perf_event contexts in task_struct, one per context type
6962 */
6963int perf_event_init_task(struct task_struct *child)
6964{
6965 int ctxn, ret;
6966
6967 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
6968 mutex_init(&child->perf_event_mutex);
6969 INIT_LIST_HEAD(&child->perf_event_list);
6970
6971 for_each_task_context_nr(ctxn) {
6972 ret = perf_event_init_context(child, ctxn);
6973 if (ret)
6974 return ret;
6975 }
6976
6977 return 0;
6978}
6979
6980static void __init perf_event_init_all_cpus(void)
6981{
6982 struct swevent_htable *swhash;
6983 int cpu;
6984
6985 for_each_possible_cpu(cpu) {
6986 swhash = &per_cpu(swevent_htable, cpu);
6987 mutex_init(&swhash->hlist_mutex);
6988 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
6989 }
6990}
6991
6992static void __cpuinit perf_event_init_cpu(int cpu)
6993{
6994 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6995
6996 mutex_lock(&swhash->hlist_mutex);
6997 if (swhash->hlist_refcount > 0) {
6998 struct swevent_hlist *hlist;
6999
7000 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
7001 WARN_ON(!hlist);
7002 rcu_assign_pointer(swhash->swevent_hlist, hlist);
7003 }
7004 mutex_unlock(&swhash->hlist_mutex);
7005}
7006
7007#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
7008static void perf_pmu_rotate_stop(struct pmu *pmu)
7009{
7010 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
7011
7012 WARN_ON(!irqs_disabled());
7013
7014 list_del_init(&cpuctx->rotation_list);
7015}
7016
7017static void __perf_event_exit_context(void *__info)
7018{
7019 struct perf_event_context *ctx = __info;
7020 struct perf_event *event, *tmp;
7021
7022 perf_pmu_rotate_stop(ctx->pmu);
7023
7024 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
7025 __perf_remove_from_context(event);
7026 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
7027 __perf_remove_from_context(event);
7028}
7029
7030static void perf_event_exit_cpu_context(int cpu)
7031{
7032 struct perf_event_context *ctx;
7033 struct pmu *pmu;
7034 int idx;
7035
7036 idx = srcu_read_lock(&pmus_srcu);
7037 list_for_each_entry_rcu(pmu, &pmus, entry) {
7038 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
7039
7040 mutex_lock(&ctx->mutex);
7041 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
7042 mutex_unlock(&ctx->mutex);
7043 }
7044 srcu_read_unlock(&pmus_srcu, idx);
7045}
7046
7047static void perf_event_exit_cpu(int cpu)
7048{
7049 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7050
7051 mutex_lock(&swhash->hlist_mutex);
7052 swevent_hlist_release(swhash);
7053 mutex_unlock(&swhash->hlist_mutex);
7054
7055 perf_event_exit_cpu_context(cpu);
7056}
7057#else
7058static inline void perf_event_exit_cpu(int cpu) { }
7059#endif
7060
7061static int
7062perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
7063{
7064 int cpu;
7065
7066 for_each_online_cpu(cpu)
7067 perf_event_exit_cpu(cpu);
7068
7069 return NOTIFY_OK;
7070}
7071
7072/*
7073 * Run the perf reboot notifier at the very last possible moment so that
7074 * the generic watchdog code runs as long as possible.
7075 */
7076static struct notifier_block perf_reboot_notifier = {
7077 .notifier_call = perf_reboot,
7078 .priority = INT_MIN,
7079};
7080
7081static int __cpuinit
7082perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
7083{
7084 unsigned int cpu = (long)hcpu;
7085
7086 switch (action & ~CPU_TASKS_FROZEN) {
7087
7088 case CPU_UP_PREPARE:
7089 case CPU_DOWN_FAILED:
7090 perf_event_init_cpu(cpu);
7091 break;
7092
7093 case CPU_UP_CANCELED:
7094 case CPU_DOWN_PREPARE:
7095 perf_event_exit_cpu(cpu);
7096 break;
7097
7098 default:
7099 break;
7100 }
7101
7102 return NOTIFY_OK;
7103}
7104
7105void __init perf_event_init(void)
7106{
7107 int ret;
7108
7109 idr_init(&pmu_idr);
7110
7111 perf_event_init_all_cpus();
7112 init_srcu_struct(&pmus_srcu);
7113 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
7114 perf_pmu_register(&perf_cpu_clock, NULL, -1);
7115 perf_pmu_register(&perf_task_clock, NULL, -1);
7116 perf_tp_register();
7117 perf_cpu_notifier(perf_cpu_notify);
7118 register_reboot_notifier(&perf_reboot_notifier);
7119
7120 ret = init_hw_breakpoint();
7121 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
7122
7123 /* do not patch jump label more than once per second */
7124 jump_label_rate_limit(&perf_sched_events, HZ);
7125
7126 /*
7127 * Build time assertion that we keep the data_head at the intended
7128 * location. IOW, validation we got the __reserved[] size right.
7129 */
7130 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
7131 != 1024);
7132}
7133
7134static int __init perf_event_sysfs_init(void)
7135{
7136 struct pmu *pmu;
7137 int ret;
7138
7139 mutex_lock(&pmus_lock);
7140
7141 ret = bus_register(&pmu_bus);
7142 if (ret)
7143 goto unlock;
7144
7145 list_for_each_entry(pmu, &pmus, entry) {
7146 if (!pmu->name || pmu->type < 0)
7147 continue;
7148
7149 ret = pmu_dev_alloc(pmu);
7150 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
7151 }
7152 pmu_bus_running = 1;
7153 ret = 0;
7154
7155unlock:
7156 mutex_unlock(&pmus_lock);
7157
7158 return ret;
7159}
7160device_initcall(perf_event_sysfs_init);
7161
7162#ifdef CONFIG_CGROUP_PERF
7163static struct cgroup_subsys_state *perf_cgroup_create(struct cgroup *cont)
7164{
7165 struct perf_cgroup *jc;
7166
7167 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
7168 if (!jc)
7169 return ERR_PTR(-ENOMEM);
7170
7171 jc->info = alloc_percpu(struct perf_cgroup_info);
7172 if (!jc->info) {
7173 kfree(jc);
7174 return ERR_PTR(-ENOMEM);
7175 }
7176
7177 return &jc->css;
7178}
7179
7180static void perf_cgroup_destroy(struct cgroup *cont)
7181{
7182 struct perf_cgroup *jc;
7183 jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
7184 struct perf_cgroup, css);
7185 free_percpu(jc->info);
7186 kfree(jc);
7187}
7188
7189static int __perf_cgroup_move(void *info)
7190{
7191 struct task_struct *task = info;
7192 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
7193 return 0;
7194}
7195
7196static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
7197{
7198 struct task_struct *task;
7199
7200 cgroup_taskset_for_each(task, cgrp, tset)
7201 task_function_call(task, __perf_cgroup_move, task);
7202}
7203
7204static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
7205 struct task_struct *task)
7206{
7207 /*
7208 * cgroup_exit() is called in the copy_process() failure path.
7209 * Ignore this case since the task hasn't run yet; this avoids
7210 * trying to poke half-freed task state from generic code.
7211 */
7212 if (!(task->flags & PF_EXITING))
7213 return;
7214
7215 task_function_call(task, __perf_cgroup_move, task);
7216}
7217
7218struct cgroup_subsys perf_subsys = {
7219 .name = "perf_event",
7220 .subsys_id = perf_subsys_id,
7221 .create = perf_cgroup_create,
7222 .destroy = perf_cgroup_destroy,
7223 .exit = perf_cgroup_exit,
7224 .attach = perf_cgroup_attach,
7225};
7226#endif /* CONFIG_CGROUP_PERF */