v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2#undef DEBUG
  3
  4/*
  5 * ARM performance counter support.
  6 *
  7 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
  8 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
  9 *
 10 * This code is based on the sparc64 perf event code, which is in turn based
 11 * on the x86 code.
 12 */
 13#define pr_fmt(fmt) "hw perfevents: " fmt
 14
 15#include <linux/bitmap.h>
 16#include <linux/cpumask.h>
 17#include <linux/cpu_pm.h>
 18#include <linux/export.h>
 19#include <linux/kernel.h>
 20#include <linux/perf/arm_pmu.h>
 21#include <linux/slab.h>
 22#include <linux/sched/clock.h>
 23#include <linux/spinlock.h>
 24#include <linux/irq.h>
 25#include <linux/irqdesc.h>
 26
 27#include <asm/irq_regs.h>
 28
 29static int armpmu_count_irq_users(const int irq);
 30
 31struct pmu_irq_ops {
 32	void (*enable_pmuirq)(unsigned int irq);
 33	void (*disable_pmuirq)(unsigned int irq);
 34	void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid);
 35};
 36
 37static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid)
 38{
 39	free_irq(irq, per_cpu_ptr(devid, cpu));
 40}
 41
 42static const struct pmu_irq_ops pmuirq_ops = {
 43	.enable_pmuirq = enable_irq,
 44	.disable_pmuirq = disable_irq_nosync,
 45	.free_pmuirq = armpmu_free_pmuirq
 46};
 47
 48static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid)
 49{
 50	free_nmi(irq, per_cpu_ptr(devid, cpu));
 51}
 52
 53static const struct pmu_irq_ops pmunmi_ops = {
 54	.enable_pmuirq = enable_nmi,
 55	.disable_pmuirq = disable_nmi_nosync,
 56	.free_pmuirq = armpmu_free_pmunmi
 57};
 58
 59static void armpmu_enable_percpu_pmuirq(unsigned int irq)
 60{
 61	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 62}
 63
 64static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
 65				   void __percpu *devid)
 66{
 67	if (armpmu_count_irq_users(irq) == 1)
 68		free_percpu_irq(irq, devid);
 69}
 70
 71static const struct pmu_irq_ops percpu_pmuirq_ops = {
 72	.enable_pmuirq = armpmu_enable_percpu_pmuirq,
 73	.disable_pmuirq = disable_percpu_irq,
 74	.free_pmuirq = armpmu_free_percpu_pmuirq
 75};
 76
 77static void armpmu_enable_percpu_pmunmi(unsigned int irq)
 78{
 79	if (!prepare_percpu_nmi(irq))
 80		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 81}
 82
 83static void armpmu_disable_percpu_pmunmi(unsigned int irq)
 84{
 85	disable_percpu_nmi(irq);
 86	teardown_percpu_nmi(irq);
 87}
 88
 89static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
 90				      void __percpu *devid)
 91{
 92	if (armpmu_count_irq_users(irq) == 1)
 93		free_percpu_nmi(irq, devid);
 94}
 95
 96static const struct pmu_irq_ops percpu_pmunmi_ops = {
 97	.enable_pmuirq = armpmu_enable_percpu_pmunmi,
 98	.disable_pmuirq = armpmu_disable_percpu_pmunmi,
 99	.free_pmuirq = armpmu_free_percpu_pmunmi
100};
101
102static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
103static DEFINE_PER_CPU(int, cpu_irq);
104static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
105
106static bool has_nmi;
107
108static inline u64 arm_pmu_event_max_period(struct perf_event *event)
109{
110	if (event->hw.flags & ARMPMU_EVT_64BIT)
111		return GENMASK_ULL(63, 0);
112	else if (event->hw.flags & ARMPMU_EVT_63BIT)
113		return GENMASK_ULL(62, 0);
114	else if (event->hw.flags & ARMPMU_EVT_47BIT)
115		return GENMASK_ULL(46, 0);
116	else
117		return GENMASK_ULL(31, 0);
118}
119
120static int
121armpmu_map_cache_event(const unsigned (*cache_map)
122				      [PERF_COUNT_HW_CACHE_MAX]
123				      [PERF_COUNT_HW_CACHE_OP_MAX]
124				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
125		       u64 config)
126{
127	unsigned int cache_type, cache_op, cache_result, ret;
128
129	cache_type = (config >>  0) & 0xff;
130	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
131		return -EINVAL;
132
133	cache_op = (config >>  8) & 0xff;
134	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
135		return -EINVAL;
136
137	cache_result = (config >> 16) & 0xff;
138	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
139		return -EINVAL;
140
141	if (!cache_map)
142		return -ENOENT;
143
144	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
145
146	if (ret == CACHE_OP_UNSUPPORTED)
147		return -ENOENT;
148
149	return ret;
150}
151
152static int
153armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
154{
155	int mapping;
156
157	if (config >= PERF_COUNT_HW_MAX)
158		return -EINVAL;
159
160	if (!event_map)
161		return -ENOENT;
162
163	mapping = (*event_map)[config];
164	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
165}
166
167static int
168armpmu_map_raw_event(u32 raw_event_mask, u64 config)
169{
170	return (int)(config & raw_event_mask);
171}
172
173int
174armpmu_map_event(struct perf_event *event,
175		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
176		 const unsigned (*cache_map)
177				[PERF_COUNT_HW_CACHE_MAX]
178				[PERF_COUNT_HW_CACHE_OP_MAX]
179				[PERF_COUNT_HW_CACHE_RESULT_MAX],
180		 u32 raw_event_mask)
181{
182	u64 config = event->attr.config;
183	int type = event->attr.type;
184
185	if (type == event->pmu->type)
186		return armpmu_map_raw_event(raw_event_mask, config);
187
188	switch (type) {
189	case PERF_TYPE_HARDWARE:
190		return armpmu_map_hw_event(event_map, config);
191	case PERF_TYPE_HW_CACHE:
192		return armpmu_map_cache_event(cache_map, config);
193	case PERF_TYPE_RAW:
194		return armpmu_map_raw_event(raw_event_mask, config);
195	}
196
197	return -ENOENT;
198}
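The cache-event decoding above mirrors the way the perf ABI packs attr.config: cache type in bits 0-7, operation in bits 8-15 and result in bits 16-23. A minimal sketch (a hypothetical helper using the uapi enums, not code from this file) of how such a config is built:

	/* Pack an L1D read-miss cache event the same way
	 * armpmu_map_cache_event() unpacks it. Used with
	 * attr.type = PERF_TYPE_HW_CACHE. */
	static u64 example_l1d_read_miss_config(void)
	{
		return (PERF_COUNT_HW_CACHE_L1D         <<  0) |
		       (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
		       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
	}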
199
200int armpmu_event_set_period(struct perf_event *event)
201{
202	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
203	struct hw_perf_event *hwc = &event->hw;
204	s64 left = local64_read(&hwc->period_left);
205	s64 period = hwc->sample_period;
206	u64 max_period;
207	int ret = 0;
208
209	max_period = arm_pmu_event_max_period(event);
210	if (unlikely(left <= -period)) {
211		left = period;
212		local64_set(&hwc->period_left, left);
213		hwc->last_period = period;
214		ret = 1;
215	}
216
217	if (unlikely(left <= 0)) {
218		left += period;
219		local64_set(&hwc->period_left, left);
220		hwc->last_period = period;
221		ret = 1;
222	}
223
224	/*
225	 * Limit the maximum period to prevent the counter value
226	 * from overtaking the one we are about to program. In
227	 * effect we are reducing max_period to account for
228	 * interrupt latency (and we are being very conservative).
229	 */
230	if (left > (max_period >> 1))
231		left = (max_period >> 1);
232
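	/*
	 * Program the counter with -left (masked to the counter width
	 * below) so that it overflows, and raises the PMU interrupt,
	 * after another 'left' increments.
	 */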
233	local64_set(&hwc->prev_count, (u64)-left);
234
235	armpmu->write_counter(event, (u64)(-left) & max_period);
236
237	perf_event_update_userpage(event);
238
239	return ret;
240}
241
242u64 armpmu_event_update(struct perf_event *event)
243{
244	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
245	struct hw_perf_event *hwc = &event->hw;
246	u64 delta, prev_raw_count, new_raw_count;
247	u64 max_period = arm_pmu_event_max_period(event);
248
249again:
250	prev_raw_count = local64_read(&hwc->prev_count);
251	new_raw_count = armpmu->read_counter(event);
252
253	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
254			     new_raw_count) != prev_raw_count)
255		goto again;
256
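	/*
	 * Mask the delta to the counter width so a counter that wrapped
	 * since the last read still yields the correct number of events.
	 */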
257	delta = (new_raw_count - prev_raw_count) & max_period;
258
259	local64_add(delta, &event->count);
260	local64_sub(delta, &hwc->period_left);
261
262	return new_raw_count;
263}
264
265static void
266armpmu_read(struct perf_event *event)
267{
268	armpmu_event_update(event);
269}
270
271static void
272armpmu_stop(struct perf_event *event, int flags)
273{
274	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
275	struct hw_perf_event *hwc = &event->hw;
276
277	/*
278	 * ARM pmu always has to update the counter, so ignore
279	 * PERF_EF_UPDATE, see comments in armpmu_start().
280	 */
281	if (!(hwc->state & PERF_HES_STOPPED)) {
282		armpmu->disable(event);
283		armpmu_event_update(event);
284		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
285	}
286}
287
288static void armpmu_start(struct perf_event *event, int flags)
289{
290	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
291	struct hw_perf_event *hwc = &event->hw;
292
293	/*
294	 * ARM pmu always has to reprogram the period, so ignore
295	 * PERF_EF_RELOAD, see the comment below.
296	 */
297	if (flags & PERF_EF_RELOAD)
298		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
299
300	hwc->state = 0;
301	/*
302	 * Set the period again. Some counters can't be stopped, so when we
303	 * were stopped we simply disabled the IRQ source and the counter
304	 * may have been left counting. If we don't do this step then we may
305	 * get an interrupt too soon or *way* too late if the overflow has
306	 * happened since disabling.
307	 */
308	armpmu_event_set_period(event);
309	armpmu->enable(event);
310}
311
312static void
313armpmu_del(struct perf_event *event, int flags)
314{
315	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
316	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
317	struct hw_perf_event *hwc = &event->hw;
318	int idx = hwc->idx;
319
320	armpmu_stop(event, PERF_EF_UPDATE);
321	hw_events->events[idx] = NULL;
322	armpmu->clear_event_idx(hw_events, event);
323	perf_event_update_userpage(event);
324	/* Clear the allocated counter */
325	hwc->idx = -1;
326}
327
328static int
329armpmu_add(struct perf_event *event, int flags)
330{
331	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
332	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
333	struct hw_perf_event *hwc = &event->hw;
334	int idx;
335
336	/* An event following a process won't be stopped earlier */
337	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
338		return -ENOENT;
339
340	/* If we don't have a space for the counter then finish early. */
341	idx = armpmu->get_event_idx(hw_events, event);
342	if (idx < 0)
343		return idx;
344
345	/*
346	 * If there is an event in the counter we are going to use then make
347	 * sure it is disabled.
348	 */
349	event->hw.idx = idx;
350	armpmu->disable(event);
351	hw_events->events[idx] = event;
352
353	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
354	if (flags & PERF_EF_START)
355		armpmu_start(event, PERF_EF_RELOAD);
356
357	/* Propagate our changes to the userspace mapping. */
358	perf_event_update_userpage(event);
359
360	return 0;
361}
362
363static int
364validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
365			       struct perf_event *event)
366{
367	struct arm_pmu *armpmu;
368
369	if (is_software_event(event))
370		return 1;
371
372	/*
373	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
374	 * core perf code won't check that the pmu->ctx == leader->ctx
375	 * until after pmu->event_init(event).
376	 */
377	if (event->pmu != pmu)
378		return 0;
379
380	if (event->state < PERF_EVENT_STATE_OFF)
381		return 1;
382
383	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
384		return 1;
385
386	armpmu = to_arm_pmu(event->pmu);
387	return armpmu->get_event_idx(hw_events, event) >= 0;
388}
389
390static int
391validate_group(struct perf_event *event)
392{
393	struct perf_event *sibling, *leader = event->group_leader;
394	struct pmu_hw_events fake_pmu;
395
396	/*
397	 * Initialise the fake PMU. We only need to populate the
398	 * used_mask for the purposes of validation.
399	 */
400	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
401
402	if (!validate_event(event->pmu, &fake_pmu, leader))
403		return -EINVAL;
404
405	if (event == leader)
406		return 0;
407
408	for_each_sibling_event(sibling, leader) {
409		if (!validate_event(event->pmu, &fake_pmu, sibling))
410			return -EINVAL;
411	}
412
413	if (!validate_event(event->pmu, &fake_pmu, event))
414		return -EINVAL;
415
416	return 0;
417}
418
419static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
420{
421	struct arm_pmu *armpmu;
422	int ret;
423	u64 start_clock, finish_clock;
424
425	/*
426	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
427	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
428	 * do any necessary shifting, we just need to perform the first
429	 * dereference.
430	 */
431	armpmu = *(void **)dev;
432	if (WARN_ON_ONCE(!armpmu))
433		return IRQ_NONE;
434
435	start_clock = sched_clock();
436	ret = armpmu->handle_irq(armpmu);
437	finish_clock = sched_clock();
438
439	perf_sample_event_took(finish_clock - start_clock);
440	return ret;
441}
442
443static int
444__hw_perf_event_init(struct perf_event *event)
445{
446	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
447	struct hw_perf_event *hwc = &event->hw;
448	int mapping, ret;
449
450	hwc->flags = 0;
451	mapping = armpmu->map_event(event);
452
453	if (mapping < 0) {
454		pr_debug("event %x:%llx not supported\n", event->attr.type,
455			 event->attr.config);
456		return mapping;
457	}
458
459	/*
460	 * We don't assign an index until we actually place the event onto
461	 * hardware. Use -1 to signify that we haven't decided where to put it
 462	 * yet. For SMP systems, each core has its own PMU so we can't do any
463	 * clever allocation or constraints checking at this point.
464	 */
465	hwc->idx		= -1;
466	hwc->config_base	= 0;
467	hwc->config		= 0;
468	hwc->event_base		= 0;
469
470	/*
471	 * Check whether we need to exclude the counter from certain modes.
472	 */
473	if (armpmu->set_event_filter) {
474		ret = armpmu->set_event_filter(hwc, &event->attr);
475		if (ret)
476			return ret;
477	}
478
479	/*
480	 * Store the event encoding into the config_base field.
481	 */
482	hwc->config_base	    |= (unsigned long)mapping;
483
484	if (!is_sampling_event(event)) {
485		/*
486		 * For non-sampling runs, limit the sample_period to half
487		 * of the counter width. That way, the new counter value
488		 * is far less likely to overtake the previous one unless
489		 * you have some serious IRQ latency issues.
490		 */
491		hwc->sample_period  = arm_pmu_event_max_period(event) >> 1;
492		hwc->last_period    = hwc->sample_period;
493		local64_set(&hwc->period_left, hwc->sample_period);
494	}
495
496	return validate_group(event);
497}
498
499static int armpmu_event_init(struct perf_event *event)
500{
501	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
502
503	/*
504	 * Reject CPU-affine events for CPUs that are of a different class to
505	 * that which this PMU handles. Process-following events (where
506	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
507	 * reject them later (in armpmu_add) if they're scheduled on a
508	 * different class of CPU.
509	 */
510	if (event->cpu != -1 &&
511		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
512		return -ENOENT;
513
514	/* does not support taken branch sampling */
515	if (has_branch_stack(event))
516		return -EOPNOTSUPP;
517
518	return __hw_perf_event_init(event);
519}
520
521static void armpmu_enable(struct pmu *pmu)
522{
523	struct arm_pmu *armpmu = to_arm_pmu(pmu);
524	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
525	bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);
526
527	/* For task-bound events we may be called on other CPUs */
528	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
529		return;
530
531	if (enabled)
532		armpmu->start(armpmu);
533}
534
535static void armpmu_disable(struct pmu *pmu)
536{
537	struct arm_pmu *armpmu = to_arm_pmu(pmu);
538
539	/* For task-bound events we may be called on other CPUs */
540	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
541		return;
542
543	armpmu->stop(armpmu);
544}
545
546/*
547 * In heterogeneous systems, events are specific to a particular
548 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
549 * the same microarchitecture.
550 */
551static bool armpmu_filter(struct pmu *pmu, int cpu)
552{
553	struct arm_pmu *armpmu = to_arm_pmu(pmu);
554	return !cpumask_test_cpu(cpu, &armpmu->supported_cpus);
555}
556
557static ssize_t cpus_show(struct device *dev,
558			 struct device_attribute *attr, char *buf)
559{
560	struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
561	return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
562}
563
564static DEVICE_ATTR_RO(cpus);
565
566static struct attribute *armpmu_common_attrs[] = {
567	&dev_attr_cpus.attr,
568	NULL,
569};
570
571static const struct attribute_group armpmu_common_attr_group = {
572	.attrs = armpmu_common_attrs,
573};
574
575static int armpmu_count_irq_users(const int irq)
576{
577	int cpu, count = 0;
578
579	for_each_possible_cpu(cpu) {
580		if (per_cpu(cpu_irq, cpu) == irq)
581			count++;
582	}
583
584	return count;
585}
586
587static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
588{
589	const struct pmu_irq_ops *ops = NULL;
590	int cpu;
591
592	for_each_possible_cpu(cpu) {
593		if (per_cpu(cpu_irq, cpu) != irq)
594			continue;
595
596		ops = per_cpu(cpu_irq_ops, cpu);
597		if (ops)
598			break;
599	}
600
601	return ops;
602}
603
604void armpmu_free_irq(int irq, int cpu)
605{
606	if (per_cpu(cpu_irq, cpu) == 0)
607		return;
608	if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
609		return;
610
611	per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
612
613	per_cpu(cpu_irq, cpu) = 0;
614	per_cpu(cpu_irq_ops, cpu) = NULL;
615}
616
617int armpmu_request_irq(int irq, int cpu)
618{
619	int err = 0;
620	const irq_handler_t handler = armpmu_dispatch_irq;
621	const struct pmu_irq_ops *irq_ops;
622
623	if (!irq)
624		return 0;
625
626	if (!irq_is_percpu_devid(irq)) {
627		unsigned long irq_flags;
628
629		err = irq_force_affinity(irq, cpumask_of(cpu));
630
631		if (err && num_possible_cpus() > 1) {
632			pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
633				irq, cpu);
634			goto err_out;
635		}
636
637		irq_flags = IRQF_PERCPU |
638			    IRQF_NOBALANCING | IRQF_NO_AUTOEN |
639			    IRQF_NO_THREAD;
640
641		err = request_nmi(irq, handler, irq_flags, "arm-pmu",
642				  per_cpu_ptr(&cpu_armpmu, cpu));
643
644		/* If cannot get an NMI, get a normal interrupt */
645		if (err) {
646			err = request_irq(irq, handler, irq_flags, "arm-pmu",
647					  per_cpu_ptr(&cpu_armpmu, cpu));
648			irq_ops = &pmuirq_ops;
649		} else {
650			has_nmi = true;
651			irq_ops = &pmunmi_ops;
652		}
653	} else if (armpmu_count_irq_users(irq) == 0) {
654		err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);
655
656		/* If cannot get an NMI, get a normal interrupt */
657		if (err) {
658			err = request_percpu_irq(irq, handler, "arm-pmu",
659						 &cpu_armpmu);
660			irq_ops = &percpu_pmuirq_ops;
661		} else {
662			has_nmi = true;
663			irq_ops = &percpu_pmunmi_ops;
664		}
665	} else {
666		/* Per cpudevid irq was already requested by another CPU */
667		irq_ops = armpmu_find_irq_ops(irq);
668
669		if (WARN_ON(!irq_ops))
670			err = -EINVAL;
671	}
672
673	if (err)
674		goto err_out;
675
676	per_cpu(cpu_irq, cpu) = irq;
677	per_cpu(cpu_irq_ops, cpu) = irq_ops;
678	return 0;
679
680err_out:
681	pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
682	return err;
683}
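Callers (the platform and ACPI probing code) are expected to request one interrupt per supported CPU and record it in the per-CPU hw_events so the hotplug callbacks below can enable and disable it. A rough sketch, where irq_for_cpu() is only a placeholder for the caller's DT/ACPI lookup:

	static int example_request_all_irqs(struct arm_pmu *pmu)
	{
		struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
		int cpu, err;

		for_each_cpu(cpu, &pmu->supported_cpus) {
			int irq = irq_for_cpu(cpu);	/* placeholder lookup */

			err = armpmu_request_irq(irq, cpu);
			if (err)
				return err;

			per_cpu(hw_events->irq, cpu) = irq;
		}

		return 0;
	}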
684
685static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
686{
687	struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
688	return per_cpu(hw_events->irq, cpu);
689}
690
691bool arm_pmu_irq_is_nmi(void)
692{
693	return has_nmi;
694}
695
696/*
697 * PMU hardware loses all context when a CPU goes offline.
698 * When a CPU is hotplugged back in, since some hardware registers are
699 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
700 * junk values out of them.
701 */
702static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
703{
704	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
705	int irq;
706
707	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
708		return 0;
709	if (pmu->reset)
710		pmu->reset(pmu);
711
712	per_cpu(cpu_armpmu, cpu) = pmu;
713
714	irq = armpmu_get_cpu_irq(pmu, cpu);
715	if (irq)
716		per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
717
718	return 0;
719}
720
721static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
722{
723	struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
724	int irq;
725
726	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
727		return 0;
728
729	irq = armpmu_get_cpu_irq(pmu, cpu);
730	if (irq)
731		per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
732
733	per_cpu(cpu_armpmu, cpu) = NULL;
734
735	return 0;
736}
737
738#ifdef CONFIG_CPU_PM
739static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
740{
741	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
742	struct perf_event *event;
743	int idx;
744
745	for (idx = 0; idx < armpmu->num_events; idx++) {
746		event = hw_events->events[idx];
747		if (!event)
748			continue;
749
750		switch (cmd) {
751		case CPU_PM_ENTER:
752			/*
753			 * Stop and update the counter
754			 */
755			armpmu_stop(event, PERF_EF_UPDATE);
756			break;
757		case CPU_PM_EXIT:
758		case CPU_PM_ENTER_FAILED:
759			 /*
760			  * Restore and enable the counter.
761			  */
762			armpmu_start(event, PERF_EF_RELOAD);
763			break;
764		default:
765			break;
766		}
767	}
768}
769
770static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
771			     void *v)
772{
773	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
774	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
775	bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);
776
777	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
778		return NOTIFY_DONE;
779
780	/*
781	 * Always reset the PMU registers on power-up even if
782	 * there are no events running.
783	 */
784	if (cmd == CPU_PM_EXIT && armpmu->reset)
785		armpmu->reset(armpmu);
786
787	if (!enabled)
788		return NOTIFY_OK;
789
790	switch (cmd) {
791	case CPU_PM_ENTER:
792		armpmu->stop(armpmu);
793		cpu_pm_pmu_setup(armpmu, cmd);
794		break;
795	case CPU_PM_EXIT:
796	case CPU_PM_ENTER_FAILED:
797		cpu_pm_pmu_setup(armpmu, cmd);
798		armpmu->start(armpmu);
799		break;
800	default:
801		return NOTIFY_DONE;
802	}
803
804	return NOTIFY_OK;
805}
806
807static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
808{
809	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
810	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
811}
812
813static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
814{
815	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
816}
817#else
818static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
819static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
820#endif
821
822static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
823{
824	int err;
825
826	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
827				       &cpu_pmu->node);
828	if (err)
829		goto out;
830
831	err = cpu_pm_pmu_register(cpu_pmu);
832	if (err)
833		goto out_unregister;
834
835	return 0;
836
837out_unregister:
838	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
839					    &cpu_pmu->node);
840out:
841	return err;
842}
843
844static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
845{
846	cpu_pm_pmu_unregister(cpu_pmu);
847	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
848					    &cpu_pmu->node);
849}
850
851struct arm_pmu *armpmu_alloc(void)
852{
853	struct arm_pmu *pmu;
854	int cpu;
855
856	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
857	if (!pmu)
858		goto out;
859
860	pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, GFP_KERNEL);
861	if (!pmu->hw_events) {
862		pr_info("failed to allocate per-cpu PMU data.\n");
863		goto out_free_pmu;
864	}
865
866	pmu->pmu = (struct pmu) {
867		.pmu_enable	= armpmu_enable,
868		.pmu_disable	= armpmu_disable,
869		.event_init	= armpmu_event_init,
870		.add		= armpmu_add,
871		.del		= armpmu_del,
872		.start		= armpmu_start,
873		.stop		= armpmu_stop,
874		.read		= armpmu_read,
875		.filter		= armpmu_filter,
876		.attr_groups	= pmu->attr_groups,
877		/*
878		 * This is a CPU PMU potentially in a heterogeneous
879		 * configuration (e.g. big.LITTLE) so
880		 * PERF_PMU_CAP_EXTENDED_HW_TYPE is required to open
881		 * PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE events on a
882		 * specific PMU.
883		 */
884		.capabilities	= PERF_PMU_CAP_EXTENDED_REGS |
885				  PERF_PMU_CAP_EXTENDED_HW_TYPE,
886	};
887
888	pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
889		&armpmu_common_attr_group;
890
891	for_each_possible_cpu(cpu) {
892		struct pmu_hw_events *events;
893
894		events = per_cpu_ptr(pmu->hw_events, cpu);
895		events->percpu_pmu = pmu;
896	}
897
898	return pmu;
899
900out_free_pmu:
901	kfree(pmu);
902out:
903	return NULL;
904}
905
906void armpmu_free(struct arm_pmu *pmu)
907{
908	free_percpu(pmu->hw_events);
909	kfree(pmu);
910}
911
912int armpmu_register(struct arm_pmu *pmu)
913{
914	int ret;
915
916	ret = cpu_pmu_init(pmu);
917	if (ret)
918		return ret;
919
920	if (!pmu->set_event_filter)
921		pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
922
923	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
924	if (ret)
925		goto out_destroy;
926
927	pr_info("enabled with %s PMU driver, %d counters available%s\n",
928		pmu->name, pmu->num_events,
929		has_nmi ? ", using NMIs" : "");
930
931	kvm_host_pmu_init(pmu);
932
933	return 0;
934
935out_destroy:
936	cpu_pmu_destroy(pmu);
937	return ret;
938}
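Taken together with armpmu_alloc() and armpmu_free() above, a back-end driver is expected to follow roughly this shape; every example_* callback below is a placeholder, not code from this file:

	static int example_pmu_probe(void)
	{
		struct arm_pmu *pmu;
		int ret;

		pmu = armpmu_alloc();
		if (!pmu)
			return -ENOMEM;

		/* Wire up the hardware back-end (placeholder callbacks). */
		pmu->name		= "example_pmu";
		pmu->handle_irq		= example_handle_irq;
		pmu->enable		= example_enable_event;
		pmu->disable		= example_disable_event;
		pmu->read_counter	= example_read_counter;
		pmu->write_counter	= example_write_counter;
		pmu->get_event_idx	= example_get_event_idx;
		pmu->clear_event_idx	= example_clear_event_idx;
		pmu->start		= example_start;
		pmu->stop		= example_stop;
		pmu->map_event		= example_map_event;
		pmu->num_events		= 6;
		cpumask_copy(&pmu->supported_cpus, cpu_possible_mask);

		ret = armpmu_register(pmu);
		if (ret)
			armpmu_free(pmu);

		return ret;
	}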
939
940static int arm_pmu_hp_init(void)
941{
942	int ret;
943
944	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
945				      "perf/arm/pmu:starting",
946				      arm_perf_starting_cpu,
947				      arm_perf_teardown_cpu);
948	if (ret)
949		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
950		       ret);
951	return ret;
952}
953subsys_initcall(arm_pmu_hp_init);
v4.6
 
   1#undef DEBUG
   2
   3/*
   4 * ARM performance counter support.
   5 *
   6 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
   7 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
   8 *
   9 * This code is based on the sparc64 perf event code, which is in turn based
  10 * on the x86 code.
  11 */
  12#define pr_fmt(fmt) "hw perfevents: " fmt
  13
  14#include <linux/bitmap.h>
  15#include <linux/cpumask.h>
  16#include <linux/cpu_pm.h>
  17#include <linux/export.h>
  18#include <linux/kernel.h>
  19#include <linux/of_device.h>
  20#include <linux/perf/arm_pmu.h>
  21#include <linux/platform_device.h>
  22#include <linux/slab.h>
  23#include <linux/spinlock.h>
  24#include <linux/irq.h>
  25#include <linux/irqdesc.h>
  26
  27#include <asm/cputype.h>
  28#include <asm/irq_regs.h>
  29
  30static int
  31armpmu_map_cache_event(const unsigned (*cache_map)
  32				      [PERF_COUNT_HW_CACHE_MAX]
  33				      [PERF_COUNT_HW_CACHE_OP_MAX]
  34				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
  35		       u64 config)
  36{
  37	unsigned int cache_type, cache_op, cache_result, ret;
  38
  39	cache_type = (config >>  0) & 0xff;
  40	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
  41		return -EINVAL;
  42
  43	cache_op = (config >>  8) & 0xff;
  44	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
  45		return -EINVAL;
  46
  47	cache_result = (config >> 16) & 0xff;
  48	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
  49		return -EINVAL;
  50
  51	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
  52
  53	if (ret == CACHE_OP_UNSUPPORTED)
  54		return -ENOENT;
  55
  56	return ret;
  57}
  58
  59static int
  60armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
  61{
  62	int mapping;
  63
  64	if (config >= PERF_COUNT_HW_MAX)
  65		return -EINVAL;
  66
  67	mapping = (*event_map)[config];
  68	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
  69}
  70
  71static int
  72armpmu_map_raw_event(u32 raw_event_mask, u64 config)
  73{
  74	return (int)(config & raw_event_mask);
  75}
  76
  77int
  78armpmu_map_event(struct perf_event *event,
  79		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
  80		 const unsigned (*cache_map)
  81				[PERF_COUNT_HW_CACHE_MAX]
  82				[PERF_COUNT_HW_CACHE_OP_MAX]
  83				[PERF_COUNT_HW_CACHE_RESULT_MAX],
  84		 u32 raw_event_mask)
  85{
  86	u64 config = event->attr.config;
  87	int type = event->attr.type;
  88
  89	if (type == event->pmu->type)
  90		return armpmu_map_raw_event(raw_event_mask, config);
  91
  92	switch (type) {
  93	case PERF_TYPE_HARDWARE:
  94		return armpmu_map_hw_event(event_map, config);
  95	case PERF_TYPE_HW_CACHE:
  96		return armpmu_map_cache_event(cache_map, config);
  97	case PERF_TYPE_RAW:
  98		return armpmu_map_raw_event(raw_event_mask, config);
  99	}
 100
 101	return -ENOENT;
 102}
 103
 104int armpmu_event_set_period(struct perf_event *event)
 105{
 106	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 107	struct hw_perf_event *hwc = &event->hw;
 108	s64 left = local64_read(&hwc->period_left);
 109	s64 period = hwc->sample_period;
 110	int ret = 0;
 111
 112	if (unlikely(left <= -period)) {
 113		left = period;
 114		local64_set(&hwc->period_left, left);
 115		hwc->last_period = period;
 116		ret = 1;
 117	}
 118
 119	if (unlikely(left <= 0)) {
 120		left += period;
 121		local64_set(&hwc->period_left, left);
 122		hwc->last_period = period;
 123		ret = 1;
 124	}
 125
 126	/*
 127	 * Limit the maximum period to prevent the counter value
 128	 * from overtaking the one we are about to program. In
 129	 * effect we are reducing max_period to account for
 130	 * interrupt latency (and we are being very conservative).
 131	 */
 132	if (left > (armpmu->max_period >> 1))
 133		left = armpmu->max_period >> 1;
 134
 135	local64_set(&hwc->prev_count, (u64)-left);
 136
 137	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
 138
 139	perf_event_update_userpage(event);
 140
 141	return ret;
 142}
 143
 144u64 armpmu_event_update(struct perf_event *event)
 145{
 146	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 147	struct hw_perf_event *hwc = &event->hw;
 148	u64 delta, prev_raw_count, new_raw_count;
 149
 150again:
 151	prev_raw_count = local64_read(&hwc->prev_count);
 152	new_raw_count = armpmu->read_counter(event);
 153
 154	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 155			     new_raw_count) != prev_raw_count)
 156		goto again;
 157
 158	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 159
 160	local64_add(delta, &event->count);
 161	local64_sub(delta, &hwc->period_left);
 162
 163	return new_raw_count;
 164}
 165
 166static void
 167armpmu_read(struct perf_event *event)
 168{
 169	armpmu_event_update(event);
 170}
 171
 172static void
 173armpmu_stop(struct perf_event *event, int flags)
 174{
 175	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 176	struct hw_perf_event *hwc = &event->hw;
 177
 178	/*
 179	 * ARM pmu always has to update the counter, so ignore
 180	 * PERF_EF_UPDATE, see comments in armpmu_start().
 181	 */
 182	if (!(hwc->state & PERF_HES_STOPPED)) {
 183		armpmu->disable(event);
 184		armpmu_event_update(event);
 185		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 186	}
 187}
 188
 189static void armpmu_start(struct perf_event *event, int flags)
 190{
 191	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 192	struct hw_perf_event *hwc = &event->hw;
 193
 194	/*
 195	 * ARM pmu always has to reprogram the period, so ignore
 196	 * PERF_EF_RELOAD, see the comment below.
 197	 */
 198	if (flags & PERF_EF_RELOAD)
 199		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
 200
 201	hwc->state = 0;
 202	/*
 203	 * Set the period again. Some counters can't be stopped, so when we
 204	 * were stopped we simply disabled the IRQ source and the counter
 205	 * may have been left counting. If we don't do this step then we may
 206	 * get an interrupt too soon or *way* too late if the overflow has
 207	 * happened since disabling.
 208	 */
 209	armpmu_event_set_period(event);
 210	armpmu->enable(event);
 211}
 212
 213static void
 214armpmu_del(struct perf_event *event, int flags)
 215{
 216	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 217	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
 218	struct hw_perf_event *hwc = &event->hw;
 219	int idx = hwc->idx;
 220
 221	armpmu_stop(event, PERF_EF_UPDATE);
 222	hw_events->events[idx] = NULL;
 223	clear_bit(idx, hw_events->used_mask);
 224	if (armpmu->clear_event_idx)
 225		armpmu->clear_event_idx(hw_events, event);
 226
 227	perf_event_update_userpage(event);
 228}
 229
 230static int
 231armpmu_add(struct perf_event *event, int flags)
 232{
 233	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 234	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
 235	struct hw_perf_event *hwc = &event->hw;
 236	int idx;
 237	int err = 0;
 238
 239	/* An event following a process won't be stopped earlier */
 240	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
 241		return -ENOENT;
 242
 243	perf_pmu_disable(event->pmu);
 244
 245	/* If we don't have a space for the counter then finish early. */
 246	idx = armpmu->get_event_idx(hw_events, event);
 247	if (idx < 0) {
 248		err = idx;
 249		goto out;
 250	}
 251
 252	/*
 253	 * If there is an event in the counter we are going to use then make
 254	 * sure it is disabled.
 255	 */
 256	event->hw.idx = idx;
 257	armpmu->disable(event);
 258	hw_events->events[idx] = event;
 259
 260	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 261	if (flags & PERF_EF_START)
 262		armpmu_start(event, PERF_EF_RELOAD);
 263
 264	/* Propagate our changes to the userspace mapping. */
 265	perf_event_update_userpage(event);
 266
 267out:
 268	perf_pmu_enable(event->pmu);
 269	return err;
 270}
 271
 272static int
 273validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
 274			       struct perf_event *event)
 275{
 276	struct arm_pmu *armpmu;
 277
 278	if (is_software_event(event))
 279		return 1;
 280
 281	/*
 282	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
 283	 * core perf code won't check that the pmu->ctx == leader->ctx
 284	 * until after pmu->event_init(event).
 285	 */
 286	if (event->pmu != pmu)
 287		return 0;
 288
 289	if (event->state < PERF_EVENT_STATE_OFF)
 290		return 1;
 291
 292	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 293		return 1;
 294
 295	armpmu = to_arm_pmu(event->pmu);
 296	return armpmu->get_event_idx(hw_events, event) >= 0;
 297}
 298
 299static int
 300validate_group(struct perf_event *event)
 301{
 302	struct perf_event *sibling, *leader = event->group_leader;
 303	struct pmu_hw_events fake_pmu;
 304
 305	/*
 306	 * Initialise the fake PMU. We only need to populate the
 307	 * used_mask for the purposes of validation.
 308	 */
 309	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
 310
 311	if (!validate_event(event->pmu, &fake_pmu, leader))
 312		return -EINVAL;
 313
 314	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
 315		if (!validate_event(event->pmu, &fake_pmu, sibling))
 316			return -EINVAL;
 317	}
 318
 319	if (!validate_event(event->pmu, &fake_pmu, event))
 320		return -EINVAL;
 321
 322	return 0;
 323}
 324
 325static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 326{
 327	struct arm_pmu *armpmu;
 328	struct platform_device *plat_device;
 329	struct arm_pmu_platdata *plat;
 330	int ret;
 331	u64 start_clock, finish_clock;
 332
 333	/*
 334	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
 335	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
 336	 * do any necessary shifting, we just need to perform the first
 337	 * dereference.
 338	 */
 339	armpmu = *(void **)dev;
 340	plat_device = armpmu->plat_device;
 341	plat = dev_get_platdata(&plat_device->dev);
 342
 343	start_clock = sched_clock();
 344	if (plat && plat->handle_irq)
 345		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
 346	else
 347		ret = armpmu->handle_irq(irq, armpmu);
 348	finish_clock = sched_clock();
 349
 350	perf_sample_event_took(finish_clock - start_clock);
 351	return ret;
 352}
 353
 354static void
 355armpmu_release_hardware(struct arm_pmu *armpmu)
 356{
 357	armpmu->free_irq(armpmu);
 358}
 359
 360static int
 361armpmu_reserve_hardware(struct arm_pmu *armpmu)
 362{
 363	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
 364	if (err) {
 365		armpmu_release_hardware(armpmu);
 366		return err;
 367	}
 368
 369	return 0;
 370}
 371
 372static void
 373hw_perf_event_destroy(struct perf_event *event)
 374{
 375	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 376	atomic_t *active_events	 = &armpmu->active_events;
 377	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
 378
 379	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
 380		armpmu_release_hardware(armpmu);
 381		mutex_unlock(pmu_reserve_mutex);
 382	}
 383}
 384
 385static int
 386event_requires_mode_exclusion(struct perf_event_attr *attr)
 387{
 388	return attr->exclude_idle || attr->exclude_user ||
 389	       attr->exclude_kernel || attr->exclude_hv;
 390}
 391
 392static int
 393__hw_perf_event_init(struct perf_event *event)
 394{
 395	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 396	struct hw_perf_event *hwc = &event->hw;
 397	int mapping;
 398
 399	mapping = armpmu->map_event(event);
 400
 401	if (mapping < 0) {
 402		pr_debug("event %x:%llx not supported\n", event->attr.type,
 403			 event->attr.config);
 404		return mapping;
 405	}
 406
 407	/*
 408	 * We don't assign an index until we actually place the event onto
 409	 * hardware. Use -1 to signify that we haven't decided where to put it
 410	 * yet. For SMP systems, each core has its own PMU so we can't do any
 411	 * clever allocation or constraints checking at this point.
 412	 */
 413	hwc->idx		= -1;
 414	hwc->config_base	= 0;
 415	hwc->config		= 0;
 416	hwc->event_base		= 0;
 417
 418	/*
 419	 * Check whether we need to exclude the counter from certain modes.
 420	 */
 421	if ((!armpmu->set_event_filter ||
 422	     armpmu->set_event_filter(hwc, &event->attr)) &&
 423	     event_requires_mode_exclusion(&event->attr)) {
 424		pr_debug("ARM performance counters do not support "
 425			 "mode exclusion\n");
 426		return -EOPNOTSUPP;
 427	}
 428
 429	/*
 430	 * Store the event encoding into the config_base field.
 431	 */
 432	hwc->config_base	    |= (unsigned long)mapping;
 433
 434	if (!is_sampling_event(event)) {
 435		/*
 436		 * For non-sampling runs, limit the sample_period to half
 437		 * of the counter width. That way, the new counter value
 438		 * is far less likely to overtake the previous one unless
 439		 * you have some serious IRQ latency issues.
 440		 */
 441		hwc->sample_period  = armpmu->max_period >> 1;
 442		hwc->last_period    = hwc->sample_period;
 443		local64_set(&hwc->period_left, hwc->sample_period);
 444	}
 445
 446	if (event->group_leader != event) {
 447		if (validate_group(event) != 0)
 448			return -EINVAL;
 449	}
 450
 451	return 0;
 452}
 453
 454static int armpmu_event_init(struct perf_event *event)
 455{
 456	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 457	int err = 0;
 458	atomic_t *active_events = &armpmu->active_events;
 459
 460	/*
 461	 * Reject CPU-affine events for CPUs that are of a different class to
 462	 * that which this PMU handles. Process-following events (where
 463	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
 464	 * reject them later (in armpmu_add) if they're scheduled on a
 465	 * different class of CPU.
 466	 */
 467	if (event->cpu != -1 &&
 468		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
 469		return -ENOENT;
 470
 471	/* does not support taken branch sampling */
 472	if (has_branch_stack(event))
 473		return -EOPNOTSUPP;
 474
 475	if (armpmu->map_event(event) == -ENOENT)
 476		return -ENOENT;
 477
 478	event->destroy = hw_perf_event_destroy;
 479
 480	if (!atomic_inc_not_zero(active_events)) {
 481		mutex_lock(&armpmu->reserve_mutex);
 482		if (atomic_read(active_events) == 0)
 483			err = armpmu_reserve_hardware(armpmu);
 484
 485		if (!err)
 486			atomic_inc(active_events);
 487		mutex_unlock(&armpmu->reserve_mutex);
 488	}
 489
 490	if (err)
 491		return err;
 492
 493	err = __hw_perf_event_init(event);
 494	if (err)
 495		hw_perf_event_destroy(event);
 496
 497	return err;
 498}
 499
 500static void armpmu_enable(struct pmu *pmu)
 501{
 502	struct arm_pmu *armpmu = to_arm_pmu(pmu);
 503	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
 504	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 505
 506	/* For task-bound events we may be called on other CPUs */
 507	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
 508		return;
 509
 510	if (enabled)
 511		armpmu->start(armpmu);
 512}
 513
 514static void armpmu_disable(struct pmu *pmu)
 515{
 516	struct arm_pmu *armpmu = to_arm_pmu(pmu);
 517
 518	/* For task-bound events we may be called on other CPUs */
 519	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
 520		return;
 521
 522	armpmu->stop(armpmu);
 523}
 524
 525/*
 526 * In heterogeneous systems, events are specific to a particular
 527 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 528 * the same microarchitecture.
 529 */
 530static int armpmu_filter_match(struct perf_event *event)
 531{
 532	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 533	unsigned int cpu = smp_processor_id();
 534	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
 535}
 536
 537static void armpmu_init(struct arm_pmu *armpmu)
 538{
 539	atomic_set(&armpmu->active_events, 0);
 540	mutex_init(&armpmu->reserve_mutex);
 541
 542	armpmu->pmu = (struct pmu) {
 543		.pmu_enable	= armpmu_enable,
 544		.pmu_disable	= armpmu_disable,
 545		.event_init	= armpmu_event_init,
 546		.add		= armpmu_add,
 547		.del		= armpmu_del,
 548		.start		= armpmu_start,
 549		.stop		= armpmu_stop,
 550		.read		= armpmu_read,
 551		.filter_match	= armpmu_filter_match,
 552	};
 553}
 554
 555/* Set at runtime when we know what CPU type we are. */
 556static struct arm_pmu *__oprofile_cpu_pmu;
 557
 558/*
 559 * Despite the names, these two functions are CPU-specific and are used
 560 * by the OProfile/perf code.
 561 */
 562const char *perf_pmu_name(void)
 563{
 564	if (!__oprofile_cpu_pmu)
 565		return NULL;
 566
 567	return __oprofile_cpu_pmu->name;
 568}
 569EXPORT_SYMBOL_GPL(perf_pmu_name);
 570
 571int perf_num_counters(void)
 572{
 573	int max_events = 0;
 574
 575	if (__oprofile_cpu_pmu != NULL)
 576		max_events = __oprofile_cpu_pmu->num_events;
 577
 578	return max_events;
 579}
 580EXPORT_SYMBOL_GPL(perf_num_counters);
 581
 582static void cpu_pmu_enable_percpu_irq(void *data)
 583{
 584	int irq = *(int *)data;
 585
 586	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 587}
 588
 589static void cpu_pmu_disable_percpu_irq(void *data)
 590{
 591	int irq = *(int *)data;
 592
 593	disable_percpu_irq(irq);
 594}
 595
 596static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 597{
 598	int i, irq, irqs;
 599	struct platform_device *pmu_device = cpu_pmu->plat_device;
 600	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
 601
 602	irqs = min(pmu_device->num_resources, num_possible_cpus());
 603
 604	irq = platform_get_irq(pmu_device, 0);
 605	if (irq >= 0 && irq_is_percpu(irq)) {
 606		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
 607		free_percpu_irq(irq, &hw_events->percpu_pmu);
 608	} else {
 609		for (i = 0; i < irqs; ++i) {
 610			int cpu = i;
 611
 612			if (cpu_pmu->irq_affinity)
 613				cpu = cpu_pmu->irq_affinity[i];
 614
 615			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
 616				continue;
 617			irq = platform_get_irq(pmu_device, i);
 618			if (irq >= 0)
 619				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 620		}
 621	}
 622}
 623
 624static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 625{
 626	int i, err, irq, irqs;
 627	struct platform_device *pmu_device = cpu_pmu->plat_device;
 628	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
 629
 630	if (!pmu_device)
 631		return -ENODEV;
 632
 633	irqs = min(pmu_device->num_resources, num_possible_cpus());
 634	if (irqs < 1) {
 635		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
 636		return 0;
 637	}
 638
 639	irq = platform_get_irq(pmu_device, 0);
 640	if (irq >= 0 && irq_is_percpu(irq)) {
 641		err = request_percpu_irq(irq, handler, "arm-pmu",
 642					 &hw_events->percpu_pmu);
 643		if (err) {
 644			pr_err("unable to request IRQ%d for ARM PMU counters\n",
 645				irq);
 646			return err;
 647		}
 648		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
 649	} else {
 650		for (i = 0; i < irqs; ++i) {
 651			int cpu = i;
 652
 653			err = 0;
 654			irq = platform_get_irq(pmu_device, i);
 655			if (irq < 0)
 656				continue;
 657
 658			if (cpu_pmu->irq_affinity)
 659				cpu = cpu_pmu->irq_affinity[i];
 660
 661			/*
 662			 * If we have a single PMU interrupt that we can't shift,
 663			 * assume that we're running on a uniprocessor machine and
 664			 * continue. Otherwise, continue without this interrupt.
 665			 */
 666			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
 667				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
 668					irq, cpu);
 669				continue;
 670			}
 671
 672			err = request_irq(irq, handler,
 673					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
 674					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 675			if (err) {
 676				pr_err("unable to request IRQ%d for ARM PMU counters\n",
 677					irq);
 678				return err;
 679			}
 680
 681			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
 682		}
 683	}
 684
 685	return 0;
 686}
 687
 688/*
 689 * PMU hardware loses all context when a CPU goes offline.
 690 * When a CPU is hotplugged back in, since some hardware registers are
 691 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 692 * junk values out of them.
 693 */
 694static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
 695			  void *hcpu)
 696{
 697	int cpu = (unsigned long)hcpu;
 698	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
 699
 700	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
 701		return NOTIFY_DONE;
 702
 703	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
 704		return NOTIFY_DONE;
 705
 706	if (pmu->reset)
 707		pmu->reset(pmu);
 708	else
 709		return NOTIFY_DONE;
 710
 711	return NOTIFY_OK;
 712}
 713
 714#ifdef CONFIG_CPU_PM
 715static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
 716{
 717	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
 718	struct perf_event *event;
 719	int idx;
 720
 721	for (idx = 0; idx < armpmu->num_events; idx++) {
 722		/*
 723		 * If the counter is not used skip it, there is no
 724		 * need of stopping/restarting it.
 725		 */
 726		if (!test_bit(idx, hw_events->used_mask))
 727			continue;
 728
 729		event = hw_events->events[idx];
 730
 731		switch (cmd) {
 732		case CPU_PM_ENTER:
 733			/*
 734			 * Stop and update the counter
 735			 */
 736			armpmu_stop(event, PERF_EF_UPDATE);
 737			break;
 738		case CPU_PM_EXIT:
 739		case CPU_PM_ENTER_FAILED:
 740			 /*
 741			  * Restore and enable the counter.
 742			  * armpmu_start() indirectly calls
 743			  *
 744			  * perf_event_update_userpage()
 745			  *
 746			  * that requires RCU read locking to be functional,
 747			  * wrap the call within RCU_NONIDLE to make the
 748			  * RCU subsystem aware this cpu is not idle from
 749			  * an RCU perspective for the armpmu_start() call
 750			  * duration.
 751			  */
 752			RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
 753			break;
 754		default:
 755			break;
 756		}
 757	}
 758}
 759
 760static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
 761			     void *v)
 762{
 763	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
 764	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
 765	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 766
 767	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
 768		return NOTIFY_DONE;
 769
 770	/*
 771	 * Always reset the PMU registers on power-up even if
 772	 * there are no events running.
 773	 */
 774	if (cmd == CPU_PM_EXIT && armpmu->reset)
 775		armpmu->reset(armpmu);
 776
 777	if (!enabled)
 778		return NOTIFY_OK;
 779
 780	switch (cmd) {
 781	case CPU_PM_ENTER:
 782		armpmu->stop(armpmu);
 783		cpu_pm_pmu_setup(armpmu, cmd);
 784		break;
 785	case CPU_PM_EXIT:
 786		cpu_pm_pmu_setup(armpmu, cmd);
 787	case CPU_PM_ENTER_FAILED:
 788		armpmu->start(armpmu);
 789		break;
 790	default:
 791		return NOTIFY_DONE;
 792	}
 793
 794	return NOTIFY_OK;
 795}
 796
 797static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
 798{
 799	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
 800	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
 801}
 802
 803static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
 804{
 805	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
 806}
 807#else
 808static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
 809static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
 810#endif
 811
 812static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 813{
 814	int err;
 815	int cpu;
 816	struct pmu_hw_events __percpu *cpu_hw_events;
 817
 818	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
 819	if (!cpu_hw_events)
 820		return -ENOMEM;
 821
 822	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
 823	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
 824	if (err)
 825		goto out_hw_events;
 826
 827	err = cpu_pm_pmu_register(cpu_pmu);
 828	if (err)
 829		goto out_unregister;
 830
 831	for_each_possible_cpu(cpu) {
 832		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
 833		raw_spin_lock_init(&events->pmu_lock);
 834		events->percpu_pmu = cpu_pmu;
 835	}
 836
 837	cpu_pmu->hw_events	= cpu_hw_events;
 838	cpu_pmu->request_irq	= cpu_pmu_request_irq;
 839	cpu_pmu->free_irq	= cpu_pmu_free_irq;
 840
 841	/* Ensure the PMU has sane values out of reset. */
 842	if (cpu_pmu->reset)
 843		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
 844			 cpu_pmu, 1);
 845
 846	/* If no interrupts available, set the corresponding capability flag */
 847	if (!platform_get_irq(cpu_pmu->plat_device, 0))
 848		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 849
 850	return 0;
 851
 852out_unregister:
 853	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
 854out_hw_events:
 855	free_percpu(cpu_hw_events);
 856	return err;
 857}
 858
 859static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 860{
 861	cpu_pm_pmu_unregister(cpu_pmu);
 862	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
 863	free_percpu(cpu_pmu->hw_events);
 864}
 865
 866/*
 867 * CPU PMU identification and probing.
 868 */
 869static int probe_current_pmu(struct arm_pmu *pmu,
 870			     const struct pmu_probe_info *info)
 871{
 872	int cpu = get_cpu();
 873	unsigned int cpuid = read_cpuid_id();
 874	int ret = -ENODEV;
 875
 876	pr_info("probing PMU on CPU %d\n", cpu);
 877
 878	for (; info->init != NULL; info++) {
 879		if ((cpuid & info->mask) != info->cpuid)
 880			continue;
 881		ret = info->init(pmu);
 882		break;
 883	}
 884
 885	put_cpu();
 886	return ret;
 887}
 888
 889static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 890{
 891	int *irqs, i = 0;
 892	bool using_spi = false;
 893	struct platform_device *pdev = pmu->plat_device;
 894
 895	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
 896	if (!irqs)
 897		return -ENOMEM;
 898
 899	do {
 900		struct device_node *dn;
 901		int cpu, irq;
 902
 903		/* See if we have an affinity entry */
 904		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
 905		if (!dn)
 906			break;
 907
 908		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
 909		irq = platform_get_irq(pdev, i);
 910		if (irq >= 0) {
 911			bool spi = !irq_is_percpu(irq);
 912
 913			if (i > 0 && spi != using_spi) {
 914				pr_err("PPI/SPI IRQ type mismatch for %s!\n",
 915					dn->name);
 916				kfree(irqs);
 917				return -EINVAL;
 918			}
 919
 920			using_spi = spi;
 921		}
 922
 923		/* Now look up the logical CPU number */
 924		for_each_possible_cpu(cpu) {
 925			struct device_node *cpu_dn;
 926
 927			cpu_dn = of_cpu_device_node_get(cpu);
 928			of_node_put(cpu_dn);
 929
 930			if (dn == cpu_dn)
 931				break;
 932		}
 933
 934		if (cpu >= nr_cpu_ids) {
 935			pr_warn("Failed to find logical CPU for %s\n",
 936				dn->name);
 937			of_node_put(dn);
 938			cpumask_setall(&pmu->supported_cpus);
 939			break;
 940		}
 941		of_node_put(dn);
 942
 943		/* For SPIs, we need to track the affinity per IRQ */
 944		if (using_spi) {
 945			if (i >= pdev->num_resources) {
 946				of_node_put(dn);
 947				break;
 948			}
 949
 950			irqs[i] = cpu;
 951		}
 952
 953		/* Keep track of the CPUs containing this PMU type */
 954		cpumask_set_cpu(cpu, &pmu->supported_cpus);
 955		of_node_put(dn);
 956		i++;
 957	} while (1);
 958
 959	/* If we didn't manage to parse anything, claim to support all CPUs */
 960	if (cpumask_weight(&pmu->supported_cpus) == 0)
 961		cpumask_setall(&pmu->supported_cpus);
 962
 963	/* If we matched up the IRQ affinities, use them to route the SPIs */
 964	if (using_spi && i == pdev->num_resources)
 965		pmu->irq_affinity = irqs;
 966	else
 967		kfree(irqs);
 968
 969	return 0;
 970}
 971
 972int arm_pmu_device_probe(struct platform_device *pdev,
 973			 const struct of_device_id *of_table,
 974			 const struct pmu_probe_info *probe_table)
 975{
 976	const struct of_device_id *of_id;
 977	const int (*init_fn)(struct arm_pmu *);
 978	struct device_node *node = pdev->dev.of_node;
 979	struct arm_pmu *pmu;
 980	int ret = -ENODEV;
 981
 982	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
 983	if (!pmu) {
 984		pr_info("failed to allocate PMU device!\n");
 985		return -ENOMEM;
 986	}
 987
 988	armpmu_init(pmu);
 989
 990	if (!__oprofile_cpu_pmu)
 991		__oprofile_cpu_pmu = pmu;
 992
 993	pmu->plat_device = pdev;
 994
 995	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
 996		init_fn = of_id->data;
 997
 998		pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
 999							   "secure-reg-access");
1000
1001		/* arm64 systems boot only as non-secure */
1002		if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
1003			pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
1004			pmu->secure_access = false;
1005		}
1006
1007		ret = of_pmu_irq_cfg(pmu);
1008		if (!ret)
1009			ret = init_fn(pmu);
1010	} else {
1011		ret = probe_current_pmu(pmu, probe_table);
1012		cpumask_setall(&pmu->supported_cpus);
1013	}
1014
1015	if (ret) {
1016		pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
1017		goto out_free;
1018	}
1019
1020	ret = cpu_pmu_init(pmu);
1021	if (ret)
1022		goto out_free;
1023
1024	ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
1025	if (ret)
1026		goto out_destroy;
1027
1028	pr_info("enabled with %s PMU driver, %d counters available\n",
1029			pmu->name, pmu->num_events);
1030
1031	return 0;
1032
1033out_destroy:
1034	cpu_pmu_destroy(pmu);
1035out_free:
1036	pr_info("%s: failed to register PMU devices!\n",
1037		of_node_full_name(node));
1038	kfree(pmu);
1039	return ret;
1040}