// SPDX-License-Identifier: GPL-2.0
/*
 * ARMv6 Performance counter handling code.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *	- change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *	  effectively stops the counter from counting.
 *	- disable the counter's interrupt generation (each counter has its
 *	  own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *	- enable the counter's interrupt generation.
 *	- set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)

#include <asm/cputype.h>
#include <asm/irq_regs.h>

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

enum armv6_perf_types {
	ARMV6_PERFCTR_ICACHE_MISS	    = 0x0,
	ARMV6_PERFCTR_IBUF_STALL	    = 0x1,
	ARMV6_PERFCTR_DDEP_STALL	    = 0x2,
	ARMV6_PERFCTR_ITLB_MISS		    = 0x3,
	ARMV6_PERFCTR_DTLB_MISS		    = 0x4,
	ARMV6_PERFCTR_BR_EXEC		    = 0x5,
	ARMV6_PERFCTR_BR_MISPREDICT	    = 0x6,
	ARMV6_PERFCTR_INSTR_EXEC	    = 0x7,
	ARMV6_PERFCTR_DCACHE_HIT	    = 0x9,
	ARMV6_PERFCTR_DCACHE_ACCESS	    = 0xA,
	ARMV6_PERFCTR_DCACHE_MISS	    = 0xB,
	ARMV6_PERFCTR_DCACHE_WBACK	    = 0xC,
	ARMV6_PERFCTR_SW_PC_CHANGE	    = 0xD,
	ARMV6_PERFCTR_MAIN_TLB_MISS	    = 0xF,
	ARMV6_PERFCTR_EXPL_D_ACCESS	    = 0x10,
	ARMV6_PERFCTR_LSU_FULL_STALL	    = 0x11,
	ARMV6_PERFCTR_WBUF_DRAINED	    = 0x12,
	ARMV6_PERFCTR_CPU_CYCLES	    = 0xFF,
	ARMV6_PERFCTR_NOP		    = 0x20,
};

enum armv6_counters {
	ARMV6_CYCLE_COUNTER = 0,
	ARMV6_COUNTER0,
	ARMV6_COUNTER1,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV6_PERFCTR_IBUF_STALL,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV6_PERFCTR_LSU_FULL_STALL,
};

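/*
 * For context: the map above is what a generic PERF_TYPE_HARDWARE event is
 * translated through before it reaches this driver. A minimal userspace
 * sketch (not part of this driver, kept under #if 0 purely as an
 * illustration) of counting branch misses on one of these cores:
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* No glibc wrapper exists for perf_event_open(2); invoke it directly. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	/* Resolved to ARMV6_PERFCTR_BR_MISPREDICT by armv6_perf_map above. */
	attr.config = PERF_COUNT_HW_BRANCH_MISSES;
	attr.disabled = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this task, any CPU */
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload of interest here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("branch misses: %lld\n", count);
	close(fd);
	return 0;
}
#endif
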
static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,

	/*
	 * The ARM performance counters can count micro DTLB misses, micro ITLB
	 * misses and main TLB misses. There isn't an event for TLB misses, so
	 * use the micro misses here and if users want the main TLB misses they
	 * can use a raw counter.
	 */
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
};
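
/*
 * As the comment above notes, only the micro TLB misses are wired up as
 * generic cache events; the main TLB miss event stays reachable as a raw
 * event. With the perf tool that would look something like:
 *
 *	perf stat -e r0F ...	# ARMV6_PERFCTR_MAIN_TLB_MISS (0xF)
 */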

enum armv6mpcore_perf_types {
	ARMV6MPCORE_PERFCTR_ICACHE_MISS	    = 0x0,
	ARMV6MPCORE_PERFCTR_IBUF_STALL	    = 0x1,
	ARMV6MPCORE_PERFCTR_DDEP_STALL	    = 0x2,
	ARMV6MPCORE_PERFCTR_ITLB_MISS	    = 0x3,
	ARMV6MPCORE_PERFCTR_DTLB_MISS	    = 0x4,
	ARMV6MPCORE_PERFCTR_BR_EXEC	    = 0x5,
	ARMV6MPCORE_PERFCTR_BR_NOTPREDICT   = 0x6,
	ARMV6MPCORE_PERFCTR_BR_MISPREDICT   = 0x7,
	ARMV6MPCORE_PERFCTR_INSTR_EXEC	    = 0x8,
	ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
	ARMV6MPCORE_PERFCTR_DCACHE_RDMISS   = 0xB,
	ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
	ARMV6MPCORE_PERFCTR_DCACHE_WRMISS   = 0xD,
	ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
	ARMV6MPCORE_PERFCTR_SW_PC_CHANGE    = 0xF,
	ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS   = 0x10,
	ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
	ARMV6MPCORE_PERFCTR_LSU_FULL_STALL  = 0x12,
	ARMV6MPCORE_PERFCTR_WBUF_DRAINED    = 0x13,
	ARMV6MPCORE_PERFCTR_CPU_CYCLES	    = 0xFF,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV6MPCORE_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV6MPCORE_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV6MPCORE_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV6MPCORE_PERFCTR_IBUF_STALL,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_ICACHE_MISS,

	/*
	 * The ARM performance counters can count micro DTLB misses, micro ITLB
	 * misses and main TLB misses. There isn't an event for TLB misses, so
	 * use the micro misses here and if users want the main TLB misses they
	 * can use a raw counter.
	 */
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_ITLB_MISS,
};

static inline unsigned long
armv6_pmcr_read(void)
{
	u32 val;
	asm volatile("mrc   p15, 0, %0, c15, c12, 0" : "=r"(val));
	return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
	asm volatile("mcr   p15, 0, %0, c15, c12, 0" : : "r"(val));
}
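
/*
 * The accessors above and the counter accessors further down all target the
 * ARM11 performance monitor registers in CP15 c15/c12: opcode2 0 selects the
 * control register read/written here, while opcode2 values 1-3 select the
 * cycle counter and the two event counters (see armv6pmu_read_counter() and
 * armv6pmu_write_counter() below).
 */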

#define ARMV6_PMCR_ENABLE		(1 << 0)
#define ARMV6_PMCR_CTR01_RESET		(1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET		(1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV		(1 << 3)
#define ARMV6_PMCR_COUNT0_IEN		(1 << 4)
#define ARMV6_PMCR_COUNT1_IEN		(1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN		(1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW	(1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW	(1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW	(1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT	20
#define ARMV6_PMCR_EVT_COUNT0_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT	12
#define ARMV6_PMCR_EVT_COUNT1_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
	(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
	 ARMV6_PMCR_CCOUNT_OVERFLOW)

static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
	return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
				  enum armv6_counters counter)
{
	int ret = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
	else if (ARMV6_COUNTER0 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
	else if (ARMV6_COUNTER1 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return ret;
}
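
/*
 * A small sketch, not used by the driver itself, of how the EVT_COUNT
 * fields above decode: given a PMCR value, pull out the event number
 * currently programmed into one of the two configurable counters.
 */
static inline unsigned int
armv6_pmcr_event(unsigned long pmcr, enum armv6_counters counter)
{
	if (ARMV6_COUNTER0 == counter)
		return (pmcr & ARMV6_PMCR_EVT_COUNT0_MASK) >>
		       ARMV6_PMCR_EVT_COUNT0_SHIFT;
	if (ARMV6_COUNTER1 == counter)
		return (pmcr & ARMV6_PMCR_EVT_COUNT1_MASK) >>
		       ARMV6_PMCR_EVT_COUNT1_SHIFT;
	/* The cycle counter has no event field. */
	return 0;
}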

static inline u64 armv6pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	unsigned long value = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mrc   p15, 0, %0, c15, c12, 1" : "=r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mrc   p15, 0, %0, c15, c12, 2" : "=r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mrc   p15, 0, %0, c15, c12, 3" : "=r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return value;
}

static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mcr   p15, 0, %0, c15, c12, 2" : : "r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mcr   p15, 0, %0, c15, c12, 3" : : "r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}

static void armv6pmu_enable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= 0;
		evt	= ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_EVT_COUNT0_MASK;
		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
			  ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_EVT_COUNT1_MASK;
		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
			  ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the event
	 * that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t
armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	unsigned long pmcr = armv6_pmcr_read();
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	if (!armv6_pmcr_has_overflowed(pmcr))
		return IRQ_NONE;

	regs = get_irq_regs();

	/*
	 * The interrupts are cleared by writing the overflow flags back to
	 * the control register. All of the other bits don't have any effect
	 * if they are rewritten, so write the whole value back.
	 */
	armv6_pmcr_write(pmcr);

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
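
/*
 * Note that nothing in this file requests the interrupt itself: the shared
 * arm_pmu core is what claims the PMU IRQ and invokes the handle_irq
 * callback installed in armv6pmu_init() below.
 */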

static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val |= ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
				struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	/* Always place a cycle-counter event into the cycle counter. */
	if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV6_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * counter0 and counter1.
		 */
		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
			return ARMV6_COUNTER1;

		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
			return ARMV6_COUNTER0;

		/* The counters are all in use. */
		return -EAGAIN;
	}
}
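
/*
 * So, for example, the first two non-cycles events scheduled on a CPU land
 * in counter1 and then counter0, and a third gets -EAGAIN until one of them
 * is released again via armv6pmu_clear_event_idx() below.
 */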

static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	clear_bit(event->hw.idx, cpuc->used_mask);
}

static void armv6pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
		evt	= 0;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the number
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6mpcore_pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, flags, evt = 0;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask	= ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask	= ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask	= ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
	 * simply disable the interrupt reporting.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv6_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv6_perf_map,
				&armv6_perf_cache_map, 0xFF);
}
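
/*
 * The 0xFF passed above (and in armv6mpcore_map_event() below) is the raw
 * event mask: for PERF_TYPE_RAW events the low byte of the user-supplied
 * config is used directly as the hardware event number, which is how e.g.
 * the main TLB miss counters stay reachable.
 */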

static void armv6pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq	= armv6pmu_handle_irq;
	cpu_pmu->enable		= armv6pmu_enable_event;
	cpu_pmu->disable	= armv6pmu_disable_event;
	cpu_pmu->read_counter	= armv6pmu_read_counter;
	cpu_pmu->write_counter	= armv6pmu_write_counter;
	cpu_pmu->get_event_idx	= armv6pmu_get_event_idx;
	cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
	cpu_pmu->start		= armv6pmu_start;
	cpu_pmu->stop		= armv6pmu_stop;
	cpu_pmu->map_event	= armv6_map_event;
	cpu_pmu->num_events	= 3;
}

static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv6pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv6_1136";
	return 0;
}

static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv6pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv6_1156";
	return 0;
}

static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv6pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv6_1176";
	return 0;
}

/*
 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
 * that some of the events have different enumerations and that there is no
 * *hack* to stop the programmable counters. To stop the counters we simply
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */

static int armv6mpcore_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv6mpcore_perf_map,
				&armv6mpcore_perf_cache_map, 0xFF);
}

static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv6_11mpcore";
	cpu_pmu->handle_irq	= armv6pmu_handle_irq;
	cpu_pmu->enable		= armv6pmu_enable_event;
	cpu_pmu->disable	= armv6mpcore_pmu_disable_event;
	cpu_pmu->read_counter	= armv6pmu_read_counter;
	cpu_pmu->write_counter	= armv6pmu_write_counter;
	cpu_pmu->get_event_idx	= armv6pmu_get_event_idx;
	cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
	cpu_pmu->start		= armv6pmu_start;
	cpu_pmu->stop		= armv6pmu_stop;
	cpu_pmu->map_event	= armv6mpcore_map_event;
	cpu_pmu->num_events	= 3;

	return 0;
}

static const struct of_device_id armv6_pmu_of_device_ids[] = {
	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init},
	{.compatible = "arm,arm1176-pmu",	.data = armv6_1176_pmu_init},
	{.compatible = "arm,arm1136-pmu",	.data = armv6_1136_pmu_init},
	{ /* sentinel value */ }
};
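
/*
 * For reference, a board's device tree would describe one of these PMUs
 * with a node along the lines of (the interrupt specifier here is made up;
 * it is entirely board-specific):
 *
 *	pmu {
 *		compatible = "arm,arm1176-pmu";
 *		interrupts = <0 5 4>;
 *	};
 */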

static const struct pmu_probe_info armv6_pmu_probe_table[] = {
	ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
	{ /* sentinel value */ }
};

static int armv6_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids,
				    armv6_pmu_probe_table);
}

static struct platform_driver armv6_pmu_driver = {
	.driver		= {
		.name	= "armv6-pmu",
		.of_match_table = armv6_pmu_of_device_ids,
	},
	.probe		= armv6_pmu_device_probe,
};

builtin_platform_driver(armv6_pmu_driver);
#endif	/* CONFIG_CPU_V6 || CONFIG_CPU_V6K */