Note: this file does not exist in v6.8.
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * ARMv5 [xscale] Performance counter handling code.
  4 *
  5 * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
  6 *
  7 * Based on the previous xscale OProfile code.
  8 *
  9 * There are two variants of the xscale PMU that we support:
 10 * 	- xscale1pmu: 2 event counters and a cycle counter
 11 * 	- xscale2pmu: 4 event counters and a cycle counter
 12 * The two variants share event definitions, but have different
 13 * PMU structures.
 14 */
 15
 16#include <asm/cputype.h>
 17#include <asm/irq_regs.h>
 18
 19#include <linux/of.h>
 20#include <linux/perf/arm_pmu.h>
 21#include <linux/platform_device.h>
 22
/*
 * Hardware event numbers understood by the xscale PMU; shared by both
 * the xscale1 and xscale2 variants (see file header comment).
 */
enum xscale_perf_types {
	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
	XSCALE_PERFCTR_DATA_STALL		= 0x02,
	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
	XSCALE_PERFCTR_BRANCH			= 0x05,
	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
	XSCALE_PERFCTR_BCU_FULL			= 0x11,
	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
	XSCALE_PERFCTR_RMW			= 0x16,
	/* XSCALE_PERFCTR_CCNT is not hardware defined */
	XSCALE_PERFCTR_CCNT			= 0xFE,
	XSCALE_PERFCTR_UNUSED			= 0xFF,
};
 48
/*
 * Logical counter indices used throughout this file.  Index 0 is always
 * the cycle counter; the event counters follow.  xscale1 uses indices
 * 0-2, xscale2 uses 0-4 (see XSCALE*_NUM_COUNTERS below).
 */
enum xscale_counters {
	XSCALE_CYCLE_COUNTER	= 0,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
	XSCALE_COUNTER3,
};
/* Counter totals include the cycle counter. */
#define XSCALE1_NUM_COUNTERS	3
#define XSCALE2_NUM_COUNTERS	5
 58
/*
 * Map the generic perf hardware event IDs onto xscale event numbers.
 * Anything not listed here is unsupported.
 */
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= XSCALE_PERFCTR_CCNT,
	[PERF_COUNT_HW_INSTRUCTIONS]		= XSCALE_PERFCTR_INSTRUCTION,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XSCALE_PERFCTR_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]		= XSCALE_PERFCTR_BRANCH_MISS,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XSCALE_PERFCTR_ICACHE_NO_DELIVER,
};
 67
/*
 * Map the generic perf cache events (cache/op/result triples) onto
 * xscale event numbers.  Unlisted combinations are unsupported.
 */
static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
};
 86
 87#define	XSCALE_PMU_ENABLE	0x001
 88#define XSCALE_PMN_RESET	0x002
 89#define	XSCALE_CCNT_RESET	0x004
 90#define	XSCALE_PMU_RESET	(CCNT_RESET | PMN_RESET)
 91#define XSCALE_PMU_CNT64	0x008
 92
 93#define XSCALE1_OVERFLOWED_MASK	0x700
 94#define XSCALE1_CCOUNT_OVERFLOW	0x400
 95#define XSCALE1_COUNT0_OVERFLOW	0x100
 96#define XSCALE1_COUNT1_OVERFLOW	0x200
 97#define XSCALE1_CCOUNT_INT_EN	0x040
 98#define XSCALE1_COUNT0_INT_EN	0x010
 99#define XSCALE1_COUNT1_INT_EN	0x020
100#define XSCALE1_COUNT0_EVT_SHFT	12
101#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
102#define XSCALE1_COUNT1_EVT_SHFT	20
103#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
104
/* Read the xscale1 performance monitor control register (CP14 c0). */
static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}
112
/* Write the xscale1 PMNC, masking off the write-as-0 bits first. */
static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* upper 4bits and 7, 11 are write-as-0 */
	val &= 0xffff77f;
	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}
120
121static inline int
122xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
123					enum xscale_counters counter)
124{
125	int ret = 0;
126
127	switch (counter) {
128	case XSCALE_CYCLE_COUNTER:
129		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
130		break;
131	case XSCALE_COUNTER0:
132		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
133		break;
134	case XSCALE_COUNTER1:
135		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
136		break;
137	default:
138		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
139	}
140
141	return ret;
142}
143
/*
 * xscale1 PMU interrupt handler.
 *
 * Snapshots the PMNC (which holds the overflow flags on this variant),
 * clears the flags and disables the PMU, then updates and re-programs
 * every counter that overflowed before re-enabling the PMU.  Returns
 * IRQ_NONE when no counter had actually overflowed.
 */
static irqreturn_t
xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * NOTE: there's an A stepping erratum that states if an overflow
	 *       bit already exists and another occurs, the previous
	 *       Overflow bit gets cleared. There's no workaround.
	 *	 Fixed in B stepping or later.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to clear the overflow flags. Overflow
	 * flags remain in pmnc for use below. We also disable the PMU
	 * while we process the interrupt.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	for_each_set_bit(idx, cpu_pmu->cntr_mask, XSCALE1_NUM_COUNTERS) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		/* Skip the sample if no new period was programmed. */
		if (!armpmu_event_set_period(event))
			continue;

		/* A non-zero return means the event should be throttled. */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
203
/*
 * Enable @event's counter in the xscale1 PMNC: set its overflow
 * interrupt enable bit and, for the event counters, program the 8-bit
 * event number into the counter's event-select field.
 */
static void xscale1pmu_enable_event(struct perf_event *event)
{
	unsigned long val, mask, evt;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		/* The cycle counter has no event-select field. */
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/* Read-modify-write: clear the old event field, set the new one. */
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
}
235
/*
 * Disable @event's counter: clear its overflow interrupt enable bit
 * and point the event-select field at XSCALE_PERFCTR_UNUSED.
 */
static void xscale1pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, evt;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/* Read-modify-write of the PMNC, as in xscale1pmu_enable_event(). */
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
}
265
266static int
267xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
268				struct perf_event *event)
269{
270	struct hw_perf_event *hwc = &event->hw;
271	if (XSCALE_PERFCTR_CCNT == hwc->config_base) {
272		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
273			return -EAGAIN;
274
275		return XSCALE_CYCLE_COUNTER;
276	} else {
277		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
278			return XSCALE_COUNTER1;
279
280		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
281			return XSCALE_COUNTER0;
282
283		return -EAGAIN;
284	}
285}
286
/* Release the counter index claimed for @event by *_get_event_idx(). */
static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	clear_bit(event->hw.idx, cpuc->used_mask);
}
292
/* Start the xscale1 PMU by setting the global enable bit in the PMNC. */
static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long val;

	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
}
301
/* Stop the xscale1 PMU by clearing the global enable bit in the PMNC. */
static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long val;

	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
}
310
/*
 * Read the 32-bit hardware value of @event's counter.  On xscale1 the
 * cycle counter lives in CP14 c1 and the event counters in c2/c3.
 * An unknown index falls through and reads as 0.
 */
static inline u64 xscale1pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}
331
/*
 * Write @val to @event's hardware counter (same CP14 register layout
 * as xscale1pmu_read_counter()).  Unknown indexes are silently ignored.
 */
static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}
349
/*
 * Translate a generic perf event config into an xscale event number
 * via the two mapping tables above; 0xFF masks the raw event field.
 */
static int xscale_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &xscale_perf_map,
				&xscale_perf_cache_map, 0xFF);
}
355
/*
 * Initialise @cpu_pmu for the xscale1 variant (2 event counters plus
 * the cycle counter): wire up the method table and the counter mask.
 */
static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale1";
	cpu_pmu->handle_irq	= xscale1pmu_handle_irq;
	cpu_pmu->enable		= xscale1pmu_enable_event;
	cpu_pmu->disable	= xscale1pmu_disable_event;
	cpu_pmu->read_counter	= xscale1pmu_read_counter;
	cpu_pmu->write_counter	= xscale1pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale1pmu_get_event_idx;
	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
	cpu_pmu->start		= xscale1pmu_start;
	cpu_pmu->stop		= xscale1pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;

	bitmap_set(cpu_pmu->cntr_mask, 0, XSCALE1_NUM_COUNTERS);

	return 0;
}
374
/*
 * xscale2 register layouts.  Unlike xscale1, the overflow flags,
 * interrupt enables and event selects live in dedicated registers,
 * one bit (or one 8-bit field) per counter.
 */
#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
394
/* Read the xscale2 PMNC, masking off the read-unpredictable bits. */
static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
	/* bits 1-2 and 4-23 are read-unpredictable */
	return val & 0xff000009;
}
403
/* Write the xscale2 PMNC; only the low 4 control bits are writable. */
static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* bits 4-23 are write-as-0, 24-31 are write ignored */
	val &= 0xf;
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}
411
/* Read the xscale2 overflow flag register (CP14 c5). */
static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}
419
/* Write the xscale2 overflow flag register (set bits clear the flags). */
static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}
425
/* Read the xscale2 event select register (CP14 c8). */
static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}
433
/* Write the xscale2 event select register (CP14 c8). */
static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}
439
/* Read the xscale2 interrupt enable register (CP14 c4). */
static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}
447
448static void
449xscale2pmu_write_int_enable(u32 val)
450{
451	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
452}
453
454static inline int
455xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
456					enum xscale_counters counter)
457{
458	int ret = 0;
459
460	switch (counter) {
461	case XSCALE_CYCLE_COUNTER:
462		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
463		break;
464	case XSCALE_COUNTER0:
465		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
466		break;
467	case XSCALE_COUNTER1:
468		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
469		break;
470	case XSCALE_COUNTER2:
471		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
472		break;
473	case XSCALE_COUNTER3:
474		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
475		break;
476	default:
477		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
478	}
479
480	return ret;
481}
482
/*
 * xscale2 PMU interrupt handler.
 *
 * Disables the PMU, reads and clears the dedicated overflow flag
 * register, updates and re-programs every counter that overflowed,
 * then re-enables the PMU.  Returns IRQ_NONE when no counter had
 * actually overflowed.
 */
static irqreturn_t
xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/* Disable the PMU. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* Check the overflow flag register. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Clear the overflow bits. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	for_each_set_bit(idx, cpu_pmu->cntr_mask, XSCALE2_NUM_COUNTERS) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		/* Skip the sample if no new period was programmed. */
		if (!armpmu_event_set_period(event))
			continue;

		/* A non-zero return means the event should be throttled. */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
536
/*
 * Enable @event's counter: program its event number into the event
 * select register (event counters only; the cycle counter has no
 * select field) and set the matching interrupt enable bit.
 */
static void xscale2pmu_enable_event(struct perf_event *event)
{
	unsigned long ien, evtsel;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
}
578
/*
 * Disable @event's counter: clear its interrupt enable bit, point the
 * event select field at XSCALE_PERFCTR_UNUSED, and clear any stale
 * overflow flag for the counter.
 */
static void xscale2pmu_disable_event(struct perf_event *event)
{
	unsigned long ien, evtsel, of_flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		of_flags = XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		of_flags = XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		of_flags = XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		of_flags = XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		of_flags = XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	xscale2pmu_write_overflow_flags(of_flags);
}
626
627static int
628xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
629				struct perf_event *event)
630{
631	int idx = xscale1pmu_get_event_idx(cpuc, event);
632	if (idx >= 0)
633		goto out;
634
635	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
636		idx = XSCALE_COUNTER3;
637	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
638		idx = XSCALE_COUNTER2;
639out:
640	return idx;
641}
642
/* Start the xscale2 PMU: clear the CNT64 mode bit and set the enable bit. */
static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long val;

	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
}
651
/* Stop the xscale2 PMU by clearing the global enable bit in the PMNC. */
static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long val;

	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
}
660
/*
 * Read the 32-bit hardware value of @event's counter.  On xscale2 the
 * cycle counter lives in CP14 c1,c1 and the event counters in
 * c0-c3,c2.  An unknown index falls through and reads as 0.
 */
static inline u64 xscale2pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}
687
/*
 * Write @val to @event's hardware counter (same CP14 register layout
 * as xscale2pmu_read_counter()).  Unknown indexes are silently ignored.
 */
static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}
711
/*
 * Initialise @cpu_pmu for the xscale2 variant (4 event counters plus
 * the cycle counter): wire up the method table and the counter mask.
 */
static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale2";
	cpu_pmu->handle_irq	= xscale2pmu_handle_irq;
	cpu_pmu->enable		= xscale2pmu_enable_event;
	cpu_pmu->disable	= xscale2pmu_disable_event;
	cpu_pmu->read_counter	= xscale2pmu_read_counter;
	cpu_pmu->write_counter	= xscale2pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale2pmu_get_event_idx;
	cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
	cpu_pmu->start		= xscale2pmu_start;
	cpu_pmu->stop		= xscale2pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;

	bitmap_set(cpu_pmu->cntr_mask, 0, XSCALE2_NUM_COUNTERS);

	return 0;
}
730
/* Probe table matching the detected xscale PMU version to its init routine. */
static const struct pmu_probe_info xscale_pmu_probe_table[] = {
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
	{ /* sentinel value */ }
};
736
/* Platform-device probe: defer to the generic arm_pmu probe with our table. */
static int xscale_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, NULL, xscale_pmu_probe_table);
}
741
/* Built-in platform driver binding against the "xscale-pmu" device. */
static struct platform_driver xscale_pmu_driver = {
	.driver		= {
		.name	= "xscale-pmu",
	},
	.probe		= xscale_pmu_device_probe,
};

builtin_platform_driver(xscale_pmu_driver);