// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events support for SH-4A performance counters
 *
 *  Copyright (C) 2009, 2010  Paul Mundt
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <asm/processor.h>

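/*
 * Memory-mapped per-counter control (CCBR) and count value (PMCTR)
 * registers, indexed by counter number.
 */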
#define PPC_CCBR(idx)	(0xff200800 + (sizeof(u32) * (idx)))
#define PPC_PMCTR(idx)	(0xfc100000 + (sizeof(u32) * (idx)))
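
/*
 * CCBR fields as used below: CIT (bits 16:6) selects the event code,
 * DUC gates counting (set to run, cleared to stop), and CMDS/PPCE are
 * set whenever a counter is programmed.
 */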
#define CCBR_CIT_MASK	(0x7ff << 6)
#define CCBR_DUC	(1 << 3)
#define CCBR_CMDS	(1 << 1)
#define CCBR_PPCE	(1 << 0)

#ifdef CONFIG_CPU_SHX3
/*
 * The PMCAT location for SH-X3 CPUs was quietly moved, while the CCBR
 * and PMCTR locations remain tentatively constant. This change remains
 * wholly undocumented, and was simply found through trial and error.
 *
 * Early cuts of SH-X3 still appear to use the SH-X/SH-X2 locations, and
 * it's unclear when this ceased to be the case. For now we always use
 * the new location (if future parts keep up with this trend, scanning
 * for them at runtime also remains a viable option).
 *
 * The gap in the register space also suggests that there are other
 * undocumented counters, so this will need to be revisited at a later
 * point in time.
 */
#define PPC_PMCAT	0xfc100240
#else
#define PPC_PMCAT	0xfc100080
#endif
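
/*
 * PMCAT per-counter overflow (OVF), CNN and clear (CLR) bits for
 * counters 0-3; only counters 0 and 1 are driven by this code.
 */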
#define PMCAT_OVF3	(1 << 27)
#define PMCAT_CNN3	(1 << 26)
#define PMCAT_CLR3	(1 << 25)
#define PMCAT_OVF2	(1 << 19)
#define PMCAT_CLR2	(1 << 17)
#define PMCAT_OVF1	(1 << 11)
#define PMCAT_CNN1	(1 << 10)
#define PMCAT_CLR1	(1 << 9)
#define PMCAT_OVF0	(1 << 3)
#define PMCAT_CLR0	(1 << 1)

static struct sh_pmu sh4a_pmu;

/*
 * Supported raw event codes:
 *
 *	Event Code	Description
 *	----------	-----------
 *
 *	0x0000		number of elapsed cycles
 *	0x0200		number of elapsed cycles in privileged mode
 *	0x0280		number of elapsed cycles while SR.BL is asserted
 *	0x0202		instruction execution
 *	0x0203		instruction execution in parallel
 *	0x0204		number of unconditional branches
 *	0x0208		number of exceptions
 *	0x0209		number of interrupts
 *	0x0220		UTLB miss caused by instruction fetch
 *	0x0222		UTLB miss caused by operand access
 *	0x02a0		number of ITLB misses
 *	0x0028		number of accesses to instruction memories
 *	0x0029		number of accesses to instruction cache
 *	0x002a		instruction cache miss
 *	0x022e		number of accesses to instruction X/Y memory
 *	0x0030		number of reads to operand memories
 *	0x0038		number of writes to operand memories
 *	0x0031		number of operand cache read accesses
 *	0x0039		number of operand cache write accesses
 *	0x0032		operand cache read miss
 *	0x003a		operand cache write miss
 *	0x0236		number of reads to operand X/Y memory
 *	0x023e		number of writes to operand X/Y memory
 *	0x0237		number of reads to operand U memory
 *	0x023f		number of writes to operand U memory
 *	0x0337		number of U memory read buffer misses
 *	0x02b4		number of wait cycles due to operand read access
 *	0x02bc		number of wait cycles due to operand write access
 *	0x0033		number of wait cycles due to operand cache read miss
 *	0x003b		number of wait cycles due to operand cache write miss
 */
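
/*
 * Raw codes from the table above can be requested directly through the
 * perf tool's raw event syntax (hex), e.g.:
 *
 *	perf stat -e r202 -e r203 -- ./workload
 *
 * to count instructions executed and instructions executed in
 * parallel.
 */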

/*
 * Special reserved bits used by hardware emulators; read values will
 * vary, but writes must always be 0.
 */
#define PMCAT_EMU_CLR_MASK	((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))
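
/*
 * Generic perf events mapped to raw event codes; -1 flags events with
 * no SH-4A counterpart.
 */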
static const int sh4a_general_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0000,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x0202,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0029,	/* I-cache */
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x002a,	/* I-cache */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x0204,
	[PERF_COUNT_HW_BRANCH_MISSES]		= -1,
	[PERF_COUNT_HW_BUS_CYCLES]		= -1,
};

#define C(x)	PERF_COUNT_HW_CACHE_##x

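/*
 * Generic cache event map; -1 flags op/result combinations that are
 * unsupported, while entries left at 0 simply have no dedicated event
 * code.
 */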
static const int sh4a_cache_events
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0031,
			[ C(RESULT_MISS)   ] = 0x0032,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0x0039,
			[ C(RESULT_MISS)   ] = 0x003a,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0029,
			[ C(RESULT_MISS)   ] = 0x002a,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0030,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0x0038,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0222,
			[ C(RESULT_MISS)   ] = 0x0220,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0x02a0,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},

	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},

	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
};
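
/* Translate a generic hardware event ID into an SH-4A raw event code. */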
static int sh4a_event_map(int event)
{
	return sh4a_general_events[event];
}
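
/* Read the current value of counter @idx from its PMCTR register. */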
static u64 sh4a_pmu_read(int idx)
{
	return __raw_readl(PPC_PMCTR(idx));
}
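
/*
 * Stop counter @idx: clear its event selection (CIT) and the DUC run
 * bit in CCBR.
 */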
static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx)
{
	unsigned int tmp;

	tmp = __raw_readl(PPC_CCBR(idx));
	tmp &= ~(CCBR_CIT_MASK | CCBR_DUC);
	__raw_writel(tmp, PPC_CCBR(idx));
}
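
/*
 * Program and start counter @idx: clear any stale count via the
 * counter's PMCAT CLR bit (keeping the emulator-reserved bits zero),
 * select the event in the CCBR CIT field together with CMDS and PPCE,
 * then set DUC with a separate write to start counting.
 */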
static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)
{
	unsigned int tmp;

	tmp = __raw_readl(PPC_PMCAT);
	tmp &= ~PMCAT_EMU_CLR_MASK;
	tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0;
	__raw_writel(tmp, PPC_PMCAT);

	tmp = __raw_readl(PPC_CCBR(idx));
	tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;
	__raw_writel(tmp, PPC_CCBR(idx));

	__raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));
}
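
/* Stop every counter by clearing DUC in each CCBR. */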
static void sh4a_pmu_disable_all(void)
{
	int i;

	for (i = 0; i < sh4a_pmu.num_events; i++)
		__raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i));
}
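
/* Start every counter by setting DUC in each CCBR. */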
static void sh4a_pmu_enable_all(void)
{
	int i;

	for (i = 0; i < sh4a_pmu.num_events; i++)
		__raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i));
}
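
/*
 * Two counters are exposed; raw event codes from userspace are
 * truncated to raw_event_mask before being programmed.
 */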
static struct sh_pmu sh4a_pmu = {
	.name		= "sh4a",
	.num_events	= 2,
	.event_map	= sh4a_event_map,
	.max_events	= ARRAY_SIZE(sh4a_general_events),
	.raw_event_mask	= 0x3ff,
	.cache_events	= &sh4a_cache_events,
	.read		= sh4a_pmu_read,
	.disable	= sh4a_pmu_disable,
	.enable		= sh4a_pmu_enable,
	.disable_all	= sh4a_pmu_disable_all,
	.enable_all	= sh4a_pmu_enable_all,
};
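
/*
 * Register the PMU at early init, provided the CPU actually
 * advertises performance counters.
 */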
static int __init sh4a_pmu_init(void)
{
	/*
	 * Make sure this CPU actually has perf counters.
	 */
	if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) {
		pr_notice("HW perf events unsupported, software events only.\n");
		return -ENODEV;
	}

	return register_sh_pmu(&sh4a_pmu);
}
early_initcall(sh4a_pmu_init);