/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and the event counters can each be reset independently.
 */

#ifdef CONFIG_CPU_V7
/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR		= 0x00,
	ARMV7_PERFCTR_IFETCH_MISS		= 0x01,
	ARMV7_PERFCTR_ITLB_MISS			= 0x02,
	ARMV7_PERFCTR_DCACHE_REFILL		= 0x03,	/* L1 */
	ARMV7_PERFCTR_DCACHE_ACCESS		= 0x04,	/* L1 */
	ARMV7_PERFCTR_DTLB_REFILL		= 0x05,
	ARMV7_PERFCTR_DREAD			= 0x06,
	ARMV7_PERFCTR_DWRITE			= 0x07,
	ARMV7_PERFCTR_INSTR_EXECUTED		= 0x08,
	ARMV7_PERFCTR_EXC_TAKEN			= 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED		= 0x0A,
	ARMV7_PERFCTR_CID_WRITE			= 0x0B,
	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE			= 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH		= 0x0D,
	ARMV7_PERFCTR_PC_PROC_RETURN		= 0x0E,
	ARMV7_PERFCTR_UNALIGNED_ACCESS		= 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED	= 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES		= 0x11,
	ARMV7_PERFCTR_PC_BRANCH_PRED		= 0x12,

	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
	ARMV7_PERFCTR_MEM_ACCESS		= 0x13,
	ARMV7_PERFCTR_L1_ICACHE_ACCESS		= 0x14,
	ARMV7_PERFCTR_L1_DCACHE_WB		= 0x15,
	ARMV7_PERFCTR_L2_DCACHE_ACCESS		= 0x16,
	ARMV7_PERFCTR_L2_DCACHE_REFILL		= 0x17,
	ARMV7_PERFCTR_L2_DCACHE_WB		= 0x18,
	ARMV7_PERFCTR_BUS_ACCESS		= 0x19,
	ARMV7_PERFCTR_MEMORY_ERROR		= 0x1A,
	ARMV7_PERFCTR_INSTR_SPEC		= 0x1B,
	ARMV7_PERFCTR_TTBR_WRITE		= 0x1C,
	ARMV7_PERFCTR_BUS_CYCLES		= 0x1D,

	ARMV7_PERFCTR_CPU_CYCLES		= 0xFF
};
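
/*
 * Illustrative note: besides the symbolic mappings below, these encodings
 * can be requested from userspace as raw perf events, e.g. "perf stat -e r3"
 * counts ARMV7_PERFCTR_DCACHE_REFILL, subject to the 0xFF raw_event_mask
 * set on the armv7pmu structure below.
 */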

/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_PERFCTR_WRITE_BUFFER_FULL		= 0x40,
	ARMV7_PERFCTR_L2_STORE_MERGED		= 0x41,
	ARMV7_PERFCTR_L2_STORE_BUFF		= 0x42,
	ARMV7_PERFCTR_L2_ACCESS			= 0x43,
	ARMV7_PERFCTR_L2_CACH_MISS		= 0x44,
	ARMV7_PERFCTR_AXI_READ_CYCLES		= 0x45,
	ARMV7_PERFCTR_AXI_WRITE_CYCLES		= 0x46,
	ARMV7_PERFCTR_MEMORY_REPLAY		= 0x47,
	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY	= 0x48,
	ARMV7_PERFCTR_L1_DATA_MISS		= 0x49,
	ARMV7_PERFCTR_L1_INST_MISS		= 0x4A,
	ARMV7_PERFCTR_L1_DATA_COLORING		= 0x4B,
	ARMV7_PERFCTR_L1_NEON_DATA		= 0x4C,
	ARMV7_PERFCTR_L1_NEON_CACH_DATA		= 0x4D,
	ARMV7_PERFCTR_L2_NEON			= 0x4E,
	ARMV7_PERFCTR_L2_NEON_HIT		= 0x4F,
	ARMV7_PERFCTR_L1_INST			= 0x50,
	ARMV7_PERFCTR_PC_RETURN_MIS_PRED	= 0x51,
	ARMV7_PERFCTR_PC_BRANCH_FAILED		= 0x52,
	ARMV7_PERFCTR_PC_BRANCH_TAKEN		= 0x53,
	ARMV7_PERFCTR_PC_BRANCH_EXECUTED	= 0x54,
	ARMV7_PERFCTR_OP_EXECUTED		= 0x55,
	ARMV7_PERFCTR_CYCLES_INST_STALL		= 0x56,
	ARMV7_PERFCTR_CYCLES_INST		= 0x57,
	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL	= 0x58,
	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL	= 0x59,
	ARMV7_PERFCTR_NEON_CYCLES		= 0x5A,

	ARMV7_PERFCTR_PMU0_EVENTS		= 0x70,
	ARMV7_PERFCTR_PMU1_EVENTS		= 0x71,
	ARMV7_PERFCTR_PMU_EVENTS		= 0x72,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC	= 0x40,
	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC	= 0x41,
	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC	= 0x42,

	ARMV7_PERFCTR_COHERENT_LINE_MISS	= 0x50,
	ARMV7_PERFCTR_COHERENT_LINE_HIT		= 0x51,

	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES	= 0x60,
	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES	= 0x61,
	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES	= 0x62,
	ARMV7_PERFCTR_STREX_EXECUTED_PASSED	= 0x63,
	ARMV7_PERFCTR_STREX_EXECUTED_FAILED	= 0x64,
	ARMV7_PERFCTR_DATA_EVICTION		= 0x65,
	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST	= 0x66,
	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY		= 0x67,
	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE	= 0x68,

	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS	= 0x6E,

	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST	= 0x70,
	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST	= 0x71,
	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST	= 0x72,
	ARMV7_PERFCTR_FP_EXECUTED_INST		= 0x73,
	ARMV7_PERFCTR_NEON_EXECUTED_INST	= 0x74,

	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES	= 0x80,
	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES	= 0x81,
	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES	= 0x82,
	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES	= 0x83,
	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES	= 0x84,
	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES	= 0x85,
	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES	= 0x86,

	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES	= 0x8A,
	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES	= 0x8B,

	ARMV7_PERFCTR_ISB_INST			= 0x90,
	ARMV7_PERFCTR_DSB_INST			= 0x91,
	ARMV7_PERFCTR_DMB_INST			= 0x92,
	ARMV7_PERFCTR_EXT_INTERRUPTS		= 0x93,

	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED	= 0xA0,
	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED	= 0xA1,
	ARMV7_PERFCTR_PLE_FIFO_FLUSH		= 0xA2,
	ARMV7_PERFCTR_PLE_RQST_COMPLETED	= 0xA3,
	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW		= 0xA4,
	ARMV7_PERFCTR_PLE_RQST_PROG		= 0xA5
};

/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
	ARMV7_PERFCTR_IRQ_TAKEN			= 0x86,
	ARMV7_PERFCTR_FIQ_TAKEN			= 0x87,

	ARMV7_PERFCTR_EXT_MEM_RQST		= 0xc0,
	ARMV7_PERFCTR_NC_EXT_MEM_RQST		= 0xc1,
	ARMV7_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
	ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP	= 0xc3,
	ARMV7_PERFCTR_ENTER_READ_ALLOC		= 0xc4,
	ARMV7_PERFCTR_READ_ALLOC		= 0xc5,

	ARMV7_PERFCTR_STALL_SB_FULL		= 0xc9,
};

/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
	ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS	= 0x40,
	ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS	= 0x41,
	ARMV7_PERFCTR_L1_DCACHE_READ_REFILL	= 0x42,
	ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL	= 0x43,

	ARMV7_PERFCTR_L1_DTLB_READ_REFILL	= 0x4C,
	ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL	= 0x4D,

	ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS	= 0x50,
	ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS	= 0x51,
	ARMV7_PERFCTR_L2_DCACHE_READ_REFILL	= 0x52,
	ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL	= 0x53,

	ARMV7_PERFCTR_SPEC_PC_WRITE		= 0x76,
};

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		=
					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(NODE)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]
					= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]
					= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]
					= ARMV7_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		/*
		 * The prefetch counters don't differentiate between the I
		 * side and the D side.
		 */
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]
					= ARMV7_PERFCTR_PREFETCH_LINEFILL,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_SPEC_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]
					= ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]
					= ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		/*
		 * Not all performance counters differentiate between read
		 * and write accesses/misses so we're not always strictly
		 * correct, but it's the best we can do. Writes and reads get
		 * combined in these cases.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]
					= ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]
					= ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Perf Events counters
 */
enum armv7_counters {
	ARMV7_CYCLE_COUNTER	= 1,	/* Cycle counter */
	ARMV7_COUNTER0		= 2,	/* First event counter */
};

/*
 * The cycle counter is ARMV7_CYCLE_COUNTER.
 * The first event counter is ARMV7_COUNTER0.
 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
 */
#define ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0)	/* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1)	/* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2)	/* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3)	/* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4)	/* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5)	/* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT	11		/* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f		/* Mask for writable bits */
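
/*
 * Illustrative sketch (assumes the PMU is otherwise quiescent): with the
 * accessors defined below, the PMU could be reset and started with the
 * 64-cycle CCNT divider enabled as follows:
 *
 *	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C | ARMV7_PMNC_D);
 *	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
 */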

/*
 * Available counters
 */
#define ARMV7_CNT0		0	/* First event counter */
#define ARMV7_CCNT		31	/* Cycle counter */

/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)

/*
 * CNTENS: counters enable reg
 */
#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)
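
/*
 * Example of the mapping above: perf counter index ARMV7_COUNTER0 (2) maps
 * to hardware counter CNT0 (0), so ARMV7_CNTENS_P(ARMV7_COUNTER0) evaluates
 * to (1 << 0), the enable bit of the first event counter.
 */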

/*
 * CNTENC: counters disable reg
 */
#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)

/*
 * INTENS: counters overflow interrupt enable reg
 */
#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)

/*
 * INTENC: counters overflow interrupt disable reg
 */
#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)

/*
 * EVTSEL: Event selection reg
 */
#define ARMV7_EVTSEL_MASK	0xff		/* Mask for writable bits */

/*
 * SELECT: Counter selection reg
 */
#define ARMV7_SELECT_MASK	0x1f		/* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
#define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

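/* PMNC is the PMCR register in ARM ARM terms: CP15 c9, c12, 0. */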
static inline unsigned long armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(unsigned long val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
					enum armv7_counters counter)
{
	int ret = 0;

	if (counter == ARMV7_CYCLE_COUNTER)
		ret = pmnc & ARMV7_FLAG_C;
	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
		ret = pmnc & ARMV7_FLAG_P(counter);
	else
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), counter);

	return ret;
}

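/*
 * Select a counter through PMSELR (c9, c12, 5); subsequent accesses to
 * PMXEVTYPER (c9, c13, 1) and PMXEVCNTR (c9, c13, 2) then target the
 * selected counter.
 */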
static inline int armv7_pmnc_select_counter(unsigned int idx)
{
	u32 val;

	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
	isb();

	return idx;
}

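/* The cycle counter is accessed through PMCCNTR (c9, c13, 0). */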
static inline u32 armv7pmu_read_counter(int idx)
{
	unsigned long value = 0;

	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mrc p15, 0, %0, c9, c13, 2"
				     : "=r" (value));
	} else
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);

	return value;
}

static inline void armv7pmu_write_counter(int idx, u32 value)
{
	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mcr p15, 0, %0, c9, c13, 2"
				     : : "r" (value));
	} else
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
}

static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTSEL_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}

static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENS_C;
	else
		val = ARMV7_CNTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));

	return idx;
}

static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENC_C;
	else
		val = ARMV7_CNTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));

	return idx;
}

static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter interrupt enable %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENS_C;
	else
		val = ARMV7_INTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));

	return idx;
}

static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter interrupt enable %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENC_C;
	else
		val = ARMV7_INTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));

	return idx;
}

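/*
 * The FLAG register is PMOVSR (c9, c12, 3); its overflow bits are
 * write-1-to-clear, hence the read-then-write-back sequence below.
 */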
static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
	}
}
#endif

static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't need to set the event if it's a cycle count
	 */
	if (idx != ARMV7_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx, 1);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv7pmu_start(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void armv7pmu_stop(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	int idx;

	/* Always place a cycle counter into the cycle counter. */
	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try to use
		 * the event counters
		 */
		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
			if (!test_and_set_bit(idx, cpuc->used_mask))
				return idx;
		}

		/* The counters are all in use. */
		return -EAGAIN;
	}
}

static void armv7pmu_reset(void *info)
{
	u32 idx, nb_cnt = armpmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = 1; idx < nb_cnt; ++idx)
		armv7pmu_disable_event(NULL, idx);

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}

static struct arm_pmu armv7pmu = {
	.handle_irq		= armv7pmu_handle_irq,
	.enable			= armv7pmu_enable_event,
	.disable		= armv7pmu_disable_event,
	.read_counter		= armv7pmu_read_counter,
	.write_counter		= armv7pmu_write_counter,
	.get_event_idx		= armv7pmu_get_event_idx,
	.start			= armv7pmu_start,
	.stop			= armv7pmu_stop,
	.reset			= armv7pmu_reset,
	.raw_event_mask		= 0xFF,
	.max_period		= (1LLU << 32) - 1,
};

static u32 __init armv7_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}

static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
	armv7pmu.name		= "ARMv7 Cortex-A8";
	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
	armv7pmu.event_map	= &armv7_a8_perf_map;
	armv7pmu.num_events	= armv7_read_num_pmnc_events();
	return &armv7pmu;
}

static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
	armv7pmu.name		= "ARMv7 Cortex-A9";
	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
	armv7pmu.event_map	= &armv7_a9_perf_map;
	armv7pmu.num_events	= armv7_read_num_pmnc_events();
	return &armv7pmu;
}

static const struct arm_pmu *__init armv7_a5_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
	armv7pmu.name		= "ARMv7 Cortex-A5";
	armv7pmu.cache_map	= &armv7_a5_perf_cache_map;
	armv7pmu.event_map	= &armv7_a5_perf_map;
	armv7pmu.num_events	= armv7_read_num_pmnc_events();
	return &armv7pmu;
}

static const struct arm_pmu *__init armv7_a15_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
	armv7pmu.name		= "ARMv7 Cortex-A15";
	armv7pmu.cache_map	= &armv7_a15_perf_cache_map;
	armv7pmu.event_map	= &armv7_a15_perf_map;
	armv7pmu.num_events	= armv7_read_num_pmnc_events();
	return &armv7pmu;
}
#else
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
	return NULL;
}

static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
	return NULL;
}

static const struct arm_pmu *__init armv7_a5_pmu_init(void)
{
	return NULL;
}

static const struct arm_pmu *__init armv7_a15_pmu_init(void)
{
	return NULL;
}
#endif	/* CONFIG_CPU_V7 */
// SPDX-License-Identifier: GPL-2.0
/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and the event counters can each be reset independently.
 */

#ifdef CONFIG_CPU_V7

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
#define ARMV7_PERFCTR_PMNC_SW_INCR			0x00
#define ARMV7_PERFCTR_L1_ICACHE_REFILL			0x01
#define ARMV7_PERFCTR_ITLB_REFILL			0x02
#define ARMV7_PERFCTR_L1_DCACHE_REFILL			0x03
#define ARMV7_PERFCTR_L1_DCACHE_ACCESS			0x04
#define ARMV7_PERFCTR_DTLB_REFILL			0x05
#define ARMV7_PERFCTR_MEM_READ				0x06
#define ARMV7_PERFCTR_MEM_WRITE				0x07
#define ARMV7_PERFCTR_INSTR_EXECUTED			0x08
#define ARMV7_PERFCTR_EXC_TAKEN				0x09
#define ARMV7_PERFCTR_EXC_EXECUTED			0x0A
#define ARMV7_PERFCTR_CID_WRITE				0x0B

/*
 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
 * It counts:
 *  - all (taken) branch instructions,
 *  - instructions that explicitly write the PC,
 *  - exception generating instructions.
 */
#define ARMV7_PERFCTR_PC_WRITE				0x0C
#define ARMV7_PERFCTR_PC_IMM_BRANCH			0x0D
#define ARMV7_PERFCTR_PC_PROC_RETURN			0x0E
#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		0x0F
#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		0x10
#define ARMV7_PERFCTR_CLOCK_CYCLES			0x11
#define ARMV7_PERFCTR_PC_BRANCH_PRED			0x12

/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
#define ARMV7_PERFCTR_MEM_ACCESS			0x13
#define ARMV7_PERFCTR_L1_ICACHE_ACCESS			0x14
#define ARMV7_PERFCTR_L1_DCACHE_WB			0x15
#define ARMV7_PERFCTR_L2_CACHE_ACCESS			0x16
#define ARMV7_PERFCTR_L2_CACHE_REFILL			0x17
#define ARMV7_PERFCTR_L2_CACHE_WB			0x18
#define ARMV7_PERFCTR_BUS_ACCESS			0x19
#define ARMV7_PERFCTR_MEM_ERROR				0x1A
#define ARMV7_PERFCTR_INSTR_SPEC			0x1B
#define ARMV7_PERFCTR_TTBR_WRITE			0x1C
#define ARMV7_PERFCTR_BUS_CYCLES			0x1D

#define ARMV7_PERFCTR_CPU_CYCLES			0xFF
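
/*
 * 0xFF is not an architected event number; this driver uses it as a magic
 * value that maps straight to the dedicated cycle counter (CCNT).
 */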

/* ARMv7 Cortex-A8 specific event types */
#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		0x43
#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL		0x44
#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		0x50
#define ARMV7_A8_PERFCTR_STALL_ISIDE			0x56

/* ARMv7 Cortex-A9 specific event types */
#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		0x68
#define ARMV7_A9_PERFCTR_STALL_ICACHE			0x60
#define ARMV7_A9_PERFCTR_STALL_DISPATCH			0x66

/* ARMv7 Cortex-A5 specific event types */
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		0xc2
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		0xc3

/* ARMv7 Cortex-A15 specific event types */
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		0x42
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	0x43

#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		0x4C
#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		0x4D

#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		0x50
#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		0x52
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		0x53

#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC			0x76

/* ARMv7 Cortex-A12 specific event types */
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41

#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		0x50
#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51

#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC			0x76

#define ARMV7_A12_PERFCTR_PF_TLB_REFILL			0xe7

/* ARMv7 Krait specific event types */
#define KRAIT_PMRESR0_GROUP0				0xcc
#define KRAIT_PMRESR1_GROUP0				0xd0
#define KRAIT_PMRESR2_GROUP0				0xd4
#define KRAIT_VPMRESR0_GROUP0				0xd8
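
/*
 * Note (assumption, following the mainline Krait PMU code): Krait event
 * values above 0xff are not architected PMNx event numbers; they encode a
 * resource register region, an event code and a group, which the driver
 * programs into the PMRESRn/VPMRESR registers named above.
 */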

#define KRAIT_PERFCTR_L1_ICACHE_ACCESS			0x10011
#define KRAIT_PERFCTR_L1_ICACHE_MISS			0x10010

#define KRAIT_PERFCTR_L1_ITLB_ACCESS			0x12222
#define KRAIT_PERFCTR_L1_DTLB_ACCESS			0x12210

/* ARMv7 Scorpion specific event types */
#define SCORPION_LPM0_GROUP0				0x4c
#define SCORPION_LPM1_GROUP0				0x50
#define SCORPION_LPM2_GROUP0				0x54
#define SCORPION_L2LPM_GROUP0				0x58
#define SCORPION_VLPM_GROUP0				0x5c

#define SCORPION_ICACHE_ACCESS				0x10053
#define SCORPION_ICACHE_MISS				0x10052

#define SCORPION_DTLB_ACCESS				0x12013
#define SCORPION_DTLB_MISS				0x12012

#define SCORPION_ITLB_MISS				0x12021

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
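	/*
	 * Entries not listed explicitly below default to HW_OP_UNSUPPORTED
	 * (and, in the cache maps, to CACHE_OP_UNSUPPORTED) via these
	 * all-unsupported initializer macros.
	 */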
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
	/*
	 * The prefetch counters don't differentiate between the I side and the
	 * D side.
	 */
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A12 HW events mapping
 */
static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Krait HW events mapping
 */
static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};

454static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
455 PERF_MAP_ALL_UNSUPPORTED,
456 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
457 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
458 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
459 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
460};
461
462static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
463 [PERF_COUNT_HW_CACHE_OP_MAX]
464 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
465 PERF_CACHE_MAP_ALL_UNSUPPORTED,
466
467 /*
468 * The performance counters don't differentiate between read and write
469 * accesses/misses so this isn't strictly correct, but it's the best we
470 * can do. Writes and reads get combined.
471 */
472 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
473 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
474 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
475 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
476
477 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ICACHE_ACCESS,
478 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = KRAIT_PERFCTR_L1_ICACHE_MISS,
479
480 [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
481 [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
482
483 [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
484 [C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
485
486 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
487 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
488 [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
489 [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
490};
491
492/*
493 * Scorpion HW events mapping
494 */
495static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
496 PERF_MAP_ALL_UNSUPPORTED,
497 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
498 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
499 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
500 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
501 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
502};
503
504static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
505 [PERF_COUNT_HW_CACHE_OP_MAX]
506 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
507 PERF_CACHE_MAP_ALL_UNSUPPORTED,
508 /*
509 * The performance counters don't differentiate between read and write
510 * accesses/misses so this isn't strictly correct, but it's the best we
511 * can do. Writes and reads get combined.
512 */
513 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
514 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
515 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
516 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
517 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
518 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
	/*
	 * Only ITLB misses and DTLB refills are supported. If users want
	 * the DTLB refill misses, a raw counter must be used.
	 */
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *armv7_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group armv7_pmu_format_attr_group = {
	.name = "format",
	.attrs = armv7_pmu_format_attrs,
};
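
/*
 * With the "format" group above, the perf tool can program raw ARMv7
 * events by number: bits 0-7 of perf_event_attr::config select the event.
 * Illustrative usage (the PMU instance name varies with the CPU):
 *
 *	perf stat -e armv7_cortex_a9/event=0x03/ -- <workload>
 *
 * would count event 0x03 (L1 data cache refills) on a Cortex-A9.
 */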

#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
#define ARMV7_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
			      "event=" ARMV7_EVENT_ATTR_RESOLVE(config))

ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);

static struct attribute *armv7_pmuv1_event_attrs[] = {
	&armv7_event_attr_sw_incr.attr.attr,
	&armv7_event_attr_l1i_cache_refill.attr.attr,
	&armv7_event_attr_l1i_tlb_refill.attr.attr,
	&armv7_event_attr_l1d_cache_refill.attr.attr,
	&armv7_event_attr_l1d_cache.attr.attr,
	&armv7_event_attr_l1d_tlb_refill.attr.attr,
	&armv7_event_attr_ld_retired.attr.attr,
	&armv7_event_attr_st_retired.attr.attr,
	&armv7_event_attr_inst_retired.attr.attr,
	&armv7_event_attr_exc_taken.attr.attr,
	&armv7_event_attr_exc_return.attr.attr,
	&armv7_event_attr_cid_write_retired.attr.attr,
	&armv7_event_attr_pc_write_retired.attr.attr,
	&armv7_event_attr_br_immed_retired.attr.attr,
	&armv7_event_attr_br_return_retired.attr.attr,
	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
	&armv7_event_attr_br_mis_pred.attr.attr,
	&armv7_event_attr_cpu_cycles.attr.attr,
	&armv7_event_attr_br_pred.attr.attr,
	NULL,
};

static struct attribute_group armv7_pmuv1_events_attr_group = {
	.name = "events",
	.attrs = armv7_pmuv1_event_attrs,
};

ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);

static struct attribute *armv7_pmuv2_event_attrs[] = {
	&armv7_event_attr_sw_incr.attr.attr,
	&armv7_event_attr_l1i_cache_refill.attr.attr,
	&armv7_event_attr_l1i_tlb_refill.attr.attr,
	&armv7_event_attr_l1d_cache_refill.attr.attr,
	&armv7_event_attr_l1d_cache.attr.attr,
	&armv7_event_attr_l1d_tlb_refill.attr.attr,
	&armv7_event_attr_ld_retired.attr.attr,
	&armv7_event_attr_st_retired.attr.attr,
	&armv7_event_attr_inst_retired.attr.attr,
	&armv7_event_attr_exc_taken.attr.attr,
	&armv7_event_attr_exc_return.attr.attr,
	&armv7_event_attr_cid_write_retired.attr.attr,
	&armv7_event_attr_pc_write_retired.attr.attr,
	&armv7_event_attr_br_immed_retired.attr.attr,
	&armv7_event_attr_br_return_retired.attr.attr,
	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
	&armv7_event_attr_br_mis_pred.attr.attr,
	&armv7_event_attr_cpu_cycles.attr.attr,
	&armv7_event_attr_br_pred.attr.attr,
	&armv7_event_attr_mem_access.attr.attr,
	&armv7_event_attr_l1i_cache.attr.attr,
	&armv7_event_attr_l1d_cache_wb.attr.attr,
	&armv7_event_attr_l2d_cache.attr.attr,
	&armv7_event_attr_l2d_cache_refill.attr.attr,
	&armv7_event_attr_l2d_cache_wb.attr.attr,
	&armv7_event_attr_bus_access.attr.attr,
	&armv7_event_attr_memory_error.attr.attr,
	&armv7_event_attr_inst_spec.attr.attr,
	&armv7_event_attr_ttbr_write_retired.attr.attr,
	&armv7_event_attr_bus_cycles.attr.attr,
	NULL,
};

static struct attribute_group armv7_pmuv2_events_attr_group = {
	.name = "events",
	.attrs = armv7_pmuv2_event_attrs,
};

/*
 * Perf Events' indices
 */
#define ARMV7_IDX_CYCLE_COUNTER	0
#define ARMV7_IDX_COUNTER0	1
#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV7_MAX_COUNTERS	32
#define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV7_IDX_TO_COUNTER(x) \
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
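
/*
 * Example: event counter index 1 (ARMV7_IDX_COUNTER0) maps to hardware
 * counter 0. The cycle counter index 0 wraps to 31 under the mask, which
 * matches the cycle counter's bit position in the enable, interrupt-enable
 * and overflow flag registers.
 */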

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
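
/*
 * For instance, armv7pmu_reset() below writes ARMV7_PMNC_P | ARMV7_PMNC_C
 * (0x6) to zero all event counters and the cycle counter in one go.
 */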

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define ARMV7_EXCLUDE_PL1	(1 << 31)
#define ARMV7_EXCLUDE_USER	(1 << 30)
#define ARMV7_INCLUDE_HYP	(1 << 27)

/*
 * Secure debug enable reg
 */
#define ARMV7_SDER_SUNIDEN	BIT(1)	/* Permit non-invasive debug */

static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}

static inline void armv7_pmnc_select_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();
}

static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
	}

	return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
	}
}

static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	armv7_pmnc_select_counter(idx);
	val &= ARMV7_EVTYPE_MASK;
	asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}

static inline void armv7_pmnc_enable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_enable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
	u32 val;
	unsigned int cnt;

	pr_info("PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	pr_info("PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	pr_info("CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	pr_info("INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	pr_info("FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	pr_info("SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	pr_info("CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0;
			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		pr_info("CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		pr_info("CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
#endif

static void armv7pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;

	/* Always place a cycle-counter event into the cycle counter. */
	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try to use
	 * the event counters.
	 */
	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV7_EXCLUDE_USER;
	if (attr->exclude_kernel)
		config_base |= ARMV7_EXCLUDE_PL1;
	if (!attr->exclude_hv)
		config_base |= ARMV7_INCLUDE_HYP;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
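
/*
 * Illustrative example: for "perf stat -e cycles:u" the perf core sets
 * attr->exclude_kernel (and exclude_hv), so config_base ends up as
 * ARMV7_EXCLUDE_PL1 and the PMU filters out cycles spent at PL1. The
 * exact attr bits depend on the perf tool invocation.
 */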

static void armv7pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events, val;

	if (cpu_pmu->secure_access) {
		asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
		val |= ARMV7_SDER_SUNIDEN;
		asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
	}

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_disable_counter(idx);
		armv7_pmnc_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}

static int armv7_a8_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a7_perf_map,
				&armv7_a7_perf_cache_map, 0xFF);
}

static int armv7_a12_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a12_perf_map,
				&armv7_a12_perf_cache_map, 0xFF);
}

static int krait_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map,
				&krait_perf_cache_map, 0xFFFFF);
}

static int krait_map_event_no_branch(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map_no_branch,
				&krait_perf_cache_map, 0xFFFFF);
}

static int scorpion_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &scorpion_perf_map,
				&scorpion_perf_cache_map, 0xFFFFF);
}

static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq = armv7pmu_handle_irq;
	cpu_pmu->enable = armv7pmu_enable_event;
	cpu_pmu->disable = armv7pmu_disable_event;
	cpu_pmu->read_counter = armv7pmu_read_counter;
	cpu_pmu->write_counter = armv7pmu_write_counter;
	cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
	cpu_pmu->start = armv7pmu_start;
	cpu_pmu->stop = armv7pmu_stop;
	cpu_pmu->reset = armv7pmu_reset;
	cpu_pmu->max_period = (1LLU << 32) - 1;
}

static void armv7_read_num_pmnc_events(void *info)
{
	int *nb_cnt = info;

	/* Read the nb of CNTx counters supported from PMNC */
	*nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter */
	*nb_cnt += 1;
}

static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
{
	return smp_call_function_any(&arm_pmu->supported_cpus,
				     armv7_read_num_pmnc_events,
				     &arm_pmu->num_events, 1);
}
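
/*
 * Example: a Cortex-A8 reports PMCR.N == 4, so num_events becomes 5
 * (four programmable event counters plus the cycle counter).
 */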

static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_cortex_a8";
	cpu_pmu->map_event = armv7_a8_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv7_pmuv1_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv7_pmu_format_attr_group;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_cortex_a9";
	cpu_pmu->map_event = armv7_a9_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv7_pmuv1_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv7_pmu_format_attr_group;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_cortex_a5";
	cpu_pmu->map_event = armv7_a5_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv7_pmuv1_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv7_pmu_format_attr_group;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_cortex_a15";
	cpu_pmu->map_event = armv7_a15_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv7_pmuv2_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv7_pmu_format_attr_group;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_cortex_a7";
	cpu_pmu->map_event = armv7_a7_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv7_pmuv2_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv7_pmu_format_attr_group;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_cortex_a12";
	cpu_pmu->map_event = armv7_a12_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv7_pmuv2_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv7_pmu_format_attr_group;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv7_a12_pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_cortex_a17";
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv7_pmuv2_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv7_pmu_format_attr_group;
	return ret;
}

/*
 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
 *
 *            31   30     24     16     8      0
 *            +--------------------------------+
 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 * Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
 *
 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
 *  unit, etc.) while the event code (CC) corresponds to a particular class of
 *  events (interrupts for example). An event code is broken down into
 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 *  example).
 */

#define KRAIT_EVENT		(1 << 16)
#define VENUM_EVENT		(2 << 16)
#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
#define PMRESRn_EN		BIT(31)

#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
#define EVENT_VENUM(event)	(!!((event) & VENUM_EVENT))	/* N=2 */
#define EVENT_CPU(event)	(!!((event) & KRAIT_EVENT))	/* N=1 */
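
/*
 * Minimal debugging sketch (not part of the original driver): decode a
 * Krait/Scorpion hwc->config_base with the macros above. For the
 * documented example 0x12021 it prints region=2 code=0x02 group=1
 * venum=0 cpu=1.
 */
#ifdef DEBUG
static void krait_dump_event_encoding(u32 config_base)
{
	pr_info("config=0x%05x: region=%u code=0x%02x group=%u venum=%d cpu=%d\n",
		config_base, EVENT_REGION(config_base),
		EVENT_CODE(config_base), EVENT_GROUP(config_base),
		EVENT_VENUM(config_base), EVENT_CPU(config_base));
}
#endif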

static u32 krait_read_pmresrn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
		break;
	case 1:
		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
		break;
	case 2:
		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}

	return val;
}

static void krait_write_pmresrn(int n, u32 val)
{
	switch (n) {
	case 0:
		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
		break;
	case 1:
		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
		break;
	case 2:
		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}
}

static u32 venum_read_pmresr(void)
{
	u32 val;
	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
	return val;
}

static void venum_write_pmresr(u32 val)
{
	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
}

static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
{
	u32 venum_new_val;
	u32 fp_new_val;

	BUG_ON(preemptible());
	/* CPACR Enable CP10 and CP11 access */
	*venum_orig_val = get_copro_access();
	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
	set_copro_access(venum_new_val);

	/* Enable FPEXC */
	*fp_orig_val = fmrx(FPEXC);
	fp_new_val = *fp_orig_val | FPEXC_EN;
	fmxr(FPEXC, fp_new_val);
}

static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
{
	BUG_ON(preemptible());
	/* Restore FPEXC */
	fmxr(FPEXC, fp_orig_val);
	isb();
	/* Restore CPACR */
	set_copro_access(venum_orig_val);
}

static u32 krait_get_pmresrn_event(unsigned int region)
{
	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
					     KRAIT_PMRESR1_GROUP0,
					     KRAIT_PMRESR2_GROUP0 };
	return pmresrn_table[region];
}

static void krait_evt_setup(int idx, u32 config_base)
{
	u32 val;
	u32 mask;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	unsigned int code = EVENT_CODE(config_base);
	unsigned int group_shift;
	bool venum_event = EVENT_VENUM(config_base);

	group_shift = group * 8;
	mask = 0xff << group_shift;

	/* Configure evtsel for the region and group */
	if (venum_event)
		val = KRAIT_VPMRESR0_GROUP0;
	else
		val = krait_get_pmresrn_event(region);
	val += group;
	/* Mix in mode-exclusion bits */
	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
	armv7_pmnc_write_evtsel(idx, val);

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		krait_write_pmresrn(region, val);
	}
}

static u32 clear_pmresrn_group(u32 val, int group)
{
	u32 mask;
	int group_shift;

	group_shift = group * 8;
	mask = 0xff << group_shift;
	val &= ~mask;

	/* Don't clear enable bit if entire region isn't disabled */
	if (val & ~PMRESRn_EN)
		return val | PMRESRn_EN;

	return 0;
}

static void krait_clearpmu(u32 config_base)
{
	u32 val;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	bool venum_event = EVENT_VENUM(config_base);

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val = clear_pmresrn_group(val, group);
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val = clear_pmresrn_group(val, group);
		krait_write_pmresrn(region, val);
	}
}

static void krait_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Clear pmresr code (if destined for PMNx counters)
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_clearpmu(hwc->config_base);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We set the event for the cycle counter because we
	 * have the ability to perform event filtering.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_evt_setup(idx, hwc->config_base);
	else
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_reset(void *info)
{
	u32 vval, fval;
	struct arm_pmu *cpu_pmu = info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	armv7pmu_reset(info);

	/* Clear all pmresrs */
	krait_write_pmresrn(0, 0);
	krait_write_pmresrn(1, 0);
	krait_write_pmresrn(2, 0);

	venum_pre_pmresr(&vval, &fval);
	venum_write_pmresr(0);
	venum_post_pmresr(vval, fval);

	/* Reset PMxEVNCTCR to sane default */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
	}
}

static int krait_event_to_bit(struct perf_event *event, unsigned int region,
			      unsigned int group)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	if (hwc->config_base & VENUM_EVENT)
		bit = KRAIT_VPMRESR0_GROUP0;
	else
		bit = krait_get_pmresrn_event(region);
	bit -= krait_get_pmresrn_event(0);
	bit += group;
	/*
	 * Lower bits are reserved for use by the counters (see
	 * armv7pmu_get_event_idx() for more info)
	 */
	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;

	return bit;
}

/*
 * We check for column exclusion constraints here.
 * Two events can't use the same group within a pmresr register.
 */
static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				   struct perf_event *event)
{
	int idx;
	int bit = -1;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int code = EVENT_CODE(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool krait_event = EVENT_CPU(hwc->config_base);

	if (venum_event || krait_event) {
		/* Ignore invalid events */
		if (group > 3 || region > 2)
			return -EINVAL;
		if (venum_event && (code & 0xe0))
			return -EINVAL;

		bit = krait_event_to_bit(event, region, group);
		if (test_and_set_bit(bit, cpuc->used_mask))
			return -EAGAIN;
	}

	idx = armv7pmu_get_event_idx(cpuc, event);
	if (idx < 0 && bit >= 0)
		clear_bit(bit, cpuc->used_mask);

	return idx;
}

static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				      struct perf_event *event)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool krait_event = EVENT_CPU(hwc->config_base);

	if (venum_event || krait_event) {
		bit = krait_event_to_bit(event, region, group);
		clear_bit(bit, cpuc->used_mask);
	}
}

static int krait_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_krait";
	/* Some early versions of Krait don't support PC write events */
	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
				  "qcom,no-pc-write"))
		cpu_pmu->map_event = krait_map_event_no_branch;
	else
		cpu_pmu->map_event = krait_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->reset = krait_pmu_reset;
	cpu_pmu->enable = krait_pmu_enable_event;
	cpu_pmu->disable = krait_pmu_disable_event;
	cpu_pmu->get_event_idx = krait_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
	return armv7_probe_num_events(cpu_pmu);
}
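
/*
 * Illustrative device tree node that binds this PMU init function; the
 * optional "qcom,no-pc-write" property selects the no-branch event map
 * for early Krait revisions (interrupt specifier left as a placeholder):
 *
 *	pmu {
 *		compatible = "qcom,krait-pmu";
 *		interrupts = <...>;	// per-CPU PMU interrupt
 *		qcom,no-pc-write;
 *	};
 */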

/*
 * Scorpion Local Performance Monitor Register (LPMn)
 *
 *            31   30     24     16     8      0
 *            +--------------------------------+
 *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
 *            +--------------------------------+
 *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 * Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
 *
 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
 *  unit, etc.) while the event code (CC) corresponds to a particular class of
 *  events (interrupts for example). An event code is broken down into
 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 *  example).
 */
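
/*
 * The EVENT_* decoding macros defined for Krait apply unchanged here. For
 * instance (illustrative), 0x21051 would be a Venum VFP event with code
 * 0x05 in group 1; the region field is ignored for VLPM events.
 */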

static u32 scorpion_read_pmresrn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
		break;
	case 1:
		asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
		break;
	case 2:
		asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
		break;
	case 3:
		asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
		break;
	default:
		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
	}

	return val;
}

static void scorpion_write_pmresrn(int n, u32 val)
{
	switch (n) {
	case 0:
		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
		break;
	case 1:
		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
		break;
	case 2:
		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
		break;
	case 3:
		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
		break;
	default:
		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
	}
}

static u32 scorpion_get_pmresrn_event(unsigned int region)
{
	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
					     SCORPION_LPM1_GROUP0,
					     SCORPION_LPM2_GROUP0,
					     SCORPION_L2LPM_GROUP0 };
	return pmresrn_table[region];
}

static void scorpion_evt_setup(int idx, u32 config_base)
{
	u32 val;
	u32 mask;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	unsigned int code = EVENT_CODE(config_base);
	unsigned int group_shift;
	bool venum_event = EVENT_VENUM(config_base);

	group_shift = group * 8;
	mask = 0xff << group_shift;

	/* Configure evtsel for the region and group */
	if (venum_event)
		val = SCORPION_VLPM_GROUP0;
	else
		val = scorpion_get_pmresrn_event(region);
	val += group;
	/* Mix in mode-exclusion bits */
	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
	armv7_pmnc_write_evtsel(idx, val);

	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = scorpion_read_pmresrn(region);
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		scorpion_write_pmresrn(region, val);
	}
}

static void scorpion_clearpmu(u32 config_base)
{
	u32 val;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	bool venum_event = EVENT_VENUM(config_base);

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val = clear_pmresrn_group(val, group);
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = scorpion_read_pmresrn(region);
		val = clear_pmresrn_group(val, group);
		scorpion_write_pmresrn(region, val);
	}
}

static void scorpion_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Clear pmresr code (if destined for PMNx counters)
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		scorpion_clearpmu(hwc->config_base);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void scorpion_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't set the event for the cycle counter because we
	 * don't have the ability to perform event filtering.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		scorpion_evt_setup(idx, hwc->config_base);
	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void scorpion_pmu_reset(void *info)
{
	u32 vval, fval;
	struct arm_pmu *cpu_pmu = info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	armv7pmu_reset(info);

	/* Clear all pmresrs */
	scorpion_write_pmresrn(0, 0);
	scorpion_write_pmresrn(1, 0);
	scorpion_write_pmresrn(2, 0);
	scorpion_write_pmresrn(3, 0);

	venum_pre_pmresr(&vval, &fval);
	venum_write_pmresr(0);
	venum_post_pmresr(vval, fval);

	/* Reset PMxEVNCTCR to sane default */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
	}
}

static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
				 unsigned int group)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	if (hwc->config_base & VENUM_EVENT)
		bit = SCORPION_VLPM_GROUP0;
	else
		bit = scorpion_get_pmresrn_event(region);
	bit -= scorpion_get_pmresrn_event(0);
	bit += group;
	/*
	 * Lower bits are reserved for use by the counters (see
	 * armv7pmu_get_event_idx() for more info)
	 */
	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;

	return bit;
}

/*
 * We check for column exclusion constraints here.
 * Two events can't use the same group within a pmresr register.
 */
static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				      struct perf_event *event)
{
	int idx;
	int bit = -1;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool scorpion_event = EVENT_CPU(hwc->config_base);

	if (venum_event || scorpion_event) {
		/* Ignore invalid events */
		if (group > 3 || region > 3)
			return -EINVAL;

		bit = scorpion_event_to_bit(event, region, group);
		if (test_and_set_bit(bit, cpuc->used_mask))
			return -EAGAIN;
	}

	idx = armv7pmu_get_event_idx(cpuc, event);
	if (idx < 0 && bit >= 0)
		clear_bit(bit, cpuc->used_mask);

	return idx;
}

static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
					 struct perf_event *event)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool scorpion_event = EVENT_CPU(hwc->config_base);

	if (venum_event || scorpion_event) {
		bit = scorpion_event_to_bit(event, region, group);
		clear_bit(bit, cpuc->used_mask);
	}
}

static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_scorpion";
	cpu_pmu->map_event = scorpion_map_event;
	cpu_pmu->reset = scorpion_pmu_reset;
	cpu_pmu->enable = scorpion_pmu_enable_event;
	cpu_pmu->disable = scorpion_pmu_disable_event;
	cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
	return armv7_probe_num_events(cpu_pmu);
}

static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_scorpion_mp";
	cpu_pmu->map_event = scorpion_map_event;
	cpu_pmu->reset = scorpion_pmu_reset;
	cpu_pmu->enable = scorpion_pmu_enable_event;
	cpu_pmu->disable = scorpion_pmu_disable_event;
	cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
	return armv7_probe_num_events(cpu_pmu);
}

static const struct of_device_id armv7_pmu_of_device_ids[] = {
	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
	{},
};

static const struct pmu_probe_info armv7_pmu_probe_table[] = {
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
	{ /* sentinel value */ }
};

static int armv7_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
				    armv7_pmu_probe_table);
}

static struct platform_driver armv7_pmu_driver = {
	.driver		= {
		.name	= "armv7-pmu",
		.of_match_table = armv7_pmu_of_device_ids,
	},
	.probe		= armv7_pmu_device_probe,
};

builtin_platform_driver(armv7_pmu_driver);
#endif /* CONFIG_CPU_V7 */