   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
   4 *
   5 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
   6 * 2010 (c) MontaVista Software, LLC.
   7 *
   8 * Copied from ARMv6 code, with the low level code inspired
   9 *  by the ARMv7 Oprofile code.
  10 *
  11 * Cortex-A8 has up to 4 configurable performance counters and
  12 *  a single cycle counter.
  13 * Cortex-A9 has up to 31 configurable performance counters and
  14 *  a single cycle counter.
  15 *
   16 * All counters can be enabled/disabled and IRQ masked separately. The cycle
   17 *  counter and the event counters can be reset separately.
  18 */
  19
  20#ifdef CONFIG_CPU_V7
  21
  22#include <asm/cp15.h>
  23#include <asm/cputype.h>
  24#include <asm/irq_regs.h>
  25#include <asm/vfp.h>
  26#include "../vfp/vfpinstr.h"
  27
  28#include <linux/of.h>
  29#include <linux/perf/arm_pmu.h>
  30#include <linux/platform_device.h>
  31
  32/*
  33 * Common ARMv7 event types
  34 *
  35 * Note: An implementation may not be able to count all of these events
  36 * but the encodings are considered to be `reserved' in the case that
  37 * they are not available.
  38 */
  39#define ARMV7_PERFCTR_PMNC_SW_INCR			0x00
  40#define ARMV7_PERFCTR_L1_ICACHE_REFILL			0x01
  41#define ARMV7_PERFCTR_ITLB_REFILL			0x02
  42#define ARMV7_PERFCTR_L1_DCACHE_REFILL			0x03
  43#define ARMV7_PERFCTR_L1_DCACHE_ACCESS			0x04
  44#define ARMV7_PERFCTR_DTLB_REFILL			0x05
  45#define ARMV7_PERFCTR_MEM_READ				0x06
  46#define ARMV7_PERFCTR_MEM_WRITE				0x07
  47#define ARMV7_PERFCTR_INSTR_EXECUTED			0x08
  48#define ARMV7_PERFCTR_EXC_TAKEN				0x09
  49#define ARMV7_PERFCTR_EXC_EXECUTED			0x0A
  50#define ARMV7_PERFCTR_CID_WRITE				0x0B
  51
  52/*
  53 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
  54 * It counts:
  55 *  - all (taken) branch instructions,
  56 *  - instructions that explicitly write the PC,
  57 *  - exception generating instructions.
  58 */
  59#define ARMV7_PERFCTR_PC_WRITE				0x0C
  60#define ARMV7_PERFCTR_PC_IMM_BRANCH			0x0D
  61#define ARMV7_PERFCTR_PC_PROC_RETURN			0x0E
  62#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		0x0F
  63#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		0x10
  64#define ARMV7_PERFCTR_CLOCK_CYCLES			0x11
  65#define ARMV7_PERFCTR_PC_BRANCH_PRED			0x12
  66
  67/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
  68#define ARMV7_PERFCTR_MEM_ACCESS			0x13
  69#define ARMV7_PERFCTR_L1_ICACHE_ACCESS			0x14
  70#define ARMV7_PERFCTR_L1_DCACHE_WB			0x15
  71#define ARMV7_PERFCTR_L2_CACHE_ACCESS			0x16
  72#define ARMV7_PERFCTR_L2_CACHE_REFILL			0x17
  73#define ARMV7_PERFCTR_L2_CACHE_WB			0x18
  74#define ARMV7_PERFCTR_BUS_ACCESS			0x19
  75#define ARMV7_PERFCTR_MEM_ERROR				0x1A
  76#define ARMV7_PERFCTR_INSTR_SPEC			0x1B
  77#define ARMV7_PERFCTR_TTBR_WRITE			0x1C
  78#define ARMV7_PERFCTR_BUS_CYCLES			0x1D
  79
  80#define ARMV7_PERFCTR_CPU_CYCLES			0xFF
  81
  82/* ARMv7 Cortex-A8 specific event types */
  83#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		0x43
  84#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL		0x44
  85#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		0x50
  86#define ARMV7_A8_PERFCTR_STALL_ISIDE			0x56
  87
  88/* ARMv7 Cortex-A9 specific event types */
  89#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		0x68
  90#define ARMV7_A9_PERFCTR_STALL_ICACHE			0x60
  91#define ARMV7_A9_PERFCTR_STALL_DISPATCH			0x66
  92
  93/* ARMv7 Cortex-A5 specific event types */
  94#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		0xc2
  95#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		0xc3
  96
  97/* ARMv7 Cortex-A15 specific event types */
  98#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
  99#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
 100#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		0x42
 101#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	0x43
 102
 103#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		0x4C
 104#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		0x4D
 105
 106#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		0x50
 107#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
 108#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		0x52
 109#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		0x53
 110
 111#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC			0x76
 112
 113/* ARMv7 Cortex-A12 specific event types */
 114#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
 115#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
 116
 117#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		0x50
 118#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
 119
 120#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC			0x76
 121
 122#define ARMV7_A12_PERFCTR_PF_TLB_REFILL			0xe7
 123
 124/* ARMv7 Krait specific event types */
 125#define KRAIT_PMRESR0_GROUP0				0xcc
 126#define KRAIT_PMRESR1_GROUP0				0xd0
 127#define KRAIT_PMRESR2_GROUP0				0xd4
 128#define KRAIT_VPMRESR0_GROUP0				0xd8
 129
 130#define KRAIT_PERFCTR_L1_ICACHE_ACCESS			0x10011
 131#define KRAIT_PERFCTR_L1_ICACHE_MISS			0x10010
 132
 133#define KRAIT_PERFCTR_L1_ITLB_ACCESS			0x12222
 134#define KRAIT_PERFCTR_L1_DTLB_ACCESS			0x12210
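/*
 * These raw Krait event values use the 0xNRCCG encoding described in the
 * Krait PMRESRn comment further below: e.g. 0x10011 is a CPU event (N=1)
 * in region 0, code 0x01, group 1.
 */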
 135
 136/* ARMv7 Scorpion specific event types */
 137#define SCORPION_LPM0_GROUP0				0x4c
 138#define SCORPION_LPM1_GROUP0				0x50
 139#define SCORPION_LPM2_GROUP0				0x54
 140#define SCORPION_L2LPM_GROUP0				0x58
 141#define SCORPION_VLPM_GROUP0				0x5c
 142
 143#define SCORPION_ICACHE_ACCESS				0x10053
 144#define SCORPION_ICACHE_MISS				0x10052
 145
 146#define SCORPION_DTLB_ACCESS				0x12013
 147#define SCORPION_DTLB_MISS				0x12012
 148
 149#define SCORPION_ITLB_MISS				0x12021
 150
 151/*
 152 * Cortex-A8 HW events mapping
 153 *
 154 * The hardware events that we support. We do support cache operations but
  155 * we have Harvard caches and no way to combine instruction and data
 156 * accesses/misses in hardware.
 157 */
 158static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
 159	PERF_MAP_ALL_UNSUPPORTED,
 160	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 161	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 162	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 163	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 164	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 165	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 166	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
 167};
 168
 169static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 170					  [PERF_COUNT_HW_CACHE_OP_MAX]
 171					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 172	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 173
 174	/*
 175	 * The performance counters don't differentiate between read and write
 176	 * accesses/misses so this isn't strictly correct, but it's the best we
 177	 * can do. Writes and reads get combined.
 178	 */
 179	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 180	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 181	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 182	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 183
 184	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
 185	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 186
 187	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 188	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 189	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 190	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 191
 192	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 193	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 194
 195	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 196	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 197
 198	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 199	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 200	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 201	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 202};
 203
 204/*
 205 * Cortex-A9 HW events mapping
 206 */
 207static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 208	PERF_MAP_ALL_UNSUPPORTED,
 209	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 210	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
 211	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 212	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 213	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 214	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 215	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
 216	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
 217};
 218
 219static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 220					  [PERF_COUNT_HW_CACHE_OP_MAX]
 221					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 222	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 223
 224	/*
 225	 * The performance counters don't differentiate between read and write
 226	 * accesses/misses so this isn't strictly correct, but it's the best we
 227	 * can do. Writes and reads get combined.
 228	 */
 229	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 230	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 231	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 232	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 233
 234	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 235
 236	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 237	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 238
 239	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 240	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 241
 242	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 243	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 244	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 245	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 246};
 247
 248/*
 249 * Cortex-A5 HW events mapping
 250 */
 251static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
 252	PERF_MAP_ALL_UNSUPPORTED,
 253	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 254	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 255	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 256	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 257	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 258	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 259};
 260
 261static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 262					[PERF_COUNT_HW_CACHE_OP_MAX]
 263					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 264	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 265
 266	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 267	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 268	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 269	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 270	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 271	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 272
 273	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 274	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 275	/*
 276	 * The prefetch counters don't differentiate between the I side and the
 277	 * D side.
 278	 */
 279	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 280	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 281
 282	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 283	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 284
 285	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 286	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 287
 288	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 289	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 290	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 291	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 292};
 293
 294/*
 295 * Cortex-A15 HW events mapping
 296 */
 297static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
 298	PERF_MAP_ALL_UNSUPPORTED,
 299	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 300	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 301	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 302	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 303	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
 304	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 305	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 306};
 307
 308static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 309					[PERF_COUNT_HW_CACHE_OP_MAX]
 310					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 311	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 312
 313	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
 314	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
 315	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 316	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
 317
 318	/*
 319	 * Not all performance counters differentiate between read and write
 320	 * accesses/misses so we're not always strictly correct, but it's the
 321	 * best we can do. Writes and reads get combined in these cases.
 322	 */
 323	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 324	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 325
 326	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
 327	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
 328	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
 329	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
 330
 331	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
 332	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
 333
 334	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 335	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 336
 337	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 338	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 339	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 340	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 341};
 342
 343/*
 344 * Cortex-A7 HW events mapping
 345 */
 346static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
 347	PERF_MAP_ALL_UNSUPPORTED,
 348	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 349	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 350	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 351	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 352	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 353	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 354	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 355};
 356
 357static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 358					[PERF_COUNT_HW_CACHE_OP_MAX]
 359					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 360	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 361
 362	/*
 363	 * The performance counters don't differentiate between read and write
 364	 * accesses/misses so this isn't strictly correct, but it's the best we
 365	 * can do. Writes and reads get combined.
 366	 */
 367	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 368	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 369	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 370	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 371
 372	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 373	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 374
 375	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 376	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 377	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 378	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 379
 380	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 381	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 382
 383	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 384	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 385
 386	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 387	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 388	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 389	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 390};
 391
 392/*
 393 * Cortex-A12 HW events mapping
 394 */
 395static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
 396	PERF_MAP_ALL_UNSUPPORTED,
 397	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 398	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 399	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 400	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 401	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
 402	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 403	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 404};
 405
 406static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 407					[PERF_COUNT_HW_CACHE_OP_MAX]
 408					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 409	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 410
 411	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
 412	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 413	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 414	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 415
 416	/*
 417	 * Not all performance counters differentiate between read and write
 418	 * accesses/misses so we're not always strictly correct, but it's the
 419	 * best we can do. Writes and reads get combined in these cases.
 420	 */
 421	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 422	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 423
 424	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
 425	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 426	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
 427	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 428
 429	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 430	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 431	[C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,
 432
 433	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 434	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 435
 436	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 437	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 438	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 439	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 440};
 441
 442/*
 443 * Krait HW events mapping
 444 */
 445static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
 446	PERF_MAP_ALL_UNSUPPORTED,
 447	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 448	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 449	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 450	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 451	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 452};
 453
 454static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
 455	PERF_MAP_ALL_UNSUPPORTED,
 456	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 457	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 458	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 459	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 460};
 461
 462static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 463					  [PERF_COUNT_HW_CACHE_OP_MAX]
 464					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 465	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 466
 467	/*
 468	 * The performance counters don't differentiate between read and write
 469	 * accesses/misses so this isn't strictly correct, but it's the best we
 470	 * can do. Writes and reads get combined.
 471	 */
 472	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 473	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 474	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 475	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 476
 477	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
 478	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,
 479
 480	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
 481	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
 482
 483	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
 484	[C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
 485
 486	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 487	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 488	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 489	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 490};
 491
 492/*
 493 * Scorpion HW events mapping
 494 */
 495static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
 496	PERF_MAP_ALL_UNSUPPORTED,
 497	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 498	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 499	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 500	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 501	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 502};
 503
 504static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 505					    [PERF_COUNT_HW_CACHE_OP_MAX]
 506					    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 507	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 508	/*
 509	 * The performance counters don't differentiate between read and write
 510	 * accesses/misses so this isn't strictly correct, but it's the best we
 511	 * can do. Writes and reads get combined.
 512	 */
 513	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 514	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 515	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 516	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 517	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
 518	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
 519	/*
  520	 * Only ITLB misses and DTLB refills are supported.  If users want the
  521	 * DTLB refill misses, a raw counter must be used.
 522	 */
 523	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
 524	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
 525	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
 526	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
 527	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
 528	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
 529	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 530	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 531	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 532	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 533};
 534
 535PMU_FORMAT_ATTR(event, "config:0-7");
 536
 537static struct attribute *armv7_pmu_format_attrs[] = {
 538	&format_attr_event.attr,
 539	NULL,
 540};
 541
 542static struct attribute_group armv7_pmu_format_attr_group = {
 543	.name = "format",
 544	.attrs = armv7_pmu_format_attrs,
 545};
 546
 547#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
 548#define ARMV7_EVENT_ATTR(name, config) \
 549	PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
 550			      "event=" ARMV7_EVENT_ATTR_RESOLVE(config))
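/*
 * The extra ARMV7_EVENT_ATTR_RESOLVE() level forces the config argument to
 * be macro-expanded to its numeric value before stringification, so e.g.
 * ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR) produces the sysfs
 * string "event=0x00" rather than the symbolic name.
 */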
 551
 552ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
 553ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
 554ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
 555ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
 556ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
 557ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
 558ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
 559ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
 560ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
 561ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
 562ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
 563ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
 564ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
 565ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
 566ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
 567ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
 568ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
 569ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
 570ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);
 571
 572static struct attribute *armv7_pmuv1_event_attrs[] = {
 573	&armv7_event_attr_sw_incr.attr.attr,
 574	&armv7_event_attr_l1i_cache_refill.attr.attr,
 575	&armv7_event_attr_l1i_tlb_refill.attr.attr,
 576	&armv7_event_attr_l1d_cache_refill.attr.attr,
 577	&armv7_event_attr_l1d_cache.attr.attr,
 578	&armv7_event_attr_l1d_tlb_refill.attr.attr,
 579	&armv7_event_attr_ld_retired.attr.attr,
 580	&armv7_event_attr_st_retired.attr.attr,
 581	&armv7_event_attr_inst_retired.attr.attr,
 582	&armv7_event_attr_exc_taken.attr.attr,
 583	&armv7_event_attr_exc_return.attr.attr,
 584	&armv7_event_attr_cid_write_retired.attr.attr,
 585	&armv7_event_attr_pc_write_retired.attr.attr,
 586	&armv7_event_attr_br_immed_retired.attr.attr,
 587	&armv7_event_attr_br_return_retired.attr.attr,
 588	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
 589	&armv7_event_attr_br_mis_pred.attr.attr,
 590	&armv7_event_attr_cpu_cycles.attr.attr,
 591	&armv7_event_attr_br_pred.attr.attr,
 592	NULL,
 593};
 594
 595static struct attribute_group armv7_pmuv1_events_attr_group = {
 596	.name = "events",
 597	.attrs = armv7_pmuv1_event_attrs,
 598};
 599
 600ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
 601ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
 602ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
 603ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
 604ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
 605ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
 606ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
 607ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
 608ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
 609ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
 610ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);
 611
 612static struct attribute *armv7_pmuv2_event_attrs[] = {
 613	&armv7_event_attr_sw_incr.attr.attr,
 614	&armv7_event_attr_l1i_cache_refill.attr.attr,
 615	&armv7_event_attr_l1i_tlb_refill.attr.attr,
 616	&armv7_event_attr_l1d_cache_refill.attr.attr,
 617	&armv7_event_attr_l1d_cache.attr.attr,
 618	&armv7_event_attr_l1d_tlb_refill.attr.attr,
 619	&armv7_event_attr_ld_retired.attr.attr,
 620	&armv7_event_attr_st_retired.attr.attr,
 621	&armv7_event_attr_inst_retired.attr.attr,
 622	&armv7_event_attr_exc_taken.attr.attr,
 623	&armv7_event_attr_exc_return.attr.attr,
 624	&armv7_event_attr_cid_write_retired.attr.attr,
 625	&armv7_event_attr_pc_write_retired.attr.attr,
 626	&armv7_event_attr_br_immed_retired.attr.attr,
 627	&armv7_event_attr_br_return_retired.attr.attr,
 628	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
 629	&armv7_event_attr_br_mis_pred.attr.attr,
 630	&armv7_event_attr_cpu_cycles.attr.attr,
 631	&armv7_event_attr_br_pred.attr.attr,
 632	&armv7_event_attr_mem_access.attr.attr,
 633	&armv7_event_attr_l1i_cache.attr.attr,
 634	&armv7_event_attr_l1d_cache_wb.attr.attr,
 635	&armv7_event_attr_l2d_cache.attr.attr,
 636	&armv7_event_attr_l2d_cache_refill.attr.attr,
 637	&armv7_event_attr_l2d_cache_wb.attr.attr,
 638	&armv7_event_attr_bus_access.attr.attr,
 639	&armv7_event_attr_memory_error.attr.attr,
 640	&armv7_event_attr_inst_spec.attr.attr,
 641	&armv7_event_attr_ttbr_write_retired.attr.attr,
 642	&armv7_event_attr_bus_cycles.attr.attr,
 643	NULL,
 644};
 645
 646static struct attribute_group armv7_pmuv2_events_attr_group = {
 647	.name = "events",
 648	.attrs = armv7_pmuv2_event_attrs,
 649};
 650
 651/*
 652 * Perf Events' indices
 653 */
 654#define	ARMV7_IDX_CYCLE_COUNTER	0
 655#define	ARMV7_IDX_COUNTER0	1
 656#define	ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
 657	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 658
 659#define	ARMV7_MAX_COUNTERS	32
 660#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
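/*
 * Example: a PMU with four event counters reports num_events == 5 (the
 * cycle counter at idx 0 plus event counters at idx 1..4), so
 * ARMV7_IDX_COUNTER_LAST() evaluates to 4.
 */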
 661
 662/*
 663 * ARMv7 low level PMNC access
 664 */
 665
 666/*
 667 * Perf Event to low level counters mapping
 668 */
 669#define	ARMV7_IDX_TO_COUNTER(x)	\
 670	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
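/*
 * E.g. ARMV7_IDX_COUNTER0 (1) maps to hardware counter 0, while the cycle
 * counter index (0) wraps to 31, matching the C bit position in the enable,
 * interrupt-enable and overflow-flag registers.
 */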
 671
 672/*
 673 * Per-CPU PMNC: config reg
 674 */
 675#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
 676#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
 677#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
 678#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
 679#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
 680#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
 681#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
 682#define	ARMV7_PMNC_N_MASK	0x1f
 683#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
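/*
 * The N field (PMNC[15:11]) holds the number of event counters implemented;
 * armv7_read_num_pmnc_events() below reads it and adds one for the cycle
 * counter.
 */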
 684
 685/*
 686 * FLAG: counters overflow flag status reg
 687 */
 688#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 689#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 690
 691/*
 692 * PMXEVTYPER: Event selection reg
 693 */
 694#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
 695#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
 696
 697/*
 698 * Event filters for PMUv2
 699 */
 700#define	ARMV7_EXCLUDE_PL1	BIT(31)
 701#define	ARMV7_EXCLUDE_USER	BIT(30)
 702#define	ARMV7_INCLUDE_HYP	BIT(27)
 703
 704/*
 705 * Secure debug enable reg
 706 */
 707#define ARMV7_SDER_SUNIDEN	BIT(1) /* Permit non-invasive debug */
 708
 709static inline u32 armv7_pmnc_read(void)
 710{
 711	u32 val;
 712	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 713	return val;
 714}
 715
 716static inline void armv7_pmnc_write(u32 val)
 717{
 718	val &= ARMV7_PMNC_MASK;
 719	isb();
 720	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 721}
 722
 723static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 724{
 725	return pmnc & ARMV7_OVERFLOWED_MASK;
 726}
 727
 728static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
 729{
 730	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
 731		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
 732}
 733
 734static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 735{
 736	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
 737}
 738
 739static inline void armv7_pmnc_select_counter(int idx)
 740{
 741	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 742	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 743	isb();
 744}
 745
 746static inline u64 armv7pmu_read_counter(struct perf_event *event)
 747{
 748	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 749	struct hw_perf_event *hwc = &event->hw;
 750	int idx = hwc->idx;
 751	u32 value = 0;
 752
 753	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 754		pr_err("CPU%u reading wrong counter %d\n",
 755			smp_processor_id(), idx);
 756	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
 757		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
 758	} else {
 759		armv7_pmnc_select_counter(idx);
 760		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
 761	}
 762
 763	return value;
 764}
 765
 766static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
 767{
 768	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 769	struct hw_perf_event *hwc = &event->hw;
 770	int idx = hwc->idx;
 771
 772	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 773		pr_err("CPU%u writing wrong counter %d\n",
 774			smp_processor_id(), idx);
 775	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
 776		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
 777	} else {
 778		armv7_pmnc_select_counter(idx);
 779		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
 780	}
 781}
 782
 783static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 784{
 785	armv7_pmnc_select_counter(idx);
 786	val &= ARMV7_EVTYPE_MASK;
 787	asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 788}
 789
 790static inline void armv7_pmnc_enable_counter(int idx)
 791{
 792	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 793	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 794}
 795
 796static inline void armv7_pmnc_disable_counter(int idx)
 797{
 798	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 799	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 800}
 801
 802static inline void armv7_pmnc_enable_intens(int idx)
 803{
 804	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 805	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 806}
 807
 808static inline void armv7_pmnc_disable_intens(int idx)
 809{
 810	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 811	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 812	isb();
 813	/* Clear the overflow flag in case an interrupt is pending. */
 814	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
 815	isb();
 816}
 817
 818static inline u32 armv7_pmnc_getreset_flags(void)
 819{
 820	u32 val;
 821
 822	/* Read */
 823	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 824
 825	/* Write to clear flags */
 826	val &= ARMV7_FLAG_MASK;
 827	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
 828
 829	return val;
 830}
 831
 832#ifdef DEBUG
 833static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
 834{
 835	u32 val;
 836	unsigned int cnt;
 837
 838	pr_info("PMNC registers dump:\n");
 839
 840	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
 841	pr_info("PMNC  =0x%08x\n", val);
 842
 843	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
 844	pr_info("CNTENS=0x%08x\n", val);
 845
 846	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
 847	pr_info("INTENS=0x%08x\n", val);
 848
 849	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 850	pr_info("FLAGS =0x%08x\n", val);
 851
 852	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
 853	pr_info("SELECT=0x%08x\n", val);
 854
 855	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 856	pr_info("CCNT  =0x%08x\n", val);
 857
 858	for (cnt = ARMV7_IDX_COUNTER0;
 859			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
 860		armv7_pmnc_select_counter(cnt);
 861		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 862		pr_info("CNT[%d] count =0x%08x\n",
 863			ARMV7_IDX_TO_COUNTER(cnt), val);
 864		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 865		pr_info("CNT[%d] evtsel=0x%08x\n",
 866			ARMV7_IDX_TO_COUNTER(cnt), val);
 867	}
 868}
 869#endif
 870
 871static void armv7pmu_enable_event(struct perf_event *event)
 872{
 873	unsigned long flags;
 874	struct hw_perf_event *hwc = &event->hw;
 875	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 876	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 877	int idx = hwc->idx;
 878
 879	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 880		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
 881			smp_processor_id(), idx);
 882		return;
 883	}
 884
 885	/*
 886	 * Enable counter and interrupt, and set the counter to count
 887	 * the event that we're interested in.
 888	 */
 889	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 890
 891	/*
 892	 * Disable counter
 893	 */
 894	armv7_pmnc_disable_counter(idx);
 895
 896	/*
 897	 * Set event (if destined for PMNx counters)
 898	 * We only need to set the event for the cycle counter if we
 899	 * have the ability to perform event filtering.
 900	 */
 901	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
 902		armv7_pmnc_write_evtsel(idx, hwc->config_base);
 903
 904	/*
 905	 * Enable interrupt for this counter
 906	 */
 907	armv7_pmnc_enable_intens(idx);
 908
 909	/*
 910	 * Enable counter
 911	 */
 912	armv7_pmnc_enable_counter(idx);
 913
 914	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 915}
 916
 917static void armv7pmu_disable_event(struct perf_event *event)
 918{
 919	unsigned long flags;
 920	struct hw_perf_event *hwc = &event->hw;
 921	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 922	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 923	int idx = hwc->idx;
 924
 925	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 926		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
 927			smp_processor_id(), idx);
 928		return;
 929	}
 930
 931	/*
 932	 * Disable counter and interrupt
 933	 */
 934	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 935
 936	/*
 937	 * Disable counter
 938	 */
 939	armv7_pmnc_disable_counter(idx);
 940
 941	/*
 942	 * Disable interrupt for this counter
 943	 */
 944	armv7_pmnc_disable_intens(idx);
 945
 946	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 947}
 948
 949static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
 950{
 951	u32 pmnc;
 952	struct perf_sample_data data;
 953	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
 954	struct pt_regs *regs;
 955	int idx;
 956
 957	/*
 958	 * Get and reset the IRQ flags
 959	 */
 960	pmnc = armv7_pmnc_getreset_flags();
 961
 962	/*
 963	 * Did an overflow occur?
 964	 */
 965	if (!armv7_pmnc_has_overflowed(pmnc))
 966		return IRQ_NONE;
 967
 968	/*
 969	 * Handle the counter(s) overflow(s)
 970	 */
 971	regs = get_irq_regs();
 972
 973	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 974		struct perf_event *event = cpuc->events[idx];
 975		struct hw_perf_event *hwc;
 976
 977		/* Ignore if we don't have an event. */
 978		if (!event)
 979			continue;
 980
 981		/*
 982		 * We have a single interrupt for all counters. Check that
 983		 * each counter has overflowed before we process it.
 984		 */
 985		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
 986			continue;
 987
 988		hwc = &event->hw;
 989		armpmu_event_update(event);
 990		perf_sample_data_init(&data, 0, hwc->last_period);
 991		if (!armpmu_event_set_period(event))
 992			continue;
 993
 994		if (perf_event_overflow(event, &data, regs))
 995			cpu_pmu->disable(event);
 996	}
 997
 998	/*
 999	 * Handle the pending perf events.
1000	 *
1001	 * Note: this call *must* be run with interrupts disabled. For
1002	 * platforms that can have the PMU interrupts raised as an NMI, this
1003	 * will not work.
1004	 */
1005	irq_work_run();
1006
1007	return IRQ_HANDLED;
1008}
1009
1010static void armv7pmu_start(struct arm_pmu *cpu_pmu)
1011{
1012	unsigned long flags;
1013	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1014
1015	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1016	/* Enable all counters */
1017	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1018	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1019}
1020
1021static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
1022{
1023	unsigned long flags;
1024	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1025
1026	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1027	/* Disable all counters */
1028	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1029	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1030}
1031
1032static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1033				  struct perf_event *event)
1034{
1035	int idx;
1036	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1037	struct hw_perf_event *hwc = &event->hw;
1038	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
1039
 1040	/* Always place a cycle-count event into the cycle counter. */
1041	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1042		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1043			return -EAGAIN;
1044
1045		return ARMV7_IDX_CYCLE_COUNTER;
1046	}
1047
1048	/*
 1049	 * For anything other than a cycle counter, try to use
 1050	 * the event counters
1051	 */
1052	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1053		if (!test_and_set_bit(idx, cpuc->used_mask))
1054			return idx;
1055	}
1056
1057	/* The counters are all in use. */
1058	return -EAGAIN;
1059}
1060
1061static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1062				     struct perf_event *event)
1063{
1064	clear_bit(event->hw.idx, cpuc->used_mask);
1065}
1066
1067/*
1068 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1069 */
1070static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1071				     struct perf_event_attr *attr)
1072{
1073	unsigned long config_base = 0;
1074
1075	if (attr->exclude_idle)
 1076		return -EPERM;
 1077	if (attr->exclude_user)
1078		config_base |= ARMV7_EXCLUDE_USER;
1079	if (attr->exclude_kernel)
1080		config_base |= ARMV7_EXCLUDE_PL1;
1081	if (!attr->exclude_hv)
1082		config_base |= ARMV7_INCLUDE_HYP;
1083
1084	/*
1085	 * Install the filter into config_base as this is used to
1086	 * construct the event type.
1087	 */
1088	event->config_base = config_base;
1089
1090	return 0;
1091}
1092
1093static void armv7pmu_reset(void *info)
1094{
1095	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
1096	u32 idx, nb_cnt = cpu_pmu->num_events, val;
1097
1098	if (cpu_pmu->secure_access) {
1099		asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
1100		val |= ARMV7_SDER_SUNIDEN;
1101		asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
1102	}
1103
1104	/* The counter and interrupt enable registers are unknown at reset. */
1105	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1106		armv7_pmnc_disable_counter(idx);
1107		armv7_pmnc_disable_intens(idx);
1108	}
1109
1110	/* Initialize & Reset PMNC: C and P bits */
1111	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1112}
1113
1114static int armv7_a8_map_event(struct perf_event *event)
1115{
1116	return armpmu_map_event(event, &armv7_a8_perf_map,
1117				&armv7_a8_perf_cache_map, 0xFF);
1118}
1119
1120static int armv7_a9_map_event(struct perf_event *event)
1121{
1122	return armpmu_map_event(event, &armv7_a9_perf_map,
1123				&armv7_a9_perf_cache_map, 0xFF);
1124}
1125
1126static int armv7_a5_map_event(struct perf_event *event)
1127{
1128	return armpmu_map_event(event, &armv7_a5_perf_map,
1129				&armv7_a5_perf_cache_map, 0xFF);
1130}
1131
1132static int armv7_a15_map_event(struct perf_event *event)
1133{
1134	return armpmu_map_event(event, &armv7_a15_perf_map,
1135				&armv7_a15_perf_cache_map, 0xFF);
1136}
1137
1138static int armv7_a7_map_event(struct perf_event *event)
1139{
1140	return armpmu_map_event(event, &armv7_a7_perf_map,
1141				&armv7_a7_perf_cache_map, 0xFF);
1142}
1143
1144static int armv7_a12_map_event(struct perf_event *event)
1145{
1146	return armpmu_map_event(event, &armv7_a12_perf_map,
1147				&armv7_a12_perf_cache_map, 0xFF);
1148}
1149
1150static int krait_map_event(struct perf_event *event)
1151{
1152	return armpmu_map_event(event, &krait_perf_map,
1153				&krait_perf_cache_map, 0xFFFFF);
1154}
1155
1156static int krait_map_event_no_branch(struct perf_event *event)
1157{
1158	return armpmu_map_event(event, &krait_perf_map_no_branch,
1159				&krait_perf_cache_map, 0xFFFFF);
1160}
1161
1162static int scorpion_map_event(struct perf_event *event)
1163{
1164	return armpmu_map_event(event, &scorpion_perf_map,
1165				&scorpion_perf_cache_map, 0xFFFFF);
1166}
1167
1168static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1169{
1170	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
1171	cpu_pmu->enable		= armv7pmu_enable_event;
1172	cpu_pmu->disable	= armv7pmu_disable_event;
1173	cpu_pmu->read_counter	= armv7pmu_read_counter;
1174	cpu_pmu->write_counter	= armv7pmu_write_counter;
1175	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
1176	cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
1177	cpu_pmu->start		= armv7pmu_start;
1178	cpu_pmu->stop		= armv7pmu_stop;
1179	cpu_pmu->reset		= armv7pmu_reset;
1180};
1181
1182static void armv7_read_num_pmnc_events(void *info)
1183{
1184	int *nb_cnt = info;
1185
 1186	/* Read the number of CNTx counters supported from PMNC */
1187	*nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1188
1189	/* Add the CPU cycles counter */
1190	*nb_cnt += 1;
1191}
1192
1193static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
1194{
1195	return smp_call_function_any(&arm_pmu->supported_cpus,
1196				     armv7_read_num_pmnc_events,
1197				     &arm_pmu->num_events, 1);
1198}
1199
1200static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1201{
1202	armv7pmu_init(cpu_pmu);
1203	cpu_pmu->name		= "armv7_cortex_a8";
1204	cpu_pmu->map_event	= armv7_a8_map_event;
1205	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1206		&armv7_pmuv1_events_attr_group;
1207	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1208		&armv7_pmu_format_attr_group;
1209	return armv7_probe_num_events(cpu_pmu);
1210}
1211
1212static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1213{
1214	armv7pmu_init(cpu_pmu);
1215	cpu_pmu->name		= "armv7_cortex_a9";
1216	cpu_pmu->map_event	= armv7_a9_map_event;
1217	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1218		&armv7_pmuv1_events_attr_group;
1219	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1220		&armv7_pmu_format_attr_group;
1221	return armv7_probe_num_events(cpu_pmu);
1222}
1223
1224static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1225{
1226	armv7pmu_init(cpu_pmu);
1227	cpu_pmu->name		= "armv7_cortex_a5";
1228	cpu_pmu->map_event	= armv7_a5_map_event;
1229	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1230		&armv7_pmuv1_events_attr_group;
1231	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1232		&armv7_pmu_format_attr_group;
1233	return armv7_probe_num_events(cpu_pmu);
1234}
1235
1236static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1237{
1238	armv7pmu_init(cpu_pmu);
1239	cpu_pmu->name		= "armv7_cortex_a15";
1240	cpu_pmu->map_event	= armv7_a15_map_event;
1241	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1242	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1243		&armv7_pmuv2_events_attr_group;
1244	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1245		&armv7_pmu_format_attr_group;
1246	return armv7_probe_num_events(cpu_pmu);
1247}
1248
1249static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1250{
1251	armv7pmu_init(cpu_pmu);
1252	cpu_pmu->name		= "armv7_cortex_a7";
1253	cpu_pmu->map_event	= armv7_a7_map_event;
1254	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1255	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1256		&armv7_pmuv2_events_attr_group;
1257	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1258		&armv7_pmu_format_attr_group;
1259	return armv7_probe_num_events(cpu_pmu);
1260}
1261
1262static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1263{
1264	armv7pmu_init(cpu_pmu);
1265	cpu_pmu->name		= "armv7_cortex_a12";
1266	cpu_pmu->map_event	= armv7_a12_map_event;
1267	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1268	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1269		&armv7_pmuv2_events_attr_group;
1270	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1271		&armv7_pmu_format_attr_group;
1272	return armv7_probe_num_events(cpu_pmu);
1273}
1274
1275static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
1276{
1277	int ret = armv7_a12_pmu_init(cpu_pmu);
1278	cpu_pmu->name = "armv7_cortex_a17";
1279	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1280		&armv7_pmuv2_events_attr_group;
1281	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1282		&armv7_pmu_format_attr_group;
1283	return ret;
1284}
1285
1286/*
1287 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1288 *
1289 *            31   30     24     16     8      0
1290 *            +--------------------------------+
1291 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1292 *            +--------------------------------+
1293 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1294 *            +--------------------------------+
1295 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1296 *            +--------------------------------+
1297 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1298 *            +--------------------------------+
1299 *              EN | G=3  | G=2  | G=1  | G=0
1300 *
1301 *  Event Encoding:
1302 *
1303 *      hwc->config_base = 0xNRCCG
1304 *
1305 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1306 *      R  = region register
1307 *      CC = class of events the group G is choosing from
1308 *      G  = group or particular event
1309 *
1310 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1311 *
1312 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1313 *  unit, etc.) while the event code (CC) corresponds to a particular class of
1314 *  events (interrupts for example). An event code is broken down into
1315 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1316 *  example).
1317 */
1318
1319#define KRAIT_EVENT		(1 << 16)
1320#define VENUM_EVENT		(2 << 16)
1321#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
1322#define PMRESRn_EN		BIT(31)
1323
1324#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
1325#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
1326#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
1327#define EVENT_VENUM(event)	(!!(event & VENUM_EVENT))	/* N=2 */
1328#define EVENT_CPU(event)	(!!(event & KRAIT_EVENT))	/* N=1 */
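/*
 * Worked example for the 0x12021 event mentioned above: EVENT_REGION() = 2,
 * EVENT_CODE() = 0x02, EVENT_GROUP() = 1, and EVENT_CPU() is true, i.e. a
 * Krait CPU event in PMRESR2's group 1 with code 2.
 */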
1329
1330static u32 krait_read_pmresrn(int n)
1331{
1332	u32 val;
1333
1334	switch (n) {
1335	case 0:
1336		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
1337		break;
1338	case 1:
1339		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
1340		break;
1341	case 2:
1342		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
1343		break;
1344	default:
1345		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1346	}
1347
1348	return val;
1349}
1350
1351static void krait_write_pmresrn(int n, u32 val)
1352{
1353	switch (n) {
1354	case 0:
1355		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
1356		break;
1357	case 1:
1358		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
1359		break;
1360	case 2:
1361		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
1362		break;
1363	default:
1364		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1365	}
1366}
1367
1368static u32 venum_read_pmresr(void)
1369{
1370	u32 val;
1371	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1372	return val;
1373}
1374
1375static void venum_write_pmresr(u32 val)
1376{
1377	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1378}
1379
1380static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
1381{
1382	u32 venum_new_val;
1383	u32 fp_new_val;
1384
1385	BUG_ON(preemptible());
1386	/* CPACR Enable CP10 and CP11 access */
1387	*venum_orig_val = get_copro_access();
1388	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
1389	set_copro_access(venum_new_val);
1390
1391	/* Enable FPEXC */
1392	*fp_orig_val = fmrx(FPEXC);
1393	fp_new_val = *fp_orig_val | FPEXC_EN;
1394	fmxr(FPEXC, fp_new_val);
1395}
1396
1397static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
1398{
1399	BUG_ON(preemptible());
1400	/* Restore FPEXC */
1401	fmxr(FPEXC, fp_orig_val);
1402	isb();
1403	/* Restore CPACR */
1404	set_copro_access(venum_orig_val);
1405}
1406
1407static u32 krait_get_pmresrn_event(unsigned int region)
1408{
1409	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
1410					     KRAIT_PMRESR1_GROUP0,
1411					     KRAIT_PMRESR2_GROUP0 };
1412	return pmresrn_table[region];
1413}
1414
1415static void krait_evt_setup(int idx, u32 config_base)
1416{
1417	u32 val;
1418	u32 mask;
1419	u32 vval, fval;
1420	unsigned int region = EVENT_REGION(config_base);
1421	unsigned int group = EVENT_GROUP(config_base);
1422	unsigned int code = EVENT_CODE(config_base);
1423	unsigned int group_shift;
1424	bool venum_event = EVENT_VENUM(config_base);
1425
1426	group_shift = group * 8;
1427	mask = 0xff << group_shift;
1428
1429	/* Configure evtsel for the region and group */
1430	if (venum_event)
1431		val = KRAIT_VPMRESR0_GROUP0;
1432	else
1433		val = krait_get_pmresrn_event(region);
1434	val += group;
1435	/* Mix in mode-exclusion bits */
1436	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1437	armv7_pmnc_write_evtsel(idx, val);
1438
1439	if (venum_event) {
1440		venum_pre_pmresr(&vval, &fval);
1441		val = venum_read_pmresr();
1442		val &= ~mask;
1443		val |= code << group_shift;
1444		val |= PMRESRn_EN;
1445		venum_write_pmresr(val);
1446		venum_post_pmresr(vval, fval);
1447	} else {
1448		val = krait_read_pmresrn(region);
1449		val &= ~mask;
1450		val |= code << group_shift;
1451		val |= PMRESRn_EN;
1452		krait_write_pmresrn(region, val);
1453	}
1454}
1455
1456static u32 clear_pmresrn_group(u32 val, int group)
1457{
1458	u32 mask;
1459	int group_shift;
1460
1461	group_shift = group * 8;
1462	mask = 0xff << group_shift;
1463	val &= ~mask;
1464
1465	/* Don't clear enable bit if entire region isn't disabled */
1466	if (val & ~PMRESRn_EN)
1467		return val |= PMRESRn_EN;
1468
1469	return 0;
1470}
1471
1472static void krait_clearpmu(u32 config_base)
1473{
1474	u32 val;
1475	u32 vval, fval;
1476	unsigned int region = EVENT_REGION(config_base);
1477	unsigned int group = EVENT_GROUP(config_base);
1478	bool venum_event = EVENT_VENUM(config_base);
1479
1480	if (venum_event) {
1481		venum_pre_pmresr(&vval, &fval);
1482		val = venum_read_pmresr();
1483		val = clear_pmresrn_group(val, group);
1484		venum_write_pmresr(val);
1485		venum_post_pmresr(vval, fval);
1486	} else {
1487		val = krait_read_pmresrn(region);
1488		val = clear_pmresrn_group(val, group);
1489		krait_write_pmresrn(region, val);
1490	}
1491}
1492
1493static void krait_pmu_disable_event(struct perf_event *event)
1494{
1495	unsigned long flags;
1496	struct hw_perf_event *hwc = &event->hw;
1497	int idx = hwc->idx;
1498	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1499	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1500
1501	/* Disable counter and interrupt */
1502	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1503
1504	/* Disable counter */
1505	armv7_pmnc_disable_counter(idx);
1506
1507	/*
1508	 * Clear pmresr code (if destined for PMNx counters)
1509	 */
1510	if (hwc->config_base & KRAIT_EVENT_MASK)
1511		krait_clearpmu(hwc->config_base);
1512
1513	/* Disable interrupt for this counter */
1514	armv7_pmnc_disable_intens(idx);
1515
1516	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1517}
1518
1519static void krait_pmu_enable_event(struct perf_event *event)
1520{
1521	unsigned long flags;
1522	struct hw_perf_event *hwc = &event->hw;
1523	int idx = hwc->idx;
1524	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1525	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1526
1527	/*
1528	 * Enable counter and interrupt, and set the counter to count
1529	 * the event that we're interested in.
1530	 */
1531	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1532
1533	/* Disable counter */
1534	armv7_pmnc_disable_counter(idx);
1535
1536	/*
1537	 * Set event (if destined for PMNx counters)
1538	 * We set the event for the cycle counter because we
1539	 * have the ability to perform event filtering.
1540	 */
1541	if (hwc->config_base & KRAIT_EVENT_MASK)
1542		krait_evt_setup(idx, hwc->config_base);
1543	else
1544		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1545
1546	/* Enable interrupt for this counter */
1547	armv7_pmnc_enable_intens(idx);
1548
1549	/* Enable counter */
1550	armv7_pmnc_enable_counter(idx);
1551
1552	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1553}
1554
1555static void krait_pmu_reset(void *info)
1556{
1557	u32 vval, fval;
1558	struct arm_pmu *cpu_pmu = info;
1559	u32 idx, nb_cnt = cpu_pmu->num_events;
1560
1561	armv7pmu_reset(info);
1562
1563	/* Clear all pmresrs */
1564	krait_write_pmresrn(0, 0);
1565	krait_write_pmresrn(1, 0);
1566	krait_write_pmresrn(2, 0);
1567
1568	venum_pre_pmresr(&vval, &fval);
1569	venum_write_pmresr(0);
1570	venum_post_pmresr(vval, fval);
1571
1572	/* Reset PMxEVNCTCR to sane default */
1573	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1574		armv7_pmnc_select_counter(idx);
1575		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1576	}
1577
1578}
1579
1580static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1581			      unsigned int group)
1582{
1583	int bit;
1584	struct hw_perf_event *hwc = &event->hw;
1585	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1586
1587	if (hwc->config_base & VENUM_EVENT)
1588		bit = KRAIT_VPMRESR0_GROUP0;
1589	else
1590		bit = krait_get_pmresrn_event(region);
1591	bit -= krait_get_pmresrn_event(0);
1592	bit += group;
1593	/*
1594	 * Lower bits are reserved for use by the counters (see
1595	 * armv7pmu_get_event_idx() for more info)
1596	 */
1597	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1598
1599	return bit;
1600}
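/*
 * Worked example (a sketch): for a Krait CPU event in PMRESR1 group 2,
 * bit = (KRAIT_PMRESR1_GROUP0 - KRAIT_PMRESR0_GROUP0) + 2 = 6, offset by
 * ARMV7_IDX_COUNTER_LAST() + 1.  Assuming four event counters plus the
 * cycle counter (num_events == 5), that reserves bit 4 + 2 + 5 = 11 of
 * cpuc->used_mask, well clear of the counter-index bits 0..4.
 */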
1601
1602/*
1603 * We check for column exclusion constraints here.
1604 * Two events can't use the same group within a pmresr register.
1605 */
1606static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1607				   struct perf_event *event)
1608{
1609	int idx;
1610	int bit = -1;
1611	struct hw_perf_event *hwc = &event->hw;
1612	unsigned int region = EVENT_REGION(hwc->config_base);
1613	unsigned int code = EVENT_CODE(hwc->config_base);
1614	unsigned int group = EVENT_GROUP(hwc->config_base);
1615	bool venum_event = EVENT_VENUM(hwc->config_base);
1616	bool krait_event = EVENT_CPU(hwc->config_base);
1617
1618	if (venum_event || krait_event) {
1619		/* Ignore invalid events */
1620		if (group > 3 || region > 2)
1621			return -EINVAL;
1622		if (venum_event && (code & 0xe0))
1623			return -EINVAL;
1624
1625		bit = krait_event_to_bit(event, region, group);
1626		if (test_and_set_bit(bit, cpuc->used_mask))
1627			return -EAGAIN;
1628	}
1629
1630	idx = armv7pmu_get_event_idx(cpuc, event);
1631	if (idx < 0 && bit >= 0)
1632		clear_bit(bit, cpuc->used_mask);
1633
1634	return idx;
1635}
1636
1637static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1638				      struct perf_event *event)
1639{
1640	int bit;
1641	struct hw_perf_event *hwc = &event->hw;
1642	unsigned int region = EVENT_REGION(hwc->config_base);
1643	unsigned int group = EVENT_GROUP(hwc->config_base);
1644	bool venum_event = EVENT_VENUM(hwc->config_base);
1645	bool krait_event = EVENT_CPU(hwc->config_base);
1646
1647	armv7pmu_clear_event_idx(cpuc, event);
1648	if (venum_event || krait_event) {
1649		bit = krait_event_to_bit(event, region, group);
1650		clear_bit(bit, cpuc->used_mask);
1651	}
1652}
1653
1654static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1655{
1656	armv7pmu_init(cpu_pmu);
1657	cpu_pmu->name		= "armv7_krait";
1658	/* Some early versions of Krait don't support PC write events */
1659	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
1660				  "qcom,no-pc-write"))
1661		cpu_pmu->map_event = krait_map_event_no_branch;
1662	else
1663		cpu_pmu->map_event = krait_map_event;
1664	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1665	cpu_pmu->reset		= krait_pmu_reset;
1666	cpu_pmu->enable		= krait_pmu_enable_event;
1667	cpu_pmu->disable	= krait_pmu_disable_event;
1668	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
1669	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1670	return armv7_probe_num_events(cpu_pmu);
1671}
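/*
 * Illustrative device-tree node for this PMU (the interrupt specifier is
 * only an example); the optional "qcom,no-pc-write" property is what
 * selects krait_map_event_no_branch above:
 *
 *	pmu {
 *		compatible = "qcom,krait-pmu";
 *		interrupts = <1 10 0x304>;
 *		qcom,no-pc-write;
 *	};
 */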
1672
1673/*
1674 * Scorpion Local Performance Monitor Register (LPMn)
1675 *
1676 *            31   30     24     16     8      0
1677 *            +--------------------------------+
1678 *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1679 *            +--------------------------------+
1680 *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1681 *            +--------------------------------+
1682 *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1683 *            +--------------------------------+
1684 *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
1685 *            +--------------------------------+
1686 *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1687 *            +--------------------------------+
1688 *              EN | G=3  | G=2  | G=1  | G=0
1689 *
1690 *
1691 *  Event Encoding:
1692 *
1693 *      hwc->config_base = 0xNRCCG
1694 *
1695 *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
1696 *      R  = region register
1697 *      CC = class of events the group G is choosing from
1698 *      G  = group or particular event
1699 *
1700 *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1701 *
1702 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1703 *  unit, etc.) while the event code (CC) corresponds to a particular class of
1704 *  events (interrupts for example). An event code is broken down into
1705 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1706 *  example).
1707 */
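/*
 * Continuing the example above: 0x12021 is SCORPION_ITLB_MISS, so when that
 * event is installed, scorpion_evt_setup() below writes evtsel =
 * SCORPION_LPM2_GROUP0 + 1 = 0x55 (plus mode-exclusion bits) and programs
 * LPM2 with code 0x02 in group 1, i.e. (0x02 << 8) | PMRESRn_EN.
 */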
1708
1709static u32 scorpion_read_pmresrn(int n)
1710{
1711	u32 val;
1712
1713	switch (n) {
1714	case 0:
1715		asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
1716		break;
1717	case 1:
1718		asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
1719		break;
1720	case 2:
1721		asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
1722		break;
1723	case 3:
1724		asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
1725		break;
1726	default:
1727		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1728	}
1729
1730	return val;
1731}
1732
1733static void scorpion_write_pmresrn(int n, u32 val)
1734{
1735	switch (n) {
1736	case 0:
1737		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
1738		break;
1739	case 1:
1740		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
1741		break;
1742	case 2:
1743		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
1744		break;
1745	case 3:
1746		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
1747		break;
1748	default:
1749		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1750	}
1751}
1752
1753static u32 scorpion_get_pmresrn_event(unsigned int region)
1754{
1755	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
1756					     SCORPION_LPM1_GROUP0,
1757					     SCORPION_LPM2_GROUP0,
1758					     SCORPION_L2LPM_GROUP0 };
1759	return pmresrn_table[region];
1760}
1761
1762static void scorpion_evt_setup(int idx, u32 config_base)
1763{
1764	u32 val;
1765	u32 mask;
1766	u32 vval, fval;
1767	unsigned int region = EVENT_REGION(config_base);
1768	unsigned int group = EVENT_GROUP(config_base);
1769	unsigned int code = EVENT_CODE(config_base);
1770	unsigned int group_shift;
1771	bool venum_event = EVENT_VENUM(config_base);
1772
1773	group_shift = group * 8;
1774	mask = 0xff << group_shift;
1775
1776	/* Configure evtsel for the region and group */
1777	if (venum_event)
1778		val = SCORPION_VLPM_GROUP0;
1779	else
1780		val = scorpion_get_pmresrn_event(region);
1781	val += group;
1782	/* Mix in mode-exclusion bits */
1783	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1784	armv7_pmnc_write_evtsel(idx, val);
1785
1786	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1787
1788	if (venum_event) {
1789		venum_pre_pmresr(&vval, &fval);
1790		val = venum_read_pmresr();
1791		val &= ~mask;
1792		val |= code << group_shift;
1793		val |= PMRESRn_EN;
1794		venum_write_pmresr(val);
1795		venum_post_pmresr(vval, fval);
1796	} else {
1797		val = scorpion_read_pmresrn(region);
1798		val &= ~mask;
1799		val |= code << group_shift;
1800		val |= PMRESRn_EN;
1801		scorpion_write_pmresrn(region, val);
1802	}
1803}
1804
1805static void scorpion_clearpmu(u32 config_base)
1806{
1807	u32 val;
1808	u32 vval, fval;
1809	unsigned int region = EVENT_REGION(config_base);
1810	unsigned int group = EVENT_GROUP(config_base);
1811	bool venum_event = EVENT_VENUM(config_base);
1812
1813	if (venum_event) {
1814		venum_pre_pmresr(&vval, &fval);
1815		val = venum_read_pmresr();
1816		val = clear_pmresrn_group(val, group);
1817		venum_write_pmresr(val);
1818		venum_post_pmresr(vval, fval);
1819	} else {
1820		val = scorpion_read_pmresrn(region);
1821		val = clear_pmresrn_group(val, group);
1822		scorpion_write_pmresrn(region, val);
1823	}
1824}
1825
1826static void scorpion_pmu_disable_event(struct perf_event *event)
1827{
1828	unsigned long flags;
1829	struct hw_perf_event *hwc = &event->hw;
1830	int idx = hwc->idx;
1831	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1832	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1833
1834	/* Disable counter and interrupt */
1835	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1836
1837	/* Disable counter */
1838	armv7_pmnc_disable_counter(idx);
1839
1840	/*
1841	 * Clear pmresr code (if destined for PMNx counters)
1842	 */
1843	if (hwc->config_base & KRAIT_EVENT_MASK)
1844		scorpion_clearpmu(hwc->config_base);
1845
1846	/* Disable interrupt for this counter */
1847	armv7_pmnc_disable_intens(idx);
1848
1849	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1850}
1851
1852static void scorpion_pmu_enable_event(struct perf_event *event)
1853{
1854	unsigned long flags;
1855	struct hw_perf_event *hwc = &event->hw;
1856	int idx = hwc->idx;
1857	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1858	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1859
1860	/*
1861	 * Enable counter and interrupt, and set the counter to count
1862	 * the event that we're interested in.
1863	 */
1864	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1865
1866	/* Disable counter */
1867	armv7_pmnc_disable_counter(idx);
1868
1869	/*
1870	 * Set event (if destined for PMNx counters)
1871	 * We don't set the event for the cycle counter because we
1872	 * don't have the ability to perform event filtering.
1873	 */
1874	if (hwc->config_base & KRAIT_EVENT_MASK)
1875		scorpion_evt_setup(idx, hwc->config_base);
1876	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
1877		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1878
1879	/* Enable interrupt for this counter */
1880	armv7_pmnc_enable_intens(idx);
1881
1882	/* Enable counter */
1883	armv7_pmnc_enable_counter(idx);
1884
1885	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1886}
1887
1888static void scorpion_pmu_reset(void *info)
1889{
1890	u32 vval, fval;
1891	struct arm_pmu *cpu_pmu = info;
1892	u32 idx, nb_cnt = cpu_pmu->num_events;
1893
1894	armv7pmu_reset(info);
1895
1896	/* Clear all pmresrs */
1897	scorpion_write_pmresrn(0, 0);
1898	scorpion_write_pmresrn(1, 0);
1899	scorpion_write_pmresrn(2, 0);
1900	scorpion_write_pmresrn(3, 0);
1901
1902	venum_pre_pmresr(&vval, &fval);
1903	venum_write_pmresr(0);
1904	venum_post_pmresr(vval, fval);
1905
1906	/* Reset PMxEVNCTCR to sane default */
1907	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1908		armv7_pmnc_select_counter(idx);
1909		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1910	}
1911}
1912
1913static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
1914			      unsigned int group)
1915{
1916	int bit;
1917	struct hw_perf_event *hwc = &event->hw;
1918	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1919
1920	if (hwc->config_base & VENUM_EVENT)
1921		bit = SCORPION_VLPM_GROUP0;
1922	else
1923		bit = scorpion_get_pmresrn_event(region);
1924	bit -= scorpion_get_pmresrn_event(0);
1925	bit += group;
1926	/*
1927	 * Lower bits are reserved for use by the counters (see
1928	 * armv7pmu_get_event_idx() for more info)
1929	 */
1930	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1931
1932	return bit;
1933}
1934
1935/*
1936 * We check for column exclusion constraints here.
1937 * Two events can't use the same group within a pmresr register.
1938 */
1939static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1940				   struct perf_event *event)
1941{
1942	int idx;
1943	int bit = -1;
1944	struct hw_perf_event *hwc = &event->hw;
1945	unsigned int region = EVENT_REGION(hwc->config_base);
1946	unsigned int group = EVENT_GROUP(hwc->config_base);
1947	bool venum_event = EVENT_VENUM(hwc->config_base);
1948	bool scorpion_event = EVENT_CPU(hwc->config_base);
1949
1950	if (venum_event || scorpion_event) {
1951		/* Ignore invalid events */
1952		if (group > 3 || region > 3)
1953			return -EINVAL;
1954
1955		bit = scorpion_event_to_bit(event, region, group);
1956		if (test_and_set_bit(bit, cpuc->used_mask))
1957			return -EAGAIN;
1958	}
1959
1960	idx = armv7pmu_get_event_idx(cpuc, event);
1961	if (idx < 0 && bit >= 0)
1962		clear_bit(bit, cpuc->used_mask);
1963
1964	return idx;
1965}
1966
1967static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1968				      struct perf_event *event)
1969{
1970	int bit;
1971	struct hw_perf_event *hwc = &event->hw;
1972	unsigned int region = EVENT_REGION(hwc->config_base);
1973	unsigned int group = EVENT_GROUP(hwc->config_base);
1974	bool venum_event = EVENT_VENUM(hwc->config_base);
1975	bool scorpion_event = EVENT_CPU(hwc->config_base);
1976
1977	armv7pmu_clear_event_idx(cpuc, event);
1978	if (venum_event || scorpion_event) {
1979		bit = scorpion_event_to_bit(event, region, group);
1980		clear_bit(bit, cpuc->used_mask);
1981	}
1982}
1983
1984static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1985{
1986	armv7pmu_init(cpu_pmu);
1987	cpu_pmu->name		= "armv7_scorpion";
1988	cpu_pmu->map_event	= scorpion_map_event;
1989	cpu_pmu->reset		= scorpion_pmu_reset;
1990	cpu_pmu->enable		= scorpion_pmu_enable_event;
1991	cpu_pmu->disable	= scorpion_pmu_disable_event;
1992	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
1993	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1994	return armv7_probe_num_events(cpu_pmu);
1995}
1996
1997static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1998{
1999	armv7pmu_init(cpu_pmu);
2000	cpu_pmu->name		= "armv7_scorpion_mp";
2001	cpu_pmu->map_event	= scorpion_map_event;
2002	cpu_pmu->reset		= scorpion_pmu_reset;
2003	cpu_pmu->enable		= scorpion_pmu_enable_event;
2004	cpu_pmu->disable	= scorpion_pmu_disable_event;
2005	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
2006	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
2007	return armv7_probe_num_events(cpu_pmu);
2008}
2009
2010static const struct of_device_id armv7_pmu_of_device_ids[] = {
2011	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
2012	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
2013	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
2014	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
2015	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
2016	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
2017	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
2018	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
2019	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
2020	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
2021	{},
2022};
2023
2024static const struct pmu_probe_info armv7_pmu_probe_table[] = {
2025	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
2026	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
2027	{ /* sentinel value */ }
2028};
2029
2030
2031static int armv7_pmu_device_probe(struct platform_device *pdev)
2032{
2033	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
2034				    armv7_pmu_probe_table);
2035}
2036
2037static struct platform_driver armv7_pmu_driver = {
2038	.driver		= {
2039		.name	= "armv7-pmu",
2040		.of_match_table = armv7_pmu_of_device_ids,
2041		.suppress_bind_attrs = true,
2042	},
2043	.probe		= armv7_pmu_device_probe,
2044};
2045
2046builtin_platform_driver(armv7_pmu_driver);
2047#endif	/* CONFIG_CPU_V7 */
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
   4 *
   5 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
   6 * 2010 (c) MontaVista Software, LLC.
   7 *
   8 * Copied from ARMv6 code, with the low level code inspired
   9 *  by the ARMv7 Oprofile code.
  10 *
  11 * Cortex-A8 has up to 4 configurable performance counters and
  12 *  a single cycle counter.
  13 * Cortex-A9 has up to 31 configurable performance counters and
  14 *  a single cycle counter.
  15 *
  16 * All counters can be enabled/disabled and IRQ masked separately. The cycle
  17 *  counter and all 4 performance counters together can be reset separately.
  18 */
  19
  20#ifdef CONFIG_CPU_V7
  21
  22#include <asm/cp15.h>
  23#include <asm/cputype.h>
  24#include <asm/irq_regs.h>
  25#include <asm/vfp.h>
  26#include "../vfp/vfpinstr.h"
  27
  28#include <linux/of.h>
  29#include <linux/perf/arm_pmu.h>
  30#include <linux/platform_device.h>
  31
  32/*
  33 * Common ARMv7 event types
  34 *
  35 * Note: An implementation may not be able to count all of these events
  36 * but the encodings are considered to be `reserved' in the case that
  37 * they are not available.
  38 */
  39#define ARMV7_PERFCTR_PMNC_SW_INCR			0x00
  40#define ARMV7_PERFCTR_L1_ICACHE_REFILL			0x01
  41#define ARMV7_PERFCTR_ITLB_REFILL			0x02
  42#define ARMV7_PERFCTR_L1_DCACHE_REFILL			0x03
  43#define ARMV7_PERFCTR_L1_DCACHE_ACCESS			0x04
  44#define ARMV7_PERFCTR_DTLB_REFILL			0x05
  45#define ARMV7_PERFCTR_MEM_READ				0x06
  46#define ARMV7_PERFCTR_MEM_WRITE				0x07
  47#define ARMV7_PERFCTR_INSTR_EXECUTED			0x08
  48#define ARMV7_PERFCTR_EXC_TAKEN				0x09
  49#define ARMV7_PERFCTR_EXC_EXECUTED			0x0A
  50#define ARMV7_PERFCTR_CID_WRITE				0x0B
  51
  52/*
  53 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
  54 * It counts:
  55 *  - all (taken) branch instructions,
  56 *  - instructions that explicitly write the PC,
  57 *  - exception generating instructions.
  58 */
  59#define ARMV7_PERFCTR_PC_WRITE				0x0C
  60#define ARMV7_PERFCTR_PC_IMM_BRANCH			0x0D
  61#define ARMV7_PERFCTR_PC_PROC_RETURN			0x0E
  62#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		0x0F
  63#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		0x10
  64#define ARMV7_PERFCTR_CLOCK_CYCLES			0x11
  65#define ARMV7_PERFCTR_PC_BRANCH_PRED			0x12
  66
  67/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
  68#define ARMV7_PERFCTR_MEM_ACCESS			0x13
  69#define ARMV7_PERFCTR_L1_ICACHE_ACCESS			0x14
  70#define ARMV7_PERFCTR_L1_DCACHE_WB			0x15
  71#define ARMV7_PERFCTR_L2_CACHE_ACCESS			0x16
  72#define ARMV7_PERFCTR_L2_CACHE_REFILL			0x17
  73#define ARMV7_PERFCTR_L2_CACHE_WB			0x18
  74#define ARMV7_PERFCTR_BUS_ACCESS			0x19
  75#define ARMV7_PERFCTR_MEM_ERROR				0x1A
  76#define ARMV7_PERFCTR_INSTR_SPEC			0x1B
  77#define ARMV7_PERFCTR_TTBR_WRITE			0x1C
  78#define ARMV7_PERFCTR_BUS_CYCLES			0x1D
  79
  80#define ARMV7_PERFCTR_CPU_CYCLES			0xFF
  81
  82/* ARMv7 Cortex-A8 specific event types */
  83#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		0x43
  84#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL		0x44
  85#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		0x50
  86#define ARMV7_A8_PERFCTR_STALL_ISIDE			0x56
  87
  88/* ARMv7 Cortex-A9 specific event types */
  89#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		0x68
  90#define ARMV7_A9_PERFCTR_STALL_ICACHE			0x60
  91#define ARMV7_A9_PERFCTR_STALL_DISPATCH			0x66
  92
  93/* ARMv7 Cortex-A5 specific event types */
  94#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		0xc2
  95#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		0xc3
  96
  97/* ARMv7 Cortex-A15 specific event types */
  98#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
  99#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
 100#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		0x42
 101#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	0x43
 102
 103#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		0x4C
 104#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		0x4D
 105
 106#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		0x50
 107#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
 108#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		0x52
 109#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		0x53
 110
 111#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC			0x76
 112
 113/* ARMv7 Cortex-A12 specific event types */
 114#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
 115#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
 116
 117#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		0x50
 118#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
 119
 120#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC			0x76
 121
 122#define ARMV7_A12_PERFCTR_PF_TLB_REFILL			0xe7
 123
 124/* ARMv7 Krait specific event types */
 125#define KRAIT_PMRESR0_GROUP0				0xcc
 126#define KRAIT_PMRESR1_GROUP0				0xd0
 127#define KRAIT_PMRESR2_GROUP0				0xd4
 128#define KRAIT_VPMRESR0_GROUP0				0xd8
 129
 130#define KRAIT_PERFCTR_L1_ICACHE_ACCESS			0x10011
 131#define KRAIT_PERFCTR_L1_ICACHE_MISS			0x10010
 132
 133#define KRAIT_PERFCTR_L1_ITLB_ACCESS			0x12222
 134#define KRAIT_PERFCTR_L1_DTLB_ACCESS			0x12210
 135
 136/* ARMv7 Scorpion specific event types */
 137#define SCORPION_LPM0_GROUP0				0x4c
 138#define SCORPION_LPM1_GROUP0				0x50
 139#define SCORPION_LPM2_GROUP0				0x54
 140#define SCORPION_L2LPM_GROUP0				0x58
 141#define SCORPION_VLPM_GROUP0				0x5c
 142
 143#define SCORPION_ICACHE_ACCESS				0x10053
 144#define SCORPION_ICACHE_MISS				0x10052
 145
 146#define SCORPION_DTLB_ACCESS				0x12013
 147#define SCORPION_DTLB_MISS				0x12012
 148
 149#define SCORPION_ITLB_MISS				0x12021
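/*
 * Note that the 0x1xxxx/0x2xxxx values above are not plain PMNx event
 * numbers: they use the 0xNRCCG region/code/group encoding described in
 * the Scorpion LPM comment further down and are routed through the
 * PMRESR/LPM setup code rather than written directly to PMXEVTYPER.
 */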
 150
 151/*
 152 * Cortex-A8 HW events mapping
 153 *
 154 * The hardware events that we support. We do support cache operations but
 155 * we have harvard caches and no way to combine instruction and data
 156 * accesses/misses in hardware.
 157 */
 158static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
 159	PERF_MAP_ALL_UNSUPPORTED,
 160	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 161	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 162	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 163	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 164	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 165	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 166	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
 167};
 168
 169static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 170					  [PERF_COUNT_HW_CACHE_OP_MAX]
 171					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 172	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 173
 174	/*
 175	 * The performance counters don't differentiate between read and write
 176	 * accesses/misses so this isn't strictly correct, but it's the best we
 177	 * can do. Writes and reads get combined.
 178	 */
 179	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 180	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 181	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 182	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 183
 184	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
 185	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 186
 187	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 188	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 189	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 190	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 191
 192	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 193	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 194
 195	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 196	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 197
 198	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 199	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 200	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 201	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 202};
 203
 204/*
 205 * Cortex-A9 HW events mapping
 206 */
 207static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 208	PERF_MAP_ALL_UNSUPPORTED,
 209	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 210	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
 211	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 212	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 213	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 214	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 215	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
 216	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
 217};
 218
 219static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 220					  [PERF_COUNT_HW_CACHE_OP_MAX]
 221					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 222	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 223
 224	/*
 225	 * The performance counters don't differentiate between read and write
 226	 * accesses/misses so this isn't strictly correct, but it's the best we
 227	 * can do. Writes and reads get combined.
 228	 */
 229	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 230	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 231	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 232	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 233
 234	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 235
 236	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 237	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 238
 239	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 240	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 241
 242	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 243	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 244	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 245	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 246};
 247
 248/*
 249 * Cortex-A5 HW events mapping
 250 */
 251static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
 252	PERF_MAP_ALL_UNSUPPORTED,
 253	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 254	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 255	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 256	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 257	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 258	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 259};
 260
 261static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 262					[PERF_COUNT_HW_CACHE_OP_MAX]
 263					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 264	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 265
 266	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 267	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 268	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 269	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 270	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 271	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 272
 273	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 274	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 275	/*
 276	 * The prefetch counters don't differentiate between the I side and the
 277	 * D side.
 278	 */
 279	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 280	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 281
 282	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 283	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 284
 285	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 286	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 287
 288	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 289	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 290	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 291	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 292};
 293
 294/*
 295 * Cortex-A15 HW events mapping
 296 */
 297static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
 298	PERF_MAP_ALL_UNSUPPORTED,
 299	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 300	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 301	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 302	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 303	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
 304	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 305	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 306};
 307
 308static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 309					[PERF_COUNT_HW_CACHE_OP_MAX]
 310					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 311	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 312
 313	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
 314	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
 315	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 316	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
 317
 318	/*
 319	 * Not all performance counters differentiate between read and write
 320	 * accesses/misses so we're not always strictly correct, but it's the
 321	 * best we can do. Writes and reads get combined in these cases.
 322	 */
 323	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 324	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 325
 326	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
 327	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
 328	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
 329	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
 330
 331	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
 332	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
 333
 334	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 335	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 336
 337	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 338	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 339	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 340	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 341};
 342
 343/*
 344 * Cortex-A7 HW events mapping
 345 */
 346static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
 347	PERF_MAP_ALL_UNSUPPORTED,
 348	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 349	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 350	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 351	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 352	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 353	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 354	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 355};
 356
 357static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 358					[PERF_COUNT_HW_CACHE_OP_MAX]
 359					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 360	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 361
 362	/*
 363	 * The performance counters don't differentiate between read and write
 364	 * accesses/misses so this isn't strictly correct, but it's the best we
 365	 * can do. Writes and reads get combined.
 366	 */
 367	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 368	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 369	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 370	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 371
 372	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 373	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 374
 375	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 376	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 377	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 378	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 379
 380	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 381	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 382
 383	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 384	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 385
 386	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 387	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 388	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 389	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 390};
 391
 392/*
 393 * Cortex-A12 HW events mapping
 394 */
 395static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
 396	PERF_MAP_ALL_UNSUPPORTED,
 397	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 398	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 399	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 400	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 401	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
 402	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 403	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 404};
 405
 406static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 407					[PERF_COUNT_HW_CACHE_OP_MAX]
 408					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 409	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 410
 411	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
 412	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 413	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 414	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 415
 416	/*
 417	 * Not all performance counters differentiate between read and write
 418	 * accesses/misses so we're not always strictly correct, but it's the
 419	 * best we can do. Writes and reads get combined in these cases.
 420	 */
 421	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 422	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 423
 424	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
 425	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 426	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
 427	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 428
 429	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 430	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 431	[C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,
 432
 433	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 434	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 435
 436	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 437	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 438	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 439	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 440};
 441
 442/*
 443 * Krait HW events mapping
 444 */
 445static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
 446	PERF_MAP_ALL_UNSUPPORTED,
 447	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 448	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 449	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 450	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 451	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 452};
 453
 454static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
 455	PERF_MAP_ALL_UNSUPPORTED,
 456	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 457	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 458	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 459	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 460};
 461
 462static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 463					  [PERF_COUNT_HW_CACHE_OP_MAX]
 464					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 465	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 466
 467	/*
 468	 * The performance counters don't differentiate between read and write
 469	 * accesses/misses so this isn't strictly correct, but it's the best we
 470	 * can do. Writes and reads get combined.
 471	 */
 472	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 473	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 474	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 475	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 476
 477	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
 478	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,
 479
 480	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
 481	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
 482
 483	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
 484	[C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
 485
 486	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 487	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 488	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 489	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 490};
 491
 492/*
 493 * Scorpion HW events mapping
 494 */
 495static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
 496	PERF_MAP_ALL_UNSUPPORTED,
 497	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 498	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 499	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 500	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 501	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 502};
 503
 504static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 505					    [PERF_COUNT_HW_CACHE_OP_MAX]
 506					    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 507	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 508	/*
 509	 * The performance counters don't differentiate between read and write
 510	 * accesses/misses so this isn't strictly correct, but it's the best we
 511	 * can do. Writes and reads get combined.
 512	 */
 513	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 514	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 515	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 516	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 517	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
 518	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
 519	/*
 520	 * Only ITLB misses and DTLB refills are supported.  If users want
 521	 * DTLB refill misses, a raw counter must be used.
 522	 */
 523	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
 524	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
 525	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
 526	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
 527	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
 528	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
 529	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 530	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 531	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 532	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 533};
 534
 535PMU_FORMAT_ATTR(event, "config:0-7");
 536
 537static struct attribute *armv7_pmu_format_attrs[] = {
 538	&format_attr_event.attr,
 539	NULL,
 540};
 541
 542static struct attribute_group armv7_pmu_format_attr_group = {
 543	.name = "format",
 544	.attrs = armv7_pmu_format_attrs,
 545};
 546
 547#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
 548#define ARMV7_EVENT_ATTR(name, config) \
 549	PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
 550			      "event=" ARMV7_EVENT_ATTR_RESOLVE(config))
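/*
 * The extra ARMV7_EVENT_ATTR_RESOLVE() pass stringifies the value after the
 * ARMV7_PERFCTR_* argument has been macro-expanded, so, for example,
 * ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR) below produces a
 * sysfs "events/sw_incr" attribute whose contents read "event=0x00".
 */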
 551
 552ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
 553ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
 554ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
 555ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
 556ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
 557ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
 558ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
 559ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
 560ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
 561ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
 562ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
 563ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
 564ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
 565ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
 566ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
 567ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
 568ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
 569ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
 570ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);
 571
 572static struct attribute *armv7_pmuv1_event_attrs[] = {
 573	&armv7_event_attr_sw_incr.attr.attr,
 574	&armv7_event_attr_l1i_cache_refill.attr.attr,
 575	&armv7_event_attr_l1i_tlb_refill.attr.attr,
 576	&armv7_event_attr_l1d_cache_refill.attr.attr,
 577	&armv7_event_attr_l1d_cache.attr.attr,
 578	&armv7_event_attr_l1d_tlb_refill.attr.attr,
 579	&armv7_event_attr_ld_retired.attr.attr,
 580	&armv7_event_attr_st_retired.attr.attr,
 581	&armv7_event_attr_inst_retired.attr.attr,
 582	&armv7_event_attr_exc_taken.attr.attr,
 583	&armv7_event_attr_exc_return.attr.attr,
 584	&armv7_event_attr_cid_write_retired.attr.attr,
 585	&armv7_event_attr_pc_write_retired.attr.attr,
 586	&armv7_event_attr_br_immed_retired.attr.attr,
 587	&armv7_event_attr_br_return_retired.attr.attr,
 588	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
 589	&armv7_event_attr_br_mis_pred.attr.attr,
 590	&armv7_event_attr_cpu_cycles.attr.attr,
 591	&armv7_event_attr_br_pred.attr.attr,
 592	NULL,
 593};
 594
 595static struct attribute_group armv7_pmuv1_events_attr_group = {
 596	.name = "events",
 597	.attrs = armv7_pmuv1_event_attrs,
 598};
 599
 600ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
 601ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
 602ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
 603ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
 604ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
 605ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
 606ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
 607ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
 608ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
 609ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
 610ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);
 611
 612static struct attribute *armv7_pmuv2_event_attrs[] = {
 613	&armv7_event_attr_sw_incr.attr.attr,
 614	&armv7_event_attr_l1i_cache_refill.attr.attr,
 615	&armv7_event_attr_l1i_tlb_refill.attr.attr,
 616	&armv7_event_attr_l1d_cache_refill.attr.attr,
 617	&armv7_event_attr_l1d_cache.attr.attr,
 618	&armv7_event_attr_l1d_tlb_refill.attr.attr,
 619	&armv7_event_attr_ld_retired.attr.attr,
 620	&armv7_event_attr_st_retired.attr.attr,
 621	&armv7_event_attr_inst_retired.attr.attr,
 622	&armv7_event_attr_exc_taken.attr.attr,
 623	&armv7_event_attr_exc_return.attr.attr,
 624	&armv7_event_attr_cid_write_retired.attr.attr,
 625	&armv7_event_attr_pc_write_retired.attr.attr,
 626	&armv7_event_attr_br_immed_retired.attr.attr,
 627	&armv7_event_attr_br_return_retired.attr.attr,
 628	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
 629	&armv7_event_attr_br_mis_pred.attr.attr,
 630	&armv7_event_attr_cpu_cycles.attr.attr,
 631	&armv7_event_attr_br_pred.attr.attr,
 632	&armv7_event_attr_mem_access.attr.attr,
 633	&armv7_event_attr_l1i_cache.attr.attr,
 634	&armv7_event_attr_l1d_cache_wb.attr.attr,
 635	&armv7_event_attr_l2d_cache.attr.attr,
 636	&armv7_event_attr_l2d_cache_refill.attr.attr,
 637	&armv7_event_attr_l2d_cache_wb.attr.attr,
 638	&armv7_event_attr_bus_access.attr.attr,
 639	&armv7_event_attr_memory_error.attr.attr,
 640	&armv7_event_attr_inst_spec.attr.attr,
 641	&armv7_event_attr_ttbr_write_retired.attr.attr,
 642	&armv7_event_attr_bus_cycles.attr.attr,
 643	NULL,
 644};
 645
 646static struct attribute_group armv7_pmuv2_events_attr_group = {
 647	.name = "events",
 648	.attrs = armv7_pmuv2_event_attrs,
 649};
 650
 651/*
 652 * Perf Events' indices
 653 */
 654#define	ARMV7_IDX_CYCLE_COUNTER	0
 655#define	ARMV7_IDX_COUNTER0	1
 656#define	ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
 657	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 658
 659#define	ARMV7_MAX_COUNTERS	32
 660#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
 661
 662/*
 663 * ARMv7 low level PMNC access
 664 */
 665
 666/*
 667 * Perf Event to low level counters mapping
 668 */
 669#define	ARMV7_IDX_TO_COUNTER(x)	\
 670	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
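/*
 * For example, on a PMU with four event counters plus the cycle counter
 * (num_events == 5), perf index 0 is the cycle counter, perf indices 1-4
 * map to hardware counters 0-3 via ARMV7_IDX_TO_COUNTER(), and
 * ARMV7_IDX_COUNTER_LAST() evaluates to 4.
 */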
 671
 672/*
 673 * Per-CPU PMNC: config reg
 674 */
 675#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
 676#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
 677#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
 678#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
 679#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
 680#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
 681#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
 682#define	ARMV7_PMNC_N_MASK	0x1f
 683#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
 684
 685/*
 686 * FLAG: counters overflow flag status reg
 687 */
 688#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 689#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 690
 691/*
 692 * PMXEVTYPER: Event selection reg
 693 */
 694#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
 695#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
 696
 697/*
 698 * Event filters for PMUv2
 699 */
 700#define	ARMV7_EXCLUDE_PL1	BIT(31)
 701#define	ARMV7_EXCLUDE_USER	BIT(30)
 702#define	ARMV7_INCLUDE_HYP	BIT(27)
 703
 704/*
 705 * Secure debug enable reg
 706 */
 707#define ARMV7_SDER_SUNIDEN	BIT(1) /* Permit non-invasive debug */
 708
 709static inline u32 armv7_pmnc_read(void)
 710{
 711	u32 val;
 712	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 713	return val;
 714}
 715
 716static inline void armv7_pmnc_write(u32 val)
 717{
 718	val &= ARMV7_PMNC_MASK;
 719	isb();
 720	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 721}
 722
 723static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 724{
 725	return pmnc & ARMV7_OVERFLOWED_MASK;
 726}
 727
 728static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
 729{
 730	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
 731		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
 732}
 733
 734static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 735{
 736	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
 737}
 738
 739static inline void armv7_pmnc_select_counter(int idx)
 740{
 741	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 742	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 743	isb();
 744}
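/*
 * The PMSELR write above selects which counter the banked PMXEVTYPER and
 * PMXEVCNTR accesses (c9, c13) below operate on; the isb() ensures the
 * selection has taken effect before those accesses are issued.
 */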
 745
 746static inline u64 armv7pmu_read_counter(struct perf_event *event)
 747{
 748	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 749	struct hw_perf_event *hwc = &event->hw;
 750	int idx = hwc->idx;
 751	u32 value = 0;
 752
 753	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 754		pr_err("CPU%u reading wrong counter %d\n",
 755			smp_processor_id(), idx);
 756	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
 757		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
 758	} else {
 759		armv7_pmnc_select_counter(idx);
 760		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
 761	}
 762
 763	return value;
 764}
 765
 766static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
 767{
 768	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 769	struct hw_perf_event *hwc = &event->hw;
 770	int idx = hwc->idx;
 771
 772	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 773		pr_err("CPU%u writing wrong counter %d\n",
 774			smp_processor_id(), idx);
 775	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
 776		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
 777	} else {
 778		armv7_pmnc_select_counter(idx);
 779		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
 780	}
 781}
 782
 783static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 784{
 785	armv7_pmnc_select_counter(idx);
 786	val &= ARMV7_EVTYPE_MASK;
 787	asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 788}
 789
 790static inline void armv7_pmnc_enable_counter(int idx)
 791{
 792	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 793	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 794}
 795
 796static inline void armv7_pmnc_disable_counter(int idx)
 797{
 798	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 799	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 800}
 801
 802static inline void armv7_pmnc_enable_intens(int idx)
 803{
 804	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 805	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 806}
 807
 808static inline void armv7_pmnc_disable_intens(int idx)
 809{
 810	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 811	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 812	isb();
 813	/* Clear the overflow flag in case an interrupt is pending. */
 814	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
 815	isb();
 816}
 817
 818static inline u32 armv7_pmnc_getreset_flags(void)
 819{
 820	u32 val;
 821
 822	/* Read */
 823	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 824
 825	/* Write to clear flags */
 826	val &= ARMV7_FLAG_MASK;
 827	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
 828
 829	return val;
 830}
 831
 832#ifdef DEBUG
 833static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
 834{
 835	u32 val;
 836	unsigned int cnt;
 837
 838	pr_info("PMNC registers dump:\n");
 839
 840	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
 841	pr_info("PMNC  =0x%08x\n", val);
 842
 843	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
 844	pr_info("CNTENS=0x%08x\n", val);
 845
 846	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
 847	pr_info("INTENS=0x%08x\n", val);
 848
 849	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 850	pr_info("FLAGS =0x%08x\n", val);
 851
 852	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
 853	pr_info("SELECT=0x%08x\n", val);
 854
 855	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 856	pr_info("CCNT  =0x%08x\n", val);
 857
 858	for (cnt = ARMV7_IDX_COUNTER0;
 859			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
 860		armv7_pmnc_select_counter(cnt);
 861		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 862		pr_info("CNT[%d] count =0x%08x\n",
 863			ARMV7_IDX_TO_COUNTER(cnt), val);
 864		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 865		pr_info("CNT[%d] evtsel=0x%08x\n",
 866			ARMV7_IDX_TO_COUNTER(cnt), val);
 867	}
 868}
 869#endif
 870
 871static void armv7pmu_enable_event(struct perf_event *event)
 872{
 873	struct hw_perf_event *hwc = &event->hw;
 874	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 875	int idx = hwc->idx;
 876
 877	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 878		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
 879			smp_processor_id(), idx);
 880		return;
 881	}
 882
 883	/*
 884	 * Enable counter and interrupt, and set the counter to count
 885	 * the event that we're interested in.
 886	 */
 887
 888	/*
 889	 * Disable counter
 890	 */
 891	armv7_pmnc_disable_counter(idx);
 892
 893	/*
 894	 * Set event (if destined for PMNx counters)
 895	 * We only need to set the event for the cycle counter if we
 896	 * have the ability to perform event filtering.
 897	 */
 898	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
 899		armv7_pmnc_write_evtsel(idx, hwc->config_base);
 900
 901	/*
 902	 * Enable interrupt for this counter
 903	 */
 904	armv7_pmnc_enable_intens(idx);
 905
 906	/*
 907	 * Enable counter
 908	 */
 909	armv7_pmnc_enable_counter(idx);
 910}
 911
 912static void armv7pmu_disable_event(struct perf_event *event)
 913{
 914	struct hw_perf_event *hwc = &event->hw;
 915	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 916	int idx = hwc->idx;
 917
 918	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 919		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
 920			smp_processor_id(), idx);
 921		return;
 922	}
 923
 924	/*
 925	 * Disable counter and interrupt
 926	 */
 927
 928	/*
 929	 * Disable counter
 930	 */
 931	armv7_pmnc_disable_counter(idx);
 932
 933	/*
 934	 * Disable interrupt for this counter
 935	 */
 936	armv7_pmnc_disable_intens(idx);
 937}
 938
 939static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
 940{
 941	u32 pmnc;
 942	struct perf_sample_data data;
 943	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
 944	struct pt_regs *regs;
 945	int idx;
 946
 947	/*
 948	 * Get and reset the IRQ flags
 949	 */
 950	pmnc = armv7_pmnc_getreset_flags();
 951
 952	/*
 953	 * Did an overflow occur?
 954	 */
 955	if (!armv7_pmnc_has_overflowed(pmnc))
 956		return IRQ_NONE;
 957
 958	/*
 959	 * Handle the counter(s) overflow(s)
 960	 */
 961	regs = get_irq_regs();
 962
 963	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 964		struct perf_event *event = cpuc->events[idx];
 965		struct hw_perf_event *hwc;
 966
 967		/* Ignore if we don't have an event. */
 968		if (!event)
 969			continue;
 970
 971		/*
 972		 * We have a single interrupt for all counters. Check that
 973		 * each counter has overflowed before we process it.
 974		 */
 975		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
 976			continue;
 977
 978		hwc = &event->hw;
 979		armpmu_event_update(event);
 980		perf_sample_data_init(&data, 0, hwc->last_period);
 981		if (!armpmu_event_set_period(event))
 982			continue;
 983
 984		if (perf_event_overflow(event, &data, regs))
 985			cpu_pmu->disable(event);
 986	}
 987
 988	/*
 989	 * Handle the pending perf events.
 990	 *
 991	 * Note: this call *must* be run with interrupts disabled. For
 992	 * platforms that can have the PMU interrupts raised as an NMI, this
 993	 * will not work.
 994	 */
 995	irq_work_run();
 996
 997	return IRQ_HANDLED;
 998}
 999
1000static void armv7pmu_start(struct arm_pmu *cpu_pmu)
1001{
1002	/* Enable all counters */
1003	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1004}
1005
1006static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
1007{
1008	/* Disable all counters */
1009	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1010}
1011
1012static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1013				  struct perf_event *event)
1014{
1015	int idx;
1016	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1017	struct hw_perf_event *hwc = &event->hw;
1018	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
1019
 1020	/* Always place a cycle-count event on the cycle counter. */
1021	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1022		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1023			return -EAGAIN;
1024
1025		return ARMV7_IDX_CYCLE_COUNTER;
1026	}
1027
1028	/*
 1029	 * For anything other than a cycle-count event, try to use
 1030	 * one of the event counters.
1031	 */
1032	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1033		if (!test_and_set_bit(idx, cpuc->used_mask))
1034			return idx;
1035	}
1036
1037	/* The counters are all in use. */
1038	return -EAGAIN;
1039}
1040
1041static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1042				     struct perf_event *event)
1043{
1044	clear_bit(event->hw.idx, cpuc->used_mask);
1045}
1046
1047/*
1048 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1049 */
1050static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1051				     struct perf_event_attr *attr)
1052{
1053	unsigned long config_base = 0;
1054
1055	if (attr->exclude_idle) {
1056		pr_debug("ARM performance counters do not support mode exclusion\n");
1057		return -EOPNOTSUPP;
1058	}
1059	if (attr->exclude_user)
1060		config_base |= ARMV7_EXCLUDE_USER;
1061	if (attr->exclude_kernel)
1062		config_base |= ARMV7_EXCLUDE_PL1;
1063	if (!attr->exclude_hv)
1064		config_base |= ARMV7_INCLUDE_HYP;
1065
1066	/*
1067	 * Install the filter into config_base as this is used to
1068	 * construct the event type.
1069	 */
1070	event->config_base = config_base;
1071
1072	return 0;
1073}
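/*
 * For example, an attr with exclude_user = 1 and exclude_kernel and
 * exclude_hv both clear yields config_base = ARMV7_EXCLUDE_USER |
 * ARMV7_INCLUDE_HYP.  The common arm_pmu code later combines this with
 * the mapped event number, and armv7pmu_enable_event() programs the
 * result through armv7_pmnc_write_evtsel().
 */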
1074
1075static void armv7pmu_reset(void *info)
1076{
1077	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
1078	u32 idx, nb_cnt = cpu_pmu->num_events, val;
1079
1080	if (cpu_pmu->secure_access) {
1081		asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
1082		val |= ARMV7_SDER_SUNIDEN;
1083		asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
1084	}
1085
1086	/* The counter and interrupt enable registers are unknown at reset. */
1087	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1088		armv7_pmnc_disable_counter(idx);
1089		armv7_pmnc_disable_intens(idx);
1090	}
1091
1092	/* Initialize & Reset PMNC: C and P bits */
1093	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1094}
1095
1096static int armv7_a8_map_event(struct perf_event *event)
1097{
1098	return armpmu_map_event(event, &armv7_a8_perf_map,
1099				&armv7_a8_perf_cache_map, 0xFF);
1100}
1101
1102static int armv7_a9_map_event(struct perf_event *event)
1103{
1104	return armpmu_map_event(event, &armv7_a9_perf_map,
1105				&armv7_a9_perf_cache_map, 0xFF);
1106}
1107
1108static int armv7_a5_map_event(struct perf_event *event)
1109{
1110	return armpmu_map_event(event, &armv7_a5_perf_map,
1111				&armv7_a5_perf_cache_map, 0xFF);
1112}
1113
1114static int armv7_a15_map_event(struct perf_event *event)
1115{
1116	return armpmu_map_event(event, &armv7_a15_perf_map,
1117				&armv7_a15_perf_cache_map, 0xFF);
1118}
1119
1120static int armv7_a7_map_event(struct perf_event *event)
1121{
1122	return armpmu_map_event(event, &armv7_a7_perf_map,
1123				&armv7_a7_perf_cache_map, 0xFF);
1124}
1125
1126static int armv7_a12_map_event(struct perf_event *event)
1127{
1128	return armpmu_map_event(event, &armv7_a12_perf_map,
1129				&armv7_a12_perf_cache_map, 0xFF);
1130}
1131
1132static int krait_map_event(struct perf_event *event)
1133{
1134	return armpmu_map_event(event, &krait_perf_map,
1135				&krait_perf_cache_map, 0xFFFFF);
1136}
1137
1138static int krait_map_event_no_branch(struct perf_event *event)
1139{
1140	return armpmu_map_event(event, &krait_perf_map_no_branch,
1141				&krait_perf_cache_map, 0xFFFFF);
1142}
1143
1144static int scorpion_map_event(struct perf_event *event)
1145{
1146	return armpmu_map_event(event, &scorpion_perf_map,
1147				&scorpion_perf_cache_map, 0xFFFFF);
1148}
1149
1150static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1151{
1152	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
1153	cpu_pmu->enable		= armv7pmu_enable_event;
1154	cpu_pmu->disable	= armv7pmu_disable_event;
1155	cpu_pmu->read_counter	= armv7pmu_read_counter;
1156	cpu_pmu->write_counter	= armv7pmu_write_counter;
1157	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
1158	cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
1159	cpu_pmu->start		= armv7pmu_start;
1160	cpu_pmu->stop		= armv7pmu_stop;
1161	cpu_pmu->reset		= armv7pmu_reset;
 1162}
1163
1164static void armv7_read_num_pmnc_events(void *info)
1165{
1166	int *nb_cnt = info;
1167
 1168	/* Read the number of CNTx counters supported from PMNC */
1169	*nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1170
1171	/* Add the CPU cycles counter */
1172	*nb_cnt += 1;
1173}
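/*
 * Worked example: a core whose PMNC N field reads back as 4 advertises
 * four CNTx event counters, so *nb_cnt ends up as 5 once the cycle
 * counter is added.
 */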
1174
1175static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
1176{
1177	return smp_call_function_any(&arm_pmu->supported_cpus,
1178				     armv7_read_num_pmnc_events,
1179				     &arm_pmu->num_events, 1);
1180}
1181
1182static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1183{
1184	armv7pmu_init(cpu_pmu);
1185	cpu_pmu->name		= "armv7_cortex_a8";
1186	cpu_pmu->map_event	= armv7_a8_map_event;
1187	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1188		&armv7_pmuv1_events_attr_group;
1189	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1190		&armv7_pmu_format_attr_group;
1191	return armv7_probe_num_events(cpu_pmu);
1192}
1193
1194static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1195{
1196	armv7pmu_init(cpu_pmu);
1197	cpu_pmu->name		= "armv7_cortex_a9";
1198	cpu_pmu->map_event	= armv7_a9_map_event;
1199	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1200		&armv7_pmuv1_events_attr_group;
1201	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1202		&armv7_pmu_format_attr_group;
1203	return armv7_probe_num_events(cpu_pmu);
1204}
1205
1206static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1207{
1208	armv7pmu_init(cpu_pmu);
1209	cpu_pmu->name		= "armv7_cortex_a5";
1210	cpu_pmu->map_event	= armv7_a5_map_event;
1211	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1212		&armv7_pmuv1_events_attr_group;
1213	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1214		&armv7_pmu_format_attr_group;
1215	return armv7_probe_num_events(cpu_pmu);
1216}
1217
1218static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1219{
1220	armv7pmu_init(cpu_pmu);
1221	cpu_pmu->name		= "armv7_cortex_a15";
1222	cpu_pmu->map_event	= armv7_a15_map_event;
1223	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1224	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1225		&armv7_pmuv2_events_attr_group;
1226	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1227		&armv7_pmu_format_attr_group;
1228	return armv7_probe_num_events(cpu_pmu);
1229}
1230
1231static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1232{
1233	armv7pmu_init(cpu_pmu);
1234	cpu_pmu->name		= "armv7_cortex_a7";
1235	cpu_pmu->map_event	= armv7_a7_map_event;
1236	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1237	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1238		&armv7_pmuv2_events_attr_group;
1239	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1240		&armv7_pmu_format_attr_group;
1241	return armv7_probe_num_events(cpu_pmu);
1242}
1243
1244static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1245{
1246	armv7pmu_init(cpu_pmu);
1247	cpu_pmu->name		= "armv7_cortex_a12";
1248	cpu_pmu->map_event	= armv7_a12_map_event;
1249	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1250	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1251		&armv7_pmuv2_events_attr_group;
1252	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1253		&armv7_pmu_format_attr_group;
1254	return armv7_probe_num_events(cpu_pmu);
1255}
1256
1257static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
1258{
1259	int ret = armv7_a12_pmu_init(cpu_pmu);
1260	cpu_pmu->name = "armv7_cortex_a17";
1261	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1262		&armv7_pmuv2_events_attr_group;
1263	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1264		&armv7_pmu_format_attr_group;
1265	return ret;
1266}
1267
1268/*
1269 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1270 *
1271 *            31   30     24     16     8      0
1272 *            +--------------------------------+
1273 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1274 *            +--------------------------------+
1275 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1276 *            +--------------------------------+
1277 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1278 *            +--------------------------------+
1279 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1280 *            +--------------------------------+
1281 *              EN | G=3  | G=2  | G=1  | G=0
1282 *
1283 *  Event Encoding:
1284 *
1285 *      hwc->config_base = 0xNRCCG
1286 *
1287 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1288 *      R  = region register
1289 *      CC = class of events the group G is choosing from
1290 *      G  = group or particular event
1291 *
1292 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1293 *
1294 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1295 *  unit, etc.) while the event code (CC) corresponds to a particular class of
1296 *  events (interrupts for example). An event code is broken down into
1297 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1298 *  example).
1299 */
1300
1301#define KRAIT_EVENT		(1 << 16)
1302#define VENUM_EVENT		(2 << 16)
1303#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
1304#define PMRESRn_EN		BIT(31)
1305
1306#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
1307#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
1308#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
 1309#define EVENT_VENUM(event)	(!!((event) & VENUM_EVENT))	/* N=2 */
 1310#define EVENT_CPU(event)	(!!((event) & KRAIT_EVENT))	/* N=1 */
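/*
 * For illustration, the example encoding 0x12021 quoted above decodes
 * through these macros as:
 *
 *	EVENT_REGION(0x12021) = 2     -> PMRESR2
 *	EVENT_CODE(0x12021)   = 0x02  -> event class code
 *	EVENT_GROUP(0x12021)  = 1     -> group 1 within the region
 *	EVENT_CPU(0x12021)    = true  -> Krait CPU event (N = 1)
 *	EVENT_VENUM(0x12021)  = false
 */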
1311
1312static u32 krait_read_pmresrn(int n)
1313{
1314	u32 val;
1315
1316	switch (n) {
1317	case 0:
1318		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
1319		break;
1320	case 1:
1321		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
1322		break;
1323	case 2:
1324		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
1325		break;
1326	default:
1327		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1328	}
1329
1330	return val;
1331}
1332
1333static void krait_write_pmresrn(int n, u32 val)
1334{
1335	switch (n) {
1336	case 0:
1337		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
1338		break;
1339	case 1:
1340		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
1341		break;
1342	case 2:
1343		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
1344		break;
1345	default:
1346		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1347	}
1348}
1349
1350static u32 venum_read_pmresr(void)
1351{
1352	u32 val;
1353	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1354	return val;
1355}
1356
1357static void venum_write_pmresr(u32 val)
1358{
1359	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1360}
1361
1362static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
1363{
1364	u32 venum_new_val;
1365	u32 fp_new_val;
1366
1367	BUG_ON(preemptible());
1368	/* CPACR Enable CP10 and CP11 access */
1369	*venum_orig_val = get_copro_access();
1370	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
1371	set_copro_access(venum_new_val);
1372
1373	/* Enable FPEXC */
1374	*fp_orig_val = fmrx(FPEXC);
1375	fp_new_val = *fp_orig_val | FPEXC_EN;
1376	fmxr(FPEXC, fp_new_val);
1377}
1378
1379static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
1380{
1381	BUG_ON(preemptible());
1382	/* Restore FPEXC */
1383	fmxr(FPEXC, fp_orig_val);
1384	isb();
1385	/* Restore CPACR */
1386	set_copro_access(venum_orig_val);
1387}
1388
1389static u32 krait_get_pmresrn_event(unsigned int region)
1390{
1391	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
1392					     KRAIT_PMRESR1_GROUP0,
1393					     KRAIT_PMRESR2_GROUP0 };
1394	return pmresrn_table[region];
1395}
1396
1397static void krait_evt_setup(int idx, u32 config_base)
1398{
1399	u32 val;
1400	u32 mask;
1401	u32 vval, fval;
1402	unsigned int region = EVENT_REGION(config_base);
1403	unsigned int group = EVENT_GROUP(config_base);
1404	unsigned int code = EVENT_CODE(config_base);
1405	unsigned int group_shift;
1406	bool venum_event = EVENT_VENUM(config_base);
1407
1408	group_shift = group * 8;
1409	mask = 0xff << group_shift;
1410
1411	/* Configure evtsel for the region and group */
1412	if (venum_event)
1413		val = KRAIT_VPMRESR0_GROUP0;
1414	else
1415		val = krait_get_pmresrn_event(region);
1416	val += group;
1417	/* Mix in mode-exclusion bits */
1418	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1419	armv7_pmnc_write_evtsel(idx, val);
1420
1421	if (venum_event) {
1422		venum_pre_pmresr(&vval, &fval);
1423		val = venum_read_pmresr();
1424		val &= ~mask;
1425		val |= code << group_shift;
1426		val |= PMRESRn_EN;
1427		venum_write_pmresr(val);
1428		venum_post_pmresr(vval, fval);
1429	} else {
1430		val = krait_read_pmresrn(region);
1431		val &= ~mask;
1432		val |= code << group_shift;
1433		val |= PMRESRn_EN;
1434		krait_write_pmresrn(region, val);
1435	}
1436}
1437
1438static u32 clear_pmresrn_group(u32 val, int group)
1439{
1440	u32 mask;
1441	int group_shift;
1442
1443	group_shift = group * 8;
1444	mask = 0xff << group_shift;
1445	val &= ~mask;
1446
 1447	/* Keep the enable bit set while any other group in the region is in use */
1448	if (val & ~PMRESRn_EN)
1449		return val |= PMRESRn_EN;
1450
1451	return 0;
1452}
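/*
 * For example, clearing group 1 while group 3 of the same region is
 * still programmed keeps the region enabled (EN stays set); clearing
 * the last remaining group returns 0, disabling the region entirely.
 */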
1453
1454static void krait_clearpmu(u32 config_base)
1455{
1456	u32 val;
1457	u32 vval, fval;
1458	unsigned int region = EVENT_REGION(config_base);
1459	unsigned int group = EVENT_GROUP(config_base);
1460	bool venum_event = EVENT_VENUM(config_base);
1461
1462	if (venum_event) {
1463		venum_pre_pmresr(&vval, &fval);
1464		val = venum_read_pmresr();
1465		val = clear_pmresrn_group(val, group);
1466		venum_write_pmresr(val);
1467		venum_post_pmresr(vval, fval);
1468	} else {
1469		val = krait_read_pmresrn(region);
1470		val = clear_pmresrn_group(val, group);
1471		krait_write_pmresrn(region, val);
1472	}
1473}
1474
1475static void krait_pmu_disable_event(struct perf_event *event)
1476{
1477	struct hw_perf_event *hwc = &event->hw;
1478	int idx = hwc->idx;
1479
1480	/* Disable counter and interrupt */
1481
1482	/* Disable counter */
1483	armv7_pmnc_disable_counter(idx);
1484
1485	/*
1486	 * Clear pmresr code (if destined for PMNx counters)
1487	 */
1488	if (hwc->config_base & KRAIT_EVENT_MASK)
1489		krait_clearpmu(hwc->config_base);
1490
1491	/* Disable interrupt for this counter */
1492	armv7_pmnc_disable_intens(idx);
1493}
1494
1495static void krait_pmu_enable_event(struct perf_event *event)
1496{
1497	struct hw_perf_event *hwc = &event->hw;
1498	int idx = hwc->idx;
1499
1500	/*
1501	 * Enable counter and interrupt, and set the counter to count
1502	 * the event that we're interested in.
1503	 */
1504
1505	/* Disable counter */
1506	armv7_pmnc_disable_counter(idx);
1507
1508	/*
1509	 * Set event (if destined for PMNx counters)
1510	 * We set the event for the cycle counter because we
1511	 * have the ability to perform event filtering.
1512	 */
1513	if (hwc->config_base & KRAIT_EVENT_MASK)
1514		krait_evt_setup(idx, hwc->config_base);
1515	else
1516		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1517
1518	/* Enable interrupt for this counter */
1519	armv7_pmnc_enable_intens(idx);
1520
1521	/* Enable counter */
1522	armv7_pmnc_enable_counter(idx);
1523}
1524
1525static void krait_pmu_reset(void *info)
1526{
1527	u32 vval, fval;
1528	struct arm_pmu *cpu_pmu = info;
1529	u32 idx, nb_cnt = cpu_pmu->num_events;
1530
1531	armv7pmu_reset(info);
1532
1533	/* Clear all pmresrs */
1534	krait_write_pmresrn(0, 0);
1535	krait_write_pmresrn(1, 0);
1536	krait_write_pmresrn(2, 0);
1537
1538	venum_pre_pmresr(&vval, &fval);
1539	venum_write_pmresr(0);
1540	venum_post_pmresr(vval, fval);
1541
1542	/* Reset PMxEVNCTCR to sane default */
1543	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1544		armv7_pmnc_select_counter(idx);
1545		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1546	}
1547
1548}
1549
1550static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1551			      unsigned int group)
1552{
1553	int bit;
1554	struct hw_perf_event *hwc = &event->hw;
1555	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1556
1557	if (hwc->config_base & VENUM_EVENT)
1558		bit = KRAIT_VPMRESR0_GROUP0;
1559	else
1560		bit = krait_get_pmresrn_event(region);
1561	bit -= krait_get_pmresrn_event(0);
1562	bit += group;
1563	/*
1564	 * Lower bits are reserved for use by the counters (see
1565	 * armv7pmu_get_event_idx() for more info)
1566	 */
1567	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1568
1569	return bit;
1570}
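/*
 * The bit returned here lands just above the used_mask bits that track
 * the hardware counters themselves: assuming the ARMV7_IDX_* layout
 * defined earlier in this file, a PMU with a cycle counter plus four
 * event counters uses bits 0-4 for counters, so the first column
 * exclusion bit would be bit 5.
 */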
1571
1572/*
1573 * We check for column exclusion constraints here.
 1574 * Two events can't use the same group within a pmresr register.
1575 */
1576static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1577				   struct perf_event *event)
1578{
1579	int idx;
1580	int bit = -1;
1581	struct hw_perf_event *hwc = &event->hw;
1582	unsigned int region = EVENT_REGION(hwc->config_base);
1583	unsigned int code = EVENT_CODE(hwc->config_base);
1584	unsigned int group = EVENT_GROUP(hwc->config_base);
1585	bool venum_event = EVENT_VENUM(hwc->config_base);
1586	bool krait_event = EVENT_CPU(hwc->config_base);
1587
1588	if (venum_event || krait_event) {
1589		/* Ignore invalid events */
1590		if (group > 3 || region > 2)
1591			return -EINVAL;
1592		if (venum_event && (code & 0xe0))
1593			return -EINVAL;
1594
1595		bit = krait_event_to_bit(event, region, group);
1596		if (test_and_set_bit(bit, cpuc->used_mask))
1597			return -EAGAIN;
1598	}
1599
1600	idx = armv7pmu_get_event_idx(cpuc, event);
1601	if (idx < 0 && bit >= 0)
1602		clear_bit(bit, cpuc->used_mask);
1603
1604	return idx;
1605}
1606
1607static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1608				      struct perf_event *event)
1609{
1610	int bit;
1611	struct hw_perf_event *hwc = &event->hw;
1612	unsigned int region = EVENT_REGION(hwc->config_base);
1613	unsigned int group = EVENT_GROUP(hwc->config_base);
1614	bool venum_event = EVENT_VENUM(hwc->config_base);
1615	bool krait_event = EVENT_CPU(hwc->config_base);
1616
1617	armv7pmu_clear_event_idx(cpuc, event);
1618	if (venum_event || krait_event) {
1619		bit = krait_event_to_bit(event, region, group);
1620		clear_bit(bit, cpuc->used_mask);
1621	}
1622}
1623
1624static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1625{
1626	armv7pmu_init(cpu_pmu);
1627	cpu_pmu->name		= "armv7_krait";
1628	/* Some early versions of Krait don't support PC write events */
1629	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
1630				  "qcom,no-pc-write"))
1631		cpu_pmu->map_event = krait_map_event_no_branch;
1632	else
1633		cpu_pmu->map_event = krait_map_event;
1634	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1635	cpu_pmu->reset		= krait_pmu_reset;
1636	cpu_pmu->enable		= krait_pmu_enable_event;
1637	cpu_pmu->disable	= krait_pmu_disable_event;
1638	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
1639	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1640	return armv7_probe_num_events(cpu_pmu);
1641}
1642
1643/*
1644 * Scorpion Local Performance Monitor Register (LPMn)
1645 *
1646 *            31   30     24     16     8      0
1647 *            +--------------------------------+
1648 *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1649 *            +--------------------------------+
1650 *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1651 *            +--------------------------------+
1652 *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1653 *            +--------------------------------+
1654 *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
1655 *            +--------------------------------+
1656 *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1657 *            +--------------------------------+
1658 *              EN | G=3  | G=2  | G=1  | G=0
1659 *
1660 *
1661 *  Event Encoding:
1662 *
1663 *      hwc->config_base = 0xNRCCG
1664 *
1665 *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
1666 *      R  = region register
1667 *      CC = class of events the group G is choosing from
1668 *      G  = group or particular event
1669 *
1670 *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1671 *
1672 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1673 *  unit, etc.) while the event code (CC) corresponds to a particular class of
1674 *  events (interrupts for example). An event code is broken down into
1675 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1676 *  example).
1677 */
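/*
 * Scorpion reuses the EVENT_REGION/EVENT_CODE/EVENT_GROUP/EVENT_VENUM
 * decoding defined above for Krait, so the example 0x12021 selects
 * group 1 of LPM2 with event code 0x02, while an N = 2 (VENUM_EVENT)
 * prefix routes the programming to VLPM via the venum_*_pmresr
 * helpers.
 */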
1678
1679static u32 scorpion_read_pmresrn(int n)
1680{
1681	u32 val;
1682
1683	switch (n) {
1684	case 0:
1685		asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
1686		break;
1687	case 1:
1688		asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
1689		break;
1690	case 2:
1691		asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
1692		break;
1693	case 3:
1694		asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
1695		break;
1696	default:
1697		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1698	}
1699
1700	return val;
1701}
1702
1703static void scorpion_write_pmresrn(int n, u32 val)
1704{
1705	switch (n) {
1706	case 0:
1707		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
1708		break;
1709	case 1:
1710		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
1711		break;
1712	case 2:
1713		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
1714		break;
1715	case 3:
1716		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
1717		break;
1718	default:
1719		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1720	}
1721}
1722
1723static u32 scorpion_get_pmresrn_event(unsigned int region)
1724{
1725	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
1726					     SCORPION_LPM1_GROUP0,
1727					     SCORPION_LPM2_GROUP0,
1728					     SCORPION_L2LPM_GROUP0 };
1729	return pmresrn_table[region];
1730}
1731
1732static void scorpion_evt_setup(int idx, u32 config_base)
1733{
1734	u32 val;
1735	u32 mask;
1736	u32 vval, fval;
1737	unsigned int region = EVENT_REGION(config_base);
1738	unsigned int group = EVENT_GROUP(config_base);
1739	unsigned int code = EVENT_CODE(config_base);
1740	unsigned int group_shift;
1741	bool venum_event = EVENT_VENUM(config_base);
1742
1743	group_shift = group * 8;
1744	mask = 0xff << group_shift;
1745
1746	/* Configure evtsel for the region and group */
1747	if (venum_event)
1748		val = SCORPION_VLPM_GROUP0;
1749	else
1750		val = scorpion_get_pmresrn_event(region);
1751	val += group;
1752	/* Mix in mode-exclusion bits */
1753	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1754	armv7_pmnc_write_evtsel(idx, val);
1755
1756	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1757
1758	if (venum_event) {
1759		venum_pre_pmresr(&vval, &fval);
1760		val = venum_read_pmresr();
1761		val &= ~mask;
1762		val |= code << group_shift;
1763		val |= PMRESRn_EN;
1764		venum_write_pmresr(val);
1765		venum_post_pmresr(vval, fval);
1766	} else {
1767		val = scorpion_read_pmresrn(region);
1768		val &= ~mask;
1769		val |= code << group_shift;
1770		val |= PMRESRn_EN;
1771		scorpion_write_pmresrn(region, val);
1772	}
1773}
1774
1775static void scorpion_clearpmu(u32 config_base)
1776{
1777	u32 val;
1778	u32 vval, fval;
1779	unsigned int region = EVENT_REGION(config_base);
1780	unsigned int group = EVENT_GROUP(config_base);
1781	bool venum_event = EVENT_VENUM(config_base);
1782
1783	if (venum_event) {
1784		venum_pre_pmresr(&vval, &fval);
1785		val = venum_read_pmresr();
1786		val = clear_pmresrn_group(val, group);
1787		venum_write_pmresr(val);
1788		venum_post_pmresr(vval, fval);
1789	} else {
1790		val = scorpion_read_pmresrn(region);
1791		val = clear_pmresrn_group(val, group);
1792		scorpion_write_pmresrn(region, val);
1793	}
1794}
1795
1796static void scorpion_pmu_disable_event(struct perf_event *event)
1797{
1798	struct hw_perf_event *hwc = &event->hw;
1799	int idx = hwc->idx;
1800
1801	/* Disable counter and interrupt */
1802
1803	/* Disable counter */
1804	armv7_pmnc_disable_counter(idx);
1805
1806	/*
1807	 * Clear pmresr code (if destined for PMNx counters)
1808	 */
1809	if (hwc->config_base & KRAIT_EVENT_MASK)
1810		scorpion_clearpmu(hwc->config_base);
1811
1812	/* Disable interrupt for this counter */
1813	armv7_pmnc_disable_intens(idx);
1814}
1815
1816static void scorpion_pmu_enable_event(struct perf_event *event)
1817{
1818	struct hw_perf_event *hwc = &event->hw;
1819	int idx = hwc->idx;
1820
1821	/*
1822	 * Enable counter and interrupt, and set the counter to count
1823	 * the event that we're interested in.
1824	 */
1825
1826	/* Disable counter */
1827	armv7_pmnc_disable_counter(idx);
1828
1829	/*
1830	 * Set event (if destined for PMNx counters)
1831	 * We don't set the event for the cycle counter because we
1832	 * don't have the ability to perform event filtering.
1833	 */
1834	if (hwc->config_base & KRAIT_EVENT_MASK)
1835		scorpion_evt_setup(idx, hwc->config_base);
1836	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
1837		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1838
1839	/* Enable interrupt for this counter */
1840	armv7_pmnc_enable_intens(idx);
1841
1842	/* Enable counter */
1843	armv7_pmnc_enable_counter(idx);
1844}
1845
1846static void scorpion_pmu_reset(void *info)
1847{
1848	u32 vval, fval;
1849	struct arm_pmu *cpu_pmu = info;
1850	u32 idx, nb_cnt = cpu_pmu->num_events;
1851
1852	armv7pmu_reset(info);
1853
1854	/* Clear all pmresrs */
1855	scorpion_write_pmresrn(0, 0);
1856	scorpion_write_pmresrn(1, 0);
1857	scorpion_write_pmresrn(2, 0);
1858	scorpion_write_pmresrn(3, 0);
1859
1860	venum_pre_pmresr(&vval, &fval);
1861	venum_write_pmresr(0);
1862	venum_post_pmresr(vval, fval);
1863
1864	/* Reset PMxEVNCTCR to sane default */
1865	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1866		armv7_pmnc_select_counter(idx);
1867		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1868	}
1869}
1870
1871static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
1872			      unsigned int group)
1873{
1874	int bit;
1875	struct hw_perf_event *hwc = &event->hw;
1876	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1877
1878	if (hwc->config_base & VENUM_EVENT)
1879		bit = SCORPION_VLPM_GROUP0;
1880	else
1881		bit = scorpion_get_pmresrn_event(region);
1882	bit -= scorpion_get_pmresrn_event(0);
1883	bit += group;
1884	/*
1885	 * Lower bits are reserved for use by the counters (see
1886	 * armv7pmu_get_event_idx() for more info)
1887	 */
1888	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1889
1890	return bit;
1891}
1892
1893/*
1894 * We check for column exclusion constraints here.
 1895 * Two events can't use the same group within a pmresr register.
1896 */
1897static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1898				   struct perf_event *event)
1899{
1900	int idx;
1901	int bit = -1;
1902	struct hw_perf_event *hwc = &event->hw;
1903	unsigned int region = EVENT_REGION(hwc->config_base);
1904	unsigned int group = EVENT_GROUP(hwc->config_base);
1905	bool venum_event = EVENT_VENUM(hwc->config_base);
1906	bool scorpion_event = EVENT_CPU(hwc->config_base);
1907
1908	if (venum_event || scorpion_event) {
1909		/* Ignore invalid events */
1910		if (group > 3 || region > 3)
1911			return -EINVAL;
1912
1913		bit = scorpion_event_to_bit(event, region, group);
1914		if (test_and_set_bit(bit, cpuc->used_mask))
1915			return -EAGAIN;
1916	}
1917
1918	idx = armv7pmu_get_event_idx(cpuc, event);
1919	if (idx < 0 && bit >= 0)
1920		clear_bit(bit, cpuc->used_mask);
1921
1922	return idx;
1923}
1924
1925static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1926				      struct perf_event *event)
1927{
1928	int bit;
1929	struct hw_perf_event *hwc = &event->hw;
1930	unsigned int region = EVENT_REGION(hwc->config_base);
1931	unsigned int group = EVENT_GROUP(hwc->config_base);
1932	bool venum_event = EVENT_VENUM(hwc->config_base);
1933	bool scorpion_event = EVENT_CPU(hwc->config_base);
1934
1935	armv7pmu_clear_event_idx(cpuc, event);
1936	if (venum_event || scorpion_event) {
1937		bit = scorpion_event_to_bit(event, region, group);
1938		clear_bit(bit, cpuc->used_mask);
1939	}
1940}
1941
1942static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1943{
1944	armv7pmu_init(cpu_pmu);
1945	cpu_pmu->name		= "armv7_scorpion";
1946	cpu_pmu->map_event	= scorpion_map_event;
1947	cpu_pmu->reset		= scorpion_pmu_reset;
1948	cpu_pmu->enable		= scorpion_pmu_enable_event;
1949	cpu_pmu->disable	= scorpion_pmu_disable_event;
1950	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
1951	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1952	return armv7_probe_num_events(cpu_pmu);
1953}
1954
1955static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1956{
1957	armv7pmu_init(cpu_pmu);
1958	cpu_pmu->name		= "armv7_scorpion_mp";
1959	cpu_pmu->map_event	= scorpion_map_event;
1960	cpu_pmu->reset		= scorpion_pmu_reset;
1961	cpu_pmu->enable		= scorpion_pmu_enable_event;
1962	cpu_pmu->disable	= scorpion_pmu_disable_event;
1963	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
1964	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1965	return armv7_probe_num_events(cpu_pmu);
1966}
1967
1968static const struct of_device_id armv7_pmu_of_device_ids[] = {
1969	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
1970	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
1971	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
1972	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
1973	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
1974	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
1975	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
1976	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
1977	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
1978	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
1979	{},
1980};
1981
1982static const struct pmu_probe_info armv7_pmu_probe_table[] = {
1983	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
1984	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
1985	{ /* sentinel value */ }
1986};
1987
1988
1989static int armv7_pmu_device_probe(struct platform_device *pdev)
1990{
1991	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
1992				    armv7_pmu_probe_table);
1993}
1994
1995static struct platform_driver armv7_pmu_driver = {
1996	.driver		= {
1997		.name	= "armv7-pmu",
1998		.of_match_table = armv7_pmu_of_device_ids,
1999		.suppress_bind_attrs = true,
2000	},
2001	.probe		= armv7_pmu_device_probe,
2002};
2003
2004builtin_platform_driver(armv7_pmu_driver);
2005#endif	/* CONFIG_CPU_V7 */