v3.1
   1/*
   2 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
   3 *
   4 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
   5 * 2010 (c) MontaVista Software, LLC.
   6 *
   7 * Copied from ARMv6 code, with the low level code inspired
   8 *  by the ARMv7 Oprofile code.
   9 *
  10 * Cortex-A8 has up to 4 configurable performance counters and
  11 *  a single cycle counter.
  12 * Cortex-A9 has up to 31 configurable performance counters and
  13 *  a single cycle counter.
  14 *
  15 * All counters can be enabled/disabled and IRQ masked separately. The cycle
  16 *  counter and all 4 performance counters together can be reset separately.
  17 */
  18
  19#ifdef CONFIG_CPU_V7
  20/*
  21 * Common ARMv7 event types
  22 *
  23 * Note: An implementation may not be able to count all of these events
  24 * but the encodings are considered to be `reserved' in the case that
  25 * they are not available.
  26 */
  27enum armv7_perf_types {
  28	ARMV7_PERFCTR_PMNC_SW_INCR		= 0x00,
  29	ARMV7_PERFCTR_IFETCH_MISS		= 0x01,
  30	ARMV7_PERFCTR_ITLB_MISS			= 0x02,
  31	ARMV7_PERFCTR_DCACHE_REFILL		= 0x03,	/* L1 */
  32	ARMV7_PERFCTR_DCACHE_ACCESS		= 0x04,	/* L1 */
  33	ARMV7_PERFCTR_DTLB_REFILL		= 0x05,
  34	ARMV7_PERFCTR_DREAD			= 0x06,
  35	ARMV7_PERFCTR_DWRITE			= 0x07,
  36	ARMV7_PERFCTR_INSTR_EXECUTED		= 0x08,
  37	ARMV7_PERFCTR_EXC_TAKEN			= 0x09,
  38	ARMV7_PERFCTR_EXC_EXECUTED		= 0x0A,
  39	ARMV7_PERFCTR_CID_WRITE			= 0x0B,
  40	/* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
  41	 * It counts:
  42	 *  - all branch instructions,
  43	 *  - instructions that explicitly write the PC,
  44	 *  - exception generating instructions.
  45	 */
  46	ARMV7_PERFCTR_PC_WRITE			= 0x0C,
  47	ARMV7_PERFCTR_PC_IMM_BRANCH		= 0x0D,
  48	ARMV7_PERFCTR_PC_PROC_RETURN		= 0x0E,
  49	ARMV7_PERFCTR_UNALIGNED_ACCESS		= 0x0F,
  50
  51	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
  52	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED	= 0x10,
  53	ARMV7_PERFCTR_CLOCK_CYCLES		= 0x11,
  54	ARMV7_PERFCTR_PC_BRANCH_PRED		= 0x12,
  55	ARMV7_PERFCTR_MEM_ACCESS		= 0x13,
  56	ARMV7_PERFCTR_L1_ICACHE_ACCESS		= 0x14,
  57	ARMV7_PERFCTR_L1_DCACHE_WB		= 0x15,
  58	ARMV7_PERFCTR_L2_DCACHE_ACCESS		= 0x16,
  59	ARMV7_PERFCTR_L2_DCACHE_REFILL		= 0x17,
  60	ARMV7_PERFCTR_L2_DCACHE_WB		= 0x18,
  61	ARMV7_PERFCTR_BUS_ACCESS		= 0x19,
  62	ARMV7_PERFCTR_MEMORY_ERROR		= 0x1A,
  63	ARMV7_PERFCTR_INSTR_SPEC		= 0x1B,
  64	ARMV7_PERFCTR_TTBR_WRITE		= 0x1C,
  65	ARMV7_PERFCTR_BUS_CYCLES		= 0x1D,
  66
  67	ARMV7_PERFCTR_CPU_CYCLES		= 0xFF
  68};
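/*
 * These encodings are what gets programmed into a counter's event-type
 * register via armv7_pmnc_write_evtsel() below. They can also be requested
 * from userspace as raw events; for example, ARMV7_PERFCTR_INSTR_EXECUTED
 * (0x08) roughly corresponds to "perf stat -e r008" on an ARMv7 PMU.
 */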
  69
  70/* ARMv7 Cortex-A8 specific event types */
  71enum armv7_a8_perf_types {
  72	ARMV7_PERFCTR_WRITE_BUFFER_FULL		= 0x40,
  73	ARMV7_PERFCTR_L2_STORE_MERGED		= 0x41,
  74	ARMV7_PERFCTR_L2_STORE_BUFF		= 0x42,
  75	ARMV7_PERFCTR_L2_ACCESS			= 0x43,
  76	ARMV7_PERFCTR_L2_CACH_MISS		= 0x44,
  77	ARMV7_PERFCTR_AXI_READ_CYCLES		= 0x45,
  78	ARMV7_PERFCTR_AXI_WRITE_CYCLES		= 0x46,
  79	ARMV7_PERFCTR_MEMORY_REPLAY		= 0x47,
  80	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY	= 0x48,
  81	ARMV7_PERFCTR_L1_DATA_MISS		= 0x49,
  82	ARMV7_PERFCTR_L1_INST_MISS		= 0x4A,
  83	ARMV7_PERFCTR_L1_DATA_COLORING		= 0x4B,
  84	ARMV7_PERFCTR_L1_NEON_DATA		= 0x4C,
  85	ARMV7_PERFCTR_L1_NEON_CACH_DATA		= 0x4D,
  86	ARMV7_PERFCTR_L2_NEON			= 0x4E,
  87	ARMV7_PERFCTR_L2_NEON_HIT		= 0x4F,
  88	ARMV7_PERFCTR_L1_INST			= 0x50,
  89	ARMV7_PERFCTR_PC_RETURN_MIS_PRED	= 0x51,
  90	ARMV7_PERFCTR_PC_BRANCH_FAILED		= 0x52,
  91	ARMV7_PERFCTR_PC_BRANCH_TAKEN		= 0x53,
  92	ARMV7_PERFCTR_PC_BRANCH_EXECUTED	= 0x54,
  93	ARMV7_PERFCTR_OP_EXECUTED		= 0x55,
  94	ARMV7_PERFCTR_CYCLES_INST_STALL		= 0x56,
  95	ARMV7_PERFCTR_CYCLES_INST		= 0x57,
  96	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL	= 0x58,
  97	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL	= 0x59,
  98	ARMV7_PERFCTR_NEON_CYCLES		= 0x5A,
  99
 100	ARMV7_PERFCTR_PMU0_EVENTS		= 0x70,
 101	ARMV7_PERFCTR_PMU1_EVENTS		= 0x71,
 102	ARMV7_PERFCTR_PMU_EVENTS		= 0x72,
 103};
 104
 105/* ARMv7 Cortex-A9 specific event types */
 106enum armv7_a9_perf_types {
 107	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC	= 0x40,
 108	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC	= 0x41,
 109	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC	= 0x42,
 110
 111	ARMV7_PERFCTR_COHERENT_LINE_MISS	= 0x50,
 112	ARMV7_PERFCTR_COHERENT_LINE_HIT		= 0x51,
 113
 114	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES	= 0x60,
 115	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES	= 0x61,
 116	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES	= 0x62,
 117	ARMV7_PERFCTR_STREX_EXECUTED_PASSED	= 0x63,
 118	ARMV7_PERFCTR_STREX_EXECUTED_FAILED	= 0x64,
 119	ARMV7_PERFCTR_DATA_EVICTION		= 0x65,
 120	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST	= 0x66,
 121	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY		= 0x67,
 122	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE	= 0x68,
 123
 124	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS	= 0x6E,
 125
 126	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST	= 0x70,
 127	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST	= 0x71,
 128	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST	= 0x72,
 129	ARMV7_PERFCTR_FP_EXECUTED_INST		= 0x73,
 130	ARMV7_PERFCTR_NEON_EXECUTED_INST	= 0x74,
 131
 132	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES	= 0x80,
 133	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES	= 0x81,
 134	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES	= 0x82,
 135	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES	= 0x83,
 136	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES	= 0x84,
 137	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES	= 0x85,
 138	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES	= 0x86,
 139
 140	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES	= 0x8A,
 141	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES	= 0x8B,
 142
 143	ARMV7_PERFCTR_ISB_INST			= 0x90,
 144	ARMV7_PERFCTR_DSB_INST			= 0x91,
 145	ARMV7_PERFCTR_DMB_INST			= 0x92,
 146	ARMV7_PERFCTR_EXT_INTERRUPTS		= 0x93,
 147
 148	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED	= 0xA0,
 149	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED	= 0xA1,
 150	ARMV7_PERFCTR_PLE_FIFO_FLUSH		= 0xA2,
 151	ARMV7_PERFCTR_PLE_RQST_COMPLETED	= 0xA3,
 152	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW		= 0xA4,
 153	ARMV7_PERFCTR_PLE_RQST_PROG		= 0xA5
 154};
 155
 156/* ARMv7 Cortex-A5 specific event types */
 157enum armv7_a5_perf_types {
 158	ARMV7_PERFCTR_IRQ_TAKEN			= 0x86,
 159	ARMV7_PERFCTR_FIQ_TAKEN			= 0x87,
 160
 161	ARMV7_PERFCTR_EXT_MEM_RQST		= 0xc0,
 162	ARMV7_PERFCTR_NC_EXT_MEM_RQST		= 0xc1,
 163	ARMV7_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
 164	ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP	= 0xc3,
 165	ARMV7_PERFCTR_ENTER_READ_ALLOC		= 0xc4,
 166	ARMV7_PERFCTR_READ_ALLOC		= 0xc5,
 167
 168	ARMV7_PERFCTR_STALL_SB_FULL		= 0xc9,
 169};
 170
 171/* ARMv7 Cortex-A15 specific event types */
 172enum armv7_a15_perf_types {
 173	ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS	= 0x40,
 174	ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS	= 0x41,
 175	ARMV7_PERFCTR_L1_DCACHE_READ_REFILL	= 0x42,
 176	ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL	= 0x43,
 177
 178	ARMV7_PERFCTR_L1_DTLB_READ_REFILL	= 0x4C,
 179	ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL	= 0x4D,
 180
 181	ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS	= 0x50,
 182	ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS	= 0x51,
 183	ARMV7_PERFCTR_L2_DCACHE_READ_REFILL	= 0x52,
 184	ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL	= 0x53,
 185
 186	ARMV7_PERFCTR_SPEC_PC_WRITE		= 0x76,
 187};
 188
 189/*
 190 * Cortex-A8 HW events mapping
 191 *
 192 * The hardware events that we support. We do support cache operations but
 193 * we have harvard caches and no way to combine instruction and data
 194 * accesses/misses in hardware.
 195 */
 196static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
 197	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 198	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 199	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
 200	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
 201	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 202	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 203	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 204};
 205
 206static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 207					  [PERF_COUNT_HW_CACHE_OP_MAX]
 208					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 209	[C(L1D)] = {
 210		/*
 211		 * The performance counters don't differentiate between read
 212		 * and write accesses/misses so this isn't strictly correct,
 213		 * but it's the best we can do. Writes and reads get
 214		 * combined.
 215		 */
 216		[C(OP_READ)] = {
 217			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
 218			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
 219		},
 220		[C(OP_WRITE)] = {
 221			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
 222			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
 223		},
 224		[C(OP_PREFETCH)] = {
 225			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 226			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 227		},
 228	},
 229	[C(L1I)] = {
 230		[C(OP_READ)] = {
 231			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
 232			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
 233		},
 234		[C(OP_WRITE)] = {
 235			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
 236			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
 237		},
 238		[C(OP_PREFETCH)] = {
 239			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 240			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 241		},
 242	},
 243	[C(LL)] = {
 244		[C(OP_READ)] = {
 245			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
 246			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
 247		},
 248		[C(OP_WRITE)] = {
 249			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
 250			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
 251		},
 252		[C(OP_PREFETCH)] = {
 253			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 254			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 255		},
 256	},
 257	[C(DTLB)] = {
 258		[C(OP_READ)] = {
 259			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 260			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 261		},
 262		[C(OP_WRITE)] = {
 263			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 264			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 265		},
 266		[C(OP_PREFETCH)] = {
 267			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 268			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 269		},
 270	},
 271	[C(ITLB)] = {
 272		[C(OP_READ)] = {
 273			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 274			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 275		},
 276		[C(OP_WRITE)] = {
 277			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 278			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 279		},
 280		[C(OP_PREFETCH)] = {
 281			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 282			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 283		},
 284	},
 285	[C(BPU)] = {
 286		[C(OP_READ)] = {
 287			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
 288			[C(RESULT_MISS)]
 289					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 290		},
 291		[C(OP_WRITE)] = {
 292			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
 293			[C(RESULT_MISS)]
 294					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 295		},
 296		[C(OP_PREFETCH)] = {
 297			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 298			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 299		},
 300	},
 301	[C(NODE)] = {
 302		[C(OP_READ)] = {
 303			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 304			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 305		},
 306		[C(OP_WRITE)] = {
 307			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 308			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 309		},
 310		[C(OP_PREFETCH)] = {
 311			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 312			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 313		},
 314	},
 315};
 316
 317/*
 318 * Cortex-A9 HW events mapping
 319 */
 320static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 321	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 322	[PERF_COUNT_HW_INSTRUCTIONS]	    =
 323					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
 324	[PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_DCACHE_ACCESS,
 325	[PERF_COUNT_HW_CACHE_MISSES]	    = ARMV7_PERFCTR_DCACHE_REFILL,
 326	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 327	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 328	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 329};
 330
 331static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 332					  [PERF_COUNT_HW_CACHE_OP_MAX]
 333					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 334	[C(L1D)] = {
 335		/*
 336		 * The performance counters don't differentiate between read
 337		 * and write accesses/misses so this isn't strictly correct,
 338		 * but it's the best we can do. Writes and reads get
 339		 * combined.
 340		 */
 341		[C(OP_READ)] = {
 342			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
 343			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
 344		},
 345		[C(OP_WRITE)] = {
 346			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
 347			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
 348		},
 349		[C(OP_PREFETCH)] = {
 350			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 351			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 352		},
 353	},
 354	[C(L1I)] = {
 355		[C(OP_READ)] = {
 356			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 357			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
 358		},
 359		[C(OP_WRITE)] = {
 360			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 361			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
 362		},
 363		[C(OP_PREFETCH)] = {
 364			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 365			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 366		},
 367	},
 368	[C(LL)] = {
 369		[C(OP_READ)] = {
 370			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 371			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 372		},
 373		[C(OP_WRITE)] = {
 374			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 375			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 376		},
 377		[C(OP_PREFETCH)] = {
 378			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 379			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 380		},
 381	},
 382	[C(DTLB)] = {
 383		[C(OP_READ)] = {
 384			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 385			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 386		},
 387		[C(OP_WRITE)] = {
 388			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 389			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 390		},
 391		[C(OP_PREFETCH)] = {
 392			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 393			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 394		},
 395	},
 396	[C(ITLB)] = {
 397		[C(OP_READ)] = {
 398			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 399			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 400		},
 401		[C(OP_WRITE)] = {
 402			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 403			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 404		},
 405		[C(OP_PREFETCH)] = {
 406			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 407			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 408		},
 409	},
 410	[C(BPU)] = {
 411		[C(OP_READ)] = {
 412			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
 413			[C(RESULT_MISS)]
 414					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 415		},
 416		[C(OP_WRITE)] = {
 417			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
 418			[C(RESULT_MISS)]
 419					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 420		},
 421		[C(OP_PREFETCH)] = {
 422			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 423			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 424		},
 425	},
 426	[C(NODE)] = {
 427		[C(OP_READ)] = {
 428			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 429			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 430		},
 431		[C(OP_WRITE)] = {
 432			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 433			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 434		},
 435		[C(OP_PREFETCH)] = {
 436			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 437			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 438		},
 439	},
 440};
 441
 442/*
 443 * Cortex-A5 HW events mapping
 444 */
 445static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
 446	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 447	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 448	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
 449	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
 450	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 451	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 452	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
 453};
 454
 455static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 456					[PERF_COUNT_HW_CACHE_OP_MAX]
 457					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 458	[C(L1D)] = {
 459		[C(OP_READ)] = {
 460			[C(RESULT_ACCESS)]
 461					= ARMV7_PERFCTR_DCACHE_ACCESS,
 462			[C(RESULT_MISS)]
 463					= ARMV7_PERFCTR_DCACHE_REFILL,
 464		},
 465		[C(OP_WRITE)] = {
 466			[C(RESULT_ACCESS)]
 467					= ARMV7_PERFCTR_DCACHE_ACCESS,
 468			[C(RESULT_MISS)]
 469					= ARMV7_PERFCTR_DCACHE_REFILL,
 470		},
 471		[C(OP_PREFETCH)] = {
 472			[C(RESULT_ACCESS)]
 473					= ARMV7_PERFCTR_PREFETCH_LINEFILL,
 474			[C(RESULT_MISS)]
 475					= ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
 476		},
 477	},
 478	[C(L1I)] = {
 479		[C(OP_READ)] = {
 480			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 481			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
 482		},
 483		[C(OP_WRITE)] = {
 484			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 485			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
 486		},
 487		/*
 488		 * The prefetch counters don't differentiate between the I
 489		 * side and the D side.
 490		 */
 491		[C(OP_PREFETCH)] = {
 492			[C(RESULT_ACCESS)]
 493					= ARMV7_PERFCTR_PREFETCH_LINEFILL,
 494			[C(RESULT_MISS)]
 495					= ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
 496		},
 497	},
 498	[C(LL)] = {
 499		[C(OP_READ)] = {
 500			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 501			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 502		},
 503		[C(OP_WRITE)] = {
 504			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 505			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 506		},
 507		[C(OP_PREFETCH)] = {
 508			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 509			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 510		},
 511	},
 512	[C(DTLB)] = {
 513		[C(OP_READ)] = {
 514			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 515			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 516		},
 517		[C(OP_WRITE)] = {
 518			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 519			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 520		},
 521		[C(OP_PREFETCH)] = {
 522			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 523			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 524		},
 525	},
 526	[C(ITLB)] = {
 527		[C(OP_READ)] = {
 528			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 529			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 530		},
 531		[C(OP_WRITE)] = {
 532			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 533			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 534		},
 535		[C(OP_PREFETCH)] = {
 536			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 537			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 538		},
 539	},
 540	[C(BPU)] = {
 541		[C(OP_READ)] = {
 542			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 543			[C(RESULT_MISS)]
 544					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 545		},
 546		[C(OP_WRITE)] = {
 547			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 548			[C(RESULT_MISS)]
 549					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 550		},
 551		[C(OP_PREFETCH)] = {
 552			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 553			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 554		},
 555	},
 556};
 557
 558/*
 559 * Cortex-A15 HW events mapping
 560 */
 561static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
 562	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 563	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 564	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
 565	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
 566	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE,
 567	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 568	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_BUS_CYCLES,
 569};
 570
 571static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 572					[PERF_COUNT_HW_CACHE_OP_MAX]
 573					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 574	[C(L1D)] = {
 575		[C(OP_READ)] = {
 576			[C(RESULT_ACCESS)]
 577					= ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS,
 578			[C(RESULT_MISS)]
 579					= ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
 580		},
 581		[C(OP_WRITE)] = {
 582			[C(RESULT_ACCESS)]
 583					= ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS,
 584			[C(RESULT_MISS)]
 585					= ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
 586		},
 587		[C(OP_PREFETCH)] = {
 588			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 589			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 590		},
 591	},
 592	[C(L1I)] = {
 593		/*
 594		 * Not all performance counters differentiate between read
 595		 * and write accesses/misses so we're not always strictly
 596		 * correct, but it's the best we can do. Writes and reads get
 597		 * combined in these cases.
 598		 */
 599		[C(OP_READ)] = {
 600			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 601			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
 602		},
 603		[C(OP_WRITE)] = {
 604			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 605			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
 606		},
 607		[C(OP_PREFETCH)] = {
 608			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 609			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 610		},
 611	},
 612	[C(LL)] = {
 613		[C(OP_READ)] = {
 614			[C(RESULT_ACCESS)]
 615					= ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS,
 616			[C(RESULT_MISS)]
 617					= ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
 618		},
 619		[C(OP_WRITE)] = {
 620			[C(RESULT_ACCESS)]
 621					= ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS,
 622			[C(RESULT_MISS)]
 623					= ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
 624		},
 625		[C(OP_PREFETCH)] = {
 626			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 627			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 628		},
 629	},
 630	[C(DTLB)] = {
 631		[C(OP_READ)] = {
 632			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 633			[C(RESULT_MISS)]
 634					= ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
 635		},
 636		[C(OP_WRITE)] = {
 637			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 638			[C(RESULT_MISS)]
 639					= ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
 640		},
 641		[C(OP_PREFETCH)] = {
 642			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 643			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 644		},
 645	},
 646	[C(ITLB)] = {
 647		[C(OP_READ)] = {
 648			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 649			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 650		},
 651		[C(OP_WRITE)] = {
 652			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 653			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 654		},
 655		[C(OP_PREFETCH)] = {
 656			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 657			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 658		},
 659	},
 660	[C(BPU)] = {
 661		[C(OP_READ)] = {
 662			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 663			[C(RESULT_MISS)]
 664					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 665		},
 666		[C(OP_WRITE)] = {
 667			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 668			[C(RESULT_MISS)]
 669					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 670		},
 671		[C(OP_PREFETCH)] = {
 672			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 673			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 674		},
 675	},
 676};
 677
 678/*
 679 * Perf Events counters
 680 */
 681enum armv7_counters {
 682	ARMV7_CYCLE_COUNTER		= 1,	/* Cycle counter */
 683	ARMV7_COUNTER0			= 2,	/* First event counter */
 684};
 685
 686/*
 687 * The cycle counter is ARMV7_CYCLE_COUNTER.
 688 * The first event counter is ARMV7_COUNTER0.
 689 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
 690 */
 691#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
 692
 693/*
 694 * ARMv7 low level PMNC access
 695 */
 696
 697/*
 698 * Per-CPU PMNC: config reg
 699 */
 700#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
 701#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
 702#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
 703#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
 704#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
 705#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
 706#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
 707#define	ARMV7_PMNC_N_MASK	0x1f
 708#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
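/*
 * Illustrative use of the PMNC layout above (this is exactly what
 * armv7_read_num_pmnc_events() does near the end of this file):
 *
 *	u32 pmnc = armv7_pmnc_read();
 *	u32 nr_event_counters = (pmnc >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
 */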
 709
 710/*
 711 * Available counters
 712 */
 713#define ARMV7_CNT0		0	/* First event counter */
 714#define ARMV7_CCNT		31	/* Cycle counter */
 715
 716/* Perf Event to low level counters mapping */
 717#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)
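/*
 * Worked example: with ARMV7_COUNTER0 == 2 and ARMV7_CNT0 == 0, perf event
 * index 2 maps to hardware counter CNT0, index 3 to CNT1, and so on. That
 * is why the per-counter bit macros below shift by
 * (idx - ARMV7_EVENT_CNT_TO_CNTx).
 */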
 718
 719/*
 720 * CNTENS: counters enable reg
 721 */
 722#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
 723#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)
 724
 725/*
 726 * CNTENC: counters disable reg
 727 */
 728#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
 729#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)
 730
 731/*
 732 * INTENS: counters overflow interrupt enable reg
 733 */
 734#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
 735#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)
 736
 737/*
 738 * INTENC: counters overflow interrupt disable reg
 739 */
 740#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
 741#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)
 742
 743/*
 744 * EVTSEL: Event selection reg
 745 */
 746#define	ARMV7_EVTSEL_MASK	0xff		/* Mask for writable bits */
 747
 748/*
 749 * SELECT: Counter selection reg
 750 */
 751#define	ARMV7_SELECT_MASK	0x1f		/* Mask for writable bits */
 752
 753/*
 754 * FLAG: counters overflow flag status reg
 755 */
 756#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
 757#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
 758#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 759#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 760
 761static inline unsigned long armv7_pmnc_read(void)
 762{
 763	u32 val;
 764	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 765	return val;
 766}
 767
 768static inline void armv7_pmnc_write(unsigned long val)
 769{
 770	val &= ARMV7_PMNC_MASK;
 771	isb();
 772	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 773}
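/*
 * The CP15 register accessed by the two helpers above (c9, c12, 0) is the
 * ARMv7 PMCR, called PMNC throughout this file; only the low
 * ARMV7_PMNC_MASK bits are writable.
 */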
 774
 775static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
 776{
 777	return pmnc & ARMV7_OVERFLOWED_MASK;
 778}
 779
 780static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
 781					enum armv7_counters counter)
 782{
 783	int ret = 0;
 784
 785	if (counter == ARMV7_CYCLE_COUNTER)
 786		ret = pmnc & ARMV7_FLAG_C;
 787	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
 788		ret = pmnc & ARMV7_FLAG_P(counter);
 789	else
 790		pr_err("CPU%u checking wrong counter %d overflow status\n",
 791			smp_processor_id(), counter);
 792
 793	return ret;
 794}
 795
 796static inline int armv7_pmnc_select_counter(unsigned int idx)
 797{
 798	u32 val;
 799
 800	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
 801		pr_err("CPU%u selecting wrong PMNC counter"
 802			" %d\n", smp_processor_id(), idx);
 803		return -1;
 804	}
 805
 806	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
 807	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
 808	isb();
 809
 810	return idx;
 811}
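/*
 * Writing c9, c12, 5 (PMSELR) chooses which event counter the banked
 * PMXEVTYPER/PMXEVCNTR accesses below refer to; the isb() makes sure the
 * selection has taken effect before those accesses are issued.
 */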
 812
 813static inline u32 armv7pmu_read_counter(int idx)
 814{
 815	unsigned long value = 0;
 816
 817	if (idx == ARMV7_CYCLE_COUNTER)
 818		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
 819	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
 820		if (armv7_pmnc_select_counter(idx) == idx)
 821			asm volatile("mrc p15, 0, %0, c9, c13, 2"
 822				     : "=r" (value));
 823	} else
 824		pr_err("CPU%u reading wrong counter %d\n",
 825			smp_processor_id(), idx);
 826
 827	return value;
 828}
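/*
 * c9, c13, 0 is the cycle counter (CCNT) and c9, c13, 2 is the currently
 * selected event counter, which is why armv7_pmnc_select_counter() must
 * succeed before an event counter can be read (or written, below).
 */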
 829
 830static inline void armv7pmu_write_counter(int idx, u32 value)
 831{
 832	if (idx == ARMV7_CYCLE_COUNTER)
 833		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
 834	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
 835		if (armv7_pmnc_select_counter(idx) == idx)
 836			asm volatile("mcr p15, 0, %0, c9, c13, 2"
 837				     : : "r" (value));
 838	} else
 839		pr_err("CPU%u writing wrong counter %d\n",
 840			smp_processor_id(), idx);
 841}
 842
 843static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
 844{
 845	if (armv7_pmnc_select_counter(idx) == idx) {
 846		val &= ARMV7_EVTSEL_MASK;
 847		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 848	}
 849}
 850
 851static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
 852{
 853	u32 val;
 854
 855	if ((idx != ARMV7_CYCLE_COUNTER) &&
 856	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
 857		pr_err("CPU%u enabling wrong PMNC counter"
 858			" %d\n", smp_processor_id(), idx);
 859		return -1;
 860	}
 861
 862	if (idx == ARMV7_CYCLE_COUNTER)
 863		val = ARMV7_CNTENS_C;
 864	else
 865		val = ARMV7_CNTENS_P(idx);
 866
 867	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
 868
 869	return idx;
 870}
 871
 872static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
 873{
 874	u32 val;
 875
 876
 877	if ((idx != ARMV7_CYCLE_COUNTER) &&
 878	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
 879		pr_err("CPU%u disabling wrong PMNC counter"
 880			" %d\n", smp_processor_id(), idx);
 881		return -1;
 882	}
 883
 884	if (idx == ARMV7_CYCLE_COUNTER)
 885		val = ARMV7_CNTENC_C;
 886	else
 887		val = ARMV7_CNTENC_P(idx);
 888
 889	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
 890
 891	return idx;
 892}
 893
 894static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
 895{
 896	u32 val;
 897
 898	if ((idx != ARMV7_CYCLE_COUNTER) &&
 899	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
 900		pr_err("CPU%u enabling wrong PMNC counter"
 901			" interrupt enable %d\n", smp_processor_id(), idx);
 902		return -1;
 903	}
 904
 905	if (idx == ARMV7_CYCLE_COUNTER)
 906		val = ARMV7_INTENS_C;
 907	else
 908		val = ARMV7_INTENS_P(idx);
 909
 910	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
 911
 912	return idx;
 913}
 914
 915static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
 916{
 917	u32 val;
 918
 919	if ((idx != ARMV7_CYCLE_COUNTER) &&
 920	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
 921		pr_err("CPU%u disabling wrong PMNC counter"
 922			" interrupt enable %d\n", smp_processor_id(), idx);
 923		return -1;
 924	}
 925
 926	if (idx == ARMV7_CYCLE_COUNTER)
 927		val = ARMV7_INTENC_C;
 928	else
 929		val = ARMV7_INTENC_P(idx);
 930
 931	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
 932
 933	return idx;
 934}
 935
 936static inline u32 armv7_pmnc_getreset_flags(void)
 937{
 938	u32 val;
 939
 940	/* Read */
 941	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 942
 943	/* Write to clear flags */
 944	val &= ARMV7_FLAG_MASK;
 945	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
 946
 947	return val;
 948}
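/*
 * The overflow flag register read and written above (c9, c12, 3) is
 * write-one-to-clear, so writing back the value just read acknowledges
 * exactly the overflows that were pending.
 */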
 949
 950#ifdef DEBUG
 951static void armv7_pmnc_dump_regs(void)
 952{
 953	u32 val;
 954	unsigned int cnt;
 955
 956	printk(KERN_INFO "PMNC registers dump:\n");
 957
 958	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
 959	printk(KERN_INFO "PMNC  =0x%08x\n", val);
 960
 961	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
 962	printk(KERN_INFO "CNTENS=0x%08x\n", val);
 963
 964	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
 965	printk(KERN_INFO "INTENS=0x%08x\n", val);
 966
 967	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 968	printk(KERN_INFO "FLAGS =0x%08x\n", val);
 969
 970	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
 971	printk(KERN_INFO "SELECT=0x%08x\n", val);
 972
 973	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 974	printk(KERN_INFO "CCNT  =0x%08x\n", val);
 975
 976	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
 977		armv7_pmnc_select_counter(cnt);
 978		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 979		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
 980			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
 981		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 982		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
 983			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
 984	}
 985}
 986#endif
 987
 988static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 989{
 990	unsigned long flags;
 991
 992	/*
 993	 * Enable counter and interrupt, and set the counter to count
 994	 * the event that we're interested in.
 995	 */
 996	raw_spin_lock_irqsave(&pmu_lock, flags);
 997
 998	/*
 999	 * Disable counter
1000	 */
1001	armv7_pmnc_disable_counter(idx);
1002
1003	/*
1004	 * Set event (if destined for PMNx counters)
1005	 * We don't need to set the event if it's a cycle count
1006	 */
1007	if (idx != ARMV7_CYCLE_COUNTER)
1008		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1009
1010	/*
1011	 * Enable interrupt for this counter
1012	 */
1013	armv7_pmnc_enable_intens(idx);
1014
1015	/*
1016	 * Enable counter
1017	 */
1018	armv7_pmnc_enable_counter(idx);
1019
1020	raw_spin_unlock_irqrestore(&pmu_lock, flags);
1021}
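/*
 * In short, programming a counter means: disable it, select the event to
 * count (unless it is the fixed cycle counter), unmask its overflow
 * interrupt and re-enable it, all under pmu_lock with IRQs off so that the
 * sequence cannot race with the overflow handler.
 */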
1022
1023static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
1024{
1025	unsigned long flags;
1026
1027	/*
1028	 * Disable counter and interrupt
1029	 */
1030	raw_spin_lock_irqsave(&pmu_lock, flags);
1031
1032	/*
1033	 * Disable counter
1034	 */
1035	armv7_pmnc_disable_counter(idx);
1036
1037	/*
1038	 * Disable interrupt for this counter
1039	 */
1040	armv7_pmnc_disable_intens(idx);
1041
1042	raw_spin_unlock_irqrestore(&pmu_lock, flags);
1043}
1044
1045static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1046{
1047	unsigned long pmnc;
1048	struct perf_sample_data data;
1049	struct cpu_hw_events *cpuc;
1050	struct pt_regs *regs;
1051	int idx;
1052
1053	/*
1054	 * Get and reset the IRQ flags
1055	 */
1056	pmnc = armv7_pmnc_getreset_flags();
1057
1058	/*
1059	 * Did an overflow occur?
1060	 */
1061	if (!armv7_pmnc_has_overflowed(pmnc))
1062		return IRQ_NONE;
1063
1064	/*
1065	 * Handle the counter(s) overflow(s)
1066	 */
1067	regs = get_irq_regs();
1068
1069	perf_sample_data_init(&data, 0);
1070
1071	cpuc = &__get_cpu_var(cpu_hw_events);
1072	for (idx = 0; idx <= armpmu->num_events; ++idx) {
1073		struct perf_event *event = cpuc->events[idx];
1074		struct hw_perf_event *hwc;
1075
1076		if (!test_bit(idx, cpuc->active_mask))
1077			continue;
1078
1079		/*
1080		 * We have a single interrupt for all counters. Check that
1081		 * each counter has overflowed before we process it.
1082		 */
1083		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
1084			continue;
1085
1086		hwc = &event->hw;
1087		armpmu_event_update(event, hwc, idx, 1);
1088		data.period = event->hw.last_period;
1089		if (!armpmu_event_set_period(event, hwc, idx))
1090			continue;
1091
1092		if (perf_event_overflow(event, &data, regs))
1093			armpmu->disable(hwc, idx);
1094	}
1095
1096	/*
1097	 * Handle the pending perf events.
1098	 *
1099	 * Note: this call *must* be run with interrupts disabled. For
1100	 * platforms that can have the PMU interrupts raised as an NMI, this
1101	 * will not work.
1102	 */
1103	irq_work_run();
1104
1105	return IRQ_HANDLED;
1106}
1107
1108static void armv7pmu_start(void)
1109{
1110	unsigned long flags;
1111
1112	raw_spin_lock_irqsave(&pmu_lock, flags);
1113	/* Enable all counters */
1114	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1115	raw_spin_unlock_irqrestore(&pmu_lock, flags);
1116}
1117
1118static void armv7pmu_stop(void)
1119{
1120	unsigned long flags;
1121
1122	raw_spin_lock_irqsave(&pmu_lock, flags);
1123	/* Disable all counters */
1124	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1125	raw_spin_unlock_irqrestore(&pmu_lock, flags);
1126}
1127
1128static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
1129				  struct hw_perf_event *event)
1130{
1131	int idx;
1132
1133	/* Always place a cycle counter into the cycle counter. */
1134	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
1135		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
1136			return -EAGAIN;
1137
1138		return ARMV7_CYCLE_COUNTER;
1139	} else {
1140		/*
1141		 * For anything other than a cycle counter, try and use
1142		 * the events counters
1143		 */
1144		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
1145			if (!test_and_set_bit(idx, cpuc->used_mask))
1146				return idx;
1147		}
1148
1149		/* The counters are all in use. */
1150		return -EAGAIN;
1151	}
1152}
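/*
 * Worked example: a Cortex-A8 has 4 event counters, so
 * armv7_read_num_pmnc_events() reports 5 events (4 plus the cycle counter).
 * Index 1 (ARMV7_CYCLE_COUNTER) is reserved for CPU-cycle events and
 * indices 2..5 (ARMV7_COUNTER0 onwards) are handed out to everything else.
 */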
1153
1154static void armv7pmu_reset(void *info)
1155{
1156	u32 idx, nb_cnt = armpmu->num_events;
1157
1158	/* The counter and interrupt enable registers are unknown at reset. */
1159	for (idx = 1; idx < nb_cnt; ++idx)
1160		armv7pmu_disable_event(NULL, idx);
1161
1162	/* Initialize & Reset PMNC: C and P bits */
1163	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1164}
1165
1166static struct arm_pmu armv7pmu = {
1167	.handle_irq		= armv7pmu_handle_irq,
1168	.enable			= armv7pmu_enable_event,
1169	.disable		= armv7pmu_disable_event,
1170	.read_counter		= armv7pmu_read_counter,
1171	.write_counter		= armv7pmu_write_counter,
1172	.get_event_idx		= armv7pmu_get_event_idx,
1173	.start			= armv7pmu_start,
1174	.stop			= armv7pmu_stop,
1175	.reset			= armv7pmu_reset,
1176	.raw_event_mask		= 0xFF,
1177	.max_period		= (1LLU << 32) - 1,
1178};
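/*
 * max_period reflects the fact that both CCNT and the event counters are
 * 32 bits wide (see armv7pmu_read_counter()/armv7pmu_write_counter() above),
 * so a counter can accumulate at most 2^32 - 1 before overflowing.
 */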
1179
1180static u32 __init armv7_read_num_pmnc_events(void)
1181{
1182	u32 nb_cnt;
1183
1184	/* Read the nb of CNTx counters supported from PMNC */
1185	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1186
1187	/* Add the CPU cycles counter and return */
1188	return nb_cnt + 1;
1189}
1190
1191static const struct arm_pmu *__init armv7_a8_pmu_init(void)
1192{
1193	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
1194	armv7pmu.name		= "ARMv7 Cortex-A8";
1195	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
1196	armv7pmu.event_map	= &armv7_a8_perf_map;
1197	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1198	return &armv7pmu;
1199}
1200
1201static const struct arm_pmu *__init armv7_a9_pmu_init(void)
1202{
1203	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
1204	armv7pmu.name		= "ARMv7 Cortex-A9";
1205	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
1206	armv7pmu.event_map	= &armv7_a9_perf_map;
1207	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1208	return &armv7pmu;
1209}
1210
1211static const struct arm_pmu *__init armv7_a5_pmu_init(void)
1212{
1213	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
1214	armv7pmu.name		= "ARMv7 Cortex-A5";
1215	armv7pmu.cache_map	= &armv7_a5_perf_cache_map;
1216	armv7pmu.event_map	= &armv7_a5_perf_map;
1217	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1218	return &armv7pmu;
1219}
1220
1221static const struct arm_pmu *__init armv7_a15_pmu_init(void)
1222{
1223	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
1224	armv7pmu.name		= "ARMv7 Cortex-A15";
1225	armv7pmu.cache_map	= &armv7_a15_perf_cache_map;
1226	armv7pmu.event_map	= &armv7_a15_perf_map;
1227	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1228	return &armv7pmu;
1229}
1230#else
1231static const struct arm_pmu *__init armv7_a8_pmu_init(void)
1232{
1233	return NULL;
1234}
1235
1236static const struct arm_pmu *__init armv7_a9_pmu_init(void)
1237{
1238	return NULL;
1239}
1240
1241static const struct arm_pmu *__init armv7_a5_pmu_init(void)
1242{
1243	return NULL;
1244}
1245
1246static const struct arm_pmu *__init armv7_a15_pmu_init(void)
1247{
1248	return NULL;
1249}
1250#endif	/* CONFIG_CPU_V7 */
v4.10.11
   1/*
   2 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
   3 *
   4 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
   5 * 2010 (c) MontaVista Software, LLC.
   6 *
   7 * Copied from ARMv6 code, with the low level code inspired
   8 *  by the ARMv7 Oprofile code.
   9 *
  10 * Cortex-A8 has up to 4 configurable performance counters and
  11 *  a single cycle counter.
  12 * Cortex-A9 has up to 31 configurable performance counters and
  13 *  a single cycle counter.
  14 *
  15 * All counters can be enabled/disabled and IRQ masked separately. The cycle
  16 *  counter and all 4 performance counters together can be reset separately.
  17 */
  18
  19#ifdef CONFIG_CPU_V7
  20
  21#include <asm/cp15.h>
  22#include <asm/cputype.h>
  23#include <asm/irq_regs.h>
  24#include <asm/vfp.h>
  25#include "../vfp/vfpinstr.h"
  26
  27#include <linux/of.h>
  28#include <linux/perf/arm_pmu.h>
  29#include <linux/platform_device.h>
  30
  31/*
  32 * Common ARMv7 event types
  33 *
  34 * Note: An implementation may not be able to count all of these events
  35 * but the encodings are considered to be `reserved' in the case that
  36 * they are not available.
  37 */
  38#define ARMV7_PERFCTR_PMNC_SW_INCR			0x00
  39#define ARMV7_PERFCTR_L1_ICACHE_REFILL			0x01
  40#define ARMV7_PERFCTR_ITLB_REFILL			0x02
  41#define ARMV7_PERFCTR_L1_DCACHE_REFILL			0x03
  42#define ARMV7_PERFCTR_L1_DCACHE_ACCESS			0x04
  43#define ARMV7_PERFCTR_DTLB_REFILL			0x05
  44#define ARMV7_PERFCTR_MEM_READ				0x06
  45#define ARMV7_PERFCTR_MEM_WRITE				0x07
  46#define ARMV7_PERFCTR_INSTR_EXECUTED			0x08
  47#define ARMV7_PERFCTR_EXC_TAKEN				0x09
  48#define ARMV7_PERFCTR_EXC_EXECUTED			0x0A
  49#define ARMV7_PERFCTR_CID_WRITE				0x0B
  50
  51/*
  52 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
  53 * It counts:
  54 *  - all (taken) branch instructions,
  55 *  - instructions that explicitly write the PC,
  56 *  - exception generating instructions.
  57 */
  58#define ARMV7_PERFCTR_PC_WRITE				0x0C
  59#define ARMV7_PERFCTR_PC_IMM_BRANCH			0x0D
  60#define ARMV7_PERFCTR_PC_PROC_RETURN			0x0E
  61#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		0x0F
  62#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		0x10
  63#define ARMV7_PERFCTR_CLOCK_CYCLES			0x11
  64#define ARMV7_PERFCTR_PC_BRANCH_PRED			0x12
  65
  66/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
  67#define ARMV7_PERFCTR_MEM_ACCESS			0x13
  68#define ARMV7_PERFCTR_L1_ICACHE_ACCESS			0x14
  69#define ARMV7_PERFCTR_L1_DCACHE_WB			0x15
  70#define ARMV7_PERFCTR_L2_CACHE_ACCESS			0x16
  71#define ARMV7_PERFCTR_L2_CACHE_REFILL			0x17
  72#define ARMV7_PERFCTR_L2_CACHE_WB			0x18
  73#define ARMV7_PERFCTR_BUS_ACCESS			0x19
  74#define ARMV7_PERFCTR_MEM_ERROR				0x1A
  75#define ARMV7_PERFCTR_INSTR_SPEC			0x1B
  76#define ARMV7_PERFCTR_TTBR_WRITE			0x1C
  77#define ARMV7_PERFCTR_BUS_CYCLES			0x1D
  78
  79#define ARMV7_PERFCTR_CPU_CYCLES			0xFF
  80
  81/* ARMv7 Cortex-A8 specific event types */
  82#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		0x43
  83#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL		0x44
  84#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		0x50
  85#define ARMV7_A8_PERFCTR_STALL_ISIDE			0x56
  86
  87/* ARMv7 Cortex-A9 specific event types */
  88#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		0x68
  89#define ARMV7_A9_PERFCTR_STALL_ICACHE			0x60
  90#define ARMV7_A9_PERFCTR_STALL_DISPATCH			0x66
  91
  92/* ARMv7 Cortex-A5 specific event types */
  93#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		0xc2
  94#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		0xc3
  95
  96/* ARMv7 Cortex-A15 specific event types */
  97#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
  98#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
  99#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		0x42
 100#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	0x43
 101
 102#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		0x4C
 103#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		0x4D
 104
 105#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		0x50
 106#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
 107#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		0x52
 108#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		0x53
 109
 110#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC			0x76
 111
 112/* ARMv7 Cortex-A12 specific event types */
 113#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
 114#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
 115
 116#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		0x50
 117#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
 118
 119#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC			0x76
 120
 121#define ARMV7_A12_PERFCTR_PF_TLB_REFILL			0xe7
 122
 123/* ARMv7 Krait specific event types */
 124#define KRAIT_PMRESR0_GROUP0				0xcc
 125#define KRAIT_PMRESR1_GROUP0				0xd0
 126#define KRAIT_PMRESR2_GROUP0				0xd4
 127#define KRAIT_VPMRESR0_GROUP0				0xd8
 128
 129#define KRAIT_PERFCTR_L1_ICACHE_ACCESS			0x10011
 130#define KRAIT_PERFCTR_L1_ICACHE_MISS			0x10010
 131
 132#define KRAIT_PERFCTR_L1_ITLB_ACCESS			0x12222
 133#define KRAIT_PERFCTR_L1_DTLB_ACCESS			0x12210
 134
 135/* ARMv7 Scorpion specific event types */
 136#define SCORPION_LPM0_GROUP0				0x4c
 137#define SCORPION_LPM1_GROUP0				0x50
 138#define SCORPION_LPM2_GROUP0				0x54
 139#define SCORPION_L2LPM_GROUP0				0x58
 140#define SCORPION_VLPM_GROUP0				0x5c
 141
 142#define SCORPION_ICACHE_ACCESS				0x10053
 143#define SCORPION_ICACHE_MISS				0x10052
 144
 145#define SCORPION_DTLB_ACCESS				0x12013
 146#define SCORPION_DTLB_MISS				0x12012
 147
 148#define SCORPION_ITLB_MISS				0x12021
 149
 150/*
 151 * Cortex-A8 HW events mapping
 152 *
 153 * The hardware events that we support. We do support cache operations but
 154 * we have harvard caches and no way to combine instruction and data
 155 * accesses/misses in hardware.
 156 */
 157static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
 158	PERF_MAP_ALL_UNSUPPORTED,
 159	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 160	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 161	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 162	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 163	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 164	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 165	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
 166};
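/*
 * PERF_MAP_ALL_UNSUPPORTED (and PERF_CACHE_MAP_ALL_UNSUPPORTED below) come
 * from <linux/perf/arm_pmu.h> and pre-fill every slot with the
 * "unsupported" marker, so these newer tables only list the events a core
 * actually provides; compare with the fully spelled-out v3.1 tables above.
 */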
 167
 168static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 169					  [PERF_COUNT_HW_CACHE_OP_MAX]
 170					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 171	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 172
 173	/*
 174	 * The performance counters don't differentiate between read and write
 175	 * accesses/misses so this isn't strictly correct, but it's the best we
 176	 * can do. Writes and reads get combined.
 177	 */
 178	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 179	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 180	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 181	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 182
 183	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
 184	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 185
 186	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 187	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 188	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 189	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 190
 191	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 192	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 193
 194	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 195	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 196
 197	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 198	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 199	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 200	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 201};
 202
 203/*
 204 * Cortex-A9 HW events mapping
 205 */
 206static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 207	PERF_MAP_ALL_UNSUPPORTED,
 208	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 209	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
 210	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 211	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 212	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 213	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 214	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
 215	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
 216};
 217
 218static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 219					  [PERF_COUNT_HW_CACHE_OP_MAX]
 220					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 221	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 222
 223	/*
 224	 * The performance counters don't differentiate between read and write
 225	 * accesses/misses so this isn't strictly correct, but it's the best we
 226	 * can do. Writes and reads get combined.
 227	 */
 228	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 229	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 230	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 231	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 232
 233	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 234
 235	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 236	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 237
 238	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 239	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 240
 241	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 242	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 243	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 244	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 245};
 246
 247/*
 248 * Cortex-A5 HW events mapping
 249 */
 250static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
 251	PERF_MAP_ALL_UNSUPPORTED,
 252	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 253	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 254	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 255	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 256	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 257	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 258};
 259
 260static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 261					[PERF_COUNT_HW_CACHE_OP_MAX]
 262					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 263	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 264
 265	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 266	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 267	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 268	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 269	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 270	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 271
 272	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 273	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 274	/*
 275	 * The prefetch counters don't differentiate between the I side and the
 276	 * D side.
 277	 */
 278	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 279	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 280
 281	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 282	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 283
 284	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 285	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 286
 287	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 288	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 289	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 290	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 291};
 292
 293/*
 294 * Cortex-A15 HW events mapping
 295 */
 296static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
 297	PERF_MAP_ALL_UNSUPPORTED,
 298	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 299	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 300	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 301	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 302	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
 303	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 304	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 305};
 306
 307static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 308					[PERF_COUNT_HW_CACHE_OP_MAX]
 309					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 310	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 311
 312	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
 313	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
 314	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 315	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
 316
 317	/*
 318	 * Not all performance counters differentiate between read and write
 319	 * accesses/misses so we're not always strictly correct, but it's the
 320	 * best we can do. Writes and reads get combined in these cases.
 321	 */
 322	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 323	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 324
 325	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
 326	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
 327	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
 328	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
 329
 330	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
 331	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
 332
 333	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 334	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 335
 336	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 337	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 338	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 339	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 340};
 341
 342/*
 343 * Cortex-A7 HW events mapping
 344 */
 345static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
 346	PERF_MAP_ALL_UNSUPPORTED,
 347	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 348	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 349	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 350	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 351	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 352	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 353	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 354};
 355
 356static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 357					[PERF_COUNT_HW_CACHE_OP_MAX]
 358					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 359	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 360
 361	/*
 362	 * The performance counters don't differentiate between read and write
 363	 * accesses/misses so this isn't strictly correct, but it's the best we
 364	 * can do. Writes and reads get combined.
 365	 */
 366	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 367	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 368	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 369	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 370
 371	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 372	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 373
 374	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 375	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 376	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 377	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 378
 379	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 380	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 381
 382	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 383	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 384
 385	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 386	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 387	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 388	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 389};
 390
 391/*
 392 * Cortex-A12 HW events mapping
 393 */
 394static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
 395	PERF_MAP_ALL_UNSUPPORTED,
 396	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 397	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 398	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 399	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 400	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
 401	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 402	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 403};
 404
 405static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 406					[PERF_COUNT_HW_CACHE_OP_MAX]
 407					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 408	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 409
 410	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
 411	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 412	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 413	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 414
 415	/*
 416	 * Not all performance counters differentiate between read and write
 417	 * accesses/misses so we're not always strictly correct, but it's the
 418	 * best we can do. Writes and reads get combined in these cases.
 419	 */
 420	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 421	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 422
 423	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
 424	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 425	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
 426	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 427
 428	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 429	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 430	[C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,
 431
 432	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 433	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 434
 435	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 436	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 437	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 438	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 439};
 440
 441/*
 442 * Krait HW events mapping
 443 */
 444static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
 445	PERF_MAP_ALL_UNSUPPORTED,
 446	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 447	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 448	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 449	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 450	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 451};
 452
 453static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
 454	PERF_MAP_ALL_UNSUPPORTED,
 455	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 456	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 457	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 458	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 459};
 460
 461static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 462					  [PERF_COUNT_HW_CACHE_OP_MAX]
 463					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 464	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 465
 466	/*
 467	 * The performance counters don't differentiate between read and write
 468	 * accesses/misses so this isn't strictly correct, but it's the best we
 469	 * can do. Writes and reads get combined.
 470	 */
 471	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 472	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 473	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 474	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 475
 476	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
 477	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,
 478
 479	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
 480	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
 481
 482	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
 483	[C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
 484
 485	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 486	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 487	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 488	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 489};
 490
 491/*
 492 * Scorpion HW events mapping
 493 */
 494static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
 495	PERF_MAP_ALL_UNSUPPORTED,
 496	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 497	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 498	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 499	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 500	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 501};
 502
 503static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 504					    [PERF_COUNT_HW_CACHE_OP_MAX]
 505					    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 506	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 507	/*
 508	 * The performance counters don't differentiate between read and write
 509	 * accesses/misses so this isn't strictly correct, but it's the best we
 510	 * can do. Writes and reads get combined.
 511	 */
 512	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 513	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 514	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 515	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 516	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
 517	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
 518	/*
 519	 * Only ITLB misses and DTLB refills are supported.  If users want the
 520	 * DTLB refill misses, a raw counter must be used.
 521	 */
 522	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
 523	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
 524	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
 525	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
 526	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
 527	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
 528	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 529	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 530	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 531	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 532};
 533
 534PMU_FORMAT_ATTR(event, "config:0-7");
 535
 536static struct attribute *armv7_pmu_format_attrs[] = {
 537	&format_attr_event.attr,
 538	NULL,
 539};
 540
 541static struct attribute_group armv7_pmu_format_attr_group = {
 542	.name = "format",
 543	.attrs = armv7_pmu_format_attrs,
 544};
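
/*
 * The "format" group above advertises that the raw event number occupies
 * config[7:0], so userspace can request a raw hardware event directly,
 * e.g. (illustrative invocation only):
 *
 *	perf stat -e armv7_cortex_a9/event=0x03/ <workload>
 */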
 545
 546#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
 547#define ARMV7_EVENT_ATTR(name, config) \
 548	PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
 549			      "event=" ARMV7_EVENT_ATTR_RESOLVE(config))
 550
 551ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
 552ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
 553ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
 554ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
 555ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
 556ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
 557ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
 558ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
 559ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
 560ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
 561ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
 562ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
 563ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
 564ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
 565ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
 566ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
 567ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
 568ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
 569ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);
 570
 571static struct attribute *armv7_pmuv1_event_attrs[] = {
 572	&armv7_event_attr_sw_incr.attr.attr,
 573	&armv7_event_attr_l1i_cache_refill.attr.attr,
 574	&armv7_event_attr_l1i_tlb_refill.attr.attr,
 575	&armv7_event_attr_l1d_cache_refill.attr.attr,
 576	&armv7_event_attr_l1d_cache.attr.attr,
 577	&armv7_event_attr_l1d_tlb_refill.attr.attr,
 578	&armv7_event_attr_ld_retired.attr.attr,
 579	&armv7_event_attr_st_retired.attr.attr,
 580	&armv7_event_attr_inst_retired.attr.attr,
 581	&armv7_event_attr_exc_taken.attr.attr,
 582	&armv7_event_attr_exc_return.attr.attr,
 583	&armv7_event_attr_cid_write_retired.attr.attr,
 584	&armv7_event_attr_pc_write_retired.attr.attr,
 585	&armv7_event_attr_br_immed_retired.attr.attr,
 586	&armv7_event_attr_br_return_retired.attr.attr,
 587	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
 588	&armv7_event_attr_br_mis_pred.attr.attr,
 589	&armv7_event_attr_cpu_cycles.attr.attr,
 590	&armv7_event_attr_br_pred.attr.attr,
 591	NULL,
 592};
 593
 594static struct attribute_group armv7_pmuv1_events_attr_group = {
 595	.name = "events",
 596	.attrs = armv7_pmuv1_event_attrs,
 597};
 598
 599ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
 600ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
 601ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
 602ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
 603ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
 604ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
 605ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
 606ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
 607ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
 608ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
 609ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);
 610
 611static struct attribute *armv7_pmuv2_event_attrs[] = {
 612	&armv7_event_attr_sw_incr.attr.attr,
 613	&armv7_event_attr_l1i_cache_refill.attr.attr,
 614	&armv7_event_attr_l1i_tlb_refill.attr.attr,
 615	&armv7_event_attr_l1d_cache_refill.attr.attr,
 616	&armv7_event_attr_l1d_cache.attr.attr,
 617	&armv7_event_attr_l1d_tlb_refill.attr.attr,
 618	&armv7_event_attr_ld_retired.attr.attr,
 619	&armv7_event_attr_st_retired.attr.attr,
 620	&armv7_event_attr_inst_retired.attr.attr,
 621	&armv7_event_attr_exc_taken.attr.attr,
 622	&armv7_event_attr_exc_return.attr.attr,
 623	&armv7_event_attr_cid_write_retired.attr.attr,
 624	&armv7_event_attr_pc_write_retired.attr.attr,
 625	&armv7_event_attr_br_immed_retired.attr.attr,
 626	&armv7_event_attr_br_return_retired.attr.attr,
 627	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
 628	&armv7_event_attr_br_mis_pred.attr.attr,
 629	&armv7_event_attr_cpu_cycles.attr.attr,
 630	&armv7_event_attr_br_pred.attr.attr,
 631	&armv7_event_attr_mem_access.attr.attr,
 632	&armv7_event_attr_l1i_cache.attr.attr,
 633	&armv7_event_attr_l1d_cache_wb.attr.attr,
 634	&armv7_event_attr_l2d_cache.attr.attr,
 635	&armv7_event_attr_l2d_cache_refill.attr.attr,
 636	&armv7_event_attr_l2d_cache_wb.attr.attr,
 637	&armv7_event_attr_bus_access.attr.attr,
 638	&armv7_event_attr_memory_error.attr.attr,
 639	&armv7_event_attr_inst_spec.attr.attr,
 640	&armv7_event_attr_ttbr_write_retired.attr.attr,
 641	&armv7_event_attr_bus_cycles.attr.attr,
 642	NULL,
 643};
 644
 645static struct attribute_group armv7_pmuv2_events_attr_group = {
 646	.name = "events",
 647	.attrs = armv7_pmuv2_event_attrs,
 648};
 649
 650/*
 651 * Perf Events' indices
 652 */
 653#define	ARMV7_IDX_CYCLE_COUNTER	0
 654#define	ARMV7_IDX_COUNTER0	1
 655#define	ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
 656	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 657
 658#define	ARMV7_MAX_COUNTERS	32
 659#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
 660
 661/*
 662 * ARMv7 low level PMNC access
 663 */
 664
 665/*
 666 * Perf Event to low level counters mapping
 667 */
 668#define	ARMV7_IDX_TO_COUNTER(x)	\
 669	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
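
/*
 * Worked example of the mapping above: perf index 1 (ARMV7_IDX_COUNTER0)
 * selects hardware event counter 0, index 2 selects counter 1, and so on.
 * The cycle counter (perf index 0) deliberately wraps around to 31, which
 * is the position of the C (cycle counter) bit in the enable, interrupt
 * enable and overflow flag registers:
 *
 *	ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0)      == 0
 *	ARMV7_IDX_TO_COUNTER(ARMV7_IDX_CYCLE_COUNTER) == 31
 */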
 670
 671/*
 672 * Per-CPU PMNC: config reg
 673 */
 674#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
 675#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
 676#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
 677#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
 678#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
 679	#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
 680#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
 681#define	ARMV7_PMNC_N_MASK	0x1f
 682#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
 683
 684/*
 685 * FLAG: counters overflow flag status reg
 686 */
 687#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 688#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 689
 690/*
 691 * PMXEVTYPER: Event selection reg
 692 */
 693#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
 694#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
 695
 696/*
 697 * Event filters for PMUv2
 698 */
 699#define	ARMV7_EXCLUDE_PL1	(1 << 31)
 700#define	ARMV7_EXCLUDE_USER	(1 << 30)
 701#define	ARMV7_INCLUDE_HYP	(1 << 27)
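
/*
 * How these definitions fit together (a sketch of the flow implemented
 * below): the writable PMXEVTYPER mask 0xc80000ff covers the privilege
 * filter bits [31:30], the hypervisor-include bit [27] and the 8-bit event
 * number [7:0].  armv7pmu_set_event_filter() builds the filter part, so an
 * event created with attr->exclude_user set gets ARMV7_EXCLUDE_USER
 * (bit 30) in its config_base; the common ARM PMU code ORs in the mapped
 * event number, and armv7pmu_enable_event() writes the combined value
 * through armv7_pmnc_write_evtsel().
 */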
 702
 703/*
 704 * Secure debug enable reg
 705 */
 706#define ARMV7_SDER_SUNIDEN	BIT(1) /* Permit non-invasive debug */
 707
 708static inline u32 armv7_pmnc_read(void)
 709{
 710	u32 val;
 711	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 712	return val;
 713}
 714
 715static inline void armv7_pmnc_write(u32 val)
 716{
 717	val &= ARMV7_PMNC_MASK;
 718	isb();
 719	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 720}
 721
 722static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 723{
 724	return pmnc & ARMV7_OVERFLOWED_MASK;
 725}
 726
 727static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
 728{
 729	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
 730		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
 731}
 732
 733static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 734{
 735	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
 736}
 737
 738static inline void armv7_pmnc_select_counter(int idx)
 739{
 740	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 741	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 742	isb();
 743}
 744
 745static inline u32 armv7pmu_read_counter(struct perf_event *event)
 746{
 747	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 748	struct hw_perf_event *hwc = &event->hw;
 749	int idx = hwc->idx;
 750	u32 value = 0;
 751
 752	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 753		pr_err("CPU%u reading wrong counter %d\n",
 754			smp_processor_id(), idx);
 755	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
 756		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
 757	} else {
 758		armv7_pmnc_select_counter(idx);
 759		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
 760	}
 761
 762	return value;
 763}
 764
 765static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
 766{
 767	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 768	struct hw_perf_event *hwc = &event->hw;
 769	int idx = hwc->idx;
 770
 771	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 772		pr_err("CPU%u writing wrong counter %d\n",
 773			smp_processor_id(), idx);
 774	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
 775		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
 776	} else {
 777		armv7_pmnc_select_counter(idx);
 778		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
 779	}
 780}
 781
 782static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 783{
 784	armv7_pmnc_select_counter(idx);
 785	val &= ARMV7_EVTYPE_MASK;
 786	asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 787}
 788
 789static inline void armv7_pmnc_enable_counter(int idx)
 790{
 791	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 792	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 793}
 794
 795static inline void armv7_pmnc_disable_counter(int idx)
 796{
 797	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 798	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 799}
 800
 801static inline void armv7_pmnc_enable_intens(int idx)
 802{
 803	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 804	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 805}
 806
 807static inline void armv7_pmnc_disable_intens(int idx)
 808{
 809	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 810	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 811	isb();
 812	/* Clear the overflow flag in case an interrupt is pending. */
 813	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
 814	isb();
 815}
 816
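/*
 * The overflow flag register is write-one-to-clear: reading it and writing
 * the same value straight back, as done here, acknowledges exactly the
 * overflow bits that were set at the time of the read.
 */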
 817static inline u32 armv7_pmnc_getreset_flags(void)
 818{
 819	u32 val;
 820
 821	/* Read */
 822	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 823
 824	/* Write to clear flags */
 825	val &= ARMV7_FLAG_MASK;
 826	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
 827
 828	return val;
 829}
 830
 831#ifdef DEBUG
 832static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
 833{
 834	u32 val;
 835	unsigned int cnt;
 836
 837	pr_info("PMNC registers dump:\n");
 838
 839	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
 840	pr_info("PMNC  =0x%08x\n", val);
 841
 842	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
 843	pr_info("CNTENS=0x%08x\n", val);
 844
 845	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
 846	pr_info("INTENS=0x%08x\n", val);
 847
 848	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 849	pr_info("FLAGS =0x%08x\n", val);
 850
 851	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
 852	pr_info("SELECT=0x%08x\n", val);
 853
 854	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 855	pr_info("CCNT  =0x%08x\n", val);
 856
 857	for (cnt = ARMV7_IDX_COUNTER0;
 858			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
 859		armv7_pmnc_select_counter(cnt);
 860		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 861		pr_info("CNT[%d] count =0x%08x\n",
 862			ARMV7_IDX_TO_COUNTER(cnt), val);
 863		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 864		pr_info("CNT[%d] evtsel=0x%08x\n",
 865			ARMV7_IDX_TO_COUNTER(cnt), val);
 866	}
 867}
 868#endif
 869
 870static void armv7pmu_enable_event(struct perf_event *event)
 871{
 872	unsigned long flags;
 873	struct hw_perf_event *hwc = &event->hw;
 874	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 875	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 876	int idx = hwc->idx;
 877
 878	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 879		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
 880			smp_processor_id(), idx);
 881		return;
 882	}
 883
 884	/*
 885	 * Enable counter and interrupt, and set the counter to count
 886	 * the event that we're interested in.
 887	 */
 888	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 889
 890	/*
 891	 * Disable counter
 892	 */
 893	armv7_pmnc_disable_counter(idx);
 894
 895	/*
 896	 * Set event (if destined for PMNx counters)
 897	 * We only need to set the event for the cycle counter if we
 898	 * have the ability to perform event filtering.
 899	 */
 900	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
 901		armv7_pmnc_write_evtsel(idx, hwc->config_base);
 902
 903	/*
 904	 * Enable interrupt for this counter
 905	 */
 906	armv7_pmnc_enable_intens(idx);
 907
 908	/*
 909	 * Enable counter
 910	 */
 911	armv7_pmnc_enable_counter(idx);
 912
 913	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 914}
 915
 916static void armv7pmu_disable_event(struct perf_event *event)
 917{
 918	unsigned long flags;
 919	struct hw_perf_event *hwc = &event->hw;
 920	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 921	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 922	int idx = hwc->idx;
 923
 924	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 925		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
 926			smp_processor_id(), idx);
 927		return;
 928	}
 929
 930	/*
 931	 * Disable counter and interrupt
 932	 */
 933	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 934
 935	/*
 936	 * Disable counter
 937	 */
 938	armv7_pmnc_disable_counter(idx);
 939
 940	/*
 941	 * Disable interrupt for this counter
 942	 */
 943	armv7_pmnc_disable_intens(idx);
 944
 945	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 946}
 947
 948static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 949{
 950	u32 pmnc;
 951	struct perf_sample_data data;
 952	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
 953	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
 954	struct pt_regs *regs;
 955	int idx;
 956
 957	/*
 958	 * Get and reset the IRQ flags
 959	 */
 960	pmnc = armv7_pmnc_getreset_flags();
 961
 962	/*
 963	 * Did an overflow occur?
 964	 */
 965	if (!armv7_pmnc_has_overflowed(pmnc))
 966		return IRQ_NONE;
 967
 968	/*
 969	 * Handle the counter(s) overflow(s)
 970	 */
 971	regs = get_irq_regs();
 972
 973	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 974		struct perf_event *event = cpuc->events[idx];
 975		struct hw_perf_event *hwc;
 976
 977		/* Ignore if we don't have an event. */
 978		if (!event)
 979			continue;
 980
 981		/*
 982		 * We have a single interrupt for all counters. Check that
 983		 * each counter has overflowed before we process it.
 984		 */
 985		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
 986			continue;
 987
 988		hwc = &event->hw;
 989		armpmu_event_update(event);
 990		perf_sample_data_init(&data, 0, hwc->last_period);
 991		if (!armpmu_event_set_period(event))
 992			continue;
 993
 994		if (perf_event_overflow(event, &data, regs))
 995			cpu_pmu->disable(event);
 996	}
 997
 998	/*
 999	 * Handle the pending perf events.
1000	 *
1001	 * Note: this call *must* be run with interrupts disabled. For
1002	 * platforms that can have the PMU interrupts raised as an NMI, this
1003	 * will not work.
1004	 */
1005	irq_work_run();
1006
1007	return IRQ_HANDLED;
1008}
1009
1010static void armv7pmu_start(struct arm_pmu *cpu_pmu)
1011{
1012	unsigned long flags;
1013	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1014
1015	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1016	/* Enable all counters */
1017	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1018	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1019}
1020
1021static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
1022{
1023	unsigned long flags;
1024	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1025
1026	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1027	/* Disable all counters */
1028	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1029	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1030}
1031
1032static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1033				  struct perf_event *event)
1034{
1035	int idx;
1036	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1037	struct hw_perf_event *hwc = &event->hw;
1038	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
1039
1040	/* Always place a cycle-counting event onto the cycle counter. */
1041	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1042		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1043			return -EAGAIN;
1044
1045		return ARMV7_IDX_CYCLE_COUNTER;
1046	}
1047
1048	/*
1049	 * For anything other than a cycle counter, try to use
1050	 * one of the event counters.
1051	 */
1052	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1053		if (!test_and_set_bit(idx, cpuc->used_mask))
1054			return idx;
1055	}
1056
1057	/* The counters are all in use. */
1058	return -EAGAIN;
1059}
1060
1061/*
1062 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1063 */
1064static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1065				     struct perf_event_attr *attr)
1066{
1067	unsigned long config_base = 0;
1068
1069	if (attr->exclude_idle)
1070		return -EPERM;
1071	if (attr->exclude_user)
1072		config_base |= ARMV7_EXCLUDE_USER;
1073	if (attr->exclude_kernel)
1074		config_base |= ARMV7_EXCLUDE_PL1;
1075	if (!attr->exclude_hv)
1076		config_base |= ARMV7_INCLUDE_HYP;
1077
1078	/*
1079	 * Install the filter into config_base as this is used to
1080	 * construct the event type.
1081	 */
1082	event->config_base = config_base;
1083
1084	return 0;
1085}
1086
1087static void armv7pmu_reset(void *info)
1088{
1089	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
1090	u32 idx, nb_cnt = cpu_pmu->num_events, val;
1091
1092	if (cpu_pmu->secure_access) {
1093		asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
1094		val |= ARMV7_SDER_SUNIDEN;
1095		asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
1096	}
1097
1098	/* The counter and interrupt enable registers are unknown at reset. */
1099	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1100		armv7_pmnc_disable_counter(idx);
1101		armv7_pmnc_disable_intens(idx);
1102	}
1103
1104	/* Initialize & Reset PMNC: C and P bits */
1105	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1106}
1107
1108static int armv7_a8_map_event(struct perf_event *event)
1109{
1110	return armpmu_map_event(event, &armv7_a8_perf_map,
1111				&armv7_a8_perf_cache_map, 0xFF);
1112}
1113
1114static int armv7_a9_map_event(struct perf_event *event)
1115{
1116	return armpmu_map_event(event, &armv7_a9_perf_map,
1117				&armv7_a9_perf_cache_map, 0xFF);
1118}
1119
1120static int armv7_a5_map_event(struct perf_event *event)
1121{
1122	return armpmu_map_event(event, &armv7_a5_perf_map,
1123				&armv7_a5_perf_cache_map, 0xFF);
1124}
1125
1126static int armv7_a15_map_event(struct perf_event *event)
1127{
1128	return armpmu_map_event(event, &armv7_a15_perf_map,
1129				&armv7_a15_perf_cache_map, 0xFF);
1130}
1131
1132static int armv7_a7_map_event(struct perf_event *event)
1133{
1134	return armpmu_map_event(event, &armv7_a7_perf_map,
1135				&armv7_a7_perf_cache_map, 0xFF);
1136}
1137
1138static int armv7_a12_map_event(struct perf_event *event)
1139{
1140	return armpmu_map_event(event, &armv7_a12_perf_map,
1141				&armv7_a12_perf_cache_map, 0xFF);
1142}
1143
1144static int krait_map_event(struct perf_event *event)
1145{
1146	return armpmu_map_event(event, &krait_perf_map,
1147				&krait_perf_cache_map, 0xFFFFF);
1148}
1149
1150static int krait_map_event_no_branch(struct perf_event *event)
1151{
1152	return armpmu_map_event(event, &krait_perf_map_no_branch,
1153				&krait_perf_cache_map, 0xFFFFF);
1154}
1155
1156static int scorpion_map_event(struct perf_event *event)
1157{
1158	return armpmu_map_event(event, &scorpion_perf_map,
1159				&scorpion_perf_cache_map, 0xFFFFF);
1160}
1161
1162static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1163{
1164	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
1165	cpu_pmu->enable		= armv7pmu_enable_event;
1166	cpu_pmu->disable	= armv7pmu_disable_event;
1167	cpu_pmu->read_counter	= armv7pmu_read_counter;
1168	cpu_pmu->write_counter	= armv7pmu_write_counter;
1169	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
1170	cpu_pmu->start		= armv7pmu_start;
1171	cpu_pmu->stop		= armv7pmu_stop;
1172	cpu_pmu->reset		= armv7pmu_reset;
1173	cpu_pmu->max_period	= (1LLU << 32) - 1;
1174};
1175
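/*
 * PMCR.N (bits [15:11], extracted below via ARMV7_PMNC_N_SHIFT/_N_MASK)
 * reports the number of implemented event counters; for example, a core
 * with four PMNx counters reports N = 4 and ends up with num_events = 5
 * once the dedicated cycle counter is added.
 */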
1176static void armv7_read_num_pmnc_events(void *info)
1177{
1178	int *nb_cnt = info;
1179
1180	/* Read the number of CNTx counters supported from PMNC */
1181	*nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1182
1183	/* Add the CPU cycles counter */
1184	*nb_cnt += 1;
1185}
1186
1187static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
1188{
1189	return smp_call_function_any(&arm_pmu->supported_cpus,
1190				     armv7_read_num_pmnc_events,
1191				     &arm_pmu->num_events, 1);
1192}
1193
1194static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1195{
1196	armv7pmu_init(cpu_pmu);
1197	cpu_pmu->name		= "armv7_cortex_a8";
1198	cpu_pmu->map_event	= armv7_a8_map_event;
1199	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1200		&armv7_pmuv1_events_attr_group;
1201	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1202		&armv7_pmu_format_attr_group;
1203	return armv7_probe_num_events(cpu_pmu);
1204}
1205
1206static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1207{
1208	armv7pmu_init(cpu_pmu);
1209	cpu_pmu->name		= "armv7_cortex_a9";
1210	cpu_pmu->map_event	= armv7_a9_map_event;
1211	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1212		&armv7_pmuv1_events_attr_group;
1213	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1214		&armv7_pmu_format_attr_group;
1215	return armv7_probe_num_events(cpu_pmu);
1216}
1217
1218static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1219{
1220	armv7pmu_init(cpu_pmu);
1221	cpu_pmu->name		= "armv7_cortex_a5";
1222	cpu_pmu->map_event	= armv7_a5_map_event;
1223	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1224		&armv7_pmuv1_events_attr_group;
1225	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1226		&armv7_pmu_format_attr_group;
1227	return armv7_probe_num_events(cpu_pmu);
1228}
1229
1230static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1231{
1232	armv7pmu_init(cpu_pmu);
1233	cpu_pmu->name		= "armv7_cortex_a15";
1234	cpu_pmu->map_event	= armv7_a15_map_event;
1235	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1236	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1237		&armv7_pmuv2_events_attr_group;
1238	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1239		&armv7_pmu_format_attr_group;
1240	return armv7_probe_num_events(cpu_pmu);
1241}
1242
1243static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1244{
1245	armv7pmu_init(cpu_pmu);
1246	cpu_pmu->name		= "armv7_cortex_a7";
1247	cpu_pmu->map_event	= armv7_a7_map_event;
1248	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1249	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1250		&armv7_pmuv2_events_attr_group;
1251	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1252		&armv7_pmu_format_attr_group;
1253	return armv7_probe_num_events(cpu_pmu);
1254}
1255
1256static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1257{
1258	armv7pmu_init(cpu_pmu);
1259	cpu_pmu->name		= "armv7_cortex_a12";
1260	cpu_pmu->map_event	= armv7_a12_map_event;
1261	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1262	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1263		&armv7_pmuv2_events_attr_group;
1264	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1265		&armv7_pmu_format_attr_group;
1266	return armv7_probe_num_events(cpu_pmu);
1267}
1268
1269static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
1270{
1271	int ret = armv7_a12_pmu_init(cpu_pmu);
1272	cpu_pmu->name = "armv7_cortex_a17";
1273	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1274		&armv7_pmuv2_events_attr_group;
1275	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1276		&armv7_pmu_format_attr_group;
1277	return ret;
1278}
1279
1280/*
1281 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1282 *
1283 *            31   30     24     16     8      0
1284 *            +--------------------------------+
1285 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1286 *            +--------------------------------+
1287 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1288 *            +--------------------------------+
1289 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1290 *            +--------------------------------+
1291 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1292 *            +--------------------------------+
1293 *              EN | G=3  | G=2  | G=1  | G=0
1294 *
1295 *  Event Encoding:
1296 *
1297 *      hwc->config_base = 0xNRCCG
1298 *
1299 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1300 *      R  = region register
1301 *      CC = class of events the group G is choosing from
1302 *      G  = group or particular event
1303 *
1304 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1305 *
1306 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1307 *  unit, etc.) while the event code (CC) corresponds to a particular class of
1308 *  events (interrupts for example). An event code is broken down into
1309 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1310 *  example).
1311 */
1312
1313#define KRAIT_EVENT		(1 << 16)
1314#define VENUM_EVENT		(2 << 16)
1315#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
1316#define PMRESRn_EN		BIT(31)
1317
1318#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
1319#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
1320#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
1321#define EVENT_VENUM(event)	(!!(event & VENUM_EVENT))	/* N=2 */
1322#define EVENT_CPU(event)	(!!(event & KRAIT_EVENT))	/* N=1 */
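
/*
 * Decoding the example from the layout comment above (0x12021, a Krait CPU
 * event) with these macros gives:
 *
 *	EVENT_REGION(0x12021) = (0x12021 >> 12) & 0xf  = 2	(PMRESR2)
 *	EVENT_CODE(0x12021)   = (0x12021 >>  4) & 0xff = 0x02	(class CC)
 *	EVENT_GROUP(0x12021)  =  0x12021        & 0xf  = 1	(group G)
 *	EVENT_CPU(0x12021)    = true, EVENT_VENUM(0x12021) = false
 */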
1323
1324static u32 krait_read_pmresrn(int n)
1325{
1326	u32 val;
1327
1328	switch (n) {
1329	case 0:
1330		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
1331		break;
1332	case 1:
1333		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
1334		break;
1335	case 2:
1336		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
1337		break;
1338	default:
1339		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1340	}
1341
1342	return val;
1343}
1344
1345static void krait_write_pmresrn(int n, u32 val)
1346{
1347	switch (n) {
1348	case 0:
1349		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
1350		break;
1351	case 1:
1352		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
1353		break;
1354	case 2:
1355		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
1356		break;
1357	default:
1358		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1359	}
1360}
1361
1362static u32 venum_read_pmresr(void)
1363{
1364	u32 val;
1365	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1366	return val;
1367}
1368
1369static void venum_write_pmresr(u32 val)
1370{
1371	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1372}
1373
1374static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
1375{
1376	u32 venum_new_val;
1377	u32 fp_new_val;
1378
1379	BUG_ON(preemptible());
1380	/* CPACR Enable CP10 and CP11 access */
1381	*venum_orig_val = get_copro_access();
1382	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
1383	set_copro_access(venum_new_val);
1384
1385	/* Enable FPEXC */
1386	*fp_orig_val = fmrx(FPEXC);
1387	fp_new_val = *fp_orig_val | FPEXC_EN;
1388	fmxr(FPEXC, fp_new_val);
1389}
1390
1391static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
1392{
1393	BUG_ON(preemptible());
1394	/* Restore FPEXC */
1395	fmxr(FPEXC, fp_orig_val);
1396	isb();
1397	/* Restore CPACR */
1398	set_copro_access(venum_orig_val);
1399}
1400
1401static u32 krait_get_pmresrn_event(unsigned int region)
1402{
1403	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
1404					     KRAIT_PMRESR1_GROUP0,
1405					     KRAIT_PMRESR2_GROUP0 };
1406	return pmresrn_table[region];
1407}
1408
1409static void krait_evt_setup(int idx, u32 config_base)
1410{
1411	u32 val;
1412	u32 mask;
1413	u32 vval, fval;
1414	unsigned int region = EVENT_REGION(config_base);
1415	unsigned int group = EVENT_GROUP(config_base);
1416	unsigned int code = EVENT_CODE(config_base);
1417	unsigned int group_shift;
1418	bool venum_event = EVENT_VENUM(config_base);
1419
1420	group_shift = group * 8;
1421	mask = 0xff << group_shift;
1422
1423	/* Configure evtsel for the region and group */
1424	if (venum_event)
1425		val = KRAIT_VPMRESR0_GROUP0;
1426	else
1427		val = krait_get_pmresrn_event(region);
1428	val += group;
1429	/* Mix in mode-exclusion bits */
1430	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1431	armv7_pmnc_write_evtsel(idx, val);
1432
1433	if (venum_event) {
1434		venum_pre_pmresr(&vval, &fval);
1435		val = venum_read_pmresr();
1436		val &= ~mask;
1437		val |= code << group_shift;
1438		val |= PMRESRn_EN;
1439		venum_write_pmresr(val);
1440		venum_post_pmresr(vval, fval);
1441	} else {
1442		val = krait_read_pmresrn(region);
1443		val &= ~mask;
1444		val |= code << group_shift;
1445		val |= PMRESRn_EN;
1446		krait_write_pmresrn(region, val);
1447	}
1448}
1449
1450static u32 clear_pmresrn_group(u32 val, int group)
1451{
1452	u32 mask;
1453	int group_shift;
1454
1455	group_shift = group * 8;
1456	mask = 0xff << group_shift;
1457	val &= ~mask;
1458
1459	/* Keep the enable bit set while any other group in the region is in use */
1460	if (val & ~PMRESRn_EN)
1461		return val |= PMRESRn_EN;
1462
1463	return 0;
1464}
1465
1466static void krait_clearpmu(u32 config_base)
1467{
1468	u32 val;
1469	u32 vval, fval;
1470	unsigned int region = EVENT_REGION(config_base);
1471	unsigned int group = EVENT_GROUP(config_base);
1472	bool venum_event = EVENT_VENUM(config_base);
1473
1474	if (venum_event) {
1475		venum_pre_pmresr(&vval, &fval);
1476		val = venum_read_pmresr();
1477		val = clear_pmresrn_group(val, group);
1478		venum_write_pmresr(val);
1479		venum_post_pmresr(vval, fval);
1480	} else {
1481		val = krait_read_pmresrn(region);
1482		val = clear_pmresrn_group(val, group);
1483		krait_write_pmresrn(region, val);
1484	}
1485}
1486
1487static void krait_pmu_disable_event(struct perf_event *event)
1488{
1489	unsigned long flags;
1490	struct hw_perf_event *hwc = &event->hw;
1491	int idx = hwc->idx;
1492	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1493	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1494
1495	/* Disable counter and interrupt */
1496	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1497
1498	/* Disable counter */
1499	armv7_pmnc_disable_counter(idx);
1500
1501	/*
1502	 * Clear pmresr code (if destined for PMNx counters)
1503	 */
1504	if (hwc->config_base & KRAIT_EVENT_MASK)
1505		krait_clearpmu(hwc->config_base);
1506
1507	/* Disable interrupt for this counter */
1508	armv7_pmnc_disable_intens(idx);
1509
1510	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1511}
1512
1513static void krait_pmu_enable_event(struct perf_event *event)
1514{
1515	unsigned long flags;
1516	struct hw_perf_event *hwc = &event->hw;
1517	int idx = hwc->idx;
1518	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1519	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1520
1521	/*
1522	 * Enable counter and interrupt, and set the counter to count
1523	 * the event that we're interested in.
1524	 */
1525	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1526
1527	/* Disable counter */
1528	armv7_pmnc_disable_counter(idx);
1529
1530	/*
1531	 * Set event (if destined for PMNx counters)
1532	 * We set the event for the cycle counter because we
1533	 * have the ability to perform event filtering.
1534	 */
1535	if (hwc->config_base & KRAIT_EVENT_MASK)
1536		krait_evt_setup(idx, hwc->config_base);
1537	else
1538		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1539
1540	/* Enable interrupt for this counter */
1541	armv7_pmnc_enable_intens(idx);
1542
1543	/* Enable counter */
1544	armv7_pmnc_enable_counter(idx);
1545
1546	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1547}
1548
1549static void krait_pmu_reset(void *info)
1550{
1551	u32 vval, fval;
1552	struct arm_pmu *cpu_pmu = info;
1553	u32 idx, nb_cnt = cpu_pmu->num_events;
1554
1555	armv7pmu_reset(info);
1556
1557	/* Clear all pmresrs */
1558	krait_write_pmresrn(0, 0);
1559	krait_write_pmresrn(1, 0);
1560	krait_write_pmresrn(2, 0);
1561
1562	venum_pre_pmresr(&vval, &fval);
1563	venum_write_pmresr(0);
1564	venum_post_pmresr(vval, fval);
1565
1566	/* Reset PMxEVNCTCR to sane default */
1567	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1568		armv7_pmnc_select_counter(idx);
1569		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1570	}
1571
1572}
1573
1574static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1575			      unsigned int group)
1576{
1577	int bit;
1578	struct hw_perf_event *hwc = &event->hw;
1579	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1580
1581	if (hwc->config_base & VENUM_EVENT)
1582		bit = KRAIT_VPMRESR0_GROUP0;
1583	else
1584		bit = krait_get_pmresrn_event(region);
1585	bit -= krait_get_pmresrn_event(0);
1586	bit += group;
1587	/*
1588	 * Lower bits are reserved for use by the counters (see
1589	 * armv7pmu_get_event_idx() for more info)
1590	 */
1591	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1592
1593	return bit;
1594}
1595
1596/*
1597 * We check for column exclusion constraints here.
1598	 * Two events can't use the same group within a pmresr register.
1599 */
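/*
 * The exclusion is enforced through cpuc->used_mask: krait_event_to_bit()
 * gives every (region, group) pair its own bit above the ones reserved for
 * the counter indices, so a second event resolving to an already-set bit
 * is rejected with -EAGAIN before a counter is even allocated.
 */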
1600static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1601				   struct perf_event *event)
1602{
1603	int idx;
1604	int bit = -1;
1605	struct hw_perf_event *hwc = &event->hw;
1606	unsigned int region = EVENT_REGION(hwc->config_base);
1607	unsigned int code = EVENT_CODE(hwc->config_base);
1608	unsigned int group = EVENT_GROUP(hwc->config_base);
1609	bool venum_event = EVENT_VENUM(hwc->config_base);
1610	bool krait_event = EVENT_CPU(hwc->config_base);
1611
1612	if (venum_event || krait_event) {
1613		/* Ignore invalid events */
1614		if (group > 3 || region > 2)
1615			return -EINVAL;
1616		if (venum_event && (code & 0xe0))
1617			return -EINVAL;
1618
1619		bit = krait_event_to_bit(event, region, group);
1620		if (test_and_set_bit(bit, cpuc->used_mask))
1621			return -EAGAIN;
1622	}
1623
1624	idx = armv7pmu_get_event_idx(cpuc, event);
1625	if (idx < 0 && bit >= 0)
1626		clear_bit(bit, cpuc->used_mask);
1627
1628	return idx;
1629}
1630
1631static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1632				      struct perf_event *event)
1633{
1634	int bit;
1635	struct hw_perf_event *hwc = &event->hw;
1636	unsigned int region = EVENT_REGION(hwc->config_base);
1637	unsigned int group = EVENT_GROUP(hwc->config_base);
1638	bool venum_event = EVENT_VENUM(hwc->config_base);
1639	bool krait_event = EVENT_CPU(hwc->config_base);
1640
1641	if (venum_event || krait_event) {
1642		bit = krait_event_to_bit(event, region, group);
1643		clear_bit(bit, cpuc->used_mask);
1644	}
1645}
1646
1647static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1648{
1649	armv7pmu_init(cpu_pmu);
1650	cpu_pmu->name		= "armv7_krait";
1651	/* Some early versions of Krait don't support PC write events */
1652	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
1653				  "qcom,no-pc-write"))
1654		cpu_pmu->map_event = krait_map_event_no_branch;
1655	else
1656		cpu_pmu->map_event = krait_map_event;
1657	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1658	cpu_pmu->reset		= krait_pmu_reset;
1659	cpu_pmu->enable		= krait_pmu_enable_event;
1660	cpu_pmu->disable	= krait_pmu_disable_event;
1661	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
1662	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1663	return armv7_probe_num_events(cpu_pmu);
1664}
1665
1666/*
1667 * Scorpion Local Performance Monitor Register (LPMn)
1668 *
1669 *            31   30     24     16     8      0
1670 *            +--------------------------------+
1671 *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1672 *            +--------------------------------+
1673 *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1674 *            +--------------------------------+
1675 *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1676 *            +--------------------------------+
1677 *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
1678 *            +--------------------------------+
1679 *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1680 *            +--------------------------------+
1681 *              EN | G=3  | G=2  | G=1  | G=0
1682 *
1683 *
1684 *  Event Encoding:
1685 *
1686 *      hwc->config_base = 0xNRCCG
1687 *
1688 *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
1689 *      R  = region register
1690 *      CC = class of events the group G is choosing from
1691 *      G  = group or particular event
1692 *
1693 *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1694 *
1695 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1696 *  unit, etc.) while the event code (CC) corresponds to a particular class of
1697 *  events (interrupts for example). An event code is broken down into
1698 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1699 *  example).
1700 */
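/*
 * The same EVENT_REGION/EVENT_CODE/EVENT_GROUP macros used for Krait decode
 * Scorpion events as well; region 3 selects the L2LPM register through
 * scorpion_get_pmresrn_event() below.
 */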
1701
1702static u32 scorpion_read_pmresrn(int n)
1703{
1704	u32 val;
1705
1706	switch (n) {
1707	case 0:
1708		asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
1709		break;
1710	case 1:
1711		asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
1712		break;
1713	case 2:
1714		asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
1715		break;
1716	case 3:
1717		asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
1718		break;
1719	default:
1720		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1721	}
1722
1723	return val;
1724}
1725
1726static void scorpion_write_pmresrn(int n, u32 val)
1727{
1728	switch (n) {
1729	case 0:
1730		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
1731		break;
1732	case 1:
1733		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
1734		break;
1735	case 2:
1736		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
1737		break;
1738	case 3:
1739		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
1740		break;
1741	default:
1742		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1743	}
1744}
1745
1746static u32 scorpion_get_pmresrn_event(unsigned int region)
1747{
1748	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
1749					     SCORPION_LPM1_GROUP0,
1750					     SCORPION_LPM2_GROUP0,
1751					     SCORPION_L2LPM_GROUP0 };
1752	return pmresrn_table[region];
1753}
1754
1755static void scorpion_evt_setup(int idx, u32 config_base)
1756{
1757	u32 val;
1758	u32 mask;
1759	u32 vval, fval;
1760	unsigned int region = EVENT_REGION(config_base);
1761	unsigned int group = EVENT_GROUP(config_base);
1762	unsigned int code = EVENT_CODE(config_base);
1763	unsigned int group_shift;
1764	bool venum_event = EVENT_VENUM(config_base);
1765
1766	group_shift = group * 8;
1767	mask = 0xff << group_shift;
1768
1769	/* Configure evtsel for the region and group */
1770	if (venum_event)
1771		val = SCORPION_VLPM_GROUP0;
1772	else
1773		val = scorpion_get_pmresrn_event(region);
1774	val += group;
1775	/* Mix in mode-exclusion bits */
1776	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1777	armv7_pmnc_write_evtsel(idx, val);
1778
1779	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1780
1781	if (venum_event) {
1782		venum_pre_pmresr(&vval, &fval);
1783		val = venum_read_pmresr();
1784		val &= ~mask;
1785		val |= code << group_shift;
1786		val |= PMRESRn_EN;
1787		venum_write_pmresr(val);
1788		venum_post_pmresr(vval, fval);
1789	} else {
1790		val = scorpion_read_pmresrn(region);
1791		val &= ~mask;
1792		val |= code << group_shift;
1793		val |= PMRESRn_EN;
1794		scorpion_write_pmresrn(region, val);
1795	}
1796}
1797
1798static void scorpion_clearpmu(u32 config_base)
1799{
1800	u32 val;
1801	u32 vval, fval;
1802	unsigned int region = EVENT_REGION(config_base);
1803	unsigned int group = EVENT_GROUP(config_base);
1804	bool venum_event = EVENT_VENUM(config_base);
1805
1806	if (venum_event) {
1807		venum_pre_pmresr(&vval, &fval);
1808		val = venum_read_pmresr();
1809		val = clear_pmresrn_group(val, group);
1810		venum_write_pmresr(val);
1811		venum_post_pmresr(vval, fval);
1812	} else {
1813		val = scorpion_read_pmresrn(region);
1814		val = clear_pmresrn_group(val, group);
1815		scorpion_write_pmresrn(region, val);
1816	}
1817}
1818
1819static void scorpion_pmu_disable_event(struct perf_event *event)
1820{
1821	unsigned long flags;
1822	struct hw_perf_event *hwc = &event->hw;
1823	int idx = hwc->idx;
1824	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1825	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1826
1827	/* Disable counter and interrupt */
1828	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1829
1830	/* Disable counter */
1831	armv7_pmnc_disable_counter(idx);
1832
1833	/*
1834	 * Clear pmresr code (if destined for PMNx counters)
1835	 */
1836	if (hwc->config_base & KRAIT_EVENT_MASK)
1837		scorpion_clearpmu(hwc->config_base);
1838
1839	/* Disable interrupt for this counter */
1840	armv7_pmnc_disable_intens(idx);
1841
1842	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1843}
1844
1845static void scorpion_pmu_enable_event(struct perf_event *event)
1846{
1847	unsigned long flags;
1848	struct hw_perf_event *hwc = &event->hw;
1849	int idx = hwc->idx;
1850	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1851	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1852
1853	/*
1854	 * Enable counter and interrupt, and set the counter to count
1855	 * the event that we're interested in.
1856	 */
1857	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1858
1859	/* Disable counter */
1860	armv7_pmnc_disable_counter(idx);
1861
1862	/*
1863	 * Set event (if destined for PMNx counters)
1864	 * We don't set the event for the cycle counter because we
1865	 * don't have the ability to perform event filtering.
1866	 */
1867	if (hwc->config_base & KRAIT_EVENT_MASK)
1868		scorpion_evt_setup(idx, hwc->config_base);
1869	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
1870		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1871
1872	/* Enable interrupt for this counter */
1873	armv7_pmnc_enable_intens(idx);
1874
1875	/* Enable counter */
1876	armv7_pmnc_enable_counter(idx);
1877
1878	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1879}
1880
1881static void scorpion_pmu_reset(void *info)
1882{
1883	u32 vval, fval;
1884	struct arm_pmu *cpu_pmu = info;
1885	u32 idx, nb_cnt = cpu_pmu->num_events;
1886
1887	armv7pmu_reset(info);
1888
1889	/* Clear all pmresrs */
1890	scorpion_write_pmresrn(0, 0);
1891	scorpion_write_pmresrn(1, 0);
1892	scorpion_write_pmresrn(2, 0);
1893	scorpion_write_pmresrn(3, 0);
1894
1895	venum_pre_pmresr(&vval, &fval);
1896	venum_write_pmresr(0);
1897	venum_post_pmresr(vval, fval);
1898
1899	/* Reset PMxEVNCTCR to sane default */
1900	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1901		armv7_pmnc_select_counter(idx);
1902		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1903	}
1904}
1905
1906static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
1907			      unsigned int group)
1908{
1909	int bit;
1910	struct hw_perf_event *hwc = &event->hw;
1911	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1912
1913	if (hwc->config_base & VENUM_EVENT)
1914		bit = SCORPION_VLPM_GROUP0;
1915	else
1916		bit = scorpion_get_pmresrn_event(region);
1917	bit -= scorpion_get_pmresrn_event(0);
1918	bit += group;
1919	/*
1920	 * Lower bits are reserved for use by the counters (see
1921	 * armv7pmu_get_event_idx() for more info)
1922	 */
1923	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1924
1925	return bit;
1926}
1927
1928/*
1929 * We check for column exclusion constraints here.
1930	 * Two events can't use the same group within a pmresr register.
1931 */
1932static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1933				   struct perf_event *event)
1934{
1935	int idx;
1936	int bit = -1;
1937	struct hw_perf_event *hwc = &event->hw;
1938	unsigned int region = EVENT_REGION(hwc->config_base);
1939	unsigned int group = EVENT_GROUP(hwc->config_base);
1940	bool venum_event = EVENT_VENUM(hwc->config_base);
1941	bool scorpion_event = EVENT_CPU(hwc->config_base);
1942
1943	if (venum_event || scorpion_event) {
1944		/* Ignore invalid events */
1945		if (group > 3 || region > 3)
1946			return -EINVAL;
1947
1948		bit = scorpion_event_to_bit(event, region, group);
1949		if (test_and_set_bit(bit, cpuc->used_mask))
1950			return -EAGAIN;
1951	}
1952
1953	idx = armv7pmu_get_event_idx(cpuc, event);
1954	if (idx < 0 && bit >= 0)
1955		clear_bit(bit, cpuc->used_mask);
1956
1957	return idx;
1958}
1959
1960static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1961				      struct perf_event *event)
1962{
1963	int bit;
1964	struct hw_perf_event *hwc = &event->hw;
1965	unsigned int region = EVENT_REGION(hwc->config_base);
1966	unsigned int group = EVENT_GROUP(hwc->config_base);
1967	bool venum_event = EVENT_VENUM(hwc->config_base);
1968	bool scorpion_event = EVENT_CPU(hwc->config_base);
1969
1970	if (venum_event || scorpion_event) {
1971		bit = scorpion_event_to_bit(event, region, group);
1972		clear_bit(bit, cpuc->used_mask);
1973	}
1974}
1975
1976static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1977{
1978	armv7pmu_init(cpu_pmu);
1979	cpu_pmu->name		= "armv7_scorpion";
1980	cpu_pmu->map_event	= scorpion_map_event;
1981	cpu_pmu->reset		= scorpion_pmu_reset;
1982	cpu_pmu->enable		= scorpion_pmu_enable_event;
1983	cpu_pmu->disable	= scorpion_pmu_disable_event;
1984	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
1985	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1986	return armv7_probe_num_events(cpu_pmu);
1987}
1988
1989static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1990{
1991	armv7pmu_init(cpu_pmu);
1992	cpu_pmu->name		= "armv7_scorpion_mp";
1993	cpu_pmu->map_event	= scorpion_map_event;
1994	cpu_pmu->reset		= scorpion_pmu_reset;
1995	cpu_pmu->enable		= scorpion_pmu_enable_event;
1996	cpu_pmu->disable	= scorpion_pmu_disable_event;
1997	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
1998	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1999	return armv7_probe_num_events(cpu_pmu);
2000}
2001
2002static const struct of_device_id armv7_pmu_of_device_ids[] = {
2003	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
2004	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
2005	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
2006	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
2007	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
2008	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
2009	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
2010	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
2011	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
2012	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
2013	{},
2014};
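
/*
 * Probing is driven either by one of the devicetree compatible strings
 * above or, failing that, by the CPUID based table below.  A minimal DT
 * node (illustrative only; interrupt specifiers are board specific) might
 * look like:
 *
 *	pmu {
 *		compatible = "arm,cortex-a15-pmu";
 *		interrupts = <0 68 4>, <0 69 4>;
 *	};
 */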
2015
2016static const struct pmu_probe_info armv7_pmu_probe_table[] = {
2017	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
2018	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
2019	{ /* sentinel value */ }
2020};
2021
2022
2023static int armv7_pmu_device_probe(struct platform_device *pdev)
2024{
2025	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
2026				    armv7_pmu_probe_table);
2027}
2028
2029static struct platform_driver armv7_pmu_driver = {
2030	.driver		= {
2031		.name	= "armv7-pmu",
2032		.of_match_table = armv7_pmu_of_device_ids,
2033	},
2034	.probe		= armv7_pmu_device_probe,
2035};
2036
2037static int __init register_armv7_pmu_driver(void)
2038{
2039	return platform_driver_register(&armv7_pmu_driver);
2040}
2041device_initcall(register_armv7_pmu_driver);
2042#endif	/* CONFIG_CPU_V7 */