v3.1: arch/arm/kernel/perf_event_v7.c
   1/*
   2 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
   3 *
   4 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
   5 * 2010 (c) MontaVista Software, LLC.
   6 *
   7 * Copied from ARMv6 code, with the low level code inspired
   8 *  by the ARMv7 Oprofile code.
   9 *
  10 * Cortex-A8 has up to 4 configurable performance counters and
  11 *  a single cycle counter.
  12 * Cortex-A9 has up to 31 configurable performance counters and
  13 *  a single cycle counter.
  14 *
  15 * All counters can be enabled/disabled and IRQ masked separately. The cycle
  16 *  counter and all 4 performance counters together can be reset separately.
  17 */
  18
  19#ifdef CONFIG_CPU_V7
  20/*
  21 * Common ARMv7 event types
  22 *
  23 * Note: An implementation may not be able to count all of these events
  24 * but the encodings are considered to be `reserved' in the case that
  25 * they are not available.
  26 */
  27enum armv7_perf_types {
  28	ARMV7_PERFCTR_PMNC_SW_INCR		= 0x00,
  29	ARMV7_PERFCTR_IFETCH_MISS		= 0x01,
  30	ARMV7_PERFCTR_ITLB_MISS			= 0x02,
  31	ARMV7_PERFCTR_DCACHE_REFILL		= 0x03,	/* L1 */
  32	ARMV7_PERFCTR_DCACHE_ACCESS		= 0x04,	/* L1 */
  33	ARMV7_PERFCTR_DTLB_REFILL		= 0x05,
  34	ARMV7_PERFCTR_DREAD			= 0x06,
  35	ARMV7_PERFCTR_DWRITE			= 0x07,
  36	ARMV7_PERFCTR_INSTR_EXECUTED		= 0x08,
  37	ARMV7_PERFCTR_EXC_TAKEN			= 0x09,
  38	ARMV7_PERFCTR_EXC_EXECUTED		= 0x0A,
  39	ARMV7_PERFCTR_CID_WRITE			= 0x0B,
  40	/* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
  41	 * It counts:
  42	 *  - all branch instructions,
  43	 *  - instructions that explicitly write the PC,
  44	 *  - exception generating instructions.
  45	 */
  46	ARMV7_PERFCTR_PC_WRITE			= 0x0C,
  47	ARMV7_PERFCTR_PC_IMM_BRANCH		= 0x0D,
  48	ARMV7_PERFCTR_PC_PROC_RETURN		= 0x0E,
  49	ARMV7_PERFCTR_UNALIGNED_ACCESS		= 0x0F,
  50
  51	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
  52	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED	= 0x10,
  53	ARMV7_PERFCTR_CLOCK_CYCLES		= 0x11,
  54	ARMV7_PERFCTR_PC_BRANCH_PRED		= 0x12,
  55	ARMV7_PERFCTR_MEM_ACCESS		= 0x13,
  56	ARMV7_PERFCTR_L1_ICACHE_ACCESS		= 0x14,
  57	ARMV7_PERFCTR_L1_DCACHE_WB		= 0x15,
  58	ARMV7_PERFCTR_L2_DCACHE_ACCESS		= 0x16,
  59	ARMV7_PERFCTR_L2_DCACHE_REFILL		= 0x17,
  60	ARMV7_PERFCTR_L2_DCACHE_WB		= 0x18,
  61	ARMV7_PERFCTR_BUS_ACCESS		= 0x19,
  62	ARMV7_PERFCTR_MEMORY_ERROR		= 0x1A,
  63	ARMV7_PERFCTR_INSTR_SPEC		= 0x1B,
  64	ARMV7_PERFCTR_TTBR_WRITE		= 0x1C,
  65	ARMV7_PERFCTR_BUS_CYCLES		= 0x1D,
  66
  67	ARMV7_PERFCTR_CPU_CYCLES		= 0xFF
  68};
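These encodings are what eventually gets programmed into the event-selection register (EVTSEL, defined further down). From userspace they are also reachable directly as raw events, bypassing the generic maps below. A minimal sketch, assuming a Linux host with perf support; 0x03 is ARMV7_PERFCTR_DCACHE_REFILL from the enum above, and this driver masks raw configs with raw_event_mask (0xFF):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;	/* raw event, no generic mapping */
	attr.size = sizeof(attr);
	attr.config = 0x03;		/* ARMV7_PERFCTR_DCACHE_REFILL */
	attr.disabled = 1;

	/* count for the calling thread, on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("L1 D-cache refills: %llu\n", (unsigned long long)count);
	return 0;
}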
  69
  70/* ARMv7 Cortex-A8 specific event types */
  71enum armv7_a8_perf_types {
  72	ARMV7_PERFCTR_WRITE_BUFFER_FULL		= 0x40,
  73	ARMV7_PERFCTR_L2_STORE_MERGED		= 0x41,
  74	ARMV7_PERFCTR_L2_STORE_BUFF		= 0x42,
  75	ARMV7_PERFCTR_L2_ACCESS			= 0x43,
  76	ARMV7_PERFCTR_L2_CACH_MISS		= 0x44,
  77	ARMV7_PERFCTR_AXI_READ_CYCLES		= 0x45,
  78	ARMV7_PERFCTR_AXI_WRITE_CYCLES		= 0x46,
  79	ARMV7_PERFCTR_MEMORY_REPLAY		= 0x47,
  80	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY	= 0x48,
  81	ARMV7_PERFCTR_L1_DATA_MISS		= 0x49,
  82	ARMV7_PERFCTR_L1_INST_MISS		= 0x4A,
  83	ARMV7_PERFCTR_L1_DATA_COLORING		= 0x4B,
  84	ARMV7_PERFCTR_L1_NEON_DATA		= 0x4C,
  85	ARMV7_PERFCTR_L1_NEON_CACH_DATA		= 0x4D,
  86	ARMV7_PERFCTR_L2_NEON			= 0x4E,
  87	ARMV7_PERFCTR_L2_NEON_HIT		= 0x4F,
  88	ARMV7_PERFCTR_L1_INST			= 0x50,
  89	ARMV7_PERFCTR_PC_RETURN_MIS_PRED	= 0x51,
  90	ARMV7_PERFCTR_PC_BRANCH_FAILED		= 0x52,
  91	ARMV7_PERFCTR_PC_BRANCH_TAKEN		= 0x53,
  92	ARMV7_PERFCTR_PC_BRANCH_EXECUTED	= 0x54,
  93	ARMV7_PERFCTR_OP_EXECUTED		= 0x55,
  94	ARMV7_PERFCTR_CYCLES_INST_STALL		= 0x56,
  95	ARMV7_PERFCTR_CYCLES_INST		= 0x57,
  96	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL	= 0x58,
  97	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL	= 0x59,
  98	ARMV7_PERFCTR_NEON_CYCLES		= 0x5A,
  99
 100	ARMV7_PERFCTR_PMU0_EVENTS		= 0x70,
 101	ARMV7_PERFCTR_PMU1_EVENTS		= 0x71,
 102	ARMV7_PERFCTR_PMU_EVENTS		= 0x72,
 103};
 104
 105/* ARMv7 Cortex-A9 specific event types */
 106enum armv7_a9_perf_types {
 107	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC	= 0x40,
 108	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC	= 0x41,
 109	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC	= 0x42,
 110
 111	ARMV7_PERFCTR_COHERENT_LINE_MISS	= 0x50,
 112	ARMV7_PERFCTR_COHERENT_LINE_HIT		= 0x51,
 113
 114	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES	= 0x60,
 115	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES	= 0x61,
 116	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES	= 0x62,
 117	ARMV7_PERFCTR_STREX_EXECUTED_PASSED	= 0x63,
 118	ARMV7_PERFCTR_STREX_EXECUTED_FAILED	= 0x64,
 119	ARMV7_PERFCTR_DATA_EVICTION		= 0x65,
 120	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST	= 0x66,
 121	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY		= 0x67,
 122	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE	= 0x68,
 123
 124	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS	= 0x6E,
 125
 126	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST	= 0x70,
 127	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST	= 0x71,
 128	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST	= 0x72,
 129	ARMV7_PERFCTR_FP_EXECUTED_INST		= 0x73,
 130	ARMV7_PERFCTR_NEON_EXECUTED_INST	= 0x74,
 131
 132	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES	= 0x80,
 133	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES	= 0x81,
 134	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES	= 0x82,
 135	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES	= 0x83,
 136	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES	= 0x84,
 137	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES	= 0x85,
 138	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES	= 0x86,
 139
 140	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES	= 0x8A,
 141	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES	= 0x8B,
 142
 143	ARMV7_PERFCTR_ISB_INST			= 0x90,
 144	ARMV7_PERFCTR_DSB_INST			= 0x91,
 145	ARMV7_PERFCTR_DMB_INST			= 0x92,
 146	ARMV7_PERFCTR_EXT_INTERRUPTS		= 0x93,
 147
 148	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED	= 0xA0,
 149	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED	= 0xA1,
 150	ARMV7_PERFCTR_PLE_FIFO_FLUSH		= 0xA2,
 151	ARMV7_PERFCTR_PLE_RQST_COMPLETED	= 0xA3,
 152	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW		= 0xA4,
 153	ARMV7_PERFCTR_PLE_RQST_PROG		= 0xA5
 154};
 155
 156/* ARMv7 Cortex-A5 specific event types */
 157enum armv7_a5_perf_types {
 158	ARMV7_PERFCTR_IRQ_TAKEN			= 0x86,
 159	ARMV7_PERFCTR_FIQ_TAKEN			= 0x87,
 160
 161	ARMV7_PERFCTR_EXT_MEM_RQST		= 0xc0,
 162	ARMV7_PERFCTR_NC_EXT_MEM_RQST		= 0xc1,
 163	ARMV7_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
 164	ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP	= 0xc3,
 165	ARMV7_PERFCTR_ENTER_READ_ALLOC		= 0xc4,
 166	ARMV7_PERFCTR_READ_ALLOC		= 0xc5,
 167
 168	ARMV7_PERFCTR_STALL_SB_FULL		= 0xc9,
 169};
 170
 171/* ARMv7 Cortex-A15 specific event types */
 172enum armv7_a15_perf_types {
 173	ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS	= 0x40,
 174	ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS	= 0x41,
 175	ARMV7_PERFCTR_L1_DCACHE_READ_REFILL	= 0x42,
 176	ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL	= 0x43,
 177
 178	ARMV7_PERFCTR_L1_DTLB_READ_REFILL	= 0x4C,
 179	ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL	= 0x4D,
 180
 181	ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS	= 0x50,
 182	ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS	= 0x51,
 183	ARMV7_PERFCTR_L2_DCACHE_READ_REFILL	= 0x52,
 184	ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL	= 0x53,
 185
 186	ARMV7_PERFCTR_SPEC_PC_WRITE		= 0x76,
 187};
 188
 189/*
 190 * Cortex-A8 HW events mapping
 191 *
 192 * The hardware events that we support. We do support cache operations but
 193 * we have harvard caches and no way to combine instruction and data
 194 * accesses/misses in hardware.
 195 */
 196static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
 197	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 198	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 199	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
 200	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
 201	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 202	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 203	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 204};
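This table is consulted by the core ARM perf code (armpmu_map_event() in arch/arm/kernel/perf_event.c, not shown here) when a PERF_TYPE_HARDWARE event is opened. A hedged sketch of that lookup; the function name here is illustrative, not the exact kernel symbol:

static int map_hw_event(u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	/* e.g. armv7_a8_perf_map, installed by the init functions below */
	mapping = (*armpmu->event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}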
 205
 206static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 207					  [PERF_COUNT_HW_CACHE_OP_MAX]
 208					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 209	[C(L1D)] = {
 210		/*
 211		 * The performance counters don't differentiate between read
 212		 * and write accesses/misses so this isn't strictly correct,
 213		 * but it's the best we can do. Writes and reads get
 214		 * combined.
 215		 */
 216		[C(OP_READ)] = {
 217			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
 218			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
 219		},
 220		[C(OP_WRITE)] = {
 221			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
 222			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
 223		},
 224		[C(OP_PREFETCH)] = {
 225			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 226			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 227		},
 228	},
 229	[C(L1I)] = {
 230		[C(OP_READ)] = {
 231			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
 232			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
 233		},
 234		[C(OP_WRITE)] = {
 235			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
 236			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
 237		},
 238		[C(OP_PREFETCH)] = {
 239			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 240			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 241		},
 242	},
 243	[C(LL)] = {
 244		[C(OP_READ)] = {
 245			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
 246			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
 247		},
 248		[C(OP_WRITE)] = {
 249			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
 250			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
 251		},
 252		[C(OP_PREFETCH)] = {
 253			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 254			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 255		},
 256	},
 257	[C(DTLB)] = {
 258		[C(OP_READ)] = {
 259			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 260			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 261		},
 262		[C(OP_WRITE)] = {
 263			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 264			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 265		},
 266		[C(OP_PREFETCH)] = {
 267			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 268			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 269		},
 270	},
 271	[C(ITLB)] = {
 272		[C(OP_READ)] = {
 273			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 274			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 275		},
 276		[C(OP_WRITE)] = {
 277			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 278			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 279		},
 280		[C(OP_PREFETCH)] = {
 281			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 282			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 283		},
 284	},
 285	[C(BPU)] = {
 286		[C(OP_READ)] = {
 287			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
 288			[C(RESULT_MISS)]
 289					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 290		},
 291		[C(OP_WRITE)] = {
 292			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
 293			[C(RESULT_MISS)]
 294					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 295		},
 296		[C(OP_PREFETCH)] = {
 297			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 298			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 299		},
 300	},
 301	[C(NODE)] = {
 302		[C(OP_READ)] = {
 303			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 304			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 305		},
 306		[C(OP_WRITE)] = {
 307			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 308			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 309		},
 310		[C(OP_PREFETCH)] = {
 311			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 312			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 313		},
 314	},
 315};
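For PERF_TYPE_HW_CACHE events the perf ABI packs three byte-wide indices into attr.config (cache id, operation, result), which is exactly how this three-dimensional table is laid out. A sketch of the decode done by the core code (roughly what armpmu_map_cache_event() does; the name below is illustrative, and armpmu->cache_map is assumed to point at a table like the one above):

static int map_cache_event(u64 config)
{
	unsigned int type   = (config >>  0) & 0xff;	/* C(L1D), C(LL), ...  */
	unsigned int op     = (config >>  8) & 0xff;	/* C(OP_READ), ...     */
	unsigned int result = (config >> 16) & 0xff;	/* C(RESULT_MISS), ... */
	int ev;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*armpmu->cache_map)[type][op][result];
	return ev == CACHE_OP_UNSUPPORTED ? -ENOENT : ev;
}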
 316
 317/*
 318 * Cortex-A9 HW events mapping
 319 */
 320static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 321	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 322	[PERF_COUNT_HW_INSTRUCTIONS]	    =
 323					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
 324	[PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_DCACHE_ACCESS,
 325	[PERF_COUNT_HW_CACHE_MISSES]	    = ARMV7_PERFCTR_DCACHE_REFILL,
 326	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 327	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 328	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 329};
 330
 331static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 332					  [PERF_COUNT_HW_CACHE_OP_MAX]
 333					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 334	[C(L1D)] = {
 335		/*
 336		 * The performance counters don't differentiate between read
 337		 * and write accesses/misses so this isn't strictly correct,
 338		 * but it's the best we can do. Writes and reads get
 339		 * combined.
 340		 */
 341		[C(OP_READ)] = {
 342			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
 343			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
 344		},
 345		[C(OP_WRITE)] = {
 346			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
 347			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
 348		},
 349		[C(OP_PREFETCH)] = {
 350			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 351			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 352		},
 353	},
 354	[C(L1I)] = {
 355		[C(OP_READ)] = {
 356			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 357			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
 358		},
 359		[C(OP_WRITE)] = {
 360			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 361			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
 362		},
 363		[C(OP_PREFETCH)] = {
 364			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 365			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 366		},
 367	},
 368	[C(LL)] = {
 369		[C(OP_READ)] = {
 370			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 371			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 372		},
 373		[C(OP_WRITE)] = {
 374			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 375			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 376		},
 377		[C(OP_PREFETCH)] = {
 378			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 379			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 380		},
 381	},
 382	[C(DTLB)] = {
 383		[C(OP_READ)] = {
 384			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 385			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 386		},
 387		[C(OP_WRITE)] = {
 388			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 389			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 390		},
 391		[C(OP_PREFETCH)] = {
 392			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 393			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 394		},
 395	},
 396	[C(ITLB)] = {
 397		[C(OP_READ)] = {
 398			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 399			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 400		},
 401		[C(OP_WRITE)] = {
 402			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 403			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 404		},
 405		[C(OP_PREFETCH)] = {
 406			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 407			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 408		},
 409	},
 410	[C(BPU)] = {
 411		[C(OP_READ)] = {
 412			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
 413			[C(RESULT_MISS)]
 414					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 415		},
 416		[C(OP_WRITE)] = {
 417			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
 418			[C(RESULT_MISS)]
 419					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 420		},
 421		[C(OP_PREFETCH)] = {
 422			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 423			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 424		},
 425	},
 426	[C(NODE)] = {
 427		[C(OP_READ)] = {
 428			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 429			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 430		},
 431		[C(OP_WRITE)] = {
 432			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 433			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 434		},
 435		[C(OP_PREFETCH)] = {
 436			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 437			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 438		},
 439	},
 440};
 441
 442/*
 443 * Cortex-A5 HW events mapping
 444 */
 445static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
 446	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 447	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 448	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
 449	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
 450	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 451	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 452	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
 453};
 454
 455static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 456					[PERF_COUNT_HW_CACHE_OP_MAX]
 457					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 458	[C(L1D)] = {
 459		[C(OP_READ)] = {
 460			[C(RESULT_ACCESS)]
 461					= ARMV7_PERFCTR_DCACHE_ACCESS,
 462			[C(RESULT_MISS)]
 463					= ARMV7_PERFCTR_DCACHE_REFILL,
 464		},
 465		[C(OP_WRITE)] = {
 466			[C(RESULT_ACCESS)]
 467					= ARMV7_PERFCTR_DCACHE_ACCESS,
 468			[C(RESULT_MISS)]
 469					= ARMV7_PERFCTR_DCACHE_REFILL,
 470		},
 471		[C(OP_PREFETCH)] = {
 472			[C(RESULT_ACCESS)]
 473					= ARMV7_PERFCTR_PREFETCH_LINEFILL,
 474			[C(RESULT_MISS)]
 475					= ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
 476		},
 477	},
 478	[C(L1I)] = {
 479		[C(OP_READ)] = {
 480			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 481			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
 482		},
 483		[C(OP_WRITE)] = {
 484			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 485			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
 486		},
 487		/*
 488		 * The prefetch counters don't differentiate between the I
 489		 * side and the D side.
 490		 */
 491		[C(OP_PREFETCH)] = {
 492			[C(RESULT_ACCESS)]
 493					= ARMV7_PERFCTR_PREFETCH_LINEFILL,
 494			[C(RESULT_MISS)]
 495					= ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
 496		},
 497	},
 498	[C(LL)] = {
 499		[C(OP_READ)] = {
 500			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 501			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 502		},
 503		[C(OP_WRITE)] = {
 504			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 505			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 506		},
 507		[C(OP_PREFETCH)] = {
 508			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 509			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 510		},
 511	},
 512	[C(DTLB)] = {
 513		[C(OP_READ)] = {
 514			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 515			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 516		},
 517		[C(OP_WRITE)] = {
 518			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 519			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 520		},
 521		[C(OP_PREFETCH)] = {
 522			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 523			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 524		},
 525	},
 526	[C(ITLB)] = {
 527		[C(OP_READ)] = {
 528			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 529			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 530		},
 531		[C(OP_WRITE)] = {
 532			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 533			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 534		},
 535		[C(OP_PREFETCH)] = {
 536			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 537			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 538		},
 539	},
 540	[C(BPU)] = {
 541		[C(OP_READ)] = {
 542			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 543			[C(RESULT_MISS)]
 544					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 545		},
 546		[C(OP_WRITE)] = {
 547			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 548			[C(RESULT_MISS)]
 549					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 550		},
 551		[C(OP_PREFETCH)] = {
 552			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 553			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 554		},
 555	},
 556};
 557
 558/*
 559 * Cortex-A15 HW events mapping
 560 */
 561static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
 562	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 563	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 564	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
 565	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
 566	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE,
 567	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 568	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_BUS_CYCLES,
 569};
 570
 571static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 572					[PERF_COUNT_HW_CACHE_OP_MAX]
 573					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 574	[C(L1D)] = {
 575		[C(OP_READ)] = {
 576			[C(RESULT_ACCESS)]
 577					= ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS,
 578			[C(RESULT_MISS)]
 579					= ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
 580		},
 581		[C(OP_WRITE)] = {
 582			[C(RESULT_ACCESS)]
 583					= ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS,
 584			[C(RESULT_MISS)]
 585					= ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
 586		},
 587		[C(OP_PREFETCH)] = {
 588			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 589			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 590		},
 591	},
 592	[C(L1I)] = {
 593		/*
 594		 * Not all performance counters differentiate between read
 595		 * and write accesses/misses so we're not always strictly
 596		 * correct, but it's the best we can do. Writes and reads get
 597		 * combined in these cases.
 598		 */
 599		[C(OP_READ)] = {
 600			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 601			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
 602		},
 603		[C(OP_WRITE)] = {
 604			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 605			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
 606		},
 607		[C(OP_PREFETCH)] = {
 608			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 609			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 610		},
 611	},
 612	[C(LL)] = {
 613		[C(OP_READ)] = {
 614			[C(RESULT_ACCESS)]
 615					= ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS,
 616			[C(RESULT_MISS)]
 617					= ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
 618		},
 619		[C(OP_WRITE)] = {
 620			[C(RESULT_ACCESS)]
 621					= ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS,
 622			[C(RESULT_MISS)]
 623					= ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
 624		},
 625		[C(OP_PREFETCH)] = {
 626			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 627			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 628		},
 629	},
 630	[C(DTLB)] = {
 631		[C(OP_READ)] = {
 632			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 633			[C(RESULT_MISS)]
 634					= ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
 635		},
 636		[C(OP_WRITE)] = {
 637			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 638			[C(RESULT_MISS)]
 639					= ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
 640		},
 641		[C(OP_PREFETCH)] = {
 642			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 643			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 644		},
 645	},
 646	[C(ITLB)] = {
 647		[C(OP_READ)] = {
 648			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 649			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 650		},
 651		[C(OP_WRITE)] = {
 652			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 653			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
 654		},
 655		[C(OP_PREFETCH)] = {
 656			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 657			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 658		},
 659	},
 660	[C(BPU)] = {
 661		[C(OP_READ)] = {
 662			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 663			[C(RESULT_MISS)]
 664					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 665		},
 666		[C(OP_WRITE)] = {
 667			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 668			[C(RESULT_MISS)]
 669					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 670		},
 671		[C(OP_PREFETCH)] = {
 672			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 673			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 674		},
 675	},
 676};
 677
 678/*
 679 * Perf Events counters
 680 */
 681enum armv7_counters {
 682	ARMV7_CYCLE_COUNTER		= 1,	/* Cycle counter */
 683	ARMV7_COUNTER0			= 2,	/* First event counter */
 684};
 685
 686/*
 687 * The cycle counter is ARMV7_CYCLE_COUNTER.
 688 * The first event counter is ARMV7_COUNTER0.
 689 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
 690 */
 691#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
 692
 693/*
 694 * ARMv7 low level PMNC access
 695 */
 696
 697/*
 698 * Per-CPU PMNC: config reg
 699 */
 700#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
 701#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
 702#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
 703#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
 704#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
 705#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
 706#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
 707#define	ARMV7_PMNC_N_MASK	0x1f
 708#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
 709
 710/*
 711 * Available counters
 712 */
 713#define ARMV7_CNT0		0	/* First event counter */
 714#define ARMV7_CCNT		31	/* Cycle counter */
 715
 716/* Perf Event to low level counters mapping */
 717#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)
 718
 719/*
 720 * CNTENS: counters enable reg
 721 */
 722#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
 723#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)
 724
 725/*
 726 * CNTENC: counters disable reg
 727 */
 728#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
 729#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)
 730
 731/*
 732 * INTENS: counters overflow interrupt enable reg
 733 */
 734#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
 735#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)
 736
 737/*
 738 * INTENC: counters overflow interrupt disable reg
 739 */
 740#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
 741#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)
 742
 743/*
 744 * EVTSEL: Event selection reg
 745 */
 746#define	ARMV7_EVTSEL_MASK	0xff		/* Mask for writable bits */
 747
 748/*
 749 * SELECT: Counter selection reg
 750 */
 751#define	ARMV7_SELECT_MASK	0x1f		/* Mask for writable bits */
 752
 753/*
 754 * FLAG: counters overflow flag status reg
 755 */
 756#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
 757#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
 758#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 759#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 760
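A quick worked example of the index translation encoded by these macros: perf numbers the cycle counter 1 and the event counters from 2 (ARMV7_COUNTER0), while the hardware numbers event counters from 0 and keeps the cycle counter at bit 31, so ARMV7_EVENT_CNT_TO_CNTx is 2. A host-side check (illustration only, assuming the definitions above are in scope):

#include <assert.h>

int main(void)
{
	/* perf idx 2 (the first event counter) is hardware counter 0 */
	assert(ARMV7_CNTENS_P(ARMV7_COUNTER0) == (1 << 0));
	/* perf idx 5 is hardware counter 3 */
	assert(ARMV7_CNTENS_P(5) == (1 << 3));
	/* the cycle counter always sits at bit 31 */
	assert(ARMV7_CNTENS_C == (1 << ARMV7_CCNT));
	return 0;
}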
 761static inline unsigned long armv7_pmnc_read(void)
 762{
 763	u32 val;
 764	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 765	return val;
 766}
 767
 768static inline void armv7_pmnc_write(unsigned long val)
 769{
 770	val &= ARMV7_PMNC_MASK;
 771	isb();
 772	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 773}
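Since only the bits in ARMV7_PMNC_MASK are writable, callers use a read-modify-write pattern on this register. For example, a hypothetical caller that wants the PMU enabled with the cycle counter ticking once per 64 CPU cycles (the D bit) would do:

	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E | ARMV7_PMNC_D);

armv7pmu_start() and armv7pmu_stop() below use the same pattern with just the E bit.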
 774
 775static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
 776{
 777	return pmnc & ARMV7_OVERFLOWED_MASK;
 778}
 779
 780static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
 781					enum armv7_counters counter)
 782{
 783	int ret = 0;
 784
 785	if (counter == ARMV7_CYCLE_COUNTER)
 786		ret = pmnc & ARMV7_FLAG_C;
 787	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
 788		ret = pmnc & ARMV7_FLAG_P(counter);
 789	else
 790		pr_err("CPU%u checking wrong counter %d overflow status\n",
 791			smp_processor_id(), counter);
 792
 793	return ret;
 794}
 795
 796static inline int armv7_pmnc_select_counter(unsigned int idx)
 797{
 798	u32 val;
 799
 800	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
 801		pr_err("CPU%u selecting wrong PMNC counter"
 802			" %d\n", smp_processor_id(), idx);
 803		return -1;
 804	}
 805
 806	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
 807	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
 808	isb();
 809
 810	return idx;
 811}
 812
 813static inline u32 armv7pmu_read_counter(int idx)
 814{
 815	unsigned long value = 0;
 816
 817	if (idx == ARMV7_CYCLE_COUNTER)
 818		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
 819	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
 820		if (armv7_pmnc_select_counter(idx) == idx)
 821			asm volatile("mrc p15, 0, %0, c9, c13, 2"
 822				     : "=r" (value));
 823	} else
 824		pr_err("CPU%u reading wrong counter %d\n",
 825			smp_processor_id(), idx);
 826
 827	return value;
 828}
 829
 830static inline void armv7pmu_write_counter(int idx, u32 value)
 831{
 832	if (idx == ARMV7_CYCLE_COUNTER)
 833		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
 834	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
 835		if (armv7_pmnc_select_counter(idx) == idx)
 836			asm volatile("mcr p15, 0, %0, c9, c13, 2"
 837				     : : "r" (value));
 838	} else
 839		pr_err("CPU%u writing wrong counter %d\n",
 840			smp_processor_id(), idx);
 841}
 842
 843static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
 844{
 845	if (armv7_pmnc_select_counter(idx) == idx) {
 846		val &= ARMV7_EVTSEL_MASK;
 847		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 848	}
 849}
 850
 851static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
 852{
 853	u32 val;
 854
 855	if ((idx != ARMV7_CYCLE_COUNTER) &&
 856	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
 857		pr_err("CPU%u enabling wrong PMNC counter"
 858			" %d\n", smp_processor_id(), idx);
 859		return -1;
 860	}
 861
 862	if (idx == ARMV7_CYCLE_COUNTER)
 863		val = ARMV7_CNTENS_C;
 864	else
 865		val = ARMV7_CNTENS_P(idx);
 866
 867	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
 868
 869	return idx;
 870}
 871
 872static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
 873{
 874	u32 val;
 875
 876
 877	if ((idx != ARMV7_CYCLE_COUNTER) &&
 878	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
 879		pr_err("CPU%u disabling wrong PMNC counter"
 880			" %d\n", smp_processor_id(), idx);
 881		return -1;
 882	}
 883
 884	if (idx == ARMV7_CYCLE_COUNTER)
 885		val = ARMV7_CNTENC_C;
 886	else
 887		val = ARMV7_CNTENC_P(idx);
 888
 889	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
 890
 891	return idx;
 892}
 893
 894static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
 895{
 896	u32 val;
 897
 898	if ((idx != ARMV7_CYCLE_COUNTER) &&
 899	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
 900		pr_err("CPU%u enabling wrong PMNC counter"
 901			" interrupt enable %d\n", smp_processor_id(), idx);
 902		return -1;
 903	}
 904
 905	if (idx == ARMV7_CYCLE_COUNTER)
 906		val = ARMV7_INTENS_C;
 907	else
 908		val = ARMV7_INTENS_P(idx);
 909
 910	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
 911
 912	return idx;
 913}
 914
 915static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
 916{
 917	u32 val;
 918
 919	if ((idx != ARMV7_CYCLE_COUNTER) &&
 920	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
 921		pr_err("CPU%u disabling wrong PMNC counter"
 922			" interrupt enable %d\n", smp_processor_id(), idx);
 923		return -1;
 924	}
 925
 926	if (idx == ARMV7_CYCLE_COUNTER)
 927		val = ARMV7_INTENC_C;
 928	else
 929		val = ARMV7_INTENC_P(idx);
 930
 931	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
 932
 933	return idx;
 934}
 935
 936static inline u32 armv7_pmnc_getreset_flags(void)
 937{
 938	u32 val;
 939
 940	/* Read */
 941	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 942
 943	/* Write to clear flags */
 944	val &= ARMV7_FLAG_MASK;
 945	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
 946
 947	return val;
 948}
 949
 950#ifdef DEBUG
 951static void armv7_pmnc_dump_regs(void)
 952{
 953	u32 val;
 954	unsigned int cnt;
 955
 956	printk(KERN_INFO "PMNC registers dump:\n");
 957
 958	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
 959	printk(KERN_INFO "PMNC  =0x%08x\n", val);
 960
 961	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
 962	printk(KERN_INFO "CNTENS=0x%08x\n", val);
 963
 964	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
 965	printk(KERN_INFO "INTENS=0x%08x\n", val);
 966
 967	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 968	printk(KERN_INFO "FLAGS =0x%08x\n", val);
 969
 970	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
 971	printk(KERN_INFO "SELECT=0x%08x\n", val);
 972
 973	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 974	printk(KERN_INFO "CCNT  =0x%08x\n", val);
 975
 976	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
 977		armv7_pmnc_select_counter(cnt);
 978		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 979		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
 980			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
 981		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 982		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
 983			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
 984	}
 985}
 986#endif
 987
 988static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 989{
 990	unsigned long flags;
 991
 992	/*
 993	 * Enable counter and interrupt, and set the counter to count
 994	 * the event that we're interested in.
 995	 */
 996	raw_spin_lock_irqsave(&pmu_lock, flags);
 997
 998	/*
 999	 * Disable counter
1000	 */
1001	armv7_pmnc_disable_counter(idx);
1002
1003	/*
1004	 * Set event (if destined for PMNx counters)
1005	 * We don't need to set the event if it's a cycle count
1006	 */
1007	if (idx != ARMV7_CYCLE_COUNTER)
1008		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1009
1010	/*
1011	 * Enable interrupt for this counter
1012	 */
1013	armv7_pmnc_enable_intens(idx);
1014
1015	/*
1016	 * Enable counter
1017	 */
1018	armv7_pmnc_enable_counter(idx);
1019
1020	raw_spin_unlock_irqrestore(&pmu_lock, flags);
1021}
1022
1023static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
1024{
1025	unsigned long flags;
1026
1027	/*
1028	 * Disable counter and interrupt
1029	 */
1030	raw_spin_lock_irqsave(&pmu_lock, flags);
1031
1032	/*
1033	 * Disable counter
1034	 */
1035	armv7_pmnc_disable_counter(idx);
1036
1037	/*
1038	 * Disable interrupt for this counter
1039	 */
1040	armv7_pmnc_disable_intens(idx);
1041
1042	raw_spin_unlock_irqrestore(&pmu_lock, flags);
1043}
1044
1045static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1046{
1047	unsigned long pmnc;
1048	struct perf_sample_data data;
1049	struct cpu_hw_events *cpuc;
1050	struct pt_regs *regs;
1051	int idx;
1052
1053	/*
1054	 * Get and reset the IRQ flags
1055	 */
1056	pmnc = armv7_pmnc_getreset_flags();
1057
1058	/*
1059	 * Did an overflow occur?
1060	 */
1061	if (!armv7_pmnc_has_overflowed(pmnc))
1062		return IRQ_NONE;
1063
1064	/*
1065	 * Handle the counter(s) overflow(s)
1066	 */
1067	regs = get_irq_regs();
1068
1069	perf_sample_data_init(&data, 0);
1070
1071	cpuc = &__get_cpu_var(cpu_hw_events);
1072	for (idx = 0; idx <= armpmu->num_events; ++idx) {
1073		struct perf_event *event = cpuc->events[idx];
1074		struct hw_perf_event *hwc;
1075
1076		if (!test_bit(idx, cpuc->active_mask))
1077			continue;
1078
1079		/*
1080		 * We have a single interrupt for all counters. Check that
1081		 * each counter has overflowed before we process it.
1082		 */
1083		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
1084			continue;
1085
1086		hwc = &event->hw;
1087		armpmu_event_update(event, hwc, idx, 1);
1088		data.period = event->hw.last_period;
1089		if (!armpmu_event_set_period(event, hwc, idx))
1090			continue;
1091
1092		if (perf_event_overflow(event, &data, regs))
1093			armpmu->disable(hwc, idx);
1094	}
1095
1096	/*
1097	 * Handle the pending perf events.
1098	 *
1099	 * Note: this call *must* be run with interrupts disabled. For
1100	 * platforms that can have the PMU interrupts raised as an NMI, this
1101	 * will not work.
1102	 */
1103	irq_work_run();
1104
1105	return IRQ_HANDLED;
1106}
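The interrupt fires when a counter wraps, so sampling every 'period' events is arranged by the core code (armpmu_event_set_period(), not in this file) starting the counter at the two's complement of the period. A simplified sketch of that idea; the real helper also clamps to max_period and keeps the residue in hwc->period_left:

	u64 period = hwc->sample_period;
	u32 start  = (u32)-(u32)period;		/* 2^32 - period (mod 2^32) */

	armv7pmu_write_counter(idx, start);	/* overflows after 'period' events */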
1107
1108static void armv7pmu_start(void)
1109{
1110	unsigned long flags;
1111
1112	raw_spin_lock_irqsave(&pmu_lock, flags);
1113	/* Enable all counters */
1114	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1115	raw_spin_unlock_irqrestore(&pmu_lock, flags);
1116}
1117
1118static void armv7pmu_stop(void)
1119{
1120	unsigned long flags;
1121
1122	raw_spin_lock_irqsave(&pmu_lock, flags);
1123	/* Disable all counters */
1124	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1125	raw_spin_unlock_irqrestore(&pmu_lock, flags);
1126}
1127
1128static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
1129				  struct hw_perf_event *event)
1130{
1131	int idx;
1132
1133	/* Always place a cycle counter into the cycle counter. */
1134	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
1135		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
1136			return -EAGAIN;
1137
1138		return ARMV7_CYCLE_COUNTER;
1139	} else {
1140		/*
1141		 * For anything other than a cycle counter, try and use
1142		 * the events counters
1143		 */
1144		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
1145			if (!test_and_set_bit(idx, cpuc->used_mask))
1146				return idx;
1147		}
1148
1149		/* The counters are all in use. */
1150		return -EAGAIN;
1151	}
1152}
1153
1154static void armv7pmu_reset(void *info)
1155{
1156	u32 idx, nb_cnt = armpmu->num_events;
1157
1158	/* The counter and interrupt enable registers are unknown at reset. */
1159	for (idx = 1; idx < nb_cnt; ++idx)
1160		armv7pmu_disable_event(NULL, idx);
1161
1162	/* Initialize & Reset PMNC: C and P bits */
1163	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1164}
1165
1166static struct arm_pmu armv7pmu = {
1167	.handle_irq		= armv7pmu_handle_irq,
1168	.enable			= armv7pmu_enable_event,
1169	.disable		= armv7pmu_disable_event,
1170	.read_counter		= armv7pmu_read_counter,
1171	.write_counter		= armv7pmu_write_counter,
1172	.get_event_idx		= armv7pmu_get_event_idx,
1173	.start			= armv7pmu_start,
1174	.stop			= armv7pmu_stop,
1175	.reset			= armv7pmu_reset,
1176	.raw_event_mask		= 0xFF,
1177	.max_period		= (1LLU << 32) - 1,
1178};
1179
1180static u32 __init armv7_read_num_pmnc_events(void)
1181{
1182	u32 nb_cnt;
1183
1184	/* Read the nb of CNTx counters supported from PMNC */
1185	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1186
1187	/* Add the CPU cycles counter and return */
1188	return nb_cnt + 1;
1189}
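Worked example: on a Cortex-A9, which implements six event counters, the N field (bits [15:11]) of PMNC reads as 6. With a hypothetical PMNC value of 0x00003000:

	/*
	 * (0x00003000 >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK
	 *   = (0x00003000 >> 11) & 0x1f = 6
	 * num_events = 6 + 1 = 7   (six event counters plus CCNT)
	 */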
1190
1191static const struct arm_pmu *__init armv7_a8_pmu_init(void)
1192{
1193	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
1194	armv7pmu.name		= "ARMv7 Cortex-A8";
1195	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
1196	armv7pmu.event_map	= &armv7_a8_perf_map;
1197	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1198	return &armv7pmu;
1199}
1200
1201static const struct arm_pmu *__init armv7_a9_pmu_init(void)
1202{
1203	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
1204	armv7pmu.name		= "ARMv7 Cortex-A9";
1205	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
1206	armv7pmu.event_map	= &armv7_a9_perf_map;
1207	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1208	return &armv7pmu;
1209}
1210
1211static const struct arm_pmu *__init armv7_a5_pmu_init(void)
1212{
1213	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
1214	armv7pmu.name		= "ARMv7 Cortex-A5";
1215	armv7pmu.cache_map	= &armv7_a5_perf_cache_map;
1216	armv7pmu.event_map	= &armv7_a5_perf_map;
1217	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1218	return &armv7pmu;
1219}
1220
1221static const struct arm_pmu *__init armv7_a15_pmu_init(void)
1222{
1223	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
1224	armv7pmu.name		= "ARMv7 Cortex-A15";
1225	armv7pmu.cache_map	= &armv7_a15_perf_cache_map;
1226	armv7pmu.event_map	= &armv7_a15_perf_map;
1227	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1228	return &armv7pmu;
1229}
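These init functions are selected by the generic probe in arch/arm/kernel/perf_event.c (not part of this file), which keys off the CPU's MIDR. A hedged sketch of that dispatch; implementer 0x41 is ARM Ltd. and the primary part numbers are 0xC05/0xC08/0xC09/0xC0F for the A5/A8/A9/A15:

	unsigned long cpuid = read_cpuid_id();
	unsigned long part  = cpuid & 0xFFF0;		/* primary part number */

	if ((cpuid & 0xFF000000) == 0x41000000) {	/* ARM Ltd. implementer */
		switch (part) {
		case 0xC050: armpmu = armv7_a5_pmu_init();  break;
		case 0xC080: armpmu = armv7_a8_pmu_init();  break;
		case 0xC090: armpmu = armv7_a9_pmu_init();  break;
		case 0xC0F0: armpmu = armv7_a15_pmu_init(); break;
		}
	}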
1230#else
1231static const struct arm_pmu *__init armv7_a8_pmu_init(void)
1232{
1233	return NULL;
1234}
1235
1236static const struct arm_pmu *__init armv7_a9_pmu_init(void)
1237{
1238	return NULL;
1239}
1240
1241static const struct arm_pmu *__init armv7_a5_pmu_init(void)
1242{
1243	return NULL;
1244}
1245
1246static const struct arm_pmu *__init armv7_a15_pmu_init(void)
1247{
1248	return NULL;
1249}
1250#endif	/* CONFIG_CPU_V7 */
v3.5.6: arch/arm/kernel/perf_event_v7.c
   1/*
   2 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
   3 *
   4 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
   5 * 2010 (c) MontaVista Software, LLC.
   6 *
   7 * Copied from ARMv6 code, with the low level code inspired
   8 *  by the ARMv7 Oprofile code.
   9 *
  10 * Cortex-A8 has up to 4 configurable performance counters and
  11 *  a single cycle counter.
  12 * Cortex-A9 has up to 31 configurable performance counters and
  13 *  a single cycle counter.
  14 *
  15 * All counters can be enabled/disabled and IRQ masked separately. The cycle
  16 *  counter and all 4 performance counters together can be reset separately.
  17 */
  18
  19#ifdef CONFIG_CPU_V7
  20
  21static struct arm_pmu armv7pmu;
  22
  23/*
  24 * Common ARMv7 event types
  25 *
  26 * Note: An implementation may not be able to count all of these events
  27 * but the encodings are considered to be `reserved' in the case that
  28 * they are not available.
  29 */
  30enum armv7_perf_types {
  31	ARMV7_PERFCTR_PMNC_SW_INCR			= 0x00,
  32	ARMV7_PERFCTR_L1_ICACHE_REFILL			= 0x01,
  33	ARMV7_PERFCTR_ITLB_REFILL			= 0x02,
  34	ARMV7_PERFCTR_L1_DCACHE_REFILL			= 0x03,
  35	ARMV7_PERFCTR_L1_DCACHE_ACCESS			= 0x04,
  36	ARMV7_PERFCTR_DTLB_REFILL			= 0x05,
  37	ARMV7_PERFCTR_MEM_READ				= 0x06,
  38	ARMV7_PERFCTR_MEM_WRITE				= 0x07,
  39	ARMV7_PERFCTR_INSTR_EXECUTED			= 0x08,
  40	ARMV7_PERFCTR_EXC_TAKEN				= 0x09,
  41	ARMV7_PERFCTR_EXC_EXECUTED			= 0x0A,
  42	ARMV7_PERFCTR_CID_WRITE				= 0x0B,
  43
  44	/*
  45	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
  46	 * It counts:
  47	 *  - all (taken) branch instructions,
  48	 *  - instructions that explicitly write the PC,
  49	 *  - exception generating instructions.
  50	 */
  51	ARMV7_PERFCTR_PC_WRITE				= 0x0C,
  52	ARMV7_PERFCTR_PC_IMM_BRANCH			= 0x0D,
  53	ARMV7_PERFCTR_PC_PROC_RETURN			= 0x0E,
  54	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		= 0x0F,
  55	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
  56	ARMV7_PERFCTR_CLOCK_CYCLES			= 0x11,
  57	ARMV7_PERFCTR_PC_BRANCH_PRED			= 0x12,
  58
  59	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
  60	ARMV7_PERFCTR_MEM_ACCESS			= 0x13,
  61	ARMV7_PERFCTR_L1_ICACHE_ACCESS			= 0x14,
  62	ARMV7_PERFCTR_L1_DCACHE_WB			= 0x15,
  63	ARMV7_PERFCTR_L2_CACHE_ACCESS			= 0x16,
  64	ARMV7_PERFCTR_L2_CACHE_REFILL			= 0x17,
  65	ARMV7_PERFCTR_L2_CACHE_WB			= 0x18,
  66	ARMV7_PERFCTR_BUS_ACCESS			= 0x19,
  67	ARMV7_PERFCTR_MEM_ERROR				= 0x1A,
  68	ARMV7_PERFCTR_INSTR_SPEC			= 0x1B,
  69	ARMV7_PERFCTR_TTBR_WRITE			= 0x1C,
  70	ARMV7_PERFCTR_BUS_CYCLES			= 0x1D,
  71
  72	ARMV7_PERFCTR_CPU_CYCLES			= 0xFF
  73};
  74
  75/* ARMv7 Cortex-A8 specific event types */
  76enum armv7_a8_perf_types {
  77	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		= 0x43,
  78	ARMV7_A8_PERFCTR_L2_CACHE_REFILL		= 0x44,
  79	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		= 0x50,
  80	ARMV7_A8_PERFCTR_STALL_ISIDE			= 0x56,
  81};
  82
  83/* ARMv7 Cortex-A9 specific event types */
  84enum armv7_a9_perf_types {
  85	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		= 0x68,
  86	ARMV7_A9_PERFCTR_STALL_ICACHE			= 0x60,
  87	ARMV7_A9_PERFCTR_STALL_DISPATCH			= 0x66,
  88};
  89
  90/* ARMv7 Cortex-A5 specific event types */
  91enum armv7_a5_perf_types {
  92	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
  93	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		= 0xc3,
  94};
  95
  96/* ARMv7 Cortex-A15 specific event types */
  97enum armv7_a15_perf_types {
  98	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
  99	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
 100	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		= 0x42,
 101	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	= 0x43,
 102
 103	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		= 0x4C,
 104	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		= 0x4D,
 105
 106	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
 107	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
 108	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		= 0x52,
 109	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		= 0x53,
 110
 111	ARMV7_A15_PERFCTR_PC_WRITE_SPEC			= 0x76,
 112};
 113
 114/*
 115 * Cortex-A8 HW events mapping
 116 *
 117 * The hardware events that we support. We do support cache operations but
 118 * we have harvard caches and no way to combine instruction and data
 119 * accesses/misses in hardware.
 120 */
 121static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
 122	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 123	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 124	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 125	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 126	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 127	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 128	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
 129	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
 130	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 131};
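New relative to the v3.1 table are the stalled-cycles entries, so the generic frontend-stall event now works on the A8 without the caller knowing the 0x56 encoding. Complementing the raw-event sketch earlier, the attribute setup would simply be (hypothetical usage):

	struct perf_event_attr attr = {
		.type   = PERF_TYPE_HARDWARE,
		.size   = sizeof(attr),
		/* resolved to ARMV7_A8_PERFCTR_STALL_ISIDE (0x56) via this map */
		.config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND,
	};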
 132
 133static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 134					  [PERF_COUNT_HW_CACHE_OP_MAX]
 135					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 136	[C(L1D)] = {
 137		/*
 138		 * The performance counters don't differentiate between read
 139		 * and write accesses/misses so this isn't strictly correct,
 140		 * but it's the best we can do. Writes and reads get
 141		 * combined.
 142		 */
 143		[C(OP_READ)] = {
 144			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 145			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 146		},
 147		[C(OP_WRITE)] = {
 148			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 149			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 150		},
 151		[C(OP_PREFETCH)] = {
 152			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 153			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 154		},
 155	},
 156	[C(L1I)] = {
 157		[C(OP_READ)] = {
 158			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
 159			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 160		},
 161		[C(OP_WRITE)] = {
 162			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
 163			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 164		},
 165		[C(OP_PREFETCH)] = {
 166			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 167			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 168		},
 169	},
 170	[C(LL)] = {
 171		[C(OP_READ)] = {
 172			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 173			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 174		},
 175		[C(OP_WRITE)] = {
 176			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 177			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 178		},
 179		[C(OP_PREFETCH)] = {
 180			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 181			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 182		},
 183	},
 184	[C(DTLB)] = {
 185		[C(OP_READ)] = {
 186			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 187			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 188		},
 189		[C(OP_WRITE)] = {
 190			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 191			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 192		},
 193		[C(OP_PREFETCH)] = {
 194			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 195			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 196		},
 197	},
 198	[C(ITLB)] = {
 199		[C(OP_READ)] = {
 200			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 201			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 202		},
 203		[C(OP_WRITE)] = {
 204			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 205			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 206		},
 207		[C(OP_PREFETCH)] = {
 208			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 209			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 210		},
 211	},
 212	[C(BPU)] = {
 213		[C(OP_READ)] = {
 214			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 215			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 216		},
 217		[C(OP_WRITE)] = {
 218			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 219			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 220		},
 221		[C(OP_PREFETCH)] = {
 222			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 223			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 224		},
 225	},
 226	[C(NODE)] = {
 227		[C(OP_READ)] = {
 228			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 229			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 230		},
 231		[C(OP_WRITE)] = {
 232			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 233			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 234		},
 235		[C(OP_PREFETCH)] = {
 236			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 237			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 238		},
 239	},
 240};
 241
 242/*
 243 * Cortex-A9 HW events mapping
 244 */
 245static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 246	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 247	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
 248	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 249	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 250	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 251	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 252	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
 253	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
 254	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
 255};
 256
 257static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 258					  [PERF_COUNT_HW_CACHE_OP_MAX]
 259					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 260	[C(L1D)] = {
 261		/*
 262		 * The performance counters don't differentiate between read
 263		 * and write accesses/misses so this isn't strictly correct,
 264		 * but it's the best we can do. Writes and reads get
 265		 * combined.
 266		 */
 267		[C(OP_READ)] = {
 268			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 269			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 270		},
 271		[C(OP_WRITE)] = {
 272			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 273			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 274		},
 275		[C(OP_PREFETCH)] = {
 276			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 277			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 278		},
 279	},
 280	[C(L1I)] = {
 281		[C(OP_READ)] = {
 282			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 283			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 284		},
 285		[C(OP_WRITE)] = {
 286			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 287			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 288		},
 289		[C(OP_PREFETCH)] = {
 290			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 291			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 292		},
 293	},
 294	[C(LL)] = {
 295		[C(OP_READ)] = {
 296			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 297			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 298		},
 299		[C(OP_WRITE)] = {
 300			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 301			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 302		},
 303		[C(OP_PREFETCH)] = {
 304			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 305			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 306		},
 307	},
 308	[C(DTLB)] = {
 309		[C(OP_READ)] = {
 310			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 311			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 312		},
 313		[C(OP_WRITE)] = {
 314			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 315			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 316		},
 317		[C(OP_PREFETCH)] = {
 318			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 319			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 320		},
 321	},
 322	[C(ITLB)] = {
 323		[C(OP_READ)] = {
 324			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 325			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 326		},
 327		[C(OP_WRITE)] = {
 328			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 329			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 330		},
 331		[C(OP_PREFETCH)] = {
 332			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 333			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 334		},
 335	},
 336	[C(BPU)] = {
 337		[C(OP_READ)] = {
 338			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 339			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 340		},
 341		[C(OP_WRITE)] = {
 342			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 343			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 344		},
 345		[C(OP_PREFETCH)] = {
 346			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 347			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 348		},
 349	},
 350	[C(NODE)] = {
 351		[C(OP_READ)] = {
 352			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 353			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 354		},
 355		[C(OP_WRITE)] = {
 356			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 357			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 358		},
 359		[C(OP_PREFETCH)] = {
 360			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 361			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 362		},
 363	},
 364};
 365
 366/*
 367 * Cortex-A5 HW events mapping
 368 */
 369static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
 370	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 371	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 372	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 373	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 374	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 375	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 376	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
 377	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 378	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 379};
 380
 381static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 382					[PERF_COUNT_HW_CACHE_OP_MAX]
 383					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 384	[C(L1D)] = {
 385		[C(OP_READ)] = {
 386			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 387			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 388		},
 389		[C(OP_WRITE)] = {
 390			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 391			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 392		},
 393		[C(OP_PREFETCH)] = {
 394			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 395			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 396		},
 397	},
 398	[C(L1I)] = {
 399		[C(OP_READ)] = {
 400			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 401			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 402		},
 403		[C(OP_WRITE)] = {
 404			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 405			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 406		},
 407		/*
 408		 * The prefetch counters don't differentiate between the I
 409		 * side and the D side.
 410		 */
 411		[C(OP_PREFETCH)] = {
 412			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 413			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 414		},
 415	},
 416	[C(LL)] = {
 417		[C(OP_READ)] = {
 418			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 419			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 420		},
 421		[C(OP_WRITE)] = {
 422			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 423			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 424		},
 425		[C(OP_PREFETCH)] = {
 426			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 427			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 428		},
 429	},
 430	[C(DTLB)] = {
 431		[C(OP_READ)] = {
 432			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 433			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 434		},
 435		[C(OP_WRITE)] = {
 436			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 437			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 438		},
 439		[C(OP_PREFETCH)] = {
 440			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 441			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 442		},
 443	},
 444	[C(ITLB)] = {
 445		[C(OP_READ)] = {
 446			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 447			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 448		},
 449		[C(OP_WRITE)] = {
 450			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 451			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 452		},
 453		[C(OP_PREFETCH)] = {
 454			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 455			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 456		},
 457	},
 458	[C(BPU)] = {
 459		[C(OP_READ)] = {
 460			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 461			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 462		},
 463		[C(OP_WRITE)] = {
 464			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 465			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 466		},
 467		[C(OP_PREFETCH)] = {
 468			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 469			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 470		},
 471	},
 472	[C(NODE)] = {
 473		[C(OP_READ)] = {
 474			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 475			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 476		},
 477		[C(OP_WRITE)] = {
 478			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 479			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 480		},
 481		[C(OP_PREFETCH)] = {
 482			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 483			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 484		},
 485	},
 486};
 487
 488/*
 489 * Cortex-A15 HW events mapping
 490 */
 491static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
 492	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 493	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 494	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 495	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 496	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
 497	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 498	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 499	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 500	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 501};
 502
 503static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 504					[PERF_COUNT_HW_CACHE_OP_MAX]
 505					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 506	[C(L1D)] = {
 507		[C(OP_READ)] = {
 508			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
 509			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
 510		},
 511		[C(OP_WRITE)] = {
 512			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 513			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
 514		},
 515		[C(OP_PREFETCH)] = {
 516			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 517			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 518		},
 519	},
 520	[C(L1I)] = {
 521		/*
 522		 * Not all performance counters differentiate between read
 523		 * and write accesses/misses so we're not always strictly
 524		 * correct, but it's the best we can do. Writes and reads get
 525		 * combined in these cases.
 526		 */
 527		[C(OP_READ)] = {
 528			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 529			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 530		},
 531		[C(OP_WRITE)] = {
 532			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 533			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 534		},
 535		[C(OP_PREFETCH)] = {
 536			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 537			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 538		},
 539	},
 540	[C(LL)] = {
 541		[C(OP_READ)] = {
 542			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
 543			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
 544		},
 545		[C(OP_WRITE)] = {
 546			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
 547			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
 548		},
 549		[C(OP_PREFETCH)] = {
 550			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 551			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 552		},
 553	},
 554	[C(DTLB)] = {
 555		[C(OP_READ)] = {
 556			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 557			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
 558		},
 559		[C(OP_WRITE)] = {
 560			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 561			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
 562		},
 563		[C(OP_PREFETCH)] = {
 564			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 565			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 566		},
 567	},
 568	[C(ITLB)] = {
 569		[C(OP_READ)] = {
 570			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 571			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 572		},
 573		[C(OP_WRITE)] = {
 574			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 575			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 576		},
 577		[C(OP_PREFETCH)] = {
 578			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 579			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 580		},
 581	},
 582	[C(BPU)] = {
 583		[C(OP_READ)] = {
 584			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 585			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 586		},
 587		[C(OP_WRITE)] = {
 588			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 589			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 590		},
 591		[C(OP_PREFETCH)] = {
 592			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 593			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 594		},
 595	},
 596	[C(NODE)] = {
 597		[C(OP_READ)] = {
 598			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 599			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 600		},
 601		[C(OP_WRITE)] = {
 602			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 603			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 604		},
 605		[C(OP_PREFETCH)] = {
 606			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 607			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 608		},
 609	},
 610};
 611
 612/*
 613 * Cortex-A7 HW events mapping
 614 */
 615static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
 616	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 617	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 618	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 619	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 620	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 621	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 622	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 623	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 624	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 625};
 626
 627static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 628					[PERF_COUNT_HW_CACHE_OP_MAX]
 629					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 630	[C(L1D)] = {
 631		/*
 632		 * The performance counters don't differentiate between read
 633		 * and write accesses/misses so this isn't strictly correct,
 634		 * but it's the best we can do. Writes and reads get
 635		 * combined.
 636		 */
 637		[C(OP_READ)] = {
 638			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 639			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 640		},
 641		[C(OP_WRITE)] = {
 642			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 643			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 644		},
 645		[C(OP_PREFETCH)] = {
 646			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 647			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 648		},
 649	},
 650	[C(L1I)] = {
 651		[C(OP_READ)] = {
 652			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 653			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 654		},
 655		[C(OP_WRITE)] = {
 656			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 657			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 658		},
 659		[C(OP_PREFETCH)] = {
 660			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 661			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 662		},
 663	},
 664	[C(LL)] = {
 665		[C(OP_READ)] = {
 666			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 667			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 668		},
 669		[C(OP_WRITE)] = {
 670			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 671			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 672		},
 673		[C(OP_PREFETCH)] = {
 674			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 675			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 676		},
 677	},
 678	[C(DTLB)] = {
 679		[C(OP_READ)] = {
 680			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 681			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 682		},
 683		[C(OP_WRITE)] = {
 684			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 685			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 686		},
 687		[C(OP_PREFETCH)] = {
 688			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 689			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 690		},
 691	},
 692	[C(ITLB)] = {
 693		[C(OP_READ)] = {
 694			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 695			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 696		},
 697		[C(OP_WRITE)] = {
 698			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 699			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 700		},
 701		[C(OP_PREFETCH)] = {
 702			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 703			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 704		},
 705	},
 706	[C(BPU)] = {
 707		[C(OP_READ)] = {
 708			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 709			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 710		},
 711		[C(OP_WRITE)] = {
 712			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 713			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 714		},
 715		[C(OP_PREFETCH)] = {
 716			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 717			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 718		},
 719	},
 720	[C(NODE)] = {
 721		[C(OP_READ)] = {
 722			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 723			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 724		},
 725		[C(OP_WRITE)] = {
 726			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 727			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 728		},
 729		[C(OP_PREFETCH)] = {
 730			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 731			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 732		},
 733	},
 734};
 735
 736/*
 737 * Perf Events' indices
 738 */
 739#define	ARMV7_IDX_CYCLE_COUNTER	0
 740#define	ARMV7_IDX_COUNTER0	1
 741#define	ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 742
 743#define	ARMV7_MAX_COUNTERS	32
 744#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
 745
 746/*
 747 * ARMv7 low level PMNC access
 748 */
 749
 750/*
 751 * Perf Event to low level counters mapping
 752 */
 753#define	ARMV7_IDX_TO_COUNTER(x)	\
 754	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
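/*
 * Worked example (informational, not part of the original source): the
 * event counters start at perf index ARMV7_IDX_COUNTER0 (1), so
 *   ARMV7_IDX_TO_COUNTER(1) == 0   selects hardware counter PMN0, and
 *   ARMV7_IDX_TO_COUNTER(4) == 3   selects hardware counter PMN3.
 */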
 755
 756/*
 757 * Per-CPU PMNC: config reg
 758 */
 759#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
 760#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
 761#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
 762#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
 763#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
 764#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
 765#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
 766#define	ARMV7_PMNC_N_MASK	0x1f
 767#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
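/*
 * Decoding sketch (informational, example value assumed): on a PMU with
 * four event counters a PMNC read might return 0x41002000, in which case
 *   (0x41002000 >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK == 4
 * recovers the counter count from the read-only N field.
 */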
 768
 769/*
 770 * FLAG: counters overflow flag status reg
 771 */
 772#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 773#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 774
 775/*
 776 * PMXEVTYPER: Event selection reg
 777 */
 778#define	ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
 779#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
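/*
 * Composition example (informational): the value programmed through
 * armv7_pmnc_write_evtsel() is an 8-bit event number plus the filter
 * bits below, e.g. ARMV7_PERFCTR_L1_DCACHE_REFILL | ARMV7_EXCLUDE_USER
 * counts L1 data-cache refills everywhere except user mode; bits
 * outside ARMV7_EVTYPE_MASK are masked off before the write.
 */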
 780
 781/*
 782 * Event filters for PMUv2
 783 */
 784#define	ARMV7_EXCLUDE_PL1	(1 << 31)
 785#define	ARMV7_EXCLUDE_USER	(1 << 30)
 786#define	ARMV7_INCLUDE_HYP	(1 << 27)
 787
 788static inline u32 armv7_pmnc_read(void)
 789{
 790	u32 val;
 791	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 792	return val;
 793}
 794
 795static inline void armv7_pmnc_write(u32 val)
 796{
 797	val &= ARMV7_PMNC_MASK;
 798	isb();
 799	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 800}
 801
 802static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 803{
 804	return pmnc & ARMV7_OVERFLOWED_MASK;
 805}
 806
 807static inline int armv7_pmnc_counter_valid(int idx)
 808{
 809	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
 810}
 811
 812static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 813{
 814	int ret = 0;
 815	u32 counter;
 816
 817	if (!armv7_pmnc_counter_valid(idx)) {
 818		pr_err("CPU%u checking wrong counter %d overflow status\n",
 819			smp_processor_id(), idx);
 820	} else {
 821		counter = ARMV7_IDX_TO_COUNTER(idx);
 822		ret = pmnc & BIT(counter);
 823	}
 824
 825	return ret;
 826}
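/*
 * Informational note: the index-to-bit mapping also covers the cycle
 * counter, since ARMV7_IDX_TO_COUNTER(ARMV7_IDX_CYCLE_COUNTER) is
 * (0 - 1) & 0x1f == 31, which lines up with the cycle-counter overflow
 * flag living in bit 31 of the FLAG register.
 */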
 827
 828static inline int armv7_pmnc_select_counter(int idx)
 829{
 830	u32 counter;
 831
 832	if (!armv7_pmnc_counter_valid(idx)) {
 833		pr_err("CPU%u selecting wrong PMNC counter %d\n",
 834			smp_processor_id(), idx);
 835		return -EINVAL;
 836	}
 837
 838	counter = ARMV7_IDX_TO_COUNTER(idx);
 839	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 840	isb();
 841
 842	return idx;
 843}
 844
 845static inline u32 armv7pmu_read_counter(int idx)
 846{
 847	u32 value = 0;
 848
 849	if (!armv7_pmnc_counter_valid(idx))
 850		pr_err("CPU%u reading wrong counter %d\n",
 851			smp_processor_id(), idx);
 852	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
 853		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
 854	else if (armv7_pmnc_select_counter(idx) == idx)
 855		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
 856
 857	return value;
 858}
 859
 860static inline void armv7pmu_write_counter(int idx, u32 value)
 861{
 862	if (!armv7_pmnc_counter_valid(idx))
 863		pr_err("CPU%u writing wrong counter %d\n",
 864			smp_processor_id(), idx);
 865	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
 866		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
 867	else if (armv7_pmnc_select_counter(idx) == idx)
 868		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
 869}
 870
 871static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 872{
 873	if (armv7_pmnc_select_counter(idx) == idx) {
 874		val &= ARMV7_EVTYPE_MASK;
 875		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 876	}
 877}
 878
 879static inline int armv7_pmnc_enable_counter(int idx)
 880{
 881	u32 counter;
 882
 883	if (!armv7_pmnc_counter_valid(idx)) {
 884		pr_err("CPU%u enabling wrong PMNC counter %d\n",
 885			smp_processor_id(), idx);
 886		return -EINVAL;
 887	}
 888
 889	counter = ARMV7_IDX_TO_COUNTER(idx);
 890	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 891	return idx;
 892}
 893
 894static inline int armv7_pmnc_disable_counter(int idx)
 895{
 896	u32 counter;
 897
 898	if (!armv7_pmnc_counter_valid(idx)) {
 899		pr_err("CPU%u disabling wrong PMNC counter %d\n",
 900			smp_processor_id(), idx);
 901		return -EINVAL;
 902	}
 903
 904	counter = ARMV7_IDX_TO_COUNTER(idx);
 905	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 906	return idx;
 907}
 908
 909static inline int armv7_pmnc_enable_intens(int idx)
 910{
 911	u32 counter;
 912
 913	if (!armv7_pmnc_counter_valid(idx)) {
 914		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
 915			smp_processor_id(), idx);
 916		return -EINVAL;
 917	}
 918
 919	counter = ARMV7_IDX_TO_COUNTER(idx);
 920	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 921	return idx;
 922}
 923
 924static inline int armv7_pmnc_disable_intens(int idx)
 925{
 926	u32 counter;
 927
 928	if (!armv7_pmnc_counter_valid(idx)) {
 929		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
 930			smp_processor_id(), idx);
 931		return -EINVAL;
 932	}
 933
 934	counter = ARMV7_IDX_TO_COUNTER(idx);
 935	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 936	isb();
 937	/* Clear the overflow flag in case an interrupt is pending. */
 938	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
 939	isb();
 940
 941	return idx;
 942}
 943
 944static inline u32 armv7_pmnc_getreset_flags(void)
 945{
 946	u32 val;
 947
 948	/* Read */
 949	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 950
 951	/* Write to clear flags */
 952	val &= ARMV7_FLAG_MASK;
 953	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
 954
 955	return val;
 956}
 957
 958#ifdef DEBUG
 959static void armv7_pmnc_dump_regs(void)
 960{
 961	u32 val;
 962	unsigned int cnt;
 963
 964	printk(KERN_INFO "PMNC registers dump:\n");
 965
 966	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
 967	printk(KERN_INFO "PMNC  =0x%08x\n", val);
 968
 969	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
 970	printk(KERN_INFO "CNTENS=0x%08x\n", val);
 971
 972	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
 973	printk(KERN_INFO "INTENS=0x%08x\n", val);
 974
 975	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 976	printk(KERN_INFO "FLAGS =0x%08x\n", val);
 977
 978	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
 979	printk(KERN_INFO "SELECT=0x%08x\n", val);
 980
 981	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 982	printk(KERN_INFO "CCNT  =0x%08x\n", val);
 983
 984	for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
 985		armv7_pmnc_select_counter(cnt);
 986		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 987		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
 988			ARMV7_IDX_TO_COUNTER(cnt), val);
 989		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 990		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
 991			ARMV7_IDX_TO_COUNTER(cnt), val);
 992	}
 993}
 994#endif
 995
 996static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 997{
 998	unsigned long flags;
 999	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1000
1001	/*
1002	 * Enable counter and interrupt, and set the counter to count
1003	 * the event that we're interested in.
1004	 */
1005	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1006
1007	/*
1008	 * Disable counter
1009	 */
1010	armv7_pmnc_disable_counter(idx);
1011
1012	/*
1013	 * Set event (if destined for PMNx counters)
1014	 * We only need to set the event for the cycle counter if we
1015	 * have the ability to perform event filtering.
1016	 */
1017	if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
1018		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1019
1020	/*
1021	 * Enable interrupt for this counter
1022	 */
1023	armv7_pmnc_enable_intens(idx);
1024
1025	/*
1026	 * Enable counter
1027	 */
1028	armv7_pmnc_enable_counter(idx);
1029
1030	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1031}
1032
1033static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
1034{
1035	unsigned long flags;
1036	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1037
1038	/*
1039	 * Disable counter and interrupt
1040	 */
1041	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1042
1043	/*
1044	 * Disable counter
1045	 */
1046	armv7_pmnc_disable_counter(idx);
1047
1048	/*
1049	 * Disable interrupt for this counter
1050	 */
1051	armv7_pmnc_disable_intens(idx);
1052
1053	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1054}
1055
1056static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1057{
1058	u32 pmnc;
1059	struct perf_sample_data data;
1060	struct pmu_hw_events *cpuc;
1061	struct pt_regs *regs;
1062	int idx;
1063
1064	/*
1065	 * Get and reset the IRQ flags
1066	 */
1067	pmnc = armv7_pmnc_getreset_flags();
1068
1069	/*
1070	 * Did an overflow occur?
1071	 */
1072	if (!armv7_pmnc_has_overflowed(pmnc))
1073		return IRQ_NONE;
1074
1075	/*
1076	 * Handle the counter(s) overflow(s)
1077	 */
1078	regs = get_irq_regs();
1079
1080	cpuc = &__get_cpu_var(cpu_hw_events);
1081	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
1082		struct perf_event *event = cpuc->events[idx];
1083		struct hw_perf_event *hwc;
1084
1085		/* Ignore if we don't have an event. */
1086		if (!event)
1087			continue;
1088
1089		/*
1090		 * We have a single interrupt for all counters. Check that
1091		 * each counter has overflowed before we process it.
1092		 */
1093		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
1094			continue;
1095
1096		hwc = &event->hw;
1097		armpmu_event_update(event, hwc, idx);
1098		perf_sample_data_init(&data, 0, hwc->last_period);
1099		if (!armpmu_event_set_period(event, hwc, idx))
1100			continue;
1101
1102		if (perf_event_overflow(event, &data, regs))
1103			cpu_pmu->disable(hwc, idx);
1104	}
1105
1106	/*
1107	 * Handle the pending perf events.
1108	 *
1109	 * Note: this call *must* be run with interrupts disabled. For
1110	 * platforms that can have the PMU interrupts raised as an NMI, this
1111	 * will not work.
1112	 */
1113	irq_work_run();
1114
1115	return IRQ_HANDLED;
1116}
1117
1118static void armv7pmu_start(void)
1119{
1120	unsigned long flags;
1121	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1122
1123	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1124	/* Enable all counters */
1125	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1126	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1127}
1128
1129static void armv7pmu_stop(void)
1130{
1131	unsigned long flags;
1132	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1133
1134	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1135	/* Disable all counters */
1136	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1137	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1138}
1139
1140static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1141				  struct hw_perf_event *event)
1142{
1143	int idx;
1144	unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
1145
1146	/* Always place a cycle-counting event into the cycle counter. */
1147	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1148		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1149			return -EAGAIN;
1150
1151		return ARMV7_IDX_CYCLE_COUNTER;
1152	}
1153
1154	/*
1155	 * For anything other than a cycle counter, try to use
1156	 * the event counters.
1157	 */
1158	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1159		if (!test_and_set_bit(idx, cpuc->used_mask))
1160			return idx;
1161	}
1162
1163	/* The counters are all in use. */
1164	return -EAGAIN;
1165}
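/*
 * Allocation sketch (informational): cpuc->used_mask is a bitmap keyed
 * by perf index, so on a PMU with num_events == 5 the loop above scans
 * indices 1..4 once the dedicated cycle counter (index 0) is taken.
 */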
1166
1167/*
1168 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1169 */
1170static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1171				     struct perf_event_attr *attr)
1172{
1173	unsigned long config_base = 0;
1174
1175	if (attr->exclude_idle)
1176		return -EPERM;
1177	if (attr->exclude_user)
1178		config_base |= ARMV7_EXCLUDE_USER;
1179	if (attr->exclude_kernel)
1180		config_base |= ARMV7_EXCLUDE_PL1;
1181	if (!attr->exclude_hv)
1182		config_base |= ARMV7_INCLUDE_HYP;
1183
1184	/*
1185	 * Install the filter into config_base as this is used to
1186	 * construct the event type.
1187	 */
1188	event->config_base = config_base;
1189
1190	return 0;
1191}
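/*
 * Resulting filters (informational sketch): with a default
 * perf_event_attr (all exclude_* bits clear), config_base ends up as
 * ARMV7_INCLUDE_HYP; with exclude_user = 1 and exclude_hv = 1 it is
 * ARMV7_EXCLUDE_USER alone.
 */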
1192
1193static void armv7pmu_reset(void *info)
1194{
1195	u32 idx, nb_cnt = cpu_pmu->num_events;
1196
1197	/* The counter and interrupt enable registers are unknown at reset. */
1198	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
1199		armv7pmu_disable_event(NULL, idx);
1200
1201	/* Initialize & Reset PMNC: C and P bits */
1202	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1203}
1204
1205static int armv7_a8_map_event(struct perf_event *event)
1206{
1207	return map_cpu_event(event, &armv7_a8_perf_map,
1208				&armv7_a8_perf_cache_map, 0xFF);
1209}
1210
1211static int armv7_a9_map_event(struct perf_event *event)
1212{
1213	return map_cpu_event(event, &armv7_a9_perf_map,
1214				&armv7_a9_perf_cache_map, 0xFF);
1215}
1216
1217static int armv7_a5_map_event(struct perf_event *event)
1218{
1219	return map_cpu_event(event, &armv7_a5_perf_map,
1220				&armv7_a5_perf_cache_map, 0xFF);
1221}
1222
1223static int armv7_a15_map_event(struct perf_event *event)
1224{
1225	return map_cpu_event(event, &armv7_a15_perf_map,
1226				&armv7_a15_perf_cache_map, 0xFF);
1227}
1228
1229static int armv7_a7_map_event(struct perf_event *event)
1230{
1231	return map_cpu_event(event, &armv7_a7_perf_map,
1232				&armv7_a7_perf_cache_map, 0xFF);
1233}
1234
1235static struct arm_pmu armv7pmu = {
1236	.handle_irq		= armv7pmu_handle_irq,
1237	.enable			= armv7pmu_enable_event,
1238	.disable		= armv7pmu_disable_event,
1239	.read_counter		= armv7pmu_read_counter,
1240	.write_counter		= armv7pmu_write_counter,
1241	.get_event_idx		= armv7pmu_get_event_idx,
1242	.start			= armv7pmu_start,
1243	.stop			= armv7pmu_stop,
1244	.reset			= armv7pmu_reset,
1245	.max_period		= (1LLU << 32) - 1,
1246};
1247
1248static u32 __init armv7_read_num_pmnc_events(void)
1249{
1250	u32 nb_cnt;
1251
1252	/* Read the number of CNTx counters supported from PMNC */
1253	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1254
1255	/* Add the CPU cycles counter and return */
1256	return nb_cnt + 1;
1257}
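/*
 * Example (informational): on an implementation whose PMNC N field
 * reads as 4, this returns 5: four programmable event counters plus
 * the always-present cycle counter.
 */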
1258
1259static struct arm_pmu *__init armv7_a8_pmu_init(void)
1260{
1261	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
1262	armv7pmu.name		= "ARMv7 Cortex-A8";
1263	armv7pmu.map_event	= armv7_a8_map_event;
1264	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1265	return &armv7pmu;
1266}
1267
1268static struct arm_pmu *__init armv7_a9_pmu_init(void)
1269{
1270	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
1271	armv7pmu.name		= "ARMv7 Cortex-A9";
1272	armv7pmu.map_event	= armv7_a9_map_event;
1273	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1274	return &armv7pmu;
1275}
1276
1277static struct arm_pmu *__init armv7_a5_pmu_init(void)
1278{
1279	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
1280	armv7pmu.name		= "ARMv7 Cortex-A5";
1281	armv7pmu.map_event	= armv7_a5_map_event;
1282	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1283	return &armv7pmu;
1284}
1285
1286static struct arm_pmu *__init armv7_a15_pmu_init(void)
1287{
1288	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
1289	armv7pmu.name		= "ARMv7 Cortex-A15";
1290	armv7pmu.map_event	= armv7_a15_map_event;
1291	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1292	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
1293	return &armv7pmu;
1294}
1295
1296static struct arm_pmu *__init armv7_a7_pmu_init(void)
1297{
1298	armv7pmu.id		= ARM_PERF_PMU_ID_CA7;
1299	armv7pmu.name		= "ARMv7 Cortex-A7";
1300	armv7pmu.map_event	= armv7_a7_map_event;
1301	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1302	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
1303	return &armv7pmu;
1304}
1305#else
1306static struct arm_pmu *__init armv7_a8_pmu_init(void)
1307{
1308	return NULL;
1309}
1310
1311static struct arm_pmu *__init armv7_a9_pmu_init(void)
1312{
1313	return NULL;
1314}
1315
1316static struct arm_pmu *__init armv7_a5_pmu_init(void)
1317{
1318	return NULL;
1319}
1320
1321static struct arm_pmu *__init armv7_a15_pmu_init(void)
1322{
1323	return NULL;
1324}
1325
1326static struct arm_pmu *__init armv7_a7_pmu_init(void)
1327{
1328	return NULL;
1329}
1330#endif	/* CONFIG_CPU_V7 */