   1/*
   2 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
   3 *
   4 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
   5 * 2010 (c) MontaVista Software, LLC.
   6 *
   7 * Copied from ARMv6 code, with the low level code inspired
   8 *  by the ARMv7 Oprofile code.
   9 *
  10 * Cortex-A8 has up to 4 configurable performance counters and
  11 *  a single cycle counter.
  12 * Cortex-A9 has up to 31 configurable performance counters and
  13 *  a single cycle counter.
  14 *
   15 * All counters can be enabled/disabled and IRQ masked separately. The cycle
   16 *  counter can be reset on its own, and the event counters can be reset as a group.
  17 */
  18
  19#ifdef CONFIG_CPU_V7
  20
  21#include <asm/cp15.h>
  22#include <asm/vfp.h>
  23#include "../vfp/vfpinstr.h"
  24
  25/*
  26 * Common ARMv7 event types
  27 *
  28 * Note: An implementation may not be able to count all of these events
  29 * but the encodings are considered to be `reserved' in the case that
  30 * they are not available.
  31 */
  32enum armv7_perf_types {
  33	ARMV7_PERFCTR_PMNC_SW_INCR			= 0x00,
  34	ARMV7_PERFCTR_L1_ICACHE_REFILL			= 0x01,
  35	ARMV7_PERFCTR_ITLB_REFILL			= 0x02,
  36	ARMV7_PERFCTR_L1_DCACHE_REFILL			= 0x03,
  37	ARMV7_PERFCTR_L1_DCACHE_ACCESS			= 0x04,
  38	ARMV7_PERFCTR_DTLB_REFILL			= 0x05,
  39	ARMV7_PERFCTR_MEM_READ				= 0x06,
  40	ARMV7_PERFCTR_MEM_WRITE				= 0x07,
  41	ARMV7_PERFCTR_INSTR_EXECUTED			= 0x08,
  42	ARMV7_PERFCTR_EXC_TAKEN				= 0x09,
  43	ARMV7_PERFCTR_EXC_EXECUTED			= 0x0A,
  44	ARMV7_PERFCTR_CID_WRITE				= 0x0B,
  45
  46	/*
  47	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
  48	 * It counts:
  49	 *  - all (taken) branch instructions,
  50	 *  - instructions that explicitly write the PC,
  51	 *  - exception generating instructions.
  52	 */
  53	ARMV7_PERFCTR_PC_WRITE				= 0x0C,
  54	ARMV7_PERFCTR_PC_IMM_BRANCH			= 0x0D,
  55	ARMV7_PERFCTR_PC_PROC_RETURN			= 0x0E,
  56	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		= 0x0F,
  57	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
  58	ARMV7_PERFCTR_CLOCK_CYCLES			= 0x11,
  59	ARMV7_PERFCTR_PC_BRANCH_PRED			= 0x12,
  60
  61	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
  62	ARMV7_PERFCTR_MEM_ACCESS			= 0x13,
  63	ARMV7_PERFCTR_L1_ICACHE_ACCESS			= 0x14,
  64	ARMV7_PERFCTR_L1_DCACHE_WB			= 0x15,
  65	ARMV7_PERFCTR_L2_CACHE_ACCESS			= 0x16,
  66	ARMV7_PERFCTR_L2_CACHE_REFILL			= 0x17,
  67	ARMV7_PERFCTR_L2_CACHE_WB			= 0x18,
  68	ARMV7_PERFCTR_BUS_ACCESS			= 0x19,
  69	ARMV7_PERFCTR_MEM_ERROR				= 0x1A,
  70	ARMV7_PERFCTR_INSTR_SPEC			= 0x1B,
  71	ARMV7_PERFCTR_TTBR_WRITE			= 0x1C,
  72	ARMV7_PERFCTR_BUS_CYCLES			= 0x1D,
  73
  74	ARMV7_PERFCTR_CPU_CYCLES			= 0xFF
  75};
  76
  77/* ARMv7 Cortex-A8 specific event types */
  78enum armv7_a8_perf_types {
  79	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		= 0x43,
  80	ARMV7_A8_PERFCTR_L2_CACHE_REFILL		= 0x44,
  81	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		= 0x50,
  82	ARMV7_A8_PERFCTR_STALL_ISIDE			= 0x56,
  83};
  84
  85/* ARMv7 Cortex-A9 specific event types */
  86enum armv7_a9_perf_types {
  87	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		= 0x68,
  88	ARMV7_A9_PERFCTR_STALL_ICACHE			= 0x60,
  89	ARMV7_A9_PERFCTR_STALL_DISPATCH			= 0x66,
  90};
  91
  92/* ARMv7 Cortex-A5 specific event types */
  93enum armv7_a5_perf_types {
  94	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
  95	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		= 0xc3,
  96};
  97
  98/* ARMv7 Cortex-A15 specific event types */
  99enum armv7_a15_perf_types {
 100	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
 101	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
 102	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		= 0x42,
 103	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	= 0x43,
 104
 105	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		= 0x4C,
 106	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		= 0x4D,
 107
 108	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
 109	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
 110	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		= 0x52,
 111	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		= 0x53,
 112
 113	ARMV7_A15_PERFCTR_PC_WRITE_SPEC			= 0x76,
 114};
 115
 116/* ARMv7 Cortex-A12 specific event types */
 117enum armv7_a12_perf_types {
 118	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
 119	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
 120
 121	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
 122	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
 123
 124	ARMV7_A12_PERFCTR_PC_WRITE_SPEC			= 0x76,
 125
 126	ARMV7_A12_PERFCTR_PF_TLB_REFILL			= 0xe7,
 127};
 128
 129/* ARMv7 Krait specific event types */
 130enum krait_perf_types {
 131	KRAIT_PMRESR0_GROUP0				= 0xcc,
 132	KRAIT_PMRESR1_GROUP0				= 0xd0,
 133	KRAIT_PMRESR2_GROUP0				= 0xd4,
 134	KRAIT_VPMRESR0_GROUP0				= 0xd8,
 135
 136	KRAIT_PERFCTR_L1_ICACHE_ACCESS			= 0x10011,
 137	KRAIT_PERFCTR_L1_ICACHE_MISS			= 0x10010,
 138
 139	KRAIT_PERFCTR_L1_ITLB_ACCESS			= 0x12222,
 140	KRAIT_PERFCTR_L1_DTLB_ACCESS			= 0x12210,
 141};
 142
 143/*
 144 * Cortex-A8 HW events mapping
 145 *
 146 * The hardware events that we support. We do support cache operations but
  147 * we have Harvard caches and no way to combine instruction and data
 148 * accesses/misses in hardware.
 149 */
 150static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
 151	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 152	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 153	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 154	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 155	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 156	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 157	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
 158	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
 159	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 160};
 161
 162static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 163					  [PERF_COUNT_HW_CACHE_OP_MAX]
 164					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 165	[C(L1D)] = {
 166		/*
 167		 * The performance counters don't differentiate between read
 168		 * and write accesses/misses so this isn't strictly correct,
 169		 * but it's the best we can do. Writes and reads get
 170		 * combined.
 171		 */
 172		[C(OP_READ)] = {
 173			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 174			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 175		},
 176		[C(OP_WRITE)] = {
 177			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 178			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 179		},
 180		[C(OP_PREFETCH)] = {
 181			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 182			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 183		},
 184	},
 185	[C(L1I)] = {
 186		[C(OP_READ)] = {
 187			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
 188			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 189		},
 190		[C(OP_WRITE)] = {
 191			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 192			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 193		},
 194		[C(OP_PREFETCH)] = {
 195			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 196			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 197		},
 198	},
 199	[C(LL)] = {
 200		[C(OP_READ)] = {
 201			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 202			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 203		},
 204		[C(OP_WRITE)] = {
 205			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 206			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 207		},
 208		[C(OP_PREFETCH)] = {
 209			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 210			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 211		},
 212	},
 213	[C(DTLB)] = {
 214		[C(OP_READ)] = {
 215			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 216			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 217		},
 218		[C(OP_WRITE)] = {
 219			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 220			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 221		},
 222		[C(OP_PREFETCH)] = {
 223			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 224			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 225		},
 226	},
 227	[C(ITLB)] = {
 228		[C(OP_READ)] = {
 229			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 230			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 231		},
 232		[C(OP_WRITE)] = {
 233			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 234			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 235		},
 236		[C(OP_PREFETCH)] = {
 237			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 238			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 239		},
 240	},
 241	[C(BPU)] = {
 242		[C(OP_READ)] = {
 243			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 244			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 245		},
 246		[C(OP_WRITE)] = {
 247			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 248			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 249		},
 250		[C(OP_PREFETCH)] = {
 251			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 252			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 253		},
 254	},
 255	[C(NODE)] = {
 256		[C(OP_READ)] = {
 257			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 258			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 259		},
 260		[C(OP_WRITE)] = {
 261			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 262			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 263		},
 264		[C(OP_PREFETCH)] = {
 265			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 266			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 267		},
 268	},
 269};
 270
 271/*
 272 * Cortex-A9 HW events mapping
 273 */
 274static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 275	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 276	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
 277	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 278	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 279	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 280	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 281	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
 282	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
 283	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
 284};
 285
 286static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 287					  [PERF_COUNT_HW_CACHE_OP_MAX]
 288					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 289	[C(L1D)] = {
 290		/*
 291		 * The performance counters don't differentiate between read
 292		 * and write accesses/misses so this isn't strictly correct,
 293		 * but it's the best we can do. Writes and reads get
 294		 * combined.
 295		 */
 296		[C(OP_READ)] = {
 297			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 298			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 299		},
 300		[C(OP_WRITE)] = {
 301			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 302			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 303		},
 304		[C(OP_PREFETCH)] = {
 305			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 306			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 307		},
 308	},
 309	[C(L1I)] = {
 310		[C(OP_READ)] = {
 311			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 312			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 313		},
 314		[C(OP_WRITE)] = {
 315			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 316			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 317		},
 318		[C(OP_PREFETCH)] = {
 319			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 320			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 321		},
 322	},
 323	[C(LL)] = {
 324		[C(OP_READ)] = {
 325			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 326			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 327		},
 328		[C(OP_WRITE)] = {
 329			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 330			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 331		},
 332		[C(OP_PREFETCH)] = {
 333			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 334			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 335		},
 336	},
 337	[C(DTLB)] = {
 338		[C(OP_READ)] = {
 339			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 340			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 341		},
 342		[C(OP_WRITE)] = {
 343			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 344			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 345		},
 346		[C(OP_PREFETCH)] = {
 347			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 348			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 349		},
 350	},
 351	[C(ITLB)] = {
 352		[C(OP_READ)] = {
 353			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 354			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 355		},
 356		[C(OP_WRITE)] = {
 357			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 358			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 359		},
 360		[C(OP_PREFETCH)] = {
 361			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 362			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 363		},
 364	},
 365	[C(BPU)] = {
 366		[C(OP_READ)] = {
 367			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 368			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 369		},
 370		[C(OP_WRITE)] = {
 371			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 372			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 373		},
 374		[C(OP_PREFETCH)] = {
 375			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 376			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 377		},
 378	},
 379	[C(NODE)] = {
 380		[C(OP_READ)] = {
 381			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 382			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 383		},
 384		[C(OP_WRITE)] = {
 385			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 386			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 387		},
 388		[C(OP_PREFETCH)] = {
 389			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 390			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 391		},
 392	},
 393};
 394
 395/*
 396 * Cortex-A5 HW events mapping
 397 */
 398static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
 399	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 400	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 401	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 402	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 403	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 404	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 405	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
 406	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 407	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 408};
 409
 410static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 411					[PERF_COUNT_HW_CACHE_OP_MAX]
 412					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 413	[C(L1D)] = {
 414		[C(OP_READ)] = {
 415			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 416			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 417		},
 418		[C(OP_WRITE)] = {
 419			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 420			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 421		},
 422		[C(OP_PREFETCH)] = {
 423			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 424			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 425		},
 426	},
 427	[C(L1I)] = {
 428		[C(OP_READ)] = {
 429			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 430			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 431		},
 432		[C(OP_WRITE)] = {
 433			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 434			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 435		},
 436		/*
 437		 * The prefetch counters don't differentiate between the I
 438		 * side and the D side.
 439		 */
 440		[C(OP_PREFETCH)] = {
 441			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 442			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 443		},
 444	},
 445	[C(LL)] = {
 446		[C(OP_READ)] = {
 447			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 448			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 449		},
 450		[C(OP_WRITE)] = {
 451			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 452			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 453		},
 454		[C(OP_PREFETCH)] = {
 455			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 456			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 457		},
 458	},
 459	[C(DTLB)] = {
 460		[C(OP_READ)] = {
 461			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 462			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 463		},
 464		[C(OP_WRITE)] = {
 465			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 466			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 467		},
 468		[C(OP_PREFETCH)] = {
 469			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 470			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 471		},
 472	},
 473	[C(ITLB)] = {
 474		[C(OP_READ)] = {
 475			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 476			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 477		},
 478		[C(OP_WRITE)] = {
 479			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 480			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 481		},
 482		[C(OP_PREFETCH)] = {
 483			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 484			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 485		},
 486	},
 487	[C(BPU)] = {
 488		[C(OP_READ)] = {
 489			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 490			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 491		},
 492		[C(OP_WRITE)] = {
 493			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 494			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 495		},
 496		[C(OP_PREFETCH)] = {
 497			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 498			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 499		},
 500	},
 501	[C(NODE)] = {
 502		[C(OP_READ)] = {
 503			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 504			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 505		},
 506		[C(OP_WRITE)] = {
 507			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 508			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 509		},
 510		[C(OP_PREFETCH)] = {
 511			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 512			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 513		},
 514	},
 515};
 516
 517/*
 518 * Cortex-A15 HW events mapping
 519 */
 520static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
 521	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 522	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 523	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 524	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 525	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
 526	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 527	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 528	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 529	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 530};
 531
 532static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 533					[PERF_COUNT_HW_CACHE_OP_MAX]
 534					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 535	[C(L1D)] = {
 536		[C(OP_READ)] = {
 537			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
 538			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
 539		},
 540		[C(OP_WRITE)] = {
 541			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 542			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
 543		},
 544		[C(OP_PREFETCH)] = {
 545			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 546			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 547		},
 548	},
 549	[C(L1I)] = {
 550		/*
 551		 * Not all performance counters differentiate between read
 552		 * and write accesses/misses so we're not always strictly
 553		 * correct, but it's the best we can do. Writes and reads get
 554		 * combined in these cases.
 555		 */
 556		[C(OP_READ)] = {
 557			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 558			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 559		},
 560		[C(OP_WRITE)] = {
 561			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 562			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 563		},
 564		[C(OP_PREFETCH)] = {
 565			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 566			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 567		},
 568	},
 569	[C(LL)] = {
 570		[C(OP_READ)] = {
 571			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
 572			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
 573		},
 574		[C(OP_WRITE)] = {
 575			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
 576			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
 577		},
 578		[C(OP_PREFETCH)] = {
 579			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 580			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 581		},
 582	},
 583	[C(DTLB)] = {
 584		[C(OP_READ)] = {
 585			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 586			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
 587		},
 588		[C(OP_WRITE)] = {
 589			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 590			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
 591		},
 592		[C(OP_PREFETCH)] = {
 593			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 594			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 595		},
 596	},
 597	[C(ITLB)] = {
 598		[C(OP_READ)] = {
 599			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 600			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 601		},
 602		[C(OP_WRITE)] = {
 603			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 604			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 605		},
 606		[C(OP_PREFETCH)] = {
 607			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 608			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 609		},
 610	},
 611	[C(BPU)] = {
 612		[C(OP_READ)] = {
 613			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 614			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 615		},
 616		[C(OP_WRITE)] = {
 617			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 618			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 619		},
 620		[C(OP_PREFETCH)] = {
 621			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 622			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 623		},
 624	},
 625	[C(NODE)] = {
 626		[C(OP_READ)] = {
 627			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 628			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 629		},
 630		[C(OP_WRITE)] = {
 631			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 632			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 633		},
 634		[C(OP_PREFETCH)] = {
 635			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 636			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 637		},
 638	},
 639};
 640
 641/*
 642 * Cortex-A7 HW events mapping
 643 */
 644static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
 645	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 646	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 647	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 648	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 649	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 650	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 651	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 652	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 653	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 654};
 655
 656static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 657					[PERF_COUNT_HW_CACHE_OP_MAX]
 658					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 659	[C(L1D)] = {
 660		/*
 661		 * The performance counters don't differentiate between read
 662		 * and write accesses/misses so this isn't strictly correct,
 663		 * but it's the best we can do. Writes and reads get
 664		 * combined.
 665		 */
 666		[C(OP_READ)] = {
 667			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 668			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 669		},
 670		[C(OP_WRITE)] = {
 671			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 672			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 673		},
 674		[C(OP_PREFETCH)] = {
 675			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 676			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 677		},
 678	},
 679	[C(L1I)] = {
 680		[C(OP_READ)] = {
 681			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 682			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 683		},
 684		[C(OP_WRITE)] = {
 685			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 686			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 687		},
 688		[C(OP_PREFETCH)] = {
 689			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 690			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 691		},
 692	},
 693	[C(LL)] = {
 694		[C(OP_READ)] = {
 695			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 696			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 697		},
 698		[C(OP_WRITE)] = {
 699			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 700			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 701		},
 702		[C(OP_PREFETCH)] = {
 703			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 704			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 705		},
 706	},
 707	[C(DTLB)] = {
 708		[C(OP_READ)] = {
 709			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 710			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 711		},
 712		[C(OP_WRITE)] = {
 713			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 714			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 715		},
 716		[C(OP_PREFETCH)] = {
 717			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 718			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 719		},
 720	},
 721	[C(ITLB)] = {
 722		[C(OP_READ)] = {
 723			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 724			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 725		},
 726		[C(OP_WRITE)] = {
 727			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 728			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 729		},
 730		[C(OP_PREFETCH)] = {
 731			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 732			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 733		},
 734	},
 735	[C(BPU)] = {
 736		[C(OP_READ)] = {
 737			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 738			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 739		},
 740		[C(OP_WRITE)] = {
 741			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 742			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 743		},
 744		[C(OP_PREFETCH)] = {
 745			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 746			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 747		},
 748	},
 749	[C(NODE)] = {
 750		[C(OP_READ)] = {
 751			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 752			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 753		},
 754		[C(OP_WRITE)] = {
 755			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 756			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 757		},
 758		[C(OP_PREFETCH)] = {
 759			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 760			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 761		},
 762	},
 763};
 764
 765/*
 766 * Cortex-A12 HW events mapping
 767 */
 768static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
 769	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 770	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 771	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 772	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 773	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
 774	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 775	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 776	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 777	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 778};
 779
 780static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 781					[PERF_COUNT_HW_CACHE_OP_MAX]
 782					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 783	[C(L1D)] = {
 784		[C(OP_READ)] = {
 785			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
 786			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 787		},
 788		[C(OP_WRITE)] = {
 789			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 790			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 791		},
 792		[C(OP_PREFETCH)] = {
 793			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 794			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 795		},
 796	},
 797	[C(L1I)] = {
 798		/*
 799		 * Not all performance counters differentiate between read
 800		 * and write accesses/misses so we're not always strictly
 801		 * correct, but it's the best we can do. Writes and reads get
 802		 * combined in these cases.
 803		 */
 804		[C(OP_READ)] = {
 805			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 806			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 807		},
 808		[C(OP_WRITE)] = {
 809			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 810			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 811		},
 812		[C(OP_PREFETCH)] = {
 813			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 814			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 815		},
 816	},
 817	[C(LL)] = {
 818		[C(OP_READ)] = {
 819			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
 820			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 821		},
 822		[C(OP_WRITE)] = {
 823			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
 824			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 825		},
 826		[C(OP_PREFETCH)] = {
 827			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 828			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 829		},
 830	},
 831	[C(DTLB)] = {
 832		[C(OP_READ)] = {
 833			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 834			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 835		},
 836		[C(OP_WRITE)] = {
 837			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 838			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 839		},
 840		[C(OP_PREFETCH)] = {
 841			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 842			[C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,
 843		},
 844	},
 845	[C(ITLB)] = {
 846		[C(OP_READ)] = {
 847			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 848			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 849		},
 850		[C(OP_WRITE)] = {
 851			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 852			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 853		},
 854		[C(OP_PREFETCH)] = {
 855			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 856			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 857		},
 858	},
 859	[C(BPU)] = {
 860		[C(OP_READ)] = {
 861			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 862			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 863		},
 864		[C(OP_WRITE)] = {
 865			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 866			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 867		},
 868		[C(OP_PREFETCH)] = {
 869			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 870			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 871		},
 872	},
 873	[C(NODE)] = {
 874		[C(OP_READ)] = {
 875			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 876			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 877		},
 878		[C(OP_WRITE)] = {
 879			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 880			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 881		},
 882		[C(OP_PREFETCH)] = {
 883			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 884			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 885		},
 886	},
 887};
 888
 889/*
 890 * Krait HW events mapping
 891 */
 892static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
 893	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 894	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 895	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
 896	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
 897	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 898	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 899	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 900};
 901
 902static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
 903	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 904	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 905	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
 906	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
 907	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
 908	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 909	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 910};
 911
 912static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 913					  [PERF_COUNT_HW_CACHE_OP_MAX]
 914					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 915	[C(L1D)] = {
 916		/*
 917		 * The performance counters don't differentiate between read
 918		 * and write accesses/misses so this isn't strictly correct,
 919		 * but it's the best we can do. Writes and reads get
 920		 * combined.
 921		 */
 922		[C(OP_READ)] = {
 923			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 924			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 925		},
 926		[C(OP_WRITE)] = {
 927			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 928			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 929		},
 930		[C(OP_PREFETCH)] = {
 931			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 932			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 933		},
 934	},
 935	[C(L1I)] = {
 936		[C(OP_READ)] = {
 937			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
 938			[C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,
 939		},
 940		[C(OP_WRITE)] = {
 941			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 942			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 943		},
 944		[C(OP_PREFETCH)] = {
 945			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 946			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 947		},
 948	},
 949	[C(LL)] = {
 950		[C(OP_READ)] = {
 951			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 952			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 953		},
 954		[C(OP_WRITE)] = {
 955			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 956			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 957		},
 958		[C(OP_PREFETCH)] = {
 959			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 960			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 961		},
 962	},
 963	[C(DTLB)] = {
 964		[C(OP_READ)] = {
 965			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
 966			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 967		},
 968		[C(OP_WRITE)] = {
 969			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
 970			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 971		},
 972		[C(OP_PREFETCH)] = {
 973			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 974			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 975		},
 976	},
 977	[C(ITLB)] = {
 978		[C(OP_READ)] = {
 979			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
 980			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 981		},
 982		[C(OP_WRITE)] = {
 983			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
 984			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 985		},
 986		[C(OP_PREFETCH)] = {
 987			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 988			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 989		},
 990	},
 991	[C(BPU)] = {
 992		[C(OP_READ)] = {
 993			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 994			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 995		},
 996		[C(OP_WRITE)] = {
 997			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 998			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 999		},
1000		[C(OP_PREFETCH)] = {
1001			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
1002			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
1003		},
1004	},
1005	[C(NODE)] = {
1006		[C(OP_READ)] = {
1007			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
1008			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
1009		},
1010		[C(OP_WRITE)] = {
1011			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
1012			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
1013		},
1014		[C(OP_PREFETCH)] = {
1015			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
1016			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
1017		},
1018	},
1019};
1020
1021/*
1022 * Perf Events' indices
1023 */
1024#define	ARMV7_IDX_CYCLE_COUNTER	0
1025#define	ARMV7_IDX_COUNTER0	1
1026#define	ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
1027	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
1028
1029#define	ARMV7_MAX_COUNTERS	32
1030#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
1031
1032/*
1033 * ARMv7 low level PMNC access
1034 */
1035
1036/*
1037 * Perf Event to low level counters mapping
1038 */
1039#define	ARMV7_IDX_TO_COUNTER(x)	\
1040	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
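/*
 * Worked examples of the mapping above (illustrative, derived from the
 * definitions in this file):
 *
 *   ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0)      == 0   first PMNx counter
 *   ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0 + 1)  == 1
 *   ARMV7_IDX_TO_COUNTER(ARMV7_IDX_CYCLE_COUNTER) == 31  cycle counter bit
 *
 * i.e. perf index 0 is the cycle counter and indices 1..N are the event
 * counters, while the hardware enable/int-enable/overflow registers use
 * bits 0..N-1 for the event counters and bit 31 for the cycle counter.
 */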
1041
1042/*
1043 * Per-CPU PMNC: config reg
1044 */
1045#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
1046#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
1047#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
1048#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
1049#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
1050#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
1051#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
1052#define	ARMV7_PMNC_N_MASK	0x1f
1053#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
1054
1055/*
1056 * FLAG: counters overflow flag status reg
1057 */
1058#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
1059#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
1060
1061/*
1062 * PMXEVTYPER: Event selection reg
1063 */
1064#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
1065#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
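/*
 * For reference, the writable mask above decomposes as the event number in
 * bits [7:0] plus the PMUv2 filter bits defined just below:
 * 0x80000000 (ARMV7_EXCLUDE_PL1) | 0x40000000 (ARMV7_EXCLUDE_USER) |
 * 0x08000000 (ARMV7_INCLUDE_HYP) | 0xff == 0xc80000ff.
 */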
1066
1067/*
1068 * Event filters for PMUv2
1069 */
1070#define	ARMV7_EXCLUDE_PL1	(1 << 31)
1071#define	ARMV7_EXCLUDE_USER	(1 << 30)
1072#define	ARMV7_INCLUDE_HYP	(1 << 27)
1073
1074static inline u32 armv7_pmnc_read(void)
1075{
1076	u32 val;
1077	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
1078	return val;
1079}
1080
1081static inline void armv7_pmnc_write(u32 val)
1082{
1083	val &= ARMV7_PMNC_MASK;
1084	isb();
1085	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
1086}
1087
1088static inline int armv7_pmnc_has_overflowed(u32 pmnc)
1089{
1090	return pmnc & ARMV7_OVERFLOWED_MASK;
1091}
1092
1093static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
1094{
1095	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
1096		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
1097}
1098
1099static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
1100{
1101	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
1102}
1103
1104static inline int armv7_pmnc_select_counter(int idx)
1105{
1106	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1107	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
1108	isb();
1109
1110	return idx;
1111}
1112
1113static inline u32 armv7pmu_read_counter(struct perf_event *event)
1114{
1115	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1116	struct hw_perf_event *hwc = &event->hw;
1117	int idx = hwc->idx;
1118	u32 value = 0;
1119
1120	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
1121		pr_err("CPU%u reading wrong counter %d\n",
1122			smp_processor_id(), idx);
1123	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
1124		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
1125	else if (armv7_pmnc_select_counter(idx) == idx)
1126		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
1127
1128	return value;
1129}
1130
1131static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
1132{
1133	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1134	struct hw_perf_event *hwc = &event->hw;
1135	int idx = hwc->idx;
1136
1137	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
1138		pr_err("CPU%u writing wrong counter %d\n",
1139			smp_processor_id(), idx);
1140	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
1141		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
1142	else if (armv7_pmnc_select_counter(idx) == idx)
1143		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
1144}
1145
1146static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
1147{
1148	if (armv7_pmnc_select_counter(idx) == idx) {
1149		val &= ARMV7_EVTYPE_MASK;
1150		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
1151	}
1152}
1153
1154static inline int armv7_pmnc_enable_counter(int idx)
1155{
1156	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1157	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
1158	return idx;
1159}
1160
1161static inline int armv7_pmnc_disable_counter(int idx)
1162{
1163	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1164	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
1165	return idx;
1166}
1167
1168static inline int armv7_pmnc_enable_intens(int idx)
1169{
1170	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1171	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
1172	return idx;
1173}
1174
1175static inline int armv7_pmnc_disable_intens(int idx)
1176{
1177	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1178	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
1179	isb();
1180	/* Clear the overflow flag in case an interrupt is pending. */
1181	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
1182	isb();
1183
1184	return idx;
1185}
1186
1187static inline u32 armv7_pmnc_getreset_flags(void)
1188{
1189	u32 val;
1190
1191	/* Read */
1192	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1193
1194	/* Write to clear flags */
1195	val &= ARMV7_FLAG_MASK;
1196	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
1197
1198	return val;
1199}
1200
1201#ifdef DEBUG
1202static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
1203{
1204	u32 val;
1205	unsigned int cnt;
1206
1207	printk(KERN_INFO "PMNC registers dump:\n");
1208
1209	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
1210	printk(KERN_INFO "PMNC  =0x%08x\n", val);
1211
1212	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
1213	printk(KERN_INFO "CNTENS=0x%08x\n", val);
1214
1215	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
1216	printk(KERN_INFO "INTENS=0x%08x\n", val);
1217
1218	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1219	printk(KERN_INFO "FLAGS =0x%08x\n", val);
1220
1221	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
1222	printk(KERN_INFO "SELECT=0x%08x\n", val);
1223
1224	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
1225	printk(KERN_INFO "CCNT  =0x%08x\n", val);
1226
1227	for (cnt = ARMV7_IDX_COUNTER0;
1228			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
1229		armv7_pmnc_select_counter(cnt);
1230		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
1231		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
1232			ARMV7_IDX_TO_COUNTER(cnt), val);
1233		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
1234		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
1235			ARMV7_IDX_TO_COUNTER(cnt), val);
1236	}
1237}
1238#endif
1239
1240static void armv7pmu_enable_event(struct perf_event *event)
1241{
1242	unsigned long flags;
1243	struct hw_perf_event *hwc = &event->hw;
1244	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1245	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1246	int idx = hwc->idx;
1247
1248	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
1249		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
1250			smp_processor_id(), idx);
1251		return;
1252	}
1253
1254	/*
1255	 * Enable counter and interrupt, and set the counter to count
1256	 * the event that we're interested in.
1257	 */
1258	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1259
1260	/*
1261	 * Disable counter
1262	 */
1263	armv7_pmnc_disable_counter(idx);
1264
1265	/*
1266	 * Set event (if destined for PMNx counters)
1267	 * We only need to set the event for the cycle counter if we
1268	 * have the ability to perform event filtering.
1269	 */
1270	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
1271		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1272
1273	/*
1274	 * Enable interrupt for this counter
1275	 */
1276	armv7_pmnc_enable_intens(idx);
1277
1278	/*
1279	 * Enable counter
1280	 */
1281	armv7_pmnc_enable_counter(idx);
1282
1283	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1284}
1285
1286static void armv7pmu_disable_event(struct perf_event *event)
1287{
1288	unsigned long flags;
1289	struct hw_perf_event *hwc = &event->hw;
1290	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1291	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1292	int idx = hwc->idx;
1293
1294	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
1295		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
1296			smp_processor_id(), idx);
1297		return;
1298	}
1299
1300	/*
1301	 * Disable counter and interrupt
1302	 */
1303	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1304
1305	/*
1306	 * Disable counter
1307	 */
1308	armv7_pmnc_disable_counter(idx);
1309
1310	/*
1311	 * Disable interrupt for this counter
1312	 */
1313	armv7_pmnc_disable_intens(idx);
1314
1315	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1316}
1317
1318static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1319{
1320	u32 pmnc;
1321	struct perf_sample_data data;
1322	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
1323	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
1324	struct pt_regs *regs;
1325	int idx;
1326
1327	/*
1328	 * Get and reset the IRQ flags
1329	 */
1330	pmnc = armv7_pmnc_getreset_flags();
1331
1332	/*
1333	 * Did an overflow occur?
1334	 */
1335	if (!armv7_pmnc_has_overflowed(pmnc))
1336		return IRQ_NONE;
1337
1338	/*
1339	 * Handle the counter(s) overflow(s)
1340	 */
1341	regs = get_irq_regs();
1342
1343	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
1344		struct perf_event *event = cpuc->events[idx];
1345		struct hw_perf_event *hwc;
1346
1347		/* Ignore if we don't have an event. */
1348		if (!event)
1349			continue;
1350
1351		/*
1352		 * We have a single interrupt for all counters. Check that
1353		 * each counter has overflowed before we process it.
1354		 */
1355		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
1356			continue;
1357
1358		hwc = &event->hw;
1359		armpmu_event_update(event);
1360		perf_sample_data_init(&data, 0, hwc->last_period);
1361		if (!armpmu_event_set_period(event))
1362			continue;
1363
1364		if (perf_event_overflow(event, &data, regs))
1365			cpu_pmu->disable(event);
1366	}
1367
1368	/*
1369	 * Handle the pending perf events.
1370	 *
1371	 * Note: this call *must* be run with interrupts disabled. For
1372	 * platforms that can have the PMU interrupts raised as an NMI, this
1373	 * will not work.
1374	 */
1375	irq_work_run();
1376
1377	return IRQ_HANDLED;
1378}
1379
1380static void armv7pmu_start(struct arm_pmu *cpu_pmu)
1381{
1382	unsigned long flags;
1383	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1384
1385	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1386	/* Enable all counters */
1387	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1388	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1389}
1390
1391static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
1392{
1393	unsigned long flags;
1394	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1395
1396	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1397	/* Disable all counters */
1398	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1399	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1400}
1401
1402static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1403				  struct perf_event *event)
1404{
1405	int idx;
1406	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1407	struct hw_perf_event *hwc = &event->hw;
1408	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
1409
 1410	/* Always place a cycle-counting event on the dedicated cycle counter. */
1411	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1412		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1413			return -EAGAIN;
1414
1415		return ARMV7_IDX_CYCLE_COUNTER;
1416	}
1417
1418	/*
1419	 * For anything other than a cycle counter, try and use
1420	 * the events counters
1421	 */
1422	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1423		if (!test_and_set_bit(idx, cpuc->used_mask))
1424			return idx;
1425	}
1426
1427	/* The counters are all in use. */
1428	return -EAGAIN;
1429}
1430
1431/*
1432 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1433 */
1434static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1435				     struct perf_event_attr *attr)
1436{
1437	unsigned long config_base = 0;
1438
1439	if (attr->exclude_idle)
1440		return -EPERM;
1441	if (attr->exclude_user)
1442		config_base |= ARMV7_EXCLUDE_USER;
1443	if (attr->exclude_kernel)
1444		config_base |= ARMV7_EXCLUDE_PL1;
1445	if (!attr->exclude_hv)
1446		config_base |= ARMV7_INCLUDE_HYP;
1447
1448	/*
1449	 * Install the filter into config_base as this is used to
1450	 * construct the event type.
1451	 */
1452	event->config_base = config_base;
1453
1454	return 0;
1455}
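/*
 * Illustrative example: an event created with attr->exclude_user set and the
 * other exclude_* flags clear ends up with
 * config_base == ARMV7_EXCLUDE_USER | ARMV7_INCLUDE_HYP.  The shared ARM PMU
 * code then ORs the event number into hwc->config_base, and
 * armv7pmu_enable_event() writes the combined value to PMXEVTYPER through
 * armv7_pmnc_write_evtsel().
 */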
1456
1457static void armv7pmu_reset(void *info)
1458{
1459	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
1460	u32 idx, nb_cnt = cpu_pmu->num_events;
1461
1462	/* The counter and interrupt enable registers are unknown at reset. */
1463	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1464		armv7_pmnc_disable_counter(idx);
1465		armv7_pmnc_disable_intens(idx);
1466	}
1467
1468	/* Initialize & Reset PMNC: C and P bits */
1469	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1470}
1471
1472static int armv7_a8_map_event(struct perf_event *event)
1473{
1474	return armpmu_map_event(event, &armv7_a8_perf_map,
1475				&armv7_a8_perf_cache_map, 0xFF);
1476}
1477
1478static int armv7_a9_map_event(struct perf_event *event)
1479{
1480	return armpmu_map_event(event, &armv7_a9_perf_map,
1481				&armv7_a9_perf_cache_map, 0xFF);
1482}
1483
1484static int armv7_a5_map_event(struct perf_event *event)
1485{
1486	return armpmu_map_event(event, &armv7_a5_perf_map,
1487				&armv7_a5_perf_cache_map, 0xFF);
1488}
1489
1490static int armv7_a15_map_event(struct perf_event *event)
1491{
1492	return armpmu_map_event(event, &armv7_a15_perf_map,
1493				&armv7_a15_perf_cache_map, 0xFF);
1494}
1495
1496static int armv7_a7_map_event(struct perf_event *event)
1497{
1498	return armpmu_map_event(event, &armv7_a7_perf_map,
1499				&armv7_a7_perf_cache_map, 0xFF);
1500}
1501
1502static int armv7_a12_map_event(struct perf_event *event)
1503{
1504	return armpmu_map_event(event, &armv7_a12_perf_map,
1505				&armv7_a12_perf_cache_map, 0xFF);
1506}
1507
1508static int krait_map_event(struct perf_event *event)
1509{
1510	return armpmu_map_event(event, &krait_perf_map,
1511				&krait_perf_cache_map, 0xFFFFF);
1512}
1513
1514static int krait_map_event_no_branch(struct perf_event *event)
1515{
1516	return armpmu_map_event(event, &krait_perf_map_no_branch,
1517				&krait_perf_cache_map, 0xFFFFF);
1518}
1519
1520static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1521{
1522	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
1523	cpu_pmu->enable		= armv7pmu_enable_event;
1524	cpu_pmu->disable	= armv7pmu_disable_event;
1525	cpu_pmu->read_counter	= armv7pmu_read_counter;
1526	cpu_pmu->write_counter	= armv7pmu_write_counter;
1527	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
1528	cpu_pmu->start		= armv7pmu_start;
1529	cpu_pmu->stop		= armv7pmu_stop;
1530	cpu_pmu->reset		= armv7pmu_reset;
1531	cpu_pmu->max_period	= (1LLU << 32) - 1;
1532};
1533
1534static u32 armv7_read_num_pmnc_events(void)
1535{
1536	u32 nb_cnt;
1537
 1538	/* Read the number of CNTx event counters supported from PMNC */
1539	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1540
1541	/* Add the CPU cycles counter and return */
1542	return nb_cnt + 1;
1543}
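/*
 * Example: on a PMU whose PMNC N field reads 4 (four PMNx event counters,
 * as on Cortex-A8) this returns 5 - perf indices 1..4 for the event
 * counters plus index 0 for the cycle counter.
 */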
1544
1545static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1546{
1547	armv7pmu_init(cpu_pmu);
1548	cpu_pmu->name		= "ARMv7 Cortex-A8";
1549	cpu_pmu->map_event	= armv7_a8_map_event;
1550	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1551	return 0;
1552}
1553
1554static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1555{
1556	armv7pmu_init(cpu_pmu);
1557	cpu_pmu->name		= "ARMv7 Cortex-A9";
1558	cpu_pmu->map_event	= armv7_a9_map_event;
1559	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1560	return 0;
1561}
1562
1563static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1564{
1565	armv7pmu_init(cpu_pmu);
1566	cpu_pmu->name		= "ARMv7 Cortex-A5";
1567	cpu_pmu->map_event	= armv7_a5_map_event;
1568	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1569	return 0;
1570}
1571
1572static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1573{
1574	armv7pmu_init(cpu_pmu);
1575	cpu_pmu->name		= "ARMv7 Cortex-A15";
1576	cpu_pmu->map_event	= armv7_a15_map_event;
1577	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1578	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1579	return 0;
1580}
1581
1582static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1583{
1584	armv7pmu_init(cpu_pmu);
1585	cpu_pmu->name		= "ARMv7 Cortex-A7";
1586	cpu_pmu->map_event	= armv7_a7_map_event;
1587	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1588	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1589	return 0;
1590}
1591
1592static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1593{
1594	armv7pmu_init(cpu_pmu);
1595	cpu_pmu->name		= "ARMv7 Cortex-A12";
1596	cpu_pmu->map_event	= armv7_a12_map_event;
1597	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1598	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1599	return 0;
1600}
1601
1602/*
1603 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1604 *
1605 *            31   30     24     16     8      0
1606 *            +--------------------------------+
1607 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1608 *            +--------------------------------+
1609 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1610 *            +--------------------------------+
1611 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1612 *            +--------------------------------+
1613 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1614 *            +--------------------------------+
1615 *              EN | G=3  | G=2  | G=1  | G=0
1616 *
1617 *  Event Encoding:
1618 *
1619 *      hwc->config_base = 0xNRCCG
1620 *
1621 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1622 *      R  = region register
1623 *      CC = class of events the group G is choosing from
1624 *      G  = group or particular event
1625 *
1626 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1627 *
1628 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1629 *  unit, etc.) while the event code (CC) corresponds to a particular class of
1630 *  events (interrupts for example). An event code is broken down into
1631 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1632 *  example).
1633 */
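/*
 * Walking the 0x12021 example above through the field extraction used by
 * krait_evt_setup() and krait_clearpmu() below:
 *
 *   region = (0x12021 >> 12) & 0xf  == 2     (PMRESR2)
 *   code   = (0x12021 >>  4) & 0xff == 0x02
 *   group  = (0x12021 >>  0) & 0xf  == 1
 *
 * and the N=1 prefix (KRAIT_EVENT below) selects the Krait CPU PMRESRn path,
 * whereas N=2 (VENUM_EVENT) would select the Venum/VFP VPMRESR0 path.
 */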
1634
1635#define KRAIT_EVENT		(1 << 16)
1636#define VENUM_EVENT		(2 << 16)
1637#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
1638#define PMRESRn_EN		BIT(31)
1639
1640static u32 krait_read_pmresrn(int n)
1641{
1642	u32 val;
1643
1644	switch (n) {
1645	case 0:
1646		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
1647		break;
1648	case 1:
1649		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
1650		break;
1651	case 2:
1652		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
1653		break;
1654	default:
1655		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1656	}
1657
1658	return val;
1659}
1660
1661static void krait_write_pmresrn(int n, u32 val)
1662{
1663	switch (n) {
1664	case 0:
1665		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
1666		break;
1667	case 1:
1668		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
1669		break;
1670	case 2:
1671		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
1672		break;
1673	default:
1674		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1675	}
1676}
1677
1678static u32 krait_read_vpmresr0(void)
1679{
1680	u32 val;
1681	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1682	return val;
1683}
1684
1685static void krait_write_vpmresr0(u32 val)
1686{
1687	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1688}
1689
1690static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
1691{
1692	u32 venum_new_val;
1693	u32 fp_new_val;
1694
1695	BUG_ON(preemptible());
1696	/* CPACR Enable CP10 and CP11 access */
1697	*venum_orig_val = get_copro_access();
1698	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
1699	set_copro_access(venum_new_val);
1700
1701	/* Enable FPEXC */
1702	*fp_orig_val = fmrx(FPEXC);
1703	fp_new_val = *fp_orig_val | FPEXC_EN;
1704	fmxr(FPEXC, fp_new_val);
1705}
1706
1707static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val)
1708{
1709	BUG_ON(preemptible());
1710	/* Restore FPEXC */
1711	fmxr(FPEXC, fp_orig_val);
1712	isb();
1713	/* Restore CPACR */
1714	set_copro_access(venum_orig_val);
1715}
1716
1717static u32 krait_get_pmresrn_event(unsigned int region)
1718{
1719	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
1720					     KRAIT_PMRESR1_GROUP0,
1721					     KRAIT_PMRESR2_GROUP0 };
1722	return pmresrn_table[region];
1723}
1724
1725static void krait_evt_setup(int idx, u32 config_base)
1726{
1727	u32 val;
1728	u32 mask;
1729	u32 vval, fval;
1730	unsigned int region;
1731	unsigned int group;
1732	unsigned int code;
1733	unsigned int group_shift;
1734	bool venum_event;
1735
1736	venum_event = !!(config_base & VENUM_EVENT);
1737	region = (config_base >> 12) & 0xf;
1738	code   = (config_base >> 4) & 0xff;
1739	group  = (config_base >> 0)  & 0xf;
1740
1741	group_shift = group * 8;
1742	mask = 0xff << group_shift;
1743
1744	/* Configure evtsel for the region and group */
1745	if (venum_event)
1746		val = KRAIT_VPMRESR0_GROUP0;
1747	else
1748		val = krait_get_pmresrn_event(region);
1749	val += group;
1750	/* Mix in mode-exclusion bits */
1751	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1752	armv7_pmnc_write_evtsel(idx, val);
1753
1754	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1755
1756	if (venum_event) {
1757		krait_pre_vpmresr0(&vval, &fval);
1758		val = krait_read_vpmresr0();
1759		val &= ~mask;
1760		val |= code << group_shift;
1761		val |= PMRESRn_EN;
1762		krait_write_vpmresr0(val);
1763		krait_post_vpmresr0(vval, fval);
1764	} else {
1765		val = krait_read_pmresrn(region);
1766		val &= ~mask;
1767		val |= code << group_shift;
1768		val |= PMRESRn_EN;
1769		krait_write_pmresrn(region, val);
1770	}
1771}
1772
1773static u32 krait_clear_pmresrn_group(u32 val, int group)
1774{
1775	u32 mask;
1776	int group_shift;
1777
1778	group_shift = group * 8;
1779	mask = 0xff << group_shift;
1780	val &= ~mask;
1781
 1782	/* Don't clear the enable bit if the rest of the region is still in use */
1783	if (val & ~PMRESRn_EN)
1784		return val |= PMRESRn_EN;
1785
1786	return 0;
1787}
1788
1789static void krait_clearpmu(u32 config_base)
1790{
1791	u32 val;
1792	u32 vval, fval;
1793	unsigned int region;
1794	unsigned int group;
1795	bool venum_event;
1796
1797	venum_event = !!(config_base & VENUM_EVENT);
1798	region = (config_base >> 12) & 0xf;
1799	group  = (config_base >> 0)  & 0xf;
1800
1801	if (venum_event) {
1802		krait_pre_vpmresr0(&vval, &fval);
1803		val = krait_read_vpmresr0();
1804		val = krait_clear_pmresrn_group(val, group);
1805		krait_write_vpmresr0(val);
1806		krait_post_vpmresr0(vval, fval);
1807	} else {
1808		val = krait_read_pmresrn(region);
1809		val = krait_clear_pmresrn_group(val, group);
1810		krait_write_pmresrn(region, val);
1811	}
1812}
1813
1814static void krait_pmu_disable_event(struct perf_event *event)
1815{
1816	unsigned long flags;
1817	struct hw_perf_event *hwc = &event->hw;
1818	int idx = hwc->idx;
1819	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1820
1821	/* Disable counter and interrupt */
1822	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1823
1824	/* Disable counter */
1825	armv7_pmnc_disable_counter(idx);
1826
1827	/*
1828	 * Clear pmresr code (if destined for PMNx counters)
1829	 */
1830	if (hwc->config_base & KRAIT_EVENT_MASK)
1831		krait_clearpmu(hwc->config_base);
1832
1833	/* Disable interrupt for this counter */
1834	armv7_pmnc_disable_intens(idx);
1835
1836	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1837}
1838
1839static void krait_pmu_enable_event(struct perf_event *event)
1840{
1841	unsigned long flags;
1842	struct hw_perf_event *hwc = &event->hw;
1843	int idx = hwc->idx;
1844	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1845
1846	/*
1847	 * Enable counter and interrupt, and set the counter to count
1848	 * the event that we're interested in.
1849	 */
1850	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1851
1852	/* Disable counter */
1853	armv7_pmnc_disable_counter(idx);
1854
1855	/*
1856	 * Set event (if destined for PMNx counters)
1857	 * We set the event for the cycle counter because we
1858	 * have the ability to perform event filtering.
1859	 */
1860	if (hwc->config_base & KRAIT_EVENT_MASK)
1861		krait_evt_setup(idx, hwc->config_base);
1862	else
1863		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1864
1865	/* Enable interrupt for this counter */
1866	armv7_pmnc_enable_intens(idx);
1867
1868	/* Enable counter */
1869	armv7_pmnc_enable_counter(idx);
1870
1871	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1872}
1873
1874static void krait_pmu_reset(void *info)
1875{
1876	u32 vval, fval;
1877
1878	armv7pmu_reset(info);
1879
1880	/* Clear all pmresrs */
1881	krait_write_pmresrn(0, 0);
1882	krait_write_pmresrn(1, 0);
1883	krait_write_pmresrn(2, 0);
1884
1885	krait_pre_vpmresr0(&vval, &fval);
1886	krait_write_vpmresr0(0);
1887	krait_post_vpmresr0(vval, fval);
1888}
1889
1890static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1891			      unsigned int group)
1892{
1893	int bit;
1894	struct hw_perf_event *hwc = &event->hw;
1895	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1896
1897	if (hwc->config_base & VENUM_EVENT)
1898		bit = KRAIT_VPMRESR0_GROUP0;
1899	else
1900		bit = krait_get_pmresrn_event(region);
1901	bit -= krait_get_pmresrn_event(0);
1902	bit += group;
1903	/*
1904	 * Lower bits are reserved for use by the counters (see
1905	 * armv7pmu_get_event_idx() for more info)
1906	 */
1907	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1908
1909	return bit;
1910}
1911
1912/*
1913 * We check for column exclusion constraints here.
 1914 * Two events can't use the same group within a pmresr register.
1915 */
1916static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1917				   struct perf_event *event)
1918{
1919	int idx;
1920	int bit;
1921	unsigned int prefix;
1922	unsigned int region;
1923	unsigned int code;
1924	unsigned int group;
1925	bool krait_event;
1926	struct hw_perf_event *hwc = &event->hw;
1927
1928	region = (hwc->config_base >> 12) & 0xf;
1929	code   = (hwc->config_base >> 4) & 0xff;
1930	group  = (hwc->config_base >> 0) & 0xf;
1931	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
1932
1933	if (krait_event) {
1934		/* Ignore invalid events */
1935		if (group > 3 || region > 2)
1936			return -EINVAL;
1937		prefix = hwc->config_base & KRAIT_EVENT_MASK;
1938		if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
1939			return -EINVAL;
1940		if (prefix == VENUM_EVENT && (code & 0xe0))
1941			return -EINVAL;
1942
1943		bit = krait_event_to_bit(event, region, group);
1944		if (test_and_set_bit(bit, cpuc->used_mask))
1945			return -EAGAIN;
1946	}
1947
1948	idx = armv7pmu_get_event_idx(cpuc, event);
1949	if (idx < 0 && krait_event)
1950		clear_bit(bit, cpuc->used_mask);
1951
1952	return idx;
1953}
1954
1955static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1956				      struct perf_event *event)
1957{
1958	int bit;
1959	struct hw_perf_event *hwc = &event->hw;
1960	unsigned int region;
1961	unsigned int group;
1962	bool krait_event;
1963
1964	region = (hwc->config_base >> 12) & 0xf;
1965	group  = (hwc->config_base >> 0) & 0xf;
1966	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
1967
1968	if (krait_event) {
1969		bit = krait_event_to_bit(event, region, group);
1970		clear_bit(bit, cpuc->used_mask);
1971	}
1972}
1973
1974static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1975{
1976	armv7pmu_init(cpu_pmu);
1977	cpu_pmu->name		= "ARMv7 Krait";
1978	/* Some early versions of Krait don't support PC write events */
1979	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
1980				  "qcom,no-pc-write"))
1981		cpu_pmu->map_event = krait_map_event_no_branch;
1982	else
1983		cpu_pmu->map_event = krait_map_event;
1984	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1985	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1986	cpu_pmu->reset		= krait_pmu_reset;
1987	cpu_pmu->enable		= krait_pmu_enable_event;
1988	cpu_pmu->disable	= krait_pmu_disable_event;
1989	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
1990	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1991	return 0;
1992}
1993#else
1994static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1995{
1996	return -ENODEV;
1997}
1998
1999static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
2000{
2001	return -ENODEV;
2002}
2003
2004static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
2005{
2006	return -ENODEV;
2007}
2008
2009static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
2010{
2011	return -ENODEV;
2012}
2013
2014static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
2015{
2016	return -ENODEV;
2017}
2018
2019static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
2020{
2021	return -ENODEV;
2022}
2023
2024static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
2025{
2026	return -ENODEV;
2027}
2028#endif	/* CONFIG_CPU_V7 */
v3.5.6
   1/*
   2 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
   3 *
   4 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
   5 * 2010 (c) MontaVista Software, LLC.
   6 *
   7 * Copied from ARMv6 code, with the low level code inspired
   8 *  by the ARMv7 Oprofile code.
   9 *
  10 * Cortex-A8 has up to 4 configurable performance counters and
  11 *  a single cycle counter.
  12 * Cortex-A9 has up to 31 configurable performance counters and
  13 *  a single cycle counter.
  14 *
  15 * All counters can be enabled/disabled and IRQ masked separately. The cycle
  16 *  counter and all 4 performance counters together can be reset separately.
  17 */
  18
  19#ifdef CONFIG_CPU_V7
  20
  21static struct arm_pmu armv7pmu;
  22
  23/*
  24 * Common ARMv7 event types
  25 *
  26 * Note: An implementation may not be able to count all of these events
  27 * but the encodings are considered to be `reserved' in the case that
  28 * they are not available.
  29 */
  30enum armv7_perf_types {
  31	ARMV7_PERFCTR_PMNC_SW_INCR			= 0x00,
  32	ARMV7_PERFCTR_L1_ICACHE_REFILL			= 0x01,
  33	ARMV7_PERFCTR_ITLB_REFILL			= 0x02,
  34	ARMV7_PERFCTR_L1_DCACHE_REFILL			= 0x03,
  35	ARMV7_PERFCTR_L1_DCACHE_ACCESS			= 0x04,
  36	ARMV7_PERFCTR_DTLB_REFILL			= 0x05,
  37	ARMV7_PERFCTR_MEM_READ				= 0x06,
  38	ARMV7_PERFCTR_MEM_WRITE				= 0x07,
  39	ARMV7_PERFCTR_INSTR_EXECUTED			= 0x08,
  40	ARMV7_PERFCTR_EXC_TAKEN				= 0x09,
  41	ARMV7_PERFCTR_EXC_EXECUTED			= 0x0A,
  42	ARMV7_PERFCTR_CID_WRITE				= 0x0B,
  43
  44	/*
  45	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
  46	 * It counts:
  47	 *  - all (taken) branch instructions,
  48	 *  - instructions that explicitly write the PC,
  49	 *  - exception generating instructions.
  50	 */
  51	ARMV7_PERFCTR_PC_WRITE				= 0x0C,
  52	ARMV7_PERFCTR_PC_IMM_BRANCH			= 0x0D,
  53	ARMV7_PERFCTR_PC_PROC_RETURN			= 0x0E,
  54	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		= 0x0F,
  55	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
  56	ARMV7_PERFCTR_CLOCK_CYCLES			= 0x11,
  57	ARMV7_PERFCTR_PC_BRANCH_PRED			= 0x12,
  58
  59	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
  60	ARMV7_PERFCTR_MEM_ACCESS			= 0x13,
  61	ARMV7_PERFCTR_L1_ICACHE_ACCESS			= 0x14,
  62	ARMV7_PERFCTR_L1_DCACHE_WB			= 0x15,
  63	ARMV7_PERFCTR_L2_CACHE_ACCESS			= 0x16,
  64	ARMV7_PERFCTR_L2_CACHE_REFILL			= 0x17,
  65	ARMV7_PERFCTR_L2_CACHE_WB			= 0x18,
  66	ARMV7_PERFCTR_BUS_ACCESS			= 0x19,
  67	ARMV7_PERFCTR_MEM_ERROR				= 0x1A,
  68	ARMV7_PERFCTR_INSTR_SPEC			= 0x1B,
  69	ARMV7_PERFCTR_TTBR_WRITE			= 0x1C,
  70	ARMV7_PERFCTR_BUS_CYCLES			= 0x1D,
  71
  72	ARMV7_PERFCTR_CPU_CYCLES			= 0xFF
  73};
  74
  75/* ARMv7 Cortex-A8 specific event types */
  76enum armv7_a8_perf_types {
  77	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		= 0x43,
  78	ARMV7_A8_PERFCTR_L2_CACHE_REFILL		= 0x44,
  79	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		= 0x50,
  80	ARMV7_A8_PERFCTR_STALL_ISIDE			= 0x56,
  81};
  82
  83/* ARMv7 Cortex-A9 specific event types */
  84enum armv7_a9_perf_types {
  85	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		= 0x68,
  86	ARMV7_A9_PERFCTR_STALL_ICACHE			= 0x60,
  87	ARMV7_A9_PERFCTR_STALL_DISPATCH			= 0x66,
  88};
  89
  90/* ARMv7 Cortex-A5 specific event types */
  91enum armv7_a5_perf_types {
  92	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
  93	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		= 0xc3,
  94};
  95
  96/* ARMv7 Cortex-A15 specific event types */
  97enum armv7_a15_perf_types {
  98	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
  99	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
 100	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		= 0x42,
 101	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	= 0x43,
 102
 103	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		= 0x4C,
 104	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		= 0x4D,
 105
 106	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
 107	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
 108	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		= 0x52,
 109	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		= 0x53,
 110
 111	ARMV7_A15_PERFCTR_PC_WRITE_SPEC			= 0x76,
 112};
 113
 114/*
 115 * Cortex-A8 HW events mapping
 116 *
 117 * The hardware events that we support. We do support cache operations but
  118 * we have Harvard caches and no way to combine instruction and data
 119 * accesses/misses in hardware.
 120 */
 121static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
 122	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 123	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 124	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 125	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 126	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 127	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 128	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
 129	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
 130	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 131};
 132
 133static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 134					  [PERF_COUNT_HW_CACHE_OP_MAX]
 135					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 136	[C(L1D)] = {
 137		/*
 138		 * The performance counters don't differentiate between read
 139		 * and write accesses/misses so this isn't strictly correct,
 140		 * but it's the best we can do. Writes and reads get
 141		 * combined.
 142		 */
 143		[C(OP_READ)] = {
 144			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 145			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 146		},
 147		[C(OP_WRITE)] = {
 148			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 149			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 150		},
 151		[C(OP_PREFETCH)] = {
 152			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 153			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 154		},
 155	},
 156	[C(L1I)] = {
 157		[C(OP_READ)] = {
 158			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
 159			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 160		},
 161		[C(OP_WRITE)] = {
 162			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
 163			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 164		},
 165		[C(OP_PREFETCH)] = {
 166			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 167			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 168		},
 169	},
 170	[C(LL)] = {
 171		[C(OP_READ)] = {
 172			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 173			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 174		},
 175		[C(OP_WRITE)] = {
 176			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 177			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 178		},
 179		[C(OP_PREFETCH)] = {
 180			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 181			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 182		},
 183	},
 184	[C(DTLB)] = {
 185		[C(OP_READ)] = {
 186			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 187			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 188		},
 189		[C(OP_WRITE)] = {
 190			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 191			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 192		},
 193		[C(OP_PREFETCH)] = {
 194			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 195			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 196		},
 197	},
 198	[C(ITLB)] = {
 199		[C(OP_READ)] = {
 200			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 201			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 202		},
 203		[C(OP_WRITE)] = {
 204			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 205			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 206		},
 207		[C(OP_PREFETCH)] = {
 208			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 209			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 210		},
 211	},
 212	[C(BPU)] = {
 213		[C(OP_READ)] = {
 214			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 215			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 216		},
 217		[C(OP_WRITE)] = {
 218			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 219			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 220		},
 221		[C(OP_PREFETCH)] = {
 222			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 223			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 224		},
 225	},
 226	[C(NODE)] = {
 227		[C(OP_READ)] = {
 228			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 229			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 230		},
 231		[C(OP_WRITE)] = {
 232			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 233			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 234		},
 235		[C(OP_PREFETCH)] = {
 236			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 237			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 238		},
 239	},
 240};
 241
 242/*
 243 * Cortex-A9 HW events mapping
 244 */
 245static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 246	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 247	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
 248	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 249	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 250	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 251	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 252	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
 253	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
 254	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
 255};
 256
 257static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 258					  [PERF_COUNT_HW_CACHE_OP_MAX]
 259					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 260	[C(L1D)] = {
 261		/*
 262		 * The performance counters don't differentiate between read
 263		 * and write accesses/misses so this isn't strictly correct,
 264		 * but it's the best we can do. Writes and reads get
 265		 * combined.
 266		 */
 267		[C(OP_READ)] = {
 268			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 269			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 270		},
 271		[C(OP_WRITE)] = {
 272			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 273			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 274		},
 275		[C(OP_PREFETCH)] = {
 276			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 277			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 278		},
 279	},
 280	[C(L1I)] = {
 281		[C(OP_READ)] = {
 282			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 283			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 284		},
 285		[C(OP_WRITE)] = {
 286			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 287			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 288		},
 289		[C(OP_PREFETCH)] = {
 290			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 291			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 292		},
 293	},
 294	[C(LL)] = {
 295		[C(OP_READ)] = {
 296			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 297			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 298		},
 299		[C(OP_WRITE)] = {
 300			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 301			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 302		},
 303		[C(OP_PREFETCH)] = {
 304			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 305			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 306		},
 307	},
 308	[C(DTLB)] = {
 309		[C(OP_READ)] = {
 310			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 311			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 312		},
 313		[C(OP_WRITE)] = {
 314			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 315			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 316		},
 317		[C(OP_PREFETCH)] = {
 318			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 319			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 320		},
 321	},
 322	[C(ITLB)] = {
 323		[C(OP_READ)] = {
 324			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 325			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 326		},
 327		[C(OP_WRITE)] = {
 328			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 329			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 330		},
 331		[C(OP_PREFETCH)] = {
 332			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 333			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 334		},
 335	},
 336	[C(BPU)] = {
 337		[C(OP_READ)] = {
 338			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 339			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 340		},
 341		[C(OP_WRITE)] = {
 342			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 343			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 344		},
 345		[C(OP_PREFETCH)] = {
 346			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 347			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 348		},
 349	},
 350	[C(NODE)] = {
 351		[C(OP_READ)] = {
 352			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 353			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 354		},
 355		[C(OP_WRITE)] = {
 356			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 357			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 358		},
 359		[C(OP_PREFETCH)] = {
 360			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 361			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 362		},
 363	},
 364};
 365
 366/*
 367 * Cortex-A5 HW events mapping
 368 */
 369static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
 370	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 371	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 372	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 373	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 374	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 375	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 376	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
 377	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 378	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 379};
 380
 381static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 382					[PERF_COUNT_HW_CACHE_OP_MAX]
 383					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 384	[C(L1D)] = {
 385		[C(OP_READ)] = {
 386			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 387			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 388		},
 389		[C(OP_WRITE)] = {
 390			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 391			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 392		},
 393		[C(OP_PREFETCH)] = {
 394			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 395			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 396		},
 397	},
 398	[C(L1I)] = {
 399		[C(OP_READ)] = {
 400			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 401			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 402		},
 403		[C(OP_WRITE)] = {
 404			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 405			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 406		},
 407		/*
 408		 * The prefetch counters don't differentiate between the I
 409		 * side and the D side.
 410		 */
 411		[C(OP_PREFETCH)] = {
 412			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 413			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 414		},
 415	},
 416	[C(LL)] = {
 417		[C(OP_READ)] = {
 418			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 419			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 420		},
 421		[C(OP_WRITE)] = {
 422			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 423			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 424		},
 425		[C(OP_PREFETCH)] = {
 426			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 427			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 428		},
 429	},
 430	[C(DTLB)] = {
 431		[C(OP_READ)] = {
 432			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 433			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 434		},
 435		[C(OP_WRITE)] = {
 436			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 437			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 438		},
 439		[C(OP_PREFETCH)] = {
 440			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 441			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 442		},
 443	},
 444	[C(ITLB)] = {
 445		[C(OP_READ)] = {
 446			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 447			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 448		},
 449		[C(OP_WRITE)] = {
 450			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 451			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 452		},
 453		[C(OP_PREFETCH)] = {
 454			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 455			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 456		},
 457	},
 458	[C(BPU)] = {
 459		[C(OP_READ)] = {
 460			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 461			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 462		},
 463		[C(OP_WRITE)] = {
 464			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 465			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 466		},
 467		[C(OP_PREFETCH)] = {
 468			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 469			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 470		},
 471	},
 472	[C(NODE)] = {
 473		[C(OP_READ)] = {
 474			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 475			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 476		},
 477		[C(OP_WRITE)] = {
 478			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 479			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 480		},
 481		[C(OP_PREFETCH)] = {
 482			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 483			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 484		},
 485	},
 486};
 487
 488/*
 489 * Cortex-A15 HW events mapping
 490 */
 491static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
 492	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 493	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 494	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 495	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 496	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
 497	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 498	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 499	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 500	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 501};
 502
 503static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 504					[PERF_COUNT_HW_CACHE_OP_MAX]
 505					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 506	[C(L1D)] = {
 507		[C(OP_READ)] = {
 508			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
 509			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
 510		},
 511		[C(OP_WRITE)] = {
 512			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 513			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
 514		},
 515		[C(OP_PREFETCH)] = {
 516			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 517			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 518		},
 519	},
 520	[C(L1I)] = {
 521		/*
 522		 * Not all performance counters differentiate between read
 523		 * and write accesses/misses so we're not always strictly
 524		 * correct, but it's the best we can do. Writes and reads get
 525		 * combined in these cases.
 526		 */
 527		[C(OP_READ)] = {
 528			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 529			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 530		},
 531		[C(OP_WRITE)] = {
 532			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 533			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 534		},
 535		[C(OP_PREFETCH)] = {
 536			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 537			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 538		},
 539	},
 540	[C(LL)] = {
 541		[C(OP_READ)] = {
 542			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
 543			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
 544		},
 545		[C(OP_WRITE)] = {
 546			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
 547			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
 548		},
 549		[C(OP_PREFETCH)] = {
 550			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 551			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 552		},
 553	},
 554	[C(DTLB)] = {
 555		[C(OP_READ)] = {
 556			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 557			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
 558		},
 559		[C(OP_WRITE)] = {
 560			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 561			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
 562		},
 563		[C(OP_PREFETCH)] = {
 564			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 565			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 566		},
 567	},
 568	[C(ITLB)] = {
 569		[C(OP_READ)] = {
 570			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 571			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 572		},
 573		[C(OP_WRITE)] = {
 574			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 575			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 576		},
 577		[C(OP_PREFETCH)] = {
 578			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 579			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 580		},
 581	},
 582	[C(BPU)] = {
 583		[C(OP_READ)] = {
 584			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 585			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 586		},
 587		[C(OP_WRITE)] = {
 588			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 589			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 590		},
 591		[C(OP_PREFETCH)] = {
 592			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 593			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 594		},
 595	},
 596	[C(NODE)] = {
 597		[C(OP_READ)] = {
 598			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 599			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 600		},
 601		[C(OP_WRITE)] = {
 602			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 603			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 604		},
 605		[C(OP_PREFETCH)] = {
 606			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 607			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 608		},
 609	},
 610};
 611
 612/*
 613 * Cortex-A7 HW events mapping
 614 */
 615static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
 616	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 617	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 618	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 619	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 620	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 621	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 622	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 623	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 624	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 625};
 626
 627static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 628					[PERF_COUNT_HW_CACHE_OP_MAX]
 629					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 630	[C(L1D)] = {
 631		/*
 632		 * The performance counters don't differentiate between read
 633		 * and write accesses/misses so this isn't strictly correct,
 634		 * but it's the best we can do. Writes and reads get
 635		 * combined.
 636		 */
 637		[C(OP_READ)] = {
 638			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 639			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 640		},
 641		[C(OP_WRITE)] = {
 642			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 643			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 644		},
 645		[C(OP_PREFETCH)] = {
 646			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 647			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 648		},
 649	},
 650	[C(L1I)] = {
 651		[C(OP_READ)] = {
 652			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 653			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 654		},
 655		[C(OP_WRITE)] = {
 656			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 657			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 658		},
 659		[C(OP_PREFETCH)] = {
 660			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 661			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 662		},
 663	},
 664	[C(LL)] = {
 665		[C(OP_READ)] = {
 666			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 667			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 668		},
 669		[C(OP_WRITE)] = {
 670			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 671			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 672		},
 673		[C(OP_PREFETCH)] = {
 674			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 675			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 676		},
 677	},
 678	[C(DTLB)] = {
 679		[C(OP_READ)] = {
 680			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 681			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 682		},
 683		[C(OP_WRITE)] = {
 684			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 685			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 686		},
 687		[C(OP_PREFETCH)] = {
 688			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 689			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 690		},
 691	},
 692	[C(ITLB)] = {
 693		[C(OP_READ)] = {
 694			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 695			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 696		},
 697		[C(OP_WRITE)] = {
 698			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 699			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 700		},
 701		[C(OP_PREFETCH)] = {
 702			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 703			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 704		},
 705	},
 706	[C(BPU)] = {
 707		[C(OP_READ)] = {
 708			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 709			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 710		},
 711		[C(OP_WRITE)] = {
 712			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 713			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 714		},
 715		[C(OP_PREFETCH)] = {
 716			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 717			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 718		},
 719	},
 720	[C(NODE)] = {
 721		[C(OP_READ)] = {
 722			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 723			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 724		},
 725		[C(OP_WRITE)] = {
 726			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 727			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 728		},
 729		[C(OP_PREFETCH)] = {
 730			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 731			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 732		},
 733	},
 734};
 735
 736/*
 737 * Perf Events' indices
 738 */
 739#define	ARMV7_IDX_CYCLE_COUNTER	0
 740#define	ARMV7_IDX_COUNTER0	1
 741#define	ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 742
 743#define	ARMV7_MAX_COUNTERS	32
 744#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
 745
 746/*
 747 * ARMv7 low level PMNC access
 748 */
 749
 750/*
 751 * Perf Event to low level counters mapping
 752 */
 753#define	ARMV7_IDX_TO_COUNTER(x)	\
 754	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
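/*
 * Illustrative evaluation, not part of the original source: with the
 * definitions above,
 *
 *	ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0)     == 0
 *	ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0 + 3) == 3
 *
 * i.e. perf index 0 is reserved for the cycle counter and the remaining
 * indices map onto the hardware event counters starting at counter 0.
 */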
 755
 756/*
 757 * Per-CPU PMNC: config reg
 758 */
 759#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
 760#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
 761#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
 762#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
 763#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
  764#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
 765#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
 766#define	ARMV7_PMNC_N_MASK	0x1f
 767#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
 768
 769/*
 770 * FLAG: counters overflow flag status reg
 771 */
 772#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 773#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 774
 775/*
 776 * PMXEVTYPER: Event selection reg
 777 */
 778#define	ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
 779#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
 780
 781/*
 782 * Event filters for PMUv2
 783 */
 784#define	ARMV7_EXCLUDE_PL1	(1 << 31)
 785#define	ARMV7_EXCLUDE_USER	(1 << 30)
 786#define	ARMV7_INCLUDE_HYP	(1 << 27)
 787
 788static inline u32 armv7_pmnc_read(void)
 789{
 790	u32 val;
 791	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 792	return val;
 793}
 794
 795static inline void armv7_pmnc_write(u32 val)
 796{
 797	val &= ARMV7_PMNC_MASK;
 798	isb();
 799	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 800}
 801
 802static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 803{
 804	return pmnc & ARMV7_OVERFLOWED_MASK;
 805}
 806
 807static inline int armv7_pmnc_counter_valid(int idx)
 808{
 809	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
 810}
 811
 812static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 813{
 814	int ret = 0;
 815	u32 counter;
 816
 817	if (!armv7_pmnc_counter_valid(idx)) {
 818		pr_err("CPU%u checking wrong counter %d overflow status\n",
 819			smp_processor_id(), idx);
 820	} else {
 821		counter = ARMV7_IDX_TO_COUNTER(idx);
 822		ret = pmnc & BIT(counter);
 823	}
 824
 825	return ret;
 826}
 827
 828static inline int armv7_pmnc_select_counter(int idx)
 829{
 830	u32 counter;
 831
 832	if (!armv7_pmnc_counter_valid(idx)) {
 833		pr_err("CPU%u selecting wrong PMNC counter %d\n",
 834			smp_processor_id(), idx);
 835		return -EINVAL;
 836	}
 837
 838	counter = ARMV7_IDX_TO_COUNTER(idx);
 839	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 840	isb();
 841
 842	return idx;
 843}
 844
 845static inline u32 armv7pmu_read_counter(int idx)
 846{
 847	u32 value = 0;
 848
 849	if (!armv7_pmnc_counter_valid(idx))
 850		pr_err("CPU%u reading wrong counter %d\n",
 851			smp_processor_id(), idx);
 852	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
 853		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
 854	else if (armv7_pmnc_select_counter(idx) == idx)
 855		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
 856
 857	return value;
 858}
 859
 860static inline void armv7pmu_write_counter(int idx, u32 value)
 861{
 862	if (!armv7_pmnc_counter_valid(idx))
 863		pr_err("CPU%u writing wrong counter %d\n",
 864			smp_processor_id(), idx);
 865	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
 866		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
 867	else if (armv7_pmnc_select_counter(idx) == idx)
 868		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
 869}
 870
 871static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 872{
 873	if (armv7_pmnc_select_counter(idx) == idx) {
 874		val &= ARMV7_EVTYPE_MASK;
 875		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 876	}
 877}
 878
 879static inline int armv7_pmnc_enable_counter(int idx)
 880{
 881	u32 counter;
 882
 883	if (!armv7_pmnc_counter_valid(idx)) {
 884		pr_err("CPU%u enabling wrong PMNC counter %d\n",
 885			smp_processor_id(), idx);
 886		return -EINVAL;
 887	}
 888
 889	counter = ARMV7_IDX_TO_COUNTER(idx);
 890	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 891	return idx;
 892}
 893
 894static inline int armv7_pmnc_disable_counter(int idx)
 895{
 896	u32 counter;
 897
 898	if (!armv7_pmnc_counter_valid(idx)) {
 899		pr_err("CPU%u disabling wrong PMNC counter %d\n",
 900			smp_processor_id(), idx);
 901		return -EINVAL;
 902	}
 903
 904	counter = ARMV7_IDX_TO_COUNTER(idx);
 905	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 906	return idx;
 907}
 908
 909static inline int armv7_pmnc_enable_intens(int idx)
 910{
 911	u32 counter;
 912
 913	if (!armv7_pmnc_counter_valid(idx)) {
 914		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
 915			smp_processor_id(), idx);
 916		return -EINVAL;
 917	}
 918
 919	counter = ARMV7_IDX_TO_COUNTER(idx);
 920	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 921	return idx;
 922}
 923
 924static inline int armv7_pmnc_disable_intens(int idx)
 925{
 926	u32 counter;
 927
 928	if (!armv7_pmnc_counter_valid(idx)) {
 929		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
 930			smp_processor_id(), idx);
 931		return -EINVAL;
 932	}
 933
 934	counter = ARMV7_IDX_TO_COUNTER(idx);
 935	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 936	isb();
 937	/* Clear the overflow flag in case an interrupt is pending. */
 938	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
 939	isb();
 940
 941	return idx;
 942}
 943
 944static inline u32 armv7_pmnc_getreset_flags(void)
 945{
 946	u32 val;
 947
 948	/* Read */
 949	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 950
 951	/* Write to clear flags */
 952	val &= ARMV7_FLAG_MASK;
 953	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
 954
 955	return val;
 956}
 957
 958#ifdef DEBUG
 959static void armv7_pmnc_dump_regs(void)
 960{
 961	u32 val;
 962	unsigned int cnt;
 963
 964	printk(KERN_INFO "PMNC registers dump:\n");
 965
 966	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
 967	printk(KERN_INFO "PMNC  =0x%08x\n", val);
 968
 969	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
 970	printk(KERN_INFO "CNTENS=0x%08x\n", val);
 971
 972	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
 973	printk(KERN_INFO "INTENS=0x%08x\n", val);
 974
 975	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 976	printk(KERN_INFO "FLAGS =0x%08x\n", val);
 977
 978	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
 979	printk(KERN_INFO "SELECT=0x%08x\n", val);
 980
 981	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 982	printk(KERN_INFO "CCNT  =0x%08x\n", val);
 983
 984	for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
 985		armv7_pmnc_select_counter(cnt);
 986		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 987		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
 988			ARMV7_IDX_TO_COUNTER(cnt), val);
 989		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 990		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
 991			ARMV7_IDX_TO_COUNTER(cnt), val);
 992	}
 993}
 994#endif
 995
 996static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 997{
 998	unsigned long flags;
 999	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1000
1001	/*
1002	 * Enable counter and interrupt, and set the counter to count
1003	 * the event that we're interested in.
1004	 */
1005	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1006
1007	/*
1008	 * Disable counter
1009	 */
1010	armv7_pmnc_disable_counter(idx);
1011
1012	/*
1013	 * Set event (if destined for PMNx counters)
1014	 * We only need to set the event for the cycle counter if we
1015	 * have the ability to perform event filtering.
1016	 */
1017	if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
1018		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1019
1020	/*
1021	 * Enable interrupt for this counter
1022	 */
1023	armv7_pmnc_enable_intens(idx);
1024
1025	/*
1026	 * Enable counter
1027	 */
1028	armv7_pmnc_enable_counter(idx);
1029
1030	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1031}
1032
1033static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
1034{
1035	unsigned long flags;
1036	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1037
1038	/*
1039	 * Disable counter and interrupt
1040	 */
1041	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1042
1043	/*
1044	 * Disable counter
1045	 */
1046	armv7_pmnc_disable_counter(idx);
1047
1048	/*
1049	 * Disable interrupt for this counter
1050	 */
1051	armv7_pmnc_disable_intens(idx);
1052
1053	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1054}
1055
1056static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1057{
1058	u32 pmnc;
1059	struct perf_sample_data data;
1060	struct pmu_hw_events *cpuc;
1061	struct pt_regs *regs;
1062	int idx;
1063
1064	/*
1065	 * Get and reset the IRQ flags
1066	 */
1067	pmnc = armv7_pmnc_getreset_flags();
1068
1069	/*
1070	 * Did an overflow occur?
1071	 */
1072	if (!armv7_pmnc_has_overflowed(pmnc))
1073		return IRQ_NONE;
1074
1075	/*
1076	 * Handle the counter(s) overflow(s)
1077	 */
1078	regs = get_irq_regs();
1079
1080	cpuc = &__get_cpu_var(cpu_hw_events);
1081	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
1082		struct perf_event *event = cpuc->events[idx];
1083		struct hw_perf_event *hwc;
1084
1085		/* Ignore if we don't have an event. */
1086		if (!event)
1087			continue;
1088
1089		/*
1090		 * We have a single interrupt for all counters. Check that
1091		 * each counter has overflowed before we process it.
1092		 */
1093		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
1094			continue;
1095
1096		hwc = &event->hw;
1097		armpmu_event_update(event, hwc, idx);
1098		perf_sample_data_init(&data, 0, hwc->last_period);
1099		if (!armpmu_event_set_period(event, hwc, idx))
1100			continue;
1101
1102		if (perf_event_overflow(event, &data, regs))
1103			cpu_pmu->disable(hwc, idx);
1104	}
1105
1106	/*
1107	 * Handle the pending perf events.
1108	 *
1109	 * Note: this call *must* be run with interrupts disabled. For
1110	 * platforms that can have the PMU interrupts raised as an NMI, this
1111	 * will not work.
1112	 */
1113	irq_work_run();
1114
1115	return IRQ_HANDLED;
1116}
1117
1118static void armv7pmu_start(void)
1119{
1120	unsigned long flags;
1121	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1122
1123	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1124	/* Enable all counters */
1125	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1126	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1127}
1128
1129static void armv7pmu_stop(void)
1130{
1131	unsigned long flags;
1132	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1133
1134	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1135	/* Disable all counters */
1136	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1137	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1138}
1139
1140static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1141				  struct hw_perf_event *event)
1142{
1143	int idx;
1144	unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
1145
 1146	/* Always place a cycle-counting event on the cycle counter. */
1147	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1148		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1149			return -EAGAIN;
1150
1151		return ARMV7_IDX_CYCLE_COUNTER;
1152	}
1153
1154	/*
 1155	 * For anything other than a cycle counter, try to use
 1156	 * the event counters
1157	 */
1158	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1159		if (!test_and_set_bit(idx, cpuc->used_mask))
1160			return idx;
1161	}
1162
1163	/* The counters are all in use. */
1164	return -EAGAIN;
1165}
1166
1167/*
1168 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1169 */
1170static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1171				     struct perf_event_attr *attr)
1172{
1173	unsigned long config_base = 0;
1174
1175	if (attr->exclude_idle)
1176		return -EPERM;
1177	if (attr->exclude_user)
1178		config_base |= ARMV7_EXCLUDE_USER;
1179	if (attr->exclude_kernel)
1180		config_base |= ARMV7_EXCLUDE_PL1;
1181	if (!attr->exclude_hv)
1182		config_base |= ARMV7_INCLUDE_HYP;
1183
1184	/*
1185	 * Install the filter into config_base as this is used to
1186	 * construct the event type.
1187	 */
1188	event->config_base = config_base;
1189
1190	return 0;
1191}
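/*
 * A worked example of the mapping above, illustrative only:
 * attr.exclude_user = 1 with exclude_kernel, exclude_hv and
 * exclude_idle all clear yields
 *
 *	config_base = ARMV7_EXCLUDE_USER | ARMV7_INCLUDE_HYP;
 *
 * while any request with attr.exclude_idle set is rejected with -EPERM.
 */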
1192
1193static void armv7pmu_reset(void *info)
1194{
1195	u32 idx, nb_cnt = cpu_pmu->num_events;
1196
1197	/* The counter and interrupt enable registers are unknown at reset. */
1198	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
1199		armv7pmu_disable_event(NULL, idx);
1200
1201	/* Initialize & Reset PMNC: C and P bits */
1202	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1203}
1204
1205static int armv7_a8_map_event(struct perf_event *event)
1206{
1207	return map_cpu_event(event, &armv7_a8_perf_map,
1208				&armv7_a8_perf_cache_map, 0xFF);
1209}
1210
1211static int armv7_a9_map_event(struct perf_event *event)
1212{
1213	return map_cpu_event(event, &armv7_a9_perf_map,
1214				&armv7_a9_perf_cache_map, 0xFF);
1215}
1216
1217static int armv7_a5_map_event(struct perf_event *event)
1218{
1219	return map_cpu_event(event, &armv7_a5_perf_map,
1220				&armv7_a5_perf_cache_map, 0xFF);
1221}
1222
1223static int armv7_a15_map_event(struct perf_event *event)
1224{
1225	return map_cpu_event(event, &armv7_a15_perf_map,
1226				&armv7_a15_perf_cache_map, 0xFF);
1227}
1228
1229static int armv7_a7_map_event(struct perf_event *event)
1230{
1231	return map_cpu_event(event, &armv7_a7_perf_map,
1232				&armv7_a7_perf_cache_map, 0xFF);
1233}
1234
1235static struct arm_pmu armv7pmu = {
1236	.handle_irq		= armv7pmu_handle_irq,
1237	.enable			= armv7pmu_enable_event,
1238	.disable		= armv7pmu_disable_event,
1239	.read_counter		= armv7pmu_read_counter,
1240	.write_counter		= armv7pmu_write_counter,
1241	.get_event_idx		= armv7pmu_get_event_idx,
1242	.start			= armv7pmu_start,
1243	.stop			= armv7pmu_stop,
1244	.reset			= armv7pmu_reset,
1245	.max_period		= (1LLU << 32) - 1,
1246};
1247
1248static u32 __init armv7_read_num_pmnc_events(void)
1249{
1250	u32 nb_cnt;
1251
1252	/* Read the nb of CNTx counters supported from PMNC */
1253	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1254
1255	/* Add the CPU cycles counter and return */
1256	return nb_cnt + 1;
1257}
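/*
 * Illustrative example, not part of the original source: a PMU
 * reporting N = 4 in PMNC[15:11] gives nb_cnt = 4 above, so
 * armv7_read_num_pmnc_events() returns 5, i.e. four CNTx event
 * counters plus the dedicated cycle counter.
 */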
1258
1259static struct arm_pmu *__init armv7_a8_pmu_init(void)
1260{
1261	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
1262	armv7pmu.name		= "ARMv7 Cortex-A8";
1263	armv7pmu.map_event	= armv7_a8_map_event;
1264	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1265	return &armv7pmu;
1266}
1267
1268static struct arm_pmu *__init armv7_a9_pmu_init(void)
1269{
1270	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
1271	armv7pmu.name		= "ARMv7 Cortex-A9";
1272	armv7pmu.map_event	= armv7_a9_map_event;
1273	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1274	return &armv7pmu;
1275}
1276
1277static struct arm_pmu *__init armv7_a5_pmu_init(void)
1278{
1279	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
1280	armv7pmu.name		= "ARMv7 Cortex-A5";
1281	armv7pmu.map_event	= armv7_a5_map_event;
1282	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1283	return &armv7pmu;
1284}
1285
1286static struct arm_pmu *__init armv7_a15_pmu_init(void)
1287{
1288	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
1289	armv7pmu.name		= "ARMv7 Cortex-A15";
1290	armv7pmu.map_event	= armv7_a15_map_event;
1291	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1292	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
1293	return &armv7pmu;
1294}
1295
1296static struct arm_pmu *__init armv7_a7_pmu_init(void)
1297{
1298	armv7pmu.id		= ARM_PERF_PMU_ID_CA7;
1299	armv7pmu.name		= "ARMv7 Cortex-A7";
1300	armv7pmu.map_event	= armv7_a7_map_event;
1301	armv7pmu.num_events	= armv7_read_num_pmnc_events();
1302	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
1303	return &armv7pmu;
1304}
1305#else
1306static struct arm_pmu *__init armv7_a8_pmu_init(void)
1307{
1308	return NULL;
1309}
1310
1311static struct arm_pmu *__init armv7_a9_pmu_init(void)
1312{
1313	return NULL;
1314}
1315
1316static struct arm_pmu *__init armv7_a5_pmu_init(void)
1317{
1318	return NULL;
1319}
1320
1321static struct arm_pmu *__init armv7_a15_pmu_init(void)
1322{
1323	return NULL;
1324}
1325
1326static struct arm_pmu *__init armv7_a7_pmu_init(void)
1327{
1328	return NULL;
1329}
1330#endif	/* CONFIG_CPU_V7 */