   1/*
   2 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
   3 *
   4 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
   5 * 2010 (c) MontaVista Software, LLC.
   6 *
   7 * Copied from ARMv6 code, with the low level code inspired
   8 *  by the ARMv7 Oprofile code.
   9 *
  10 * Cortex-A8 has up to 4 configurable performance counters and
  11 *  a single cycle counter.
  12 * Cortex-A9 has up to 31 configurable performance counters and
  13 *  a single cycle counter.
  14 *
  15 * All counters can be enabled/disabled and IRQ masked separately. The cycle
  16 *  counter and the event counters (as a group) can each be reset separately.
  17 */
  18
  19#ifdef CONFIG_CPU_V7
  20
  21#include <asm/cp15.h>
  22#include <asm/vfp.h>
  23#include "../vfp/vfpinstr.h"
  24
  25/*
  26 * Common ARMv7 event types
  27 *
  28 * Note: An implementation may not be able to count all of these events
  29 * but the encodings are considered to be `reserved' in the case that
  30 * they are not available.
  31 */
  32enum armv7_perf_types {
  33	ARMV7_PERFCTR_PMNC_SW_INCR			= 0x00,
  34	ARMV7_PERFCTR_L1_ICACHE_REFILL			= 0x01,
  35	ARMV7_PERFCTR_ITLB_REFILL			= 0x02,
  36	ARMV7_PERFCTR_L1_DCACHE_REFILL			= 0x03,
  37	ARMV7_PERFCTR_L1_DCACHE_ACCESS			= 0x04,
  38	ARMV7_PERFCTR_DTLB_REFILL			= 0x05,
  39	ARMV7_PERFCTR_MEM_READ				= 0x06,
  40	ARMV7_PERFCTR_MEM_WRITE				= 0x07,
  41	ARMV7_PERFCTR_INSTR_EXECUTED			= 0x08,
  42	ARMV7_PERFCTR_EXC_TAKEN				= 0x09,
  43	ARMV7_PERFCTR_EXC_EXECUTED			= 0x0A,
  44	ARMV7_PERFCTR_CID_WRITE				= 0x0B,
  45
  46	/*
  47	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
  48	 * It counts:
  49	 *  - all (taken) branch instructions,
  50	 *  - instructions that explicitly write the PC,
  51	 *  - exception generating instructions.
  52	 */
  53	ARMV7_PERFCTR_PC_WRITE				= 0x0C,
  54	ARMV7_PERFCTR_PC_IMM_BRANCH			= 0x0D,
  55	ARMV7_PERFCTR_PC_PROC_RETURN			= 0x0E,
  56	ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		= 0x0F,
  57	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		= 0x10,
  58	ARMV7_PERFCTR_CLOCK_CYCLES			= 0x11,
  59	ARMV7_PERFCTR_PC_BRANCH_PRED			= 0x12,
  60
  61	/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
  62	ARMV7_PERFCTR_MEM_ACCESS			= 0x13,
  63	ARMV7_PERFCTR_L1_ICACHE_ACCESS			= 0x14,
  64	ARMV7_PERFCTR_L1_DCACHE_WB			= 0x15,
  65	ARMV7_PERFCTR_L2_CACHE_ACCESS			= 0x16,
  66	ARMV7_PERFCTR_L2_CACHE_REFILL			= 0x17,
  67	ARMV7_PERFCTR_L2_CACHE_WB			= 0x18,
  68	ARMV7_PERFCTR_BUS_ACCESS			= 0x19,
  69	ARMV7_PERFCTR_MEM_ERROR				= 0x1A,
  70	ARMV7_PERFCTR_INSTR_SPEC			= 0x1B,
  71	ARMV7_PERFCTR_TTBR_WRITE			= 0x1C,
  72	ARMV7_PERFCTR_BUS_CYCLES			= 0x1D,
  73
  74	ARMV7_PERFCTR_CPU_CYCLES			= 0xFF
  75};
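/*
 * Illustrative example (an assumption about perf's raw-event syntax, not
 * part of this file): "perf stat -e r08 -- <cmd>" on an ARMv7 CPU programs
 * event number 0x08 into a counter and so counts
 * ARMV7_PERFCTR_INSTR_EXECUTED.
 */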
  76
  77/* ARMv7 Cortex-A8 specific event types */
  78enum armv7_a8_perf_types {
  79	ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		= 0x43,
  80	ARMV7_A8_PERFCTR_L2_CACHE_REFILL		= 0x44,
  81	ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		= 0x50,
  82	ARMV7_A8_PERFCTR_STALL_ISIDE			= 0x56,
  83};
  84
  85/* ARMv7 Cortex-A9 specific event types */
  86enum armv7_a9_perf_types {
  87	ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		= 0x68,
  88	ARMV7_A9_PERFCTR_STALL_ICACHE			= 0x60,
  89	ARMV7_A9_PERFCTR_STALL_DISPATCH			= 0x66,
  90};
  91
  92/* ARMv7 Cortex-A5 specific event types */
  93enum armv7_a5_perf_types {
  94	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		= 0xc2,
  95	ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		= 0xc3,
  96};
  97
  98/* ARMv7 Cortex-A15 specific event types */
  99enum armv7_a15_perf_types {
 100	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
 101	ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
 102	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		= 0x42,
 103	ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	= 0x43,
 104
 105	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		= 0x4C,
 106	ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		= 0x4D,
 107
 108	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
 109	ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
 110	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		= 0x52,
 111	ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		= 0x53,
 112
 113	ARMV7_A15_PERFCTR_PC_WRITE_SPEC			= 0x76,
 114};
 115
 116/* ARMv7 Cortex-A12 specific event types */
 117enum armv7_a12_perf_types {
 118	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		= 0x40,
 119	ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	= 0x41,
 120
 121	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		= 0x50,
 122	ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		= 0x51,
 123
 124	ARMV7_A12_PERFCTR_PC_WRITE_SPEC			= 0x76,
 125
 126	ARMV7_A12_PERFCTR_PF_TLB_REFILL			= 0xe7,
 127};
 128
 129/* ARMv7 Krait specific event types */
 130enum krait_perf_types {
 131	KRAIT_PMRESR0_GROUP0				= 0xcc,
 132	KRAIT_PMRESR1_GROUP0				= 0xd0,
 133	KRAIT_PMRESR2_GROUP0				= 0xd4,
 134	KRAIT_VPMRESR0_GROUP0				= 0xd8,
 135
 136	KRAIT_PERFCTR_L1_ICACHE_ACCESS			= 0x10011,
 137	KRAIT_PERFCTR_L1_ICACHE_MISS			= 0x10010,
 138
 139	KRAIT_PERFCTR_L1_ITLB_ACCESS			= 0x12222,
 140	KRAIT_PERFCTR_L1_DTLB_ACCESS			= 0x12210,
 141};
 142
 143/*
 144 * Cortex-A8 HW events mapping
 145 *
 146 * The hardware events that we support. We do support cache operations but
 147 * we have harvard caches and no way to combine instruction and data
 148 * accesses/misses in hardware.
 149 */
 150static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
 151	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 152	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 153	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 154	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 155	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 156	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 157	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
 158	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
 159	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 160};
 161
 162static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 163					  [PERF_COUNT_HW_CACHE_OP_MAX]
 164					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 165	[C(L1D)] = {
 166		/*
 167		 * The performance counters don't differentiate between read
 168		 * and write accesses/misses so this isn't strictly correct,
 169		 * but it's the best we can do. Writes and reads get
 170		 * combined.
 171		 */
 172		[C(OP_READ)] = {
 173			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 174			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 175		},
 176		[C(OP_WRITE)] = {
 177			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 178			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 179		},
 180		[C(OP_PREFETCH)] = {
 181			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 182			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 183		},
 184	},
 185	[C(L1I)] = {
 186		[C(OP_READ)] = {
 187			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
 188			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 189		},
 190		[C(OP_WRITE)] = {
 191			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 192			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 193		},
 194		[C(OP_PREFETCH)] = {
 195			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 196			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 197		},
 198	},
 199	[C(LL)] = {
 200		[C(OP_READ)] = {
 201			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 202			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 203		},
 204		[C(OP_WRITE)] = {
 205			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 206			[C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 207		},
 208		[C(OP_PREFETCH)] = {
 209			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 210			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 211		},
 212	},
 213	[C(DTLB)] = {
 214		[C(OP_READ)] = {
 215			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 216			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 217		},
 218		[C(OP_WRITE)] = {
 219			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 220			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 221		},
 222		[C(OP_PREFETCH)] = {
 223			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 224			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 225		},
 226	},
 227	[C(ITLB)] = {
 228		[C(OP_READ)] = {
 229			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 230			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 231		},
 232		[C(OP_WRITE)] = {
 233			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 234			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 235		},
 236		[C(OP_PREFETCH)] = {
 237			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 238			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 239		},
 240	},
 241	[C(BPU)] = {
 242		[C(OP_READ)] = {
 243			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 244			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 245		},
 246		[C(OP_WRITE)] = {
 247			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 248			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 249		},
 250		[C(OP_PREFETCH)] = {
 251			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 252			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 253		},
 254	},
 255	[C(NODE)] = {
 256		[C(OP_READ)] = {
 257			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 258			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 259		},
 260		[C(OP_WRITE)] = {
 261			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 262			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 263		},
 264		[C(OP_PREFETCH)] = {
 265			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 266			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 267		},
 268	},
 269};
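/*
 * Illustrative example (not part of the original file): perf encodes a
 * generic cache event as (cache_id) | (op_id << 8) | (result_id << 16),
 * so config 0x10000 = C(L1D) | C(OP_READ) << 8 | C(RESULT_MISS) << 16
 * resolves through the table above to ARMV7_PERFCTR_L1_DCACHE_REFILL.
 */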
 270
 271/*
 272 * Cortex-A9 HW events mapping
 273 */
 274static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 275	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 276	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
 277	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 278	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 279	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 280	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 281	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
 282	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
 283	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
 284};
 285
 286static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 287					  [PERF_COUNT_HW_CACHE_OP_MAX]
 288					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 289	[C(L1D)] = {
 290		/*
 291		 * The performance counters don't differentiate between read
 292		 * and write accesses/misses so this isn't strictly correct,
 293		 * but it's the best we can do. Writes and reads get
 294		 * combined.
 295		 */
 296		[C(OP_READ)] = {
 297			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 298			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 299		},
 300		[C(OP_WRITE)] = {
 301			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 302			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 303		},
 304		[C(OP_PREFETCH)] = {
 305			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 306			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 307		},
 308	},
 309	[C(L1I)] = {
 310		[C(OP_READ)] = {
 311			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 312			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 313		},
 314		[C(OP_WRITE)] = {
 315			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 316			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 317		},
 318		[C(OP_PREFETCH)] = {
 319			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 320			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 321		},
 322	},
 323	[C(LL)] = {
 324		[C(OP_READ)] = {
 325			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 326			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 327		},
 328		[C(OP_WRITE)] = {
 329			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 330			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 331		},
 332		[C(OP_PREFETCH)] = {
 333			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 334			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 335		},
 336	},
 337	[C(DTLB)] = {
 338		[C(OP_READ)] = {
 339			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 340			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 341		},
 342		[C(OP_WRITE)] = {
 343			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 344			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 345		},
 346		[C(OP_PREFETCH)] = {
 347			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 348			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 349		},
 350	},
 351	[C(ITLB)] = {
 352		[C(OP_READ)] = {
 353			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 354			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 355		},
 356		[C(OP_WRITE)] = {
 357			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 358			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 359		},
 360		[C(OP_PREFETCH)] = {
 361			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 362			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 363		},
 364	},
 365	[C(BPU)] = {
 366		[C(OP_READ)] = {
 367			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 368			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 369		},
 370		[C(OP_WRITE)] = {
 371			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 372			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 373		},
 374		[C(OP_PREFETCH)] = {
 375			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 376			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 377		},
 378	},
 379	[C(NODE)] = {
 380		[C(OP_READ)] = {
 381			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 382			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 383		},
 384		[C(OP_WRITE)] = {
 385			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 386			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 387		},
 388		[C(OP_PREFETCH)] = {
 389			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 390			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 391		},
 392	},
 393};
 394
 395/*
 396 * Cortex-A5 HW events mapping
 397 */
 398static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
 399	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 400	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 401	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 402	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 403	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 404	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 405	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
 406	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 407	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 408};
 409
 410static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 411					[PERF_COUNT_HW_CACHE_OP_MAX]
 412					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 413	[C(L1D)] = {
 414		[C(OP_READ)] = {
 415			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 416			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 417		},
 418		[C(OP_WRITE)] = {
 419			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 420			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 421		},
 422		[C(OP_PREFETCH)] = {
 423			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 424			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 425		},
 426	},
 427	[C(L1I)] = {
 428		[C(OP_READ)] = {
 429			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 430			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 431		},
 432		[C(OP_WRITE)] = {
 433			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 434			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 435		},
 436		/*
 437		 * The prefetch counters don't differentiate between the I
 438		 * side and the D side.
 439		 */
 440		[C(OP_PREFETCH)] = {
 441			[C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 442			[C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 443		},
 444	},
 445	[C(LL)] = {
 446		[C(OP_READ)] = {
 447			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 448			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 449		},
 450		[C(OP_WRITE)] = {
 451			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 452			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 453		},
 454		[C(OP_PREFETCH)] = {
 455			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 456			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 457		},
 458	},
 459	[C(DTLB)] = {
 460		[C(OP_READ)] = {
 461			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 462			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 463		},
 464		[C(OP_WRITE)] = {
 465			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 466			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 467		},
 468		[C(OP_PREFETCH)] = {
 469			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 470			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 471		},
 472	},
 473	[C(ITLB)] = {
 474		[C(OP_READ)] = {
 475			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 476			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 477		},
 478		[C(OP_WRITE)] = {
 479			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 480			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 481		},
 482		[C(OP_PREFETCH)] = {
 483			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 484			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 485		},
 486	},
 487	[C(BPU)] = {
 488		[C(OP_READ)] = {
 489			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 490			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 491		},
 492		[C(OP_WRITE)] = {
 493			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 494			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 495		},
 496		[C(OP_PREFETCH)] = {
 497			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 498			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 499		},
 500	},
 501	[C(NODE)] = {
 502		[C(OP_READ)] = {
 503			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 504			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 505		},
 506		[C(OP_WRITE)] = {
 507			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 508			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 509		},
 510		[C(OP_PREFETCH)] = {
 511			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 512			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 513		},
 514	},
 515};
 516
 517/*
 518 * Cortex-A15 HW events mapping
 519 */
 520static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
 521	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 522	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 523	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 524	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 525	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
 526	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 527	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 528	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 529	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 530};
 531
 532static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 533					[PERF_COUNT_HW_CACHE_OP_MAX]
 534					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 535	[C(L1D)] = {
 536		[C(OP_READ)] = {
 537			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
 538			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
 539		},
 540		[C(OP_WRITE)] = {
 541			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 542			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
 543		},
 544		[C(OP_PREFETCH)] = {
 545			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 546			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 547		},
 548	},
 549	[C(L1I)] = {
 550		/*
 551		 * Not all performance counters differentiate between read
 552		 * and write accesses/misses so we're not always strictly
 553		 * correct, but it's the best we can do. Writes and reads get
 554		 * combined in these cases.
 555		 */
 556		[C(OP_READ)] = {
 557			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 558			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 559		},
 560		[C(OP_WRITE)] = {
 561			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 562			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 563		},
 564		[C(OP_PREFETCH)] = {
 565			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 566			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 567		},
 568	},
 569	[C(LL)] = {
 570		[C(OP_READ)] = {
 571			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
 572			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
 573		},
 574		[C(OP_WRITE)] = {
 575			[C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
 576			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
 577		},
 578		[C(OP_PREFETCH)] = {
 579			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 580			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 581		},
 582	},
 583	[C(DTLB)] = {
 584		[C(OP_READ)] = {
 585			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 586			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
 587		},
 588		[C(OP_WRITE)] = {
 589			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 590			[C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
 591		},
 592		[C(OP_PREFETCH)] = {
 593			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 594			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 595		},
 596	},
 597	[C(ITLB)] = {
 598		[C(OP_READ)] = {
 599			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 600			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 601		},
 602		[C(OP_WRITE)] = {
 603			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 604			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 605		},
 606		[C(OP_PREFETCH)] = {
 607			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 608			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 609		},
 610	},
 611	[C(BPU)] = {
 612		[C(OP_READ)] = {
 613			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 614			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 615		},
 616		[C(OP_WRITE)] = {
 617			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 618			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 619		},
 620		[C(OP_PREFETCH)] = {
 621			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 622			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 623		},
 624	},
 625	[C(NODE)] = {
 626		[C(OP_READ)] = {
 627			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 628			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 629		},
 630		[C(OP_WRITE)] = {
 631			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 632			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 633		},
 634		[C(OP_PREFETCH)] = {
 635			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 636			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 637		},
 638	},
 639};
 640
 641/*
 642 * Cortex-A7 HW events mapping
 643 */
 644static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
 645	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 646	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 647	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 648	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 649	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 650	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 651	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 652	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 653	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 654};
 655
 656static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 657					[PERF_COUNT_HW_CACHE_OP_MAX]
 658					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 659	[C(L1D)] = {
 660		/*
 661		 * The performance counters don't differentiate between read
 662		 * and write accesses/misses so this isn't strictly correct,
 663		 * but it's the best we can do. Writes and reads get
 664		 * combined.
 665		 */
 666		[C(OP_READ)] = {
 667			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 668			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 669		},
 670		[C(OP_WRITE)] = {
 671			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 672			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 673		},
 674		[C(OP_PREFETCH)] = {
 675			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 676			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 677		},
 678	},
 679	[C(L1I)] = {
 680		[C(OP_READ)] = {
 681			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 682			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 683		},
 684		[C(OP_WRITE)] = {
 685			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 686			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 687		},
 688		[C(OP_PREFETCH)] = {
 689			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 690			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 691		},
 692	},
 693	[C(LL)] = {
 694		[C(OP_READ)] = {
 695			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 696			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 697		},
 698		[C(OP_WRITE)] = {
 699			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 700			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 701		},
 702		[C(OP_PREFETCH)] = {
 703			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 704			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 705		},
 706	},
 707	[C(DTLB)] = {
 708		[C(OP_READ)] = {
 709			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 710			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 711		},
 712		[C(OP_WRITE)] = {
 713			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 714			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 715		},
 716		[C(OP_PREFETCH)] = {
 717			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 718			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 719		},
 720	},
 721	[C(ITLB)] = {
 722		[C(OP_READ)] = {
 723			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 724			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 725		},
 726		[C(OP_WRITE)] = {
 727			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 728			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 729		},
 730		[C(OP_PREFETCH)] = {
 731			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 732			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 733		},
 734	},
 735	[C(BPU)] = {
 736		[C(OP_READ)] = {
 737			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 738			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 739		},
 740		[C(OP_WRITE)] = {
 741			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 742			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 743		},
 744		[C(OP_PREFETCH)] = {
 745			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 746			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 747		},
 748	},
 749	[C(NODE)] = {
 750		[C(OP_READ)] = {
 751			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 752			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 753		},
 754		[C(OP_WRITE)] = {
 755			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 756			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 757		},
 758		[C(OP_PREFETCH)] = {
 759			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 760			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 761		},
 762	},
 763};
 764
 765/*
 766 * Cortex-A12 HW events mapping
 767 */
 768static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
 769	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 770	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 771	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 772	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 773	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
 774	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 775	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 776	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
 777	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 778};
 779
 780static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 781					[PERF_COUNT_HW_CACHE_OP_MAX]
 782					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 783	[C(L1D)] = {
 784		[C(OP_READ)] = {
 785			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
 786			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 787		},
 788		[C(OP_WRITE)] = {
 789			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 790			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 791		},
 792		[C(OP_PREFETCH)] = {
 793			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 794			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 795		},
 796	},
 797	[C(L1I)] = {
 798		/*
 799		 * Not all performance counters differentiate between read
 800		 * and write accesses/misses so we're not always strictly
 801		 * correct, but it's the best we can do. Writes and reads get
 802		 * combined in these cases.
 803		 */
 804		[C(OP_READ)] = {
 805			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 806			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 807		},
 808		[C(OP_WRITE)] = {
 809			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 810			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 811		},
 812		[C(OP_PREFETCH)] = {
 813			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 814			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 815		},
 816	},
 817	[C(LL)] = {
 818		[C(OP_READ)] = {
 819			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
 820			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 821		},
 822		[C(OP_WRITE)] = {
 823			[C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
 824			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 825		},
 826		[C(OP_PREFETCH)] = {
 827			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 828			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 829		},
 830	},
 831	[C(DTLB)] = {
 832		[C(OP_READ)] = {
 833			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 834			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 835		},
 836		[C(OP_WRITE)] = {
 837			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 838			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 839		},
 840		[C(OP_PREFETCH)] = {
 841			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 842			[C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,
 843		},
 844	},
 845	[C(ITLB)] = {
 846		[C(OP_READ)] = {
 847			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 848			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 849		},
 850		[C(OP_WRITE)] = {
 851			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 852			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 853		},
 854		[C(OP_PREFETCH)] = {
 855			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 856			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 857		},
 858	},
 859	[C(BPU)] = {
 860		[C(OP_READ)] = {
 861			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 862			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 863		},
 864		[C(OP_WRITE)] = {
 865			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 866			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 867		},
 868		[C(OP_PREFETCH)] = {
 869			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 870			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 871		},
 872	},
 873	[C(NODE)] = {
 874		[C(OP_READ)] = {
 875			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 876			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 877		},
 878		[C(OP_WRITE)] = {
 879			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 880			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 881		},
 882		[C(OP_PREFETCH)] = {
 883			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 884			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 885		},
 886	},
 887};
 888
 889/*
 890 * Krait HW events mapping
 891 */
 892static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
 893	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 894	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 895	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
 896	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
 897	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 898	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 899	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 900};
 901
 902static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
 903	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 904	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 905	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
 906	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
 907	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
 908	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 909	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 910};
 911
 912static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 913					  [PERF_COUNT_HW_CACHE_OP_MAX]
 914					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 915	[C(L1D)] = {
 916		/*
 917		 * The performance counters don't differentiate between read
 918		 * and write accesses/misses so this isn't strictly correct,
 919		 * but it's the best we can do. Writes and reads get
 920		 * combined.
 921		 */
 922		[C(OP_READ)] = {
 923			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 924			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 925		},
 926		[C(OP_WRITE)] = {
 927			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 928			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 929		},
 930		[C(OP_PREFETCH)] = {
 931			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 932			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 933		},
 934	},
 935	[C(L1I)] = {
 936		[C(OP_READ)] = {
 937			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
 938			[C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,
 939		},
 940		[C(OP_WRITE)] = {
 941			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 942			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 943		},
 944		[C(OP_PREFETCH)] = {
 945			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 946			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 947		},
 948	},
 949	[C(LL)] = {
 950		[C(OP_READ)] = {
 951			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 952			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 953		},
 954		[C(OP_WRITE)] = {
 955			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 956			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 957		},
 958		[C(OP_PREFETCH)] = {
 959			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 960			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 961		},
 962	},
 963	[C(DTLB)] = {
 964		[C(OP_READ)] = {
 965			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
 966			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 967		},
 968		[C(OP_WRITE)] = {
 969			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
 970			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 971		},
 972		[C(OP_PREFETCH)] = {
 973			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 974			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 975		},
 976	},
 977	[C(ITLB)] = {
 978		[C(OP_READ)] = {
 979			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
 980			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 981		},
 982		[C(OP_WRITE)] = {
 983			[C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
 984			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 985		},
 986		[C(OP_PREFETCH)] = {
 987			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
 988			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
 989		},
 990	},
 991	[C(BPU)] = {
 992		[C(OP_READ)] = {
 993			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 994			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 995		},
 996		[C(OP_WRITE)] = {
 997			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 998			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 999		},
1000		[C(OP_PREFETCH)] = {
1001			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
1002			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
1003		},
1004	},
1005	[C(NODE)] = {
1006		[C(OP_READ)] = {
1007			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
1008			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
1009		},
1010		[C(OP_WRITE)] = {
1011			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
1012			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
1013		},
1014		[C(OP_PREFETCH)] = {
1015			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
1016			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
1017		},
1018	},
1019};
1020
1021/*
1022 * Perf Events' indices
1023 */
1024#define	ARMV7_IDX_CYCLE_COUNTER	0
1025#define	ARMV7_IDX_COUNTER0	1
1026#define	ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
1027	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
1028
1029#define	ARMV7_MAX_COUNTERS	32
1030#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
1031
1032/*
1033 * ARMv7 low level PMNC access
1034 */
1035
1036/*
1037 * Perf Event to low level counters mapping
1038 */
1039#define	ARMV7_IDX_TO_COUNTER(x)	\
1040	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
1041
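/*
 * Illustrative example (assuming four event counters plus the cycle
 * counter, as on Cortex-A8): perf idx 0 is the cycle counter (CCNT), and
 * ARMV7_IDX_TO_COUNTER() maps perf idx 1..4 onto hardware counters
 * PMN0..PMN3, e.g. (4 - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK = 3.
 */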
1042/*
1043 * Per-CPU PMNC: config reg
1044 */
1045#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
1046#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
1047#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
1048#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
1049#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
1050#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
1051#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
1052#define	ARMV7_PMNC_N_MASK	0x1f
1053#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
1054
1055/*
1056 * FLAG: counters overflow flag status reg
1057 */
1058#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
1059#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
1060
1061/*
1062 * PMXEVTYPER: Event selection reg
1063 */
1064#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
1065#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
1066
1067/*
1068 * Event filters for PMUv2
1069 */
1070#define	ARMV7_EXCLUDE_PL1	(1 << 31)
1071#define	ARMV7_EXCLUDE_USER	(1 << 30)
1072#define	ARMV7_INCLUDE_HYP	(1 << 27)
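/*
 * Illustrative sketch (not part of the original file): an evtsel value
 * that counts L1 D-cache refills in PL1 only combines the event number
 * with the exclude-user filter bit:
 *
 *	armv7_pmnc_write_evtsel(idx,
 *		ARMV7_PERFCTR_L1_DCACHE_REFILL | ARMV7_EXCLUDE_USER);
 *
 * armv7pmu_set_event_filter() below derives exactly these filter bits
 * from the perf_event_attr exclude_* flags.
 */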
1073
1074static inline u32 armv7_pmnc_read(void)
1075{
1076	u32 val;
1077	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
1078	return val;
1079}
1080
1081static inline void armv7_pmnc_write(u32 val)
1082{
1083	val &= ARMV7_PMNC_MASK;
1084	isb();
1085	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
1086}
1087
1088static inline int armv7_pmnc_has_overflowed(u32 pmnc)
1089{
1090	return pmnc & ARMV7_OVERFLOWED_MASK;
1091}
1092
1093static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
1094{
1095	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
1096		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
1097}
1098
1099static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
1100{
1101	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
1102}
1103
1104static inline int armv7_pmnc_select_counter(int idx)
1105{
1106	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1107	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
1108	isb();
1109
1110	return idx;
1111}
1112
1113static inline u32 armv7pmu_read_counter(struct perf_event *event)
1114{
1115	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1116	struct hw_perf_event *hwc = &event->hw;
1117	int idx = hwc->idx;
1118	u32 value = 0;
1119
1120	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
1121		pr_err("CPU%u reading wrong counter %d\n",
1122			smp_processor_id(), idx);
1123	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
1124		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
1125	else if (armv7_pmnc_select_counter(idx) == idx)
1126		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
1127
1128	return value;
1129}
1130
1131static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
1132{
1133	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1134	struct hw_perf_event *hwc = &event->hw;
1135	int idx = hwc->idx;
1136
1137	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
1138		pr_err("CPU%u writing wrong counter %d\n",
1139			smp_processor_id(), idx);
1140	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
1141		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
1142	else if (armv7_pmnc_select_counter(idx) == idx)
1143		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
1144}
1145
1146static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
1147{
1148	if (armv7_pmnc_select_counter(idx) == idx) {
1149		val &= ARMV7_EVTYPE_MASK;
1150		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
1151	}
1152}
1153
1154static inline int armv7_pmnc_enable_counter(int idx)
1155{
1156	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1157	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
1158	return idx;
1159}
1160
1161static inline int armv7_pmnc_disable_counter(int idx)
1162{
1163	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1164	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
1165	return idx;
1166}
1167
1168static inline int armv7_pmnc_enable_intens(int idx)
1169{
1170	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1171	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
1172	return idx;
1173}
1174
1175static inline int armv7_pmnc_disable_intens(int idx)
1176{
1177	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
1178	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
1179	isb();
1180	/* Clear the overflow flag in case an interrupt is pending. */
1181	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
1182	isb();
1183
1184	return idx;
1185}
1186
1187static inline u32 armv7_pmnc_getreset_flags(void)
1188{
1189	u32 val;
1190
1191	/* Read */
1192	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1193
1194	/* Write to clear flags */
1195	val &= ARMV7_FLAG_MASK;
1196	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
1197
1198	return val;
1199}
1200
1201#ifdef DEBUG
1202static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
1203{
1204	u32 val;
1205	unsigned int cnt;
1206
1207	printk(KERN_INFO "PMNC registers dump:\n");
1208
1209	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
1210	printk(KERN_INFO "PMNC  =0x%08x\n", val);
1211
1212	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
1213	printk(KERN_INFO "CNTENS=0x%08x\n", val);
1214
1215	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
1216	printk(KERN_INFO "INTENS=0x%08x\n", val);
1217
1218	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1219	printk(KERN_INFO "FLAGS =0x%08x\n", val);
1220
1221	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
1222	printk(KERN_INFO "SELECT=0x%08x\n", val);
1223
1224	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
1225	printk(KERN_INFO "CCNT  =0x%08x\n", val);
1226
1227	for (cnt = ARMV7_IDX_COUNTER0;
1228			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
1229		armv7_pmnc_select_counter(cnt);
1230		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
1231		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
1232			ARMV7_IDX_TO_COUNTER(cnt), val);
1233		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
1234		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
1235			ARMV7_IDX_TO_COUNTER(cnt), val);
1236	}
1237}
1238#endif
1239
1240static void armv7pmu_enable_event(struct perf_event *event)
1241{
1242	unsigned long flags;
1243	struct hw_perf_event *hwc = &event->hw;
1244	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1245	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1246	int idx = hwc->idx;
1247
1248	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
1249		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
1250			smp_processor_id(), idx);
1251		return;
1252	}
1253
1254	/*
1255	 * Enable counter and interrupt, and set the counter to count
1256	 * the event that we're interested in.
1257	 */
1258	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1259
1260	/*
1261	 * Disable counter
1262	 */
1263	armv7_pmnc_disable_counter(idx);
1264
1265	/*
1266	 * Set event (if destined for PMNx counters)
1267	 * We only need to set the event for the cycle counter if we
1268	 * have the ability to perform event filtering.
1269	 */
1270	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
1271		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1272
1273	/*
1274	 * Enable interrupt for this counter
1275	 */
1276	armv7_pmnc_enable_intens(idx);
1277
1278	/*
1279	 * Enable counter
1280	 */
1281	armv7_pmnc_enable_counter(idx);
1282
1283	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1284}
1285
1286static void armv7pmu_disable_event(struct perf_event *event)
1287{
1288	unsigned long flags;
1289	struct hw_perf_event *hwc = &event->hw;
1290	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1291	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1292	int idx = hwc->idx;
1293
1294	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
1295		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
1296			smp_processor_id(), idx);
1297		return;
1298	}
1299
1300	/*
1301	 * Disable counter and interrupt
1302	 */
1303	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1304
1305	/*
1306	 * Disable counter
1307	 */
1308	armv7_pmnc_disable_counter(idx);
1309
1310	/*
1311	 * Disable interrupt for this counter
1312	 */
1313	armv7_pmnc_disable_intens(idx);
1314
1315	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1316}
1317
1318static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1319{
1320	u32 pmnc;
1321	struct perf_sample_data data;
1322	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
1323	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
1324	struct pt_regs *regs;
1325	int idx;
1326
1327	/*
1328	 * Get and reset the IRQ flags
1329	 */
1330	pmnc = armv7_pmnc_getreset_flags();
1331
1332	/*
1333	 * Did an overflow occur?
1334	 */
1335	if (!armv7_pmnc_has_overflowed(pmnc))
1336		return IRQ_NONE;
1337
1338	/*
1339	 * Handle the counter(s) overflow(s)
1340	 */
1341	regs = get_irq_regs();
1342
1343	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
1344		struct perf_event *event = cpuc->events[idx];
1345		struct hw_perf_event *hwc;
1346
1347		/* Ignore if we don't have an event. */
1348		if (!event)
1349			continue;
1350
1351		/*
1352		 * We have a single interrupt for all counters. Check that
1353		 * each counter has overflowed before we process it.
1354		 */
1355		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
1356			continue;
1357
1358		hwc = &event->hw;
1359		armpmu_event_update(event);
1360		perf_sample_data_init(&data, 0, hwc->last_period);
1361		if (!armpmu_event_set_period(event))
1362			continue;
1363
1364		if (perf_event_overflow(event, &data, regs))
1365			cpu_pmu->disable(event);
1366	}
1367
1368	/*
1369	 * Handle the pending perf events.
1370	 *
1371	 * Note: this call *must* be run with interrupts disabled. For
1372	 * platforms that can have the PMU interrupts raised as an NMI, this
1373	 * will not work.
1374	 */
1375	irq_work_run();
1376
1377	return IRQ_HANDLED;
1378}
1379
1380static void armv7pmu_start(struct arm_pmu *cpu_pmu)
1381{
1382	unsigned long flags;
1383	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1384
1385	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1386	/* Enable all counters */
1387	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1388	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1389}
1390
1391static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
1392{
1393	unsigned long flags;
1394	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1395
1396	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1397	/* Disable all counters */
1398	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1399	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1400}
1401
1402static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1403				  struct perf_event *event)
1404{
1405	int idx;
1406	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1407	struct hw_perf_event *hwc = &event->hw;
1408	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
1409
1410	/* Always place a cycle counter into the cycle counter. */
1411	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1412		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1413			return -EAGAIN;
1414
1415		return ARMV7_IDX_CYCLE_COUNTER;
1416	}
1417
1418	/*
1419	 * For anything other than a cycle counter, try to use
1420	 * the event counters.
1421	 */
1422	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1423		if (!test_and_set_bit(idx, cpuc->used_mask))
1424			return idx;
1425	}
1426
1427	/* The counters are all in use. */
1428	return -EAGAIN;
1429}
1430
1431/*
1432 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1433 */
1434static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1435				     struct perf_event_attr *attr)
1436{
1437	unsigned long config_base = 0;
1438
1439	if (attr->exclude_idle)
1440		return -EPERM;
1441	if (attr->exclude_user)
1442		config_base |= ARMV7_EXCLUDE_USER;
1443	if (attr->exclude_kernel)
1444		config_base |= ARMV7_EXCLUDE_PL1;
1445	if (!attr->exclude_hv)
1446		config_base |= ARMV7_INCLUDE_HYP;
1447
1448	/*
1449	 * Install the filter into config_base as this is used to
1450	 * construct the event type.
1451	 */
1452	event->config_base = config_base;
1453
1454	return 0;
1455}
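/*
 * Example (illustrative): a perf_event_attr with exclude_user = 1 and
 * exclude_hv = 1 yields config_base = ARMV7_EXCLUDE_USER here;
 * armv7pmu_enable_event() later merges it into PMXEVTYPER via
 * armv7_pmnc_write_evtsel().
 */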
1456
1457static void armv7pmu_reset(void *info)
1458{
1459	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
1460	u32 idx, nb_cnt = cpu_pmu->num_events;
1461
1462	/* The counter and interrupt enable registers are unknown at reset. */
1463	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1464		armv7_pmnc_disable_counter(idx);
1465		armv7_pmnc_disable_intens(idx);
1466	}
1467
1468	/* Initialize & Reset PMNC: C and P bits */
1469	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1470}
1471
1472static int armv7_a8_map_event(struct perf_event *event)
1473{
1474	return armpmu_map_event(event, &armv7_a8_perf_map,
1475				&armv7_a8_perf_cache_map, 0xFF);
1476}
1477
1478static int armv7_a9_map_event(struct perf_event *event)
1479{
1480	return armpmu_map_event(event, &armv7_a9_perf_map,
1481				&armv7_a9_perf_cache_map, 0xFF);
1482}
1483
1484static int armv7_a5_map_event(struct perf_event *event)
1485{
1486	return armpmu_map_event(event, &armv7_a5_perf_map,
1487				&armv7_a5_perf_cache_map, 0xFF);
1488}
1489
1490static int armv7_a15_map_event(struct perf_event *event)
1491{
1492	return armpmu_map_event(event, &armv7_a15_perf_map,
1493				&armv7_a15_perf_cache_map, 0xFF);
1494}
1495
1496static int armv7_a7_map_event(struct perf_event *event)
1497{
1498	return armpmu_map_event(event, &armv7_a7_perf_map,
1499				&armv7_a7_perf_cache_map, 0xFF);
1500}
1501
1502static int armv7_a12_map_event(struct perf_event *event)
1503{
1504	return armpmu_map_event(event, &armv7_a12_perf_map,
1505				&armv7_a12_perf_cache_map, 0xFF);
1506}
1507
1508static int krait_map_event(struct perf_event *event)
1509{
1510	return armpmu_map_event(event, &krait_perf_map,
1511				&krait_perf_cache_map, 0xFFFFF);
1512}
1513
1514static int krait_map_event_no_branch(struct perf_event *event)
1515{
1516	return armpmu_map_event(event, &krait_perf_map_no_branch,
1517				&krait_perf_cache_map, 0xFFFFF);
1518}
1519
1520static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1521{
1522	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
1523	cpu_pmu->enable		= armv7pmu_enable_event;
1524	cpu_pmu->disable	= armv7pmu_disable_event;
1525	cpu_pmu->read_counter	= armv7pmu_read_counter;
1526	cpu_pmu->write_counter	= armv7pmu_write_counter;
1527	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
1528	cpu_pmu->start		= armv7pmu_start;
1529	cpu_pmu->stop		= armv7pmu_stop;
1530	cpu_pmu->reset		= armv7pmu_reset;
1531	cpu_pmu->max_period	= (1LLU << 32) - 1;
1532};
1533
1534static u32 armv7_read_num_pmnc_events(void)
1535{
1536	u32 nb_cnt;
1537
1538	/* Read the nb of CNTx counters supported from PMNC */
1539	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1540
1541	/* Add the CPU cycles counter and return */
1542	return nb_cnt + 1;
1543}
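/*
 * Example (illustrative): on Cortex-A8, which implements four event
 * counters, the PMNC N field reads as 4 and this returns 5
 * (PMN0..PMN3 plus the cycle counter).
 */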
1544
1545static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1546{
1547	armv7pmu_init(cpu_pmu);
1548	cpu_pmu->name		= "ARMv7 Cortex-A8";
1549	cpu_pmu->map_event	= armv7_a8_map_event;
1550	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1551	return 0;
1552}
1553
1554static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1555{
1556	armv7pmu_init(cpu_pmu);
1557	cpu_pmu->name		= "ARMv7 Cortex-A9";
1558	cpu_pmu->map_event	= armv7_a9_map_event;
1559	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1560	return 0;
1561}
1562
1563static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1564{
1565	armv7pmu_init(cpu_pmu);
1566	cpu_pmu->name		= "ARMv7 Cortex-A5";
1567	cpu_pmu->map_event	= armv7_a5_map_event;
1568	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1569	return 0;
1570}
1571
1572static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1573{
1574	armv7pmu_init(cpu_pmu);
1575	cpu_pmu->name		= "ARMv7 Cortex-A15";
1576	cpu_pmu->map_event	= armv7_a15_map_event;
1577	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1578	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1579	return 0;
1580}
1581
1582static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1583{
1584	armv7pmu_init(cpu_pmu);
1585	cpu_pmu->name		= "ARMv7 Cortex-A7";
1586	cpu_pmu->map_event	= armv7_a7_map_event;
1587	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1588	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1589	return 0;
1590}
1591
1592static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1593{
1594	armv7pmu_init(cpu_pmu);
1595	cpu_pmu->name		= "ARMv7 Cortex-A12";
1596	cpu_pmu->map_event	= armv7_a12_map_event;
1597	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1598	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1599	return 0;
1600}
1601
1602/*
1603 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1604 *
1605 *            31   30     24     16     8      0
1606 *            +--------------------------------+
1607 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1608 *            +--------------------------------+
1609 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1610 *            +--------------------------------+
1611 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1612 *            +--------------------------------+
1613 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1614 *            +--------------------------------+
1615 *              EN | G=3  | G=2  | G=1  | G=0
1616 *
1617 *  Event Encoding:
1618 *
1619 *      hwc->config_base = 0xNRCCG
1620 *
1621 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1622 *      R  = region register
1623 *      CC = class of events the group G is choosing from
1624 *      G  = group or particular event
1625 *
1626 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1627 *
1628 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1629 *  unit, etc.) while the event code (CC) corresponds to a particular class of
1630 *  events (interrupts for example). An event code is broken down into
1631 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1632 *  example).
1633 */
1634
1635#define KRAIT_EVENT		(1 << 16)
1636#define VENUM_EVENT		(2 << 16)
1637#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
1638#define PMRESRn_EN		BIT(31)
1639
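/*
 * Worked example (illustrative) for the sample 0x12021 in the comment
 * above, following the field extraction in krait_evt_setup() below:
 *
 *	prefix: 0x12021 & KRAIT_EVENT_MASK = KRAIT_EVENT (Krait CPU event)
 *	region: (0x12021 >> 12) & 0xf      = 2           (PMRESR2)
 *	code:   (0x12021 >> 4)  & 0xff     = 0x02
 *	group:  0x12021 & 0xf              = 1
 *
 * i.e. byte 1 of PMRESR2 is loaded with code 0x02 and PMRESRn_EN is set.
 */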
1640static u32 krait_read_pmresrn(int n)
1641{
1642	u32 val;
1643
1644	switch (n) {
1645	case 0:
1646		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
1647		break;
1648	case 1:
1649		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
1650		break;
1651	case 2:
1652		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
1653		break;
1654	default:
1655		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1656	}
1657
1658	return val;
1659}
1660
1661static void krait_write_pmresrn(int n, u32 val)
1662{
1663	switch (n) {
1664	case 0:
1665		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
1666		break;
1667	case 1:
1668		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
1669		break;
1670	case 2:
1671		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
1672		break;
1673	default:
1674		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1675	}
1676}
1677
1678static u32 krait_read_vpmresr0(void)
1679{
1680	u32 val;
1681	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1682	return val;
1683}
1684
1685static void krait_write_vpmresr0(u32 val)
1686{
1687	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1688}
1689
1690static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
1691{
1692	u32 venum_new_val;
1693	u32 fp_new_val;
1694
1695	BUG_ON(preemptible());
1696	/* CPACR Enable CP10 and CP11 access */
1697	*venum_orig_val = get_copro_access();
1698	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
1699	set_copro_access(venum_new_val);
1700
1701	/* Enable FPEXC */
1702	*fp_orig_val = fmrx(FPEXC);
1703	fp_new_val = *fp_orig_val | FPEXC_EN;
1704	fmxr(FPEXC, fp_new_val);
1705}
1706
1707static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val)
1708{
1709	BUG_ON(preemptible());
1710	/* Restore FPEXC */
1711	fmxr(FPEXC, fp_orig_val);
1712	isb();
1713	/* Restore CPACR */
1714	set_copro_access(venum_orig_val);
1715}
1716
1717static u32 krait_get_pmresrn_event(unsigned int region)
1718{
1719	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
1720					     KRAIT_PMRESR1_GROUP0,
1721					     KRAIT_PMRESR2_GROUP0 };
1722	return pmresrn_table[region];
1723}
1724
1725static void krait_evt_setup(int idx, u32 config_base)
1726{
1727	u32 val;
1728	u32 mask;
1729	u32 vval, fval;
1730	unsigned int region;
1731	unsigned int group;
1732	unsigned int code;
1733	unsigned int group_shift;
1734	bool venum_event;
1735
1736	venum_event = !!(config_base & VENUM_EVENT);
1737	region = (config_base >> 12) & 0xf;
1738	code   = (config_base >> 4) & 0xff;
1739	group  = (config_base >> 0)  & 0xf;
1740
1741	group_shift = group * 8;
1742	mask = 0xff << group_shift;
1743
1744	/* Configure evtsel for the region and group */
1745	if (venum_event)
1746		val = KRAIT_VPMRESR0_GROUP0;
1747	else
1748		val = krait_get_pmresrn_event(region);
1749	val += group;
1750	/* Mix in mode-exclusion bits */
1751	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1752	armv7_pmnc_write_evtsel(idx, val);
1753
1754	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1755
1756	if (venum_event) {
1757		krait_pre_vpmresr0(&vval, &fval);
1758		val = krait_read_vpmresr0();
1759		val &= ~mask;
1760		val |= code << group_shift;
1761		val |= PMRESRn_EN;
1762		krait_write_vpmresr0(val);
1763		krait_post_vpmresr0(vval, fval);
1764	} else {
1765		val = krait_read_pmresrn(region);
1766		val &= ~mask;
1767		val |= code << group_shift;
1768		val |= PMRESRn_EN;
1769		krait_write_pmresrn(region, val);
1770	}
1771}
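Continuing the 0x12021 example: group 1 gives group_shift = 8 and mask = 0xff00, so the read-modify-write above clears byte 1 of PMRESR2, installs the code (2 << 8 = 0x200), and sets PMRESRn_EN (bit 31) so the region register is live.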
1772
1773static u32 krait_clear_pmresrn_group(u32 val, int group)
1774{
1775	u32 mask;
1776	int group_shift;
1777
1778	group_shift = group * 8;
1779	mask = 0xff << group_shift;
1780	val &= ~mask;
1781
1782	/* Don't clear the enable bit unless the entire region is disabled */
1783	if (val & ~PMRESRn_EN)
1784		return val |= PMRESRn_EN;
1785
1786	return 0;
1787}
1788
1789static void krait_clearpmu(u32 config_base)
1790{
1791	u32 val;
1792	u32 vval, fval;
1793	unsigned int region;
1794	unsigned int group;
1795	bool venum_event;
1796
1797	venum_event = !!(config_base & VENUM_EVENT);
1798	region = (config_base >> 12) & 0xf;
1799	group  = (config_base >> 0)  & 0xf;
1800
1801	if (venum_event) {
1802		krait_pre_vpmresr0(&vval, &fval);
1803		val = krait_read_vpmresr0();
1804		val = krait_clear_pmresrn_group(val, group);
1805		krait_write_vpmresr0(val);
1806		krait_post_vpmresr0(vval, fval);
1807	} else {
1808		val = krait_read_pmresrn(region);
1809		val = krait_clear_pmresrn_group(val, group);
1810		krait_write_pmresrn(region, val);
1811	}
1812}
1813
1814static void krait_pmu_disable_event(struct perf_event *event)
1815{
1816	unsigned long flags;
1817	struct hw_perf_event *hwc = &event->hw;
1818	int idx = hwc->idx;
1819	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1820
1821	/* Disable counter and interrupt */
1822	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1823
1824	/* Disable counter */
1825	armv7_pmnc_disable_counter(idx);
1826
1827	/*
1828	 * Clear pmresr code (if destined for PMNx counters)
1829	 */
1830	if (hwc->config_base & KRAIT_EVENT_MASK)
1831		krait_clearpmu(hwc->config_base);
1832
1833	/* Disable interrupt for this counter */
1834	armv7_pmnc_disable_intens(idx);
1835
1836	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1837}
1838
1839static void krait_pmu_enable_event(struct perf_event *event)
1840{
1841	unsigned long flags;
1842	struct hw_perf_event *hwc = &event->hw;
1843	int idx = hwc->idx;
1844	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1845
1846	/*
1847	 * Enable counter and interrupt, and set the counter to count
1848	 * the event that we're interested in.
1849	 */
1850	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1851
1852	/* Disable counter */
1853	armv7_pmnc_disable_counter(idx);
1854
1855	/*
1856	 * Set event (if destined for PMNx counters)
1857	 * We set the event for the cycle counter because we
1858	 * have the ability to perform event filtering.
1859	 */
1860	if (hwc->config_base & KRAIT_EVENT_MASK)
1861		krait_evt_setup(idx, hwc->config_base);
1862	else
1863		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1864
1865	/* Enable interrupt for this counter */
1866	armv7_pmnc_enable_intens(idx);
1867
1868	/* Enable counter */
1869	armv7_pmnc_enable_counter(idx);
1870
1871	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1872}
1873
1874static void krait_pmu_reset(void *info)
1875{
1876	u32 vval, fval;
1877
1878	armv7pmu_reset(info);
1879
1880	/* Clear all pmresrs */
1881	krait_write_pmresrn(0, 0);
1882	krait_write_pmresrn(1, 0);
1883	krait_write_pmresrn(2, 0);
1884
1885	krait_pre_vpmresr0(&vval, &fval);
1886	krait_write_vpmresr0(0);
1887	krait_post_vpmresr0(vval, fval);
1888}
1889
1890static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1891			      unsigned int group)
1892{
1893	int bit;
1894	struct hw_perf_event *hwc = &event->hw;
1895	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1896
1897	if (hwc->config_base & VENUM_EVENT)
1898		bit = KRAIT_VPMRESR0_GROUP0;
1899	else
1900		bit = krait_get_pmresrn_event(region);
1901	bit -= krait_get_pmresrn_event(0);
1902	bit += group;
1903	/*
1904	 * Lower bits are reserved for use by the counters (see
1905	 * armv7pmu_get_event_idx() for more info)
1906	 */
1907	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1908
1909	return bit;
1910}
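As a worked example, assuming a Krait with num_events = 5 (four programmable counters plus the cycle counter): a Venum event in group 2 lands on bit (KRAIT_VPMRESR0_GROUP0 - KRAIT_PMRESR0_GROUP0) + 2 + (ARMV7_IDX_COUNTER_LAST + 1) = (0xd8 - 0xcc) + 2 + 5 = 19, i.e. the PMRESR group reservations occupy used_mask bits above the ones that track the counters themselves.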
1911
1912/*
1913 * We check for column exclusion constraints here.
1914 * Two events can't use the same group within a pmresr register.
1915 */
1916static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1917				   struct perf_event *event)
1918{
1919	int idx;
1920	int bit;
1921	unsigned int prefix;
1922	unsigned int region;
1923	unsigned int code;
1924	unsigned int group;
1925	bool krait_event;
1926	struct hw_perf_event *hwc = &event->hw;
1927
1928	region = (hwc->config_base >> 12) & 0xf;
1929	code   = (hwc->config_base >> 4) & 0xff;
1930	group  = (hwc->config_base >> 0) & 0xf;
1931	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
1932
1933	if (krait_event) {
1934		/* Ignore invalid events */
1935		if (group > 3 || region > 2)
1936			return -EINVAL;
1937		prefix = hwc->config_base & KRAIT_EVENT_MASK;
1938		if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
1939			return -EINVAL;
1940		if (prefix == VENUM_EVENT && (code & 0xe0))
1941			return -EINVAL;
1942
1943		bit = krait_event_to_bit(event, region, group);
1944		if (test_and_set_bit(bit, cpuc->used_mask))
1945			return -EAGAIN;
1946	}
1947
1948	idx = armv7pmu_get_event_idx(cpuc, event);
1949	if (idx < 0 && krait_event)
1950		clear_bit(bit, cpuc->used_mask);
1951
1952	return idx;
1953}
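In practice this means two events whose config_base decodes to the same region and group collide on the same used_mask bit regardless of their CC codes: the first one claims it, and the second sees test_and_set_bit() fail and gets -EAGAIN until the first is scheduled out.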
1954
1955static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1956				      struct perf_event *event)
1957{
1958	int bit;
1959	struct hw_perf_event *hwc = &event->hw;
1960	unsigned int region;
1961	unsigned int group;
1962	bool krait_event;
1963
1964	region = (hwc->config_base >> 12) & 0xf;
1965	group  = (hwc->config_base >> 0) & 0xf;
1966	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
1967
1968	if (krait_event) {
1969		bit = krait_event_to_bit(event, region, group);
1970		clear_bit(bit, cpuc->used_mask);
1971	}
1972}
1973
1974static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1975{
1976	armv7pmu_init(cpu_pmu);
1977	cpu_pmu->name		= "ARMv7 Krait";
1978	/* Some early versions of Krait don't support PC write events */
1979	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
1980				  "qcom,no-pc-write"))
1981		cpu_pmu->map_event = krait_map_event_no_branch;
1982	else
1983		cpu_pmu->map_event = krait_map_event;
1984	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
1985	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1986	cpu_pmu->reset		= krait_pmu_reset;
1987	cpu_pmu->enable		= krait_pmu_enable_event;
1988	cpu_pmu->disable	= krait_pmu_disable_event;
1989	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
1990	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1991	return 0;
1992}
1993#else
1994static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1995{
1996	return -ENODEV;
1997}
1998
1999static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
2000{
2001	return -ENODEV;
2002}
2003
2004static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
2005{
2006	return -ENODEV;
2007}
2008
2009static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
2010{
2011	return -ENODEV;
2012}
2013
2014static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
2015{
2016	return -ENODEV;
2017}
2018
2019static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
2020{
2021	return -ENODEV;
2022}
2023
2024static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
2025{
2026	return -ENODEV;
2027}
2028#endif	/* CONFIG_CPU_V7 */
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
   4 *
   5 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
   6 * 2010 (c) MontaVista Software, LLC.
   7 *
   8 * Copied from ARMv6 code, with the low level code inspired
   9 *  by the ARMv7 Oprofile code.
  10 *
  11 * Cortex-A8 has up to 4 configurable performance counters and
  12 *  a single cycle counter.
  13 * Cortex-A9 has up to 31 configurable performance counters and
  14 *  a single cycle counter.
  15 *
  16 * All counters can be enabled/disabled and IRQ masked separately. The cycle
  17 *  counter and all 4 performance counters together can be reset separately.
  18 */
  19
  20#ifdef CONFIG_CPU_V7
  21
  22#include <asm/cp15.h>
  23#include <asm/cputype.h>
  24#include <asm/irq_regs.h>
  25#include <asm/vfp.h>
  26#include "../vfp/vfpinstr.h"
  27
  28#include <linux/of.h>
  29#include <linux/perf/arm_pmu.h>
  30#include <linux/platform_device.h>
  31
  32/*
  33 * Common ARMv7 event types
  34 *
  35 * Note: An implementation may not be able to count all of these events
  36 * but the encodings are considered to be `reserved' in the case that
  37 * they are not available.
  38 */
  39#define ARMV7_PERFCTR_PMNC_SW_INCR			0x00
  40#define ARMV7_PERFCTR_L1_ICACHE_REFILL			0x01
  41#define ARMV7_PERFCTR_ITLB_REFILL			0x02
  42#define ARMV7_PERFCTR_L1_DCACHE_REFILL			0x03
  43#define ARMV7_PERFCTR_L1_DCACHE_ACCESS			0x04
  44#define ARMV7_PERFCTR_DTLB_REFILL			0x05
  45#define ARMV7_PERFCTR_MEM_READ				0x06
  46#define ARMV7_PERFCTR_MEM_WRITE				0x07
  47#define ARMV7_PERFCTR_INSTR_EXECUTED			0x08
  48#define ARMV7_PERFCTR_EXC_TAKEN				0x09
  49#define ARMV7_PERFCTR_EXC_EXECUTED			0x0A
  50#define ARMV7_PERFCTR_CID_WRITE				0x0B
  51
  52/*
  53 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
  54 * It counts:
  55 *  - all (taken) branch instructions,
  56 *  - instructions that explicitly write the PC,
  57 *  - exception generating instructions.
  58 */
  59#define ARMV7_PERFCTR_PC_WRITE				0x0C
  60#define ARMV7_PERFCTR_PC_IMM_BRANCH			0x0D
  61#define ARMV7_PERFCTR_PC_PROC_RETURN			0x0E
  62#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		0x0F
  63#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		0x10
  64#define ARMV7_PERFCTR_CLOCK_CYCLES			0x11
  65#define ARMV7_PERFCTR_PC_BRANCH_PRED			0x12
  66
  67/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
  68#define ARMV7_PERFCTR_MEM_ACCESS			0x13
  69#define ARMV7_PERFCTR_L1_ICACHE_ACCESS			0x14
  70#define ARMV7_PERFCTR_L1_DCACHE_WB			0x15
  71#define ARMV7_PERFCTR_L2_CACHE_ACCESS			0x16
  72#define ARMV7_PERFCTR_L2_CACHE_REFILL			0x17
  73#define ARMV7_PERFCTR_L2_CACHE_WB			0x18
  74#define ARMV7_PERFCTR_BUS_ACCESS			0x19
  75#define ARMV7_PERFCTR_MEM_ERROR				0x1A
  76#define ARMV7_PERFCTR_INSTR_SPEC			0x1B
  77#define ARMV7_PERFCTR_TTBR_WRITE			0x1C
  78#define ARMV7_PERFCTR_BUS_CYCLES			0x1D
  79
  80#define ARMV7_PERFCTR_CPU_CYCLES			0xFF
  81
  82/* ARMv7 Cortex-A8 specific event types */
  83#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		0x43
  84#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL		0x44
  85#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		0x50
  86#define ARMV7_A8_PERFCTR_STALL_ISIDE			0x56
  87
  88/* ARMv7 Cortex-A9 specific event types */
  89#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		0x68
  90#define ARMV7_A9_PERFCTR_STALL_ICACHE			0x60
  91#define ARMV7_A9_PERFCTR_STALL_DISPATCH			0x66
  92
  93/* ARMv7 Cortex-A5 specific event types */
  94#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		0xc2
  95#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		0xc3
  96
  97/* ARMv7 Cortex-A15 specific event types */
  98#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
  99#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
 100#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		0x42
 101#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	0x43
 102
 103#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		0x4C
 104#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		0x4D
 105
 106#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		0x50
 107#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
 108#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		0x52
 109#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		0x53
 110
 111#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC			0x76
 112
 113/* ARMv7 Cortex-A12 specific event types */
 114#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
 115#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
 116
 117#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		0x50
 118#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
 119
 120#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC			0x76
 121
 122#define ARMV7_A12_PERFCTR_PF_TLB_REFILL			0xe7
 123
 124/* ARMv7 Krait specific event types */
 125#define KRAIT_PMRESR0_GROUP0				0xcc
 126#define KRAIT_PMRESR1_GROUP0				0xd0
 127#define KRAIT_PMRESR2_GROUP0				0xd4
 128#define KRAIT_VPMRESR0_GROUP0				0xd8
 129
 130#define KRAIT_PERFCTR_L1_ICACHE_ACCESS			0x10011
 131#define KRAIT_PERFCTR_L1_ICACHE_MISS			0x10010
 132
 133#define KRAIT_PERFCTR_L1_ITLB_ACCESS			0x12222
 134#define KRAIT_PERFCTR_L1_DTLB_ACCESS			0x12210
 135
 136/* ARMv7 Scorpion specific event types */
 137#define SCORPION_LPM0_GROUP0				0x4c
 138#define SCORPION_LPM1_GROUP0				0x50
 139#define SCORPION_LPM2_GROUP0				0x54
 140#define SCORPION_L2LPM_GROUP0				0x58
 141#define SCORPION_VLPM_GROUP0				0x5c
 142
 143#define SCORPION_ICACHE_ACCESS				0x10053
 144#define SCORPION_ICACHE_MISS				0x10052
 145
 146#define SCORPION_DTLB_ACCESS				0x12013
 147#define SCORPION_DTLB_MISS				0x12012
 148
 149#define SCORPION_ITLB_MISS				0x12021
 150
 151/*
 152 * Cortex-A8 HW events mapping
 153 *
 154 * The hardware events that we support. We do support cache operations but
 155 * we have Harvard caches and no way to combine instruction and data
 156 * accesses/misses in hardware.
 157 */
 158static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
 159	PERF_MAP_ALL_UNSUPPORTED,
 160	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 161	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 162	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 163	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 164	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 165	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 166	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
 167};
 168
 169static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 170					  [PERF_COUNT_HW_CACHE_OP_MAX]
 171					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 172	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 173
 174	/*
 175	 * The performance counters don't differentiate between read and write
 176	 * accesses/misses so this isn't strictly correct, but it's the best we
 177	 * can do. Writes and reads get combined.
 178	 */
 179	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 180	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 181	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 182	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 183
 184	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
 185	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 186
 187	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 188	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 189	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
 190	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
 191
 192	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 193	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 194
 195	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 196	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 197
 198	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 199	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 200	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 201	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 202};
 203
 204/*
 205 * Cortex-A9 HW events mapping
 206 */
 207static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 208	PERF_MAP_ALL_UNSUPPORTED,
 209	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 210	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
 211	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 212	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 213	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 214	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 215	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
 216	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
 217};
 218
 219static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 220					  [PERF_COUNT_HW_CACHE_OP_MAX]
 221					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 222	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 223
 224	/*
 225	 * The performance counters don't differentiate between read and write
 226	 * accesses/misses so this isn't strictly correct, but it's the best we
 227	 * can do. Writes and reads get combined.
 228	 */
 229	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 230	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 231	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 232	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 233
 234	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 235
 236	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 237	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 238
 239	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 240	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 241
 242	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 243	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 244	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 245	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 246};
 247
 248/*
 249 * Cortex-A5 HW events mapping
 250 */
 251static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
 252	PERF_MAP_ALL_UNSUPPORTED,
 253	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 254	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 255	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 256	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 257	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 258	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 259};
 260
 261static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 262					[PERF_COUNT_HW_CACHE_OP_MAX]
 263					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 264	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 265
 266	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 267	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 268	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 269	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 270	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 271	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 272
 273	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 274	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 275	/*
 276	 * The prefetch counters don't differentiate between the I side and the
 277	 * D side.
 278	 */
 279	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
 280	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
 281
 282	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 283	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 284
 285	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 286	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 287
 288	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 289	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 290	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 291	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 292};
 293
 294/*
 295 * Cortex-A15 HW events mapping
 296 */
 297static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
 298	PERF_MAP_ALL_UNSUPPORTED,
 299	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 300	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 301	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 302	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 303	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
 304	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 305	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 306};
 307
 308static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 309					[PERF_COUNT_HW_CACHE_OP_MAX]
 310					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 311	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 312
 313	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
 314	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
 315	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 316	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
 317
 318	/*
 319	 * Not all performance counters differentiate between read and write
 320	 * accesses/misses so we're not always strictly correct, but it's the
 321	 * best we can do. Writes and reads get combined in these cases.
 322	 */
 323	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 324	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 325
 326	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
 327	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
 328	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
 329	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
 330
 331	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
 332	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
 333
 334	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 335	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 336
 337	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 338	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 339	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 340	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 341};
 342
 343/*
 344 * Cortex-A7 HW events mapping
 345 */
 346static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
 347	PERF_MAP_ALL_UNSUPPORTED,
 348	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 349	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 350	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 351	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 352	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
 353	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 354	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 355};
 356
 357static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 358					[PERF_COUNT_HW_CACHE_OP_MAX]
 359					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 360	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 361
 362	/*
 363	 * The performance counters don't differentiate between read and write
 364	 * accesses/misses so this isn't strictly correct, but it's the best we
 365	 * can do. Writes and reads get combined.
 366	 */
 367	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 368	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 369	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 370	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 371
 372	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 373	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 374
 375	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 376	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 377	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
 378	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 379
 380	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 381	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 382
 383	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 384	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 385
 386	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 387	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 388	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 389	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 390};
 391
 392/*
 393 * Cortex-A12 HW events mapping
 394 */
 395static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
 396	PERF_MAP_ALL_UNSUPPORTED,
 397	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
 398	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
 399	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 400	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 401	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
 402	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 403	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
 404};
 405
 406static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 407					[PERF_COUNT_HW_CACHE_OP_MAX]
 408					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 409	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 410
 411	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
 412	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 413	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
 414	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 415
 416	/*
 417	 * Not all performance counters differentiate between read and write
 418	 * accesses/misses so we're not always strictly correct, but it's the
 419	 * best we can do. Writes and reads get combined in these cases.
 420	 */
 421	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
 422	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
 423
 424	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
 425	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 426	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
 427	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
 428
 429	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 430	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
 431	[C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]	= ARMV7_A12_PERFCTR_PF_TLB_REFILL,
 432
 433	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 434	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
 435
 436	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 437	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 438	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 439	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 440};
 441
 442/*
 443 * Krait HW events mapping
 444 */
 445static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
 446	PERF_MAP_ALL_UNSUPPORTED,
 447	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 448	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 449	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 450	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 451	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 452};
 453
 454static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
 455	PERF_MAP_ALL_UNSUPPORTED,
 456	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 457	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 458	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 459	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 460};
 461
 462static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 463					  [PERF_COUNT_HW_CACHE_OP_MAX]
 464					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 465	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 466
 467	/*
 468	 * The performance counters don't differentiate between read and write
 469	 * accesses/misses so this isn't strictly correct, but it's the best we
 470	 * can do. Writes and reads get combined.
 471	 */
 472	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 473	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 474	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 475	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
 476
 477	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
 478	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,
 479
 480	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
 481	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
 482
 483	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
 484	[C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
 485
 486	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 487	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 488	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
 489	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 490};
 491
 492/*
 493 * Scorpion HW events mapping
 494 */
 495static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
 496	PERF_MAP_ALL_UNSUPPORTED,
 497	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
 498	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
 499	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 500	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 501	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
 502};
 503
 504static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 505					    [PERF_COUNT_HW_CACHE_OP_MAX]
 506					    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 507	PERF_CACHE_MAP_ALL_UNSUPPORTED,
 508	/*
 509	 * The performance counters don't differentiate between read and write
 510	 * accesses/misses so this isn't strictly correct, but it's the best we
 511	 * can do. Writes and reads get combined.
 512	 */
 513	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 514	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 515	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
 516	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
 517	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
 518	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
 519	/*
  520	 * Only ITLB misses and DTLB refills are supported. If users want
  521	 * DTLB refill misses, a raw counter must be used.
 522	 */
 523	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
 524	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
 525	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
 526	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
 527	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
 528	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
 529	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 530	[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 531	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
 532	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 533};
 534
 535PMU_FORMAT_ATTR(event, "config:0-7");
 536
 537static struct attribute *armv7_pmu_format_attrs[] = {
 538	&format_attr_event.attr,
 539	NULL,
 540};
 541
 542static struct attribute_group armv7_pmu_format_attr_group = {
 543	.name = "format",
 544	.attrs = armv7_pmu_format_attrs,
 545};
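The format attribute above tells the perf tool that raw events for this PMU are one byte wide and live in config bits 0-7. Assuming a system where the PMU registered under the name armv7_cortex_a9, a raw event could then be requested with something like perf stat -e armv7_cortex_a9/event=0x68/.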
 546
 547#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
 548#define ARMV7_EVENT_ATTR(name, config) \
 549	PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
 550			      "event=" ARMV7_EVENT_ATTR_RESOLVE(config))
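The extra ARMV7_EVENT_ATTR_RESOLVE() level is the usual two-step stringification trick: the config argument is macro-expanded before # is applied, so ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR) publishes the string "event=0x00" rather than the literal token name.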
 551
 552ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
 553ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
 554ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
 555ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
 556ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
 557ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
 558ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
 559ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
 560ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
 561ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
 562ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
 563ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
 564ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
 565ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
 566ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
 567ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
 568ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
 569ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
 570ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);
 571
 572static struct attribute *armv7_pmuv1_event_attrs[] = {
 573	&armv7_event_attr_sw_incr.attr.attr,
 574	&armv7_event_attr_l1i_cache_refill.attr.attr,
 575	&armv7_event_attr_l1i_tlb_refill.attr.attr,
 576	&armv7_event_attr_l1d_cache_refill.attr.attr,
 577	&armv7_event_attr_l1d_cache.attr.attr,
 578	&armv7_event_attr_l1d_tlb_refill.attr.attr,
 579	&armv7_event_attr_ld_retired.attr.attr,
 580	&armv7_event_attr_st_retired.attr.attr,
 581	&armv7_event_attr_inst_retired.attr.attr,
 582	&armv7_event_attr_exc_taken.attr.attr,
 583	&armv7_event_attr_exc_return.attr.attr,
 584	&armv7_event_attr_cid_write_retired.attr.attr,
 585	&armv7_event_attr_pc_write_retired.attr.attr,
 586	&armv7_event_attr_br_immed_retired.attr.attr,
 587	&armv7_event_attr_br_return_retired.attr.attr,
 588	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
 589	&armv7_event_attr_br_mis_pred.attr.attr,
 590	&armv7_event_attr_cpu_cycles.attr.attr,
 591	&armv7_event_attr_br_pred.attr.attr,
 592	NULL,
 593};
 594
 595static struct attribute_group armv7_pmuv1_events_attr_group = {
 596	.name = "events",
 597	.attrs = armv7_pmuv1_event_attrs,
 598};
 599
 600ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
 601ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
 602ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
 603ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
 604ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
 605ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
 606ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
 607ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
 608ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
 609ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
 610ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);
 611
 612static struct attribute *armv7_pmuv2_event_attrs[] = {
 613	&armv7_event_attr_sw_incr.attr.attr,
 614	&armv7_event_attr_l1i_cache_refill.attr.attr,
 615	&armv7_event_attr_l1i_tlb_refill.attr.attr,
 616	&armv7_event_attr_l1d_cache_refill.attr.attr,
 617	&armv7_event_attr_l1d_cache.attr.attr,
 618	&armv7_event_attr_l1d_tlb_refill.attr.attr,
 619	&armv7_event_attr_ld_retired.attr.attr,
 620	&armv7_event_attr_st_retired.attr.attr,
 621	&armv7_event_attr_inst_retired.attr.attr,
 622	&armv7_event_attr_exc_taken.attr.attr,
 623	&armv7_event_attr_exc_return.attr.attr,
 624	&armv7_event_attr_cid_write_retired.attr.attr,
 625	&armv7_event_attr_pc_write_retired.attr.attr,
 626	&armv7_event_attr_br_immed_retired.attr.attr,
 627	&armv7_event_attr_br_return_retired.attr.attr,
 628	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
 629	&armv7_event_attr_br_mis_pred.attr.attr,
 630	&armv7_event_attr_cpu_cycles.attr.attr,
 631	&armv7_event_attr_br_pred.attr.attr,
 632	&armv7_event_attr_mem_access.attr.attr,
 633	&armv7_event_attr_l1i_cache.attr.attr,
 634	&armv7_event_attr_l1d_cache_wb.attr.attr,
 635	&armv7_event_attr_l2d_cache.attr.attr,
 636	&armv7_event_attr_l2d_cache_refill.attr.attr,
 637	&armv7_event_attr_l2d_cache_wb.attr.attr,
 638	&armv7_event_attr_bus_access.attr.attr,
 639	&armv7_event_attr_memory_error.attr.attr,
 640	&armv7_event_attr_inst_spec.attr.attr,
 641	&armv7_event_attr_ttbr_write_retired.attr.attr,
 642	&armv7_event_attr_bus_cycles.attr.attr,
 643	NULL,
 644};
 645
 646static struct attribute_group armv7_pmuv2_events_attr_group = {
 647	.name = "events",
 648	.attrs = armv7_pmuv2_event_attrs,
 649};
 650
 651/*
 652 * Perf Events' indices
 653 */
 654#define	ARMV7_IDX_CYCLE_COUNTER	0
 655#define	ARMV7_IDX_COUNTER0	1
 656#define	ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
 657	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 658
 659#define	ARMV7_MAX_COUNTERS	32
 660#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
 661
 662/*
 663 * ARMv7 low level PMNC access
 664 */
 665
 666/*
 667 * Perf Event to low level counters mapping
 668 */
 669#define	ARMV7_IDX_TO_COUNTER(x)	\
 670	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
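Note the deliberate wrap-around: event counter indices 1..N map to hardware counters 0..N-1, while the cycle counter (idx 0) maps to (0 - 1) & 0x1f = 31, matching the architectural position of the cycle counter bit in PMCNTENSET, PMINTENSET and the overflow flag register that the BIT(counter) accessors below rely on.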
 671
 672/*
 673 * Per-CPU PMNC: config reg
 674 */
 675#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
 676#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
 677#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
 678#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
 679#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
 680#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
 681#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
 682#define	ARMV7_PMNC_N_MASK	0x1f
 683#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
 684
 685/*
 686 * FLAG: counters overflow flag status reg
 687 */
 688#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
 689#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 690
 691/*
 692 * PMXEVTYPER: Event selection reg
 693 */
 694#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
 695#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
 696
 697/*
 698 * Event filters for PMUv2
 699 */
 700#define	ARMV7_EXCLUDE_PL1	BIT(31)
 701#define	ARMV7_EXCLUDE_USER	BIT(30)
 702#define	ARMV7_INCLUDE_HYP	BIT(27)
 703
 704/*
 705 * Secure debug enable reg
 706 */
 707#define ARMV7_SDER_SUNIDEN	BIT(1) /* Permit non-invasive debug */
 708
 709static inline u32 armv7_pmnc_read(void)
 710{
 711	u32 val;
 712	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 713	return val;
 714}
 715
 716static inline void armv7_pmnc_write(u32 val)
 717{
 718	val &= ARMV7_PMNC_MASK;
 719	isb();
 720	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 721}
 722
 723static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 724{
 725	return pmnc & ARMV7_OVERFLOWED_MASK;
 726}
 727
 728static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
 729{
 730	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
 731		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
 732}
 733
 734static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 735{
 736	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
 737}
 738
 739static inline void armv7_pmnc_select_counter(int idx)
 740{
 741	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 742	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 743	isb();
 744}
 745
 746static inline u64 armv7pmu_read_counter(struct perf_event *event)
 747{
 748	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 749	struct hw_perf_event *hwc = &event->hw;
 750	int idx = hwc->idx;
 751	u32 value = 0;
 752
 753	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 754		pr_err("CPU%u reading wrong counter %d\n",
 755			smp_processor_id(), idx);
 756	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
 757		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
 758	} else {
 759		armv7_pmnc_select_counter(idx);
 760		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
 761	}
 762
 763	return value;
 764}
 765
 766static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
 767{
 768	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 769	struct hw_perf_event *hwc = &event->hw;
 770	int idx = hwc->idx;
 771
 772	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 773		pr_err("CPU%u writing wrong counter %d\n",
 774			smp_processor_id(), idx);
 775	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
 776		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
 777	} else {
 778		armv7_pmnc_select_counter(idx);
 779		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
 780	}
 781}
 782
 783static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 784{
 785	armv7_pmnc_select_counter(idx);
 786	val &= ARMV7_EVTYPE_MASK;
 787	asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 788}
 789
 790static inline void armv7_pmnc_enable_counter(int idx)
 791{
 792	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 793	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 794}
 795
 796static inline void armv7_pmnc_disable_counter(int idx)
 797{
 798	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 799	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 800}
 801
 802static inline void armv7_pmnc_enable_intens(int idx)
 803{
 804	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 805	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 806}
 807
 808static inline void armv7_pmnc_disable_intens(int idx)
 809{
 810	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
 811	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 812	isb();
 813	/* Clear the overflow flag in case an interrupt is pending. */
 814	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
 815	isb();
 816}
 817
 818static inline u32 armv7_pmnc_getreset_flags(void)
 819{
 820	u32 val;
 821
 822	/* Read */
 823	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 824
 825	/* Write to clear flags */
 826	val &= ARMV7_FLAG_MASK;
 827	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
 828
 829	return val;
 830}
 831
 832#ifdef DEBUG
 833static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
 834{
 835	u32 val;
 836	unsigned int cnt;
 837
 838	pr_info("PMNC registers dump:\n");
 839
 840	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
 841	pr_info("PMNC  =0x%08x\n", val);
 842
 843	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
 844	pr_info("CNTENS=0x%08x\n", val);
 845
 846	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
 847	pr_info("INTENS=0x%08x\n", val);
 848
 849	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
 850	pr_info("FLAGS =0x%08x\n", val);
 851
 852	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
 853	pr_info("SELECT=0x%08x\n", val);
 854
 855	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 856	pr_info("CCNT  =0x%08x\n", val);
 857
 858	for (cnt = ARMV7_IDX_COUNTER0;
 859			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
 860		armv7_pmnc_select_counter(cnt);
 861		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 862		pr_info("CNT[%d] count =0x%08x\n",
 863			ARMV7_IDX_TO_COUNTER(cnt), val);
 864		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 865		pr_info("CNT[%d] evtsel=0x%08x\n",
 866			ARMV7_IDX_TO_COUNTER(cnt), val);
 867	}
 868}
 869#endif
 870
 871static void armv7pmu_enable_event(struct perf_event *event)
 872{
 873	unsigned long flags;
 874	struct hw_perf_event *hwc = &event->hw;
 875	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 876	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 877	int idx = hwc->idx;
 878
 879	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 880		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
 881			smp_processor_id(), idx);
 882		return;
 883	}
 884
 885	/*
 886	 * Enable counter and interrupt, and set the counter to count
 887	 * the event that we're interested in.
 888	 */
 889	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 890
 891	/*
 892	 * Disable counter
 893	 */
 894	armv7_pmnc_disable_counter(idx);
 895
 896	/*
 897	 * Set event (if destined for PMNx counters)
 898	 * We only need to set the event for the cycle counter if we
 899	 * have the ability to perform event filtering.
 900	 */
 901	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
 902		armv7_pmnc_write_evtsel(idx, hwc->config_base);
 903
 904	/*
 905	 * Enable interrupt for this counter
 906	 */
 907	armv7_pmnc_enable_intens(idx);
 908
 909	/*
 910	 * Enable counter
 911	 */
 912	armv7_pmnc_enable_counter(idx);
 913
 914	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 915}
 916
 917static void armv7pmu_disable_event(struct perf_event *event)
 918{
 919	unsigned long flags;
 920	struct hw_perf_event *hwc = &event->hw;
 921	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
 922	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 923	int idx = hwc->idx;
 924
 925	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
 926		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
 927			smp_processor_id(), idx);
 928		return;
 929	}
 930
 931	/*
 932	 * Disable counter and interrupt
 933	 */
 934	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 935
 936	/*
 937	 * Disable counter
 938	 */
 939	armv7_pmnc_disable_counter(idx);
 940
 941	/*
 942	 * Disable interrupt for this counter
 943	 */
 944	armv7_pmnc_disable_intens(idx);
 945
 946	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 947}
 948
 949static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
 950{
 951	u32 pmnc;
 952	struct perf_sample_data data;
 953	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
 954	struct pt_regs *regs;
 955	int idx;
 956
 957	/*
 958	 * Get and reset the IRQ flags
 959	 */
 960	pmnc = armv7_pmnc_getreset_flags();
 961
 962	/*
 963	 * Did an overflow occur?
 964	 */
 965	if (!armv7_pmnc_has_overflowed(pmnc))
 966		return IRQ_NONE;
 967
 968	/*
 969	 * Handle the counter(s) overflow(s)
 970	 */
 971	regs = get_irq_regs();
 972
 973	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 974		struct perf_event *event = cpuc->events[idx];
 975		struct hw_perf_event *hwc;
 976
 977		/* Ignore if we don't have an event. */
 978		if (!event)
 979			continue;
 980
 981		/*
 982		 * We have a single interrupt for all counters. Check that
 983		 * each counter has overflowed before we process it.
 984		 */
 985		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
 986			continue;
 987
 988		hwc = &event->hw;
 989		armpmu_event_update(event);
 990		perf_sample_data_init(&data, 0, hwc->last_period);
 991		if (!armpmu_event_set_period(event))
 992			continue;
 993
 994		if (perf_event_overflow(event, &data, regs))
 995			cpu_pmu->disable(event);
 996	}
 997
 998	/*
 999	 * Handle the pending perf events.
1000	 *
1001	 * Note: this call *must* be run with interrupts disabled. For
1002	 * platforms that can have the PMU interrupts raised as an NMI, this
1003	 * will not work.
1004	 */
1005	irq_work_run();
1006
1007	return IRQ_HANDLED;
1008}
1009
1010static void armv7pmu_start(struct arm_pmu *cpu_pmu)
1011{
1012	unsigned long flags;
1013	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1014
1015	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1016	/* Enable all counters */
1017	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1018	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1019}
1020
1021static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
1022{
1023	unsigned long flags;
1024	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1025
1026	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1027	/* Disable all counters */
1028	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1029	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1030}
1031
1032static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1033				  struct perf_event *event)
1034{
1035	int idx;
1036	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1037	struct hw_perf_event *hwc = &event->hw;
1038	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
1039
1040	/* Always place a cycle counter into the cycle counter. */
1041	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1042		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1043			return -EAGAIN;
1044
1045		return ARMV7_IDX_CYCLE_COUNTER;
1046	}
1047
1048	/*
1049	 * For anything other than a cycle counter, try and use
1050	 * the events counters
1051	 */
1052	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1053		if (!test_and_set_bit(idx, cpuc->used_mask))
1054			return idx;
1055	}
1056
1057	/* The counters are all in use. */
1058	return -EAGAIN;
1059}
1060
1061static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1062				     struct perf_event *event)
1063{
1064	clear_bit(event->hw.idx, cpuc->used_mask);
1065}
1066
1067/*
1068 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1069 */
1070static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1071				     struct perf_event_attr *attr)
1072{
1073	unsigned long config_base = 0;
1074
1075	if (attr->exclude_idle)
1076		return -EPERM;
1077	if (attr->exclude_user)
1078		config_base |= ARMV7_EXCLUDE_USER;
1079	if (attr->exclude_kernel)
1080		config_base |= ARMV7_EXCLUDE_PL1;
1081	if (!attr->exclude_hv)
1082		config_base |= ARMV7_INCLUDE_HYP;
1083
1084	/*
1085	 * Install the filter into config_base as this is used to
1086	 * construct the event type.
1087	 */
1088	event->config_base = config_base;
1089
1090	return 0;
1091}
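For example, an attribute with only exclude_kernel set produces config_base = ARMV7_EXCLUDE_PL1 | ARMV7_INCLUDE_HYP = 0x88000000; ARMV7_EVTYPE_MASK (0xc80000ff) keeps exactly these filter bits (31, 30, 27) plus the 8-bit event code when the value is later written to PMXEVTYPER by armv7_pmnc_write_evtsel().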
1092
1093static void armv7pmu_reset(void *info)
1094{
1095	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
1096	u32 idx, nb_cnt = cpu_pmu->num_events, val;
1097
1098	if (cpu_pmu->secure_access) {
1099		asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
1100		val |= ARMV7_SDER_SUNIDEN;
1101		asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
1102	}
1103
1104	/* The counter and interrupt enable registers are unknown at reset. */
1105	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1106		armv7_pmnc_disable_counter(idx);
1107		armv7_pmnc_disable_intens(idx);
1108	}
1109
1110	/* Initialize & Reset PMNC: C and P bits */
1111	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1112}
1113
1114static int armv7_a8_map_event(struct perf_event *event)
1115{
1116	return armpmu_map_event(event, &armv7_a8_perf_map,
1117				&armv7_a8_perf_cache_map, 0xFF);
1118}
1119
1120static int armv7_a9_map_event(struct perf_event *event)
1121{
1122	return armpmu_map_event(event, &armv7_a9_perf_map,
1123				&armv7_a9_perf_cache_map, 0xFF);
1124}
1125
1126static int armv7_a5_map_event(struct perf_event *event)
1127{
1128	return armpmu_map_event(event, &armv7_a5_perf_map,
1129				&armv7_a5_perf_cache_map, 0xFF);
1130}
1131
1132static int armv7_a15_map_event(struct perf_event *event)
1133{
1134	return armpmu_map_event(event, &armv7_a15_perf_map,
1135				&armv7_a15_perf_cache_map, 0xFF);
1136}
1137
1138static int armv7_a7_map_event(struct perf_event *event)
1139{
1140	return armpmu_map_event(event, &armv7_a7_perf_map,
1141				&armv7_a7_perf_cache_map, 0xFF);
1142}
1143
1144static int armv7_a12_map_event(struct perf_event *event)
1145{
1146	return armpmu_map_event(event, &armv7_a12_perf_map,
1147				&armv7_a12_perf_cache_map, 0xFF);
1148}
1149
1150static int krait_map_event(struct perf_event *event)
1151{
1152	return armpmu_map_event(event, &krait_perf_map,
1153				&krait_perf_cache_map, 0xFFFFF);
1154}
1155
1156static int krait_map_event_no_branch(struct perf_event *event)
1157{
1158	return armpmu_map_event(event, &krait_perf_map_no_branch,
1159				&krait_perf_cache_map, 0xFFFFF);
1160}
1161
1162static int scorpion_map_event(struct perf_event *event)
1163{
1164	return armpmu_map_event(event, &scorpion_perf_map,
1165				&scorpion_perf_cache_map, 0xFFFFF);
1166}
1167
1168static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1169{
1170	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
1171	cpu_pmu->enable		= armv7pmu_enable_event;
1172	cpu_pmu->disable	= armv7pmu_disable_event;
1173	cpu_pmu->read_counter	= armv7pmu_read_counter;
1174	cpu_pmu->write_counter	= armv7pmu_write_counter;
1175	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
1176	cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
1177	cpu_pmu->start		= armv7pmu_start;
1178	cpu_pmu->stop		= armv7pmu_stop;
1179	cpu_pmu->reset		= armv7pmu_reset;
1180};
1181
1182static void armv7_read_num_pmnc_events(void *info)
1183{
1184	int *nb_cnt = info;
1185
1186	/* Read the nb of CNTx counters supported from PMNC */
1187	*nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1188
1189	/* Add the CPU cycles counter */
1190	*nb_cnt += 1;
1191}
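So a PMU whose PMNC N field (bits 15:11) reads 4 ends up with num_events = 5: indices 1..4 for the event counters plus index 0 for the cycle counter.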
1192
1193static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
1194{
1195	return smp_call_function_any(&arm_pmu->supported_cpus,
1196				     armv7_read_num_pmnc_events,
1197				     &arm_pmu->num_events, 1);
1198}
1199
1200static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1201{
1202	armv7pmu_init(cpu_pmu);
1203	cpu_pmu->name		= "armv7_cortex_a8";
1204	cpu_pmu->map_event	= armv7_a8_map_event;
1205	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1206		&armv7_pmuv1_events_attr_group;
1207	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1208		&armv7_pmu_format_attr_group;
1209	return armv7_probe_num_events(cpu_pmu);
1210}
1211
1212static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1213{
1214	armv7pmu_init(cpu_pmu);
1215	cpu_pmu->name		= "armv7_cortex_a9";
1216	cpu_pmu->map_event	= armv7_a9_map_event;
1217	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1218		&armv7_pmuv1_events_attr_group;
1219	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1220		&armv7_pmu_format_attr_group;
1221	return armv7_probe_num_events(cpu_pmu);
1222}
1223
1224static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1225{
1226	armv7pmu_init(cpu_pmu);
1227	cpu_pmu->name		= "armv7_cortex_a5";
1228	cpu_pmu->map_event	= armv7_a5_map_event;
1229	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1230		&armv7_pmuv1_events_attr_group;
1231	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1232		&armv7_pmu_format_attr_group;
1233	return armv7_probe_num_events(cpu_pmu);
1234}
1235
1236static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1237{
1238	armv7pmu_init(cpu_pmu);
1239	cpu_pmu->name		= "armv7_cortex_a15";
1240	cpu_pmu->map_event	= armv7_a15_map_event;
1241	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1242	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1243		&armv7_pmuv2_events_attr_group;
1244	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1245		&armv7_pmu_format_attr_group;
1246	return armv7_probe_num_events(cpu_pmu);
1247}
1248
1249static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1250{
1251	armv7pmu_init(cpu_pmu);
1252	cpu_pmu->name		= "armv7_cortex_a7";
1253	cpu_pmu->map_event	= armv7_a7_map_event;
1254	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1255	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1256		&armv7_pmuv2_events_attr_group;
1257	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1258		&armv7_pmu_format_attr_group;
1259	return armv7_probe_num_events(cpu_pmu);
1260}
1261
1262static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1263{
1264	armv7pmu_init(cpu_pmu);
1265	cpu_pmu->name		= "armv7_cortex_a12";
1266	cpu_pmu->map_event	= armv7_a12_map_event;
1267	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1268	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1269		&armv7_pmuv2_events_attr_group;
1270	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1271		&armv7_pmu_format_attr_group;
1272	return armv7_probe_num_events(cpu_pmu);
1273}
1274
1275static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
1276{
1277	int ret = armv7_a12_pmu_init(cpu_pmu);
1278	cpu_pmu->name = "armv7_cortex_a17";
1279	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1280		&armv7_pmuv2_events_attr_group;
1281	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1282		&armv7_pmu_format_attr_group;
1283	return ret;
1284}
1285
1286/*
1287 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1288 *
1289 *            31   30     24     16     8      0
1290 *            +--------------------------------+
1291 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1292 *            +--------------------------------+
1293 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1294 *            +--------------------------------+
1295 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1296 *            +--------------------------------+
1297 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1298 *            +--------------------------------+
1299 *              EN | G=3  | G=2  | G=1  | G=0
1300 *
1301 *  Event Encoding:
1302 *
1303 *      hwc->config_base = 0xNRCCG
1304 *
1305 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1306 *      R  = region register
1307 *      CC = class of events the group G is choosing from
1308 *      G  = group or particular event
1309 *
1310 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1311 *
1312 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1313 *  unit, etc.) while the event code (CC) corresponds to a particular class of
1314 *  events (interrupts for example). An event code is broken down into
1315 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1316 *  example).
1317 */
1318
1319#define KRAIT_EVENT		(1 << 16)
1320#define VENUM_EVENT		(2 << 16)
1321#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
1322#define PMRESRn_EN		BIT(31)
1323
1324#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
1325#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
1326#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
1327#define EVENT_VENUM(event)	(!!(event & VENUM_EVENT))	/* N=2 */
1328#define EVENT_CPU(event)	(!!(event & KRAIT_EVENT))	/* N=1 */
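
/*
 * Illustrative decode of the example encoding from the comment above
 * (the values follow directly from the macros; not part of the driver):
 *
 *	u32 cfg = 0x12021;
 *
 *	EVENT_CPU(cfg);		// 1: Krait CPU event (N = 1)
 *	EVENT_VENUM(cfg);	// 0: not a Venum VFP event
 *	EVENT_REGION(cfg);	// 2: R, selects PMRESR2
 *	EVENT_CODE(cfg);	// 0x02: CC, the event code
 *	EVENT_GROUP(cfg);	// 1: G, group 1 within the region
 */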
1329
1330static u32 krait_read_pmresrn(int n)
1331{
1332	u32 val;
1333
1334	switch (n) {
1335	case 0:
1336		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
1337		break;
1338	case 1:
1339		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
1340		break;
1341	case 2:
1342		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
1343		break;
1344	default:
1345		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1346	}
1347
1348	return val;
1349}
1350
1351static void krait_write_pmresrn(int n, u32 val)
1352{
1353	switch (n) {
1354	case 0:
1355		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
1356		break;
1357	case 1:
1358		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
1359		break;
1360	case 2:
1361		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
1362		break;
1363	default:
1364		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1365	}
1366}
1367
1368static u32 venum_read_pmresr(void)
1369{
1370	u32 val;
1371	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1372	return val;
1373}
1374
1375static void venum_write_pmresr(u32 val)
1376{
1377	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1378}
1379
1380static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
1381{
1382	u32 venum_new_val;
1383	u32 fp_new_val;
1384
1385	BUG_ON(preemptible());
1386	/* CPACR Enable CP10 and CP11 access */
1387	*venum_orig_val = get_copro_access();
1388	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
1389	set_copro_access(venum_new_val);
1390
1391	/* Enable FPEXC */
1392	*fp_orig_val = fmrx(FPEXC);
1393	fp_new_val = *fp_orig_val | FPEXC_EN;
1394	fmxr(FPEXC, fp_new_val);
1395}
1396
1397static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
1398{
1399	BUG_ON(preemptible());
1400	/* Restore FPEXC */
1401	fmxr(FPEXC, fp_orig_val);
1402	isb();
1403	/* Restore CPACR */
1404	set_copro_access(venum_orig_val);
1405}
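
/*
 * Every VPMRESR access below is bracketed this way (sketch):
 *
 *	u32 val, vval, fval;
 *
 *	venum_pre_pmresr(&vval, &fval);	// grant CP10/CP11 + FPEXC access
 *	val = venum_read_pmresr();
 *	// ... modify val ...
 *	venum_write_pmresr(val);
 *	venum_post_pmresr(vval, fval);	// restore CPACR and FPEXC
 */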
1406
1407static u32 krait_get_pmresrn_event(unsigned int region)
1408{
1409	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
1410					     KRAIT_PMRESR1_GROUP0,
1411					     KRAIT_PMRESR2_GROUP0 };
1412	return pmresrn_table[region];
1413}
1414
1415static void krait_evt_setup(int idx, u32 config_base)
1416{
1417	u32 val;
1418	u32 mask;
1419	u32 vval, fval;
1420	unsigned int region = EVENT_REGION(config_base);
1421	unsigned int group = EVENT_GROUP(config_base);
1422	unsigned int code = EVENT_CODE(config_base);
1423	unsigned int group_shift;
1424	bool venum_event = EVENT_VENUM(config_base);
1425
1426	group_shift = group * 8;
1427	mask = 0xff << group_shift;
1428
1429	/* Configure evtsel for the region and group */
1430	if (venum_event)
1431		val = KRAIT_VPMRESR0_GROUP0;
1432	else
1433		val = krait_get_pmresrn_event(region);
1434	val += group;
1435	/* Mix in mode-exclusion bits */
1436	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1437	armv7_pmnc_write_evtsel(idx, val);
1438
1439	if (venum_event) {
1440		venum_pre_pmresr(&vval, &fval);
1441		val = venum_read_pmresr();
1442		val &= ~mask;
1443		val |= code << group_shift;
1444		val |= PMRESRn_EN;
1445		venum_write_pmresr(val);
1446		venum_post_pmresr(vval, fval);
1447	} else {
1448		val = krait_read_pmresrn(region);
1449		val &= ~mask;
1450		val |= code << group_shift;
1451		val |= PMRESRn_EN;
1452		krait_write_pmresrn(region, val);
1453	}
1454}
1455
1456static u32 clear_pmresrn_group(u32 val, int group)
1457{
1458	u32 mask;
1459	int group_shift;
1460
1461	group_shift = group * 8;
1462	mask = 0xff << group_shift;
1463	val &= ~mask;
1464
1465	/* Keep the enable bit set if any other group in the region is in use */
1466	if (val & ~PMRESRn_EN)
1467		return val | PMRESRn_EN;
1468
1469	return 0;
1470}
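
/*
 * Worked example (illustrative values): with val = 0x80001101 and
 * group = 1, the group-1 byte is cleared giving 0x80000001; group 0
 * is still programmed, so PMRESRn_EN is kept and 0x80000001 is
 * returned. With val = 0x80000100 and group = 1, nothing else is in
 * use and the function returns 0, fully disabling the region.
 */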
1471
1472static void krait_clearpmu(u32 config_base)
1473{
1474	u32 val;
1475	u32 vval, fval;
1476	unsigned int region = EVENT_REGION(config_base);
1477	unsigned int group = EVENT_GROUP(config_base);
1478	bool venum_event = EVENT_VENUM(config_base);
1479
1480	if (venum_event) {
1481		venum_pre_pmresr(&vval, &fval);
1482		val = venum_read_pmresr();
1483		val = clear_pmresrn_group(val, group);
1484		venum_write_pmresr(val);
1485		venum_post_pmresr(vval, fval);
1486	} else {
1487		val = krait_read_pmresrn(region);
1488		val = clear_pmresrn_group(val, group);
1489		krait_write_pmresrn(region, val);
1490	}
1491}
1492
1493static void krait_pmu_disable_event(struct perf_event *event)
1494{
1495	unsigned long flags;
1496	struct hw_perf_event *hwc = &event->hw;
1497	int idx = hwc->idx;
1498	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1499	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1500
1501	/* Disable counter and interrupt */
1502	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1503
1504	/* Disable counter */
1505	armv7_pmnc_disable_counter(idx);
1506
1507	/*
1508	 * Clear pmresr code (if destined for PMNx counters)
1509	 */
1510	if (hwc->config_base & KRAIT_EVENT_MASK)
1511		krait_clearpmu(hwc->config_base);
1512
1513	/* Disable interrupt for this counter */
1514	armv7_pmnc_disable_intens(idx);
1515
1516	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1517}
1518
1519static void krait_pmu_enable_event(struct perf_event *event)
1520{
1521	unsigned long flags;
1522	struct hw_perf_event *hwc = &event->hw;
1523	int idx = hwc->idx;
1524	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1525	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1526
1527	/*
1528	 * Enable counter and interrupt, and set the counter to count
1529	 * the event that we're interested in.
1530	 */
1531	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1532
1533	/* Disable counter */
1534	armv7_pmnc_disable_counter(idx);
1535
1536	/*
1537	 * Set event (if destined for PMNx counters)
1538	 * We set the event for the cycle counter because we
1539	 * have the ability to perform event filtering.
1540	 */
1541	if (hwc->config_base & KRAIT_EVENT_MASK)
1542		krait_evt_setup(idx, hwc->config_base);
1543	else
1544		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1545
1546	/* Enable interrupt for this counter */
1547	armv7_pmnc_enable_intens(idx);
1548
1549	/* Enable counter */
1550	armv7_pmnc_enable_counter(idx);
1551
1552	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1553}
1554
1555static void krait_pmu_reset(void *info)
1556{
1557	u32 vval, fval;
1558	struct arm_pmu *cpu_pmu = info;
1559	u32 idx, nb_cnt = cpu_pmu->num_events;
1560
1561	armv7pmu_reset(info);
1562
1563	/* Clear all pmresrs */
1564	krait_write_pmresrn(0, 0);
1565	krait_write_pmresrn(1, 0);
1566	krait_write_pmresrn(2, 0);
1567
1568	venum_pre_pmresr(&vval, &fval);
1569	venum_write_pmresr(0);
1570	venum_post_pmresr(vval, fval);
1571
1572	/* Reset PMxEVCNTCR to a sane default */
1573	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1574		armv7_pmnc_select_counter(idx);
1575		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1576	}
1578}
1579
1580static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1581			      unsigned int group)
1582{
1583	int bit;
1584	struct hw_perf_event *hwc = &event->hw;
1585	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1586
1587	if (hwc->config_base & VENUM_EVENT)
1588		bit = KRAIT_VPMRESR0_GROUP0;
1589	else
1590		bit = krait_get_pmresrn_event(region);
1591	bit -= krait_get_pmresrn_event(0);
1592	bit += group;
1593	/*
1594	 * Lower bits are reserved for use by the counters (see
1595	 * armv7pmu_get_event_idx() for more info)
1596	 */
1597	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1598
1599	return bit;
1600}
1601
1602/*
1603 * We check for column exclusion constraints here.
1604 * Two events can't use the same group within a pmresr register.
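 * For example (illustrative encodings): 0x10021 and 0x10031 both
 * select group 1 of PMRESR0 (codes 2 and 3), so whichever event is
 * scheduled second fails the used_mask test below with -EAGAIN.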
1605 */
1606static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1607				   struct perf_event *event)
1608{
1609	int idx;
1610	int bit = -1;
1611	struct hw_perf_event *hwc = &event->hw;
1612	unsigned int region = EVENT_REGION(hwc->config_base);
1613	unsigned int code = EVENT_CODE(hwc->config_base);
1614	unsigned int group = EVENT_GROUP(hwc->config_base);
1615	bool venum_event = EVENT_VENUM(hwc->config_base);
1616	bool krait_event = EVENT_CPU(hwc->config_base);
1617
1618	if (venum_event || krait_event) {
1619		/* Ignore invalid events */
1620		if (group > 3 || region > 2)
1621			return -EINVAL;
1622		if (venum_event && (code & 0xe0))
1623			return -EINVAL;
1624
1625		bit = krait_event_to_bit(event, region, group);
1626		if (test_and_set_bit(bit, cpuc->used_mask))
1627			return -EAGAIN;
1628	}
1629
1630	idx = armv7pmu_get_event_idx(cpuc, event);
1631	if (idx < 0 && bit >= 0)
1632		clear_bit(bit, cpuc->used_mask);
1633
1634	return idx;
1635}
1636
1637static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1638				      struct perf_event *event)
1639{
1640	int bit;
1641	struct hw_perf_event *hwc = &event->hw;
1642	unsigned int region = EVENT_REGION(hwc->config_base);
1643	unsigned int group = EVENT_GROUP(hwc->config_base);
1644	bool venum_event = EVENT_VENUM(hwc->config_base);
1645	bool krait_event = EVENT_CPU(hwc->config_base);
1646
1647	armv7pmu_clear_event_idx(cpuc, event);
1648	if (venum_event || krait_event) {
1649		bit = krait_event_to_bit(event, region, group);
1650		clear_bit(bit, cpuc->used_mask);
1651	}
1652}
1653
1654static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1655{
1656	armv7pmu_init(cpu_pmu);
1657	cpu_pmu->name		= "armv7_krait";
1658	/* Some early versions of Krait don't support PC write events */
1659	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
1660				  "qcom,no-pc-write"))
1661		cpu_pmu->map_event = krait_map_event_no_branch;
1662	else
1663		cpu_pmu->map_event = krait_map_event;
1664	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1665	cpu_pmu->reset		= krait_pmu_reset;
1666	cpu_pmu->enable		= krait_pmu_enable_event;
1667	cpu_pmu->disable	= krait_pmu_disable_event;
1668	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
1669	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1670	return armv7_probe_num_events(cpu_pmu);
1671}
1672
1673/*
1674 * Scorpion Local Performance Monitor Register (LPMn)
1675 *
1676 *            31   30     24     16     8      0
1677 *            +--------------------------------+
1678 *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1679 *            +--------------------------------+
1680 *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1681 *            +--------------------------------+
1682 *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1683 *            +--------------------------------+
1684 *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
1685 *            +--------------------------------+
1686 *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1687 *            +--------------------------------+
1688 *              EN | G=3  | G=2  | G=1  | G=0
1689 *
1691 *  Event Encoding:
1692 *
1693 *      hwc->config_base = 0xNRCCG
1694 *
1695 *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
1696 *      R  = region register
1697 *      CC = class of events the group G is choosing from
1698 *      G  = group or particular event
1699 *
1700 *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1701 *
1702 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1703 *  unit, etc.) while the event code (CC) corresponds to a particular class of
1704 *  events (interrupts for example). An event code is broken down into
1705 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1706 *  example).
1707 */
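
/*
 * Illustrative decode with the shared EVENT_* macros (not part of
 * the driver): 0x13032 is a Scorpion CPU event with R = 3, i.e. the
 * L2LPM register, event code CC = 0x03 and group G = 2; a 0x2RCCG
 * encoding (VENUM_EVENT) would select VLPM instead.
 */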
1708
1709static u32 scorpion_read_pmresrn(int n)
1710{
1711	u32 val;
1712
1713	switch (n) {
1714	case 0:
1715		asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
1716		break;
1717	case 1:
1718		asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
1719		break;
1720	case 2:
1721		asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
1722		break;
1723	case 3:
1724		asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
1725		break;
1726	default:
1727		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1728	}
1729
1730	return val;
1731}
1732
1733static void scorpion_write_pmresrn(int n, u32 val)
1734{
1735	switch (n) {
1736	case 0:
1737		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
1738		break;
1739	case 1:
1740		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
1741		break;
1742	case 2:
1743		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
1744		break;
1745	case 3:
1746		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
1747		break;
1748	default:
1749		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1750	}
1751}
1752
1753static u32 scorpion_get_pmresrn_event(unsigned int region)
1754{
1755	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
1756					     SCORPION_LPM1_GROUP0,
1757					     SCORPION_LPM2_GROUP0,
1758					     SCORPION_L2LPM_GROUP0 };
1759	return pmresrn_table[region];
1760}
1761
1762static void scorpion_evt_setup(int idx, u32 config_base)
1763{
1764	u32 val;
1765	u32 mask;
1766	u32 vval, fval;
1767	unsigned int region = EVENT_REGION(config_base);
1768	unsigned int group = EVENT_GROUP(config_base);
1769	unsigned int code = EVENT_CODE(config_base);
1770	unsigned int group_shift;
1771	bool venum_event = EVENT_VENUM(config_base);
1772
1773	group_shift = group * 8;
1774	mask = 0xff << group_shift;
1775
1776	/* Configure evtsel for the region and group */
1777	if (venum_event)
1778		val = SCORPION_VLPM_GROUP0;
1779	else
1780		val = scorpion_get_pmresrn_event(region);
1781	val += group;
1782	/* Mix in mode-exclusion bits */
1783	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1784	armv7_pmnc_write_evtsel(idx, val);
1785
1786	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1787
1788	if (venum_event) {
1789		venum_pre_pmresr(&vval, &fval);
1790		val = venum_read_pmresr();
1791		val &= ~mask;
1792		val |= code << group_shift;
1793		val |= PMRESRn_EN;
1794		venum_write_pmresr(val);
1795		venum_post_pmresr(vval, fval);
1796	} else {
1797		val = scorpion_read_pmresrn(region);
1798		val &= ~mask;
1799		val |= code << group_shift;
1800		val |= PMRESRn_EN;
1801		scorpion_write_pmresrn(region, val);
1802	}
1803}
1804
1805static void scorpion_clearpmu(u32 config_base)
1806{
1807	u32 val;
1808	u32 vval, fval;
1809	unsigned int region = EVENT_REGION(config_base);
1810	unsigned int group = EVENT_GROUP(config_base);
1811	bool venum_event = EVENT_VENUM(config_base);
1812
1813	if (venum_event) {
1814		venum_pre_pmresr(&vval, &fval);
1815		val = venum_read_pmresr();
1816		val = clear_pmresrn_group(val, group);
1817		venum_write_pmresr(val);
1818		venum_post_pmresr(vval, fval);
1819	} else {
1820		val = scorpion_read_pmresrn(region);
1821		val = clear_pmresrn_group(val, group);
1822		scorpion_write_pmresrn(region, val);
1823	}
1824}
1825
1826static void scorpion_pmu_disable_event(struct perf_event *event)
1827{
1828	unsigned long flags;
1829	struct hw_perf_event *hwc = &event->hw;
1830	int idx = hwc->idx;
1831	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1832	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1833
1834	/* Disable counter and interrupt */
1835	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1836
1837	/* Disable counter */
1838	armv7_pmnc_disable_counter(idx);
1839
1840	/*
1841	 * Clear pmresr code (if destined for PMNx counters)
1842	 */
1843	if (hwc->config_base & KRAIT_EVENT_MASK)
1844		scorpion_clearpmu(hwc->config_base);
1845
1846	/* Disable interrupt for this counter */
1847	armv7_pmnc_disable_intens(idx);
1848
1849	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1850}
1851
1852static void scorpion_pmu_enable_event(struct perf_event *event)
1853{
1854	unsigned long flags;
1855	struct hw_perf_event *hwc = &event->hw;
1856	int idx = hwc->idx;
1857	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1858	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1859
1860	/*
1861	 * Enable counter and interrupt, and set the counter to count
1862	 * the event that we're interested in.
1863	 */
1864	raw_spin_lock_irqsave(&events->pmu_lock, flags);
1865
1866	/* Disable counter */
1867	armv7_pmnc_disable_counter(idx);
1868
1869	/*
1870	 * Set event (if destined for PMNx counters)
1871	 * We don't set the event for the cycle counter because we
1872	 * don't have the ability to perform event filtering.
1873	 */
1874	if (hwc->config_base & KRAIT_EVENT_MASK)
1875		scorpion_evt_setup(idx, hwc->config_base);
1876	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
1877		armv7_pmnc_write_evtsel(idx, hwc->config_base);
1878
1879	/* Enable interrupt for this counter */
1880	armv7_pmnc_enable_intens(idx);
1881
1882	/* Enable counter */
1883	armv7_pmnc_enable_counter(idx);
1884
1885	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1886}
1887
1888static void scorpion_pmu_reset(void *info)
1889{
1890	u32 vval, fval;
1891	struct arm_pmu *cpu_pmu = info;
1892	u32 idx, nb_cnt = cpu_pmu->num_events;
1893
1894	armv7pmu_reset(info);
1895
1896	/* Clear all pmresrs */
1897	scorpion_write_pmresrn(0, 0);
1898	scorpion_write_pmresrn(1, 0);
1899	scorpion_write_pmresrn(2, 0);
1900	scorpion_write_pmresrn(3, 0);
1901
1902	venum_pre_pmresr(&vval, &fval);
1903	venum_write_pmresr(0);
1904	venum_post_pmresr(vval, fval);
1905
1906	/* Reset PMxEVCNTCR to a sane default */
1907	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1908		armv7_pmnc_select_counter(idx);
1909		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1910	}
1911}
1912
1913static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
1914			      unsigned int group)
1915{
1916	int bit;
1917	struct hw_perf_event *hwc = &event->hw;
1918	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1919
1920	if (hwc->config_base & VENUM_EVENT)
1921		bit = SCORPION_VLPM_GROUP0;
1922	else
1923		bit = scorpion_get_pmresrn_event(region);
1924	bit -= scorpion_get_pmresrn_event(0);
1925	bit += group;
1926	/*
1927	 * Lower bits are reserved for use by the counters (see
1928	 * armv7pmu_get_event_idx() for more info)
1929	 */
1930	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1931
1932	return bit;
1933}
1934
1935/*
1936 * We check for column exclusion constraints here.
1937 * Two events can't use the same group within a pmresr register.
1938 */
1939static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1940				   struct perf_event *event)
1941{
1942	int idx;
1943	int bit = -1;
1944	struct hw_perf_event *hwc = &event->hw;
1945	unsigned int region = EVENT_REGION(hwc->config_base);
1946	unsigned int group = EVENT_GROUP(hwc->config_base);
1947	bool venum_event = EVENT_VENUM(hwc->config_base);
1948	bool scorpion_event = EVENT_CPU(hwc->config_base);
1949
1950	if (venum_event || scorpion_event) {
1951		/* Ignore invalid events */
1952		if (group > 3 || region > 3)
1953			return -EINVAL;
1954
1955		bit = scorpion_event_to_bit(event, region, group);
1956		if (test_and_set_bit(bit, cpuc->used_mask))
1957			return -EAGAIN;
1958	}
1959
1960	idx = armv7pmu_get_event_idx(cpuc, event);
1961	if (idx < 0 && bit >= 0)
1962		clear_bit(bit, cpuc->used_mask);
1963
1964	return idx;
1965}
1966
1967static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1968				      struct perf_event *event)
1969{
1970	int bit;
1971	struct hw_perf_event *hwc = &event->hw;
1972	unsigned int region = EVENT_REGION(hwc->config_base);
1973	unsigned int group = EVENT_GROUP(hwc->config_base);
1974	bool venum_event = EVENT_VENUM(hwc->config_base);
1975	bool scorpion_event = EVENT_CPU(hwc->config_base);
1976
1977	armv7pmu_clear_event_idx(cpuc, event);
1978	if (venum_event || scorpion_event) {
1979		bit = scorpion_event_to_bit(event, region, group);
1980		clear_bit(bit, cpuc->used_mask);
1981	}
1982}
1983
1984static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1985{
1986	armv7pmu_init(cpu_pmu);
1987	cpu_pmu->name		= "armv7_scorpion";
1988	cpu_pmu->map_event	= scorpion_map_event;
1989	cpu_pmu->reset		= scorpion_pmu_reset;
1990	cpu_pmu->enable		= scorpion_pmu_enable_event;
1991	cpu_pmu->disable	= scorpion_pmu_disable_event;
1992	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
1993	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1994	return armv7_probe_num_events(cpu_pmu);
1995}
1996
1997static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1998{
1999	armv7pmu_init(cpu_pmu);
2000	cpu_pmu->name		= "armv7_scorpion_mp";
2001	cpu_pmu->map_event	= scorpion_map_event;
2002	cpu_pmu->reset		= scorpion_pmu_reset;
2003	cpu_pmu->enable		= scorpion_pmu_enable_event;
2004	cpu_pmu->disable	= scorpion_pmu_disable_event;
2005	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
2006	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
2007	return armv7_probe_num_events(cpu_pmu);
2008}
2009
2010static const struct of_device_id armv7_pmu_of_device_ids[] = {
2011	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
2012	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
2013	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
2014	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
2015	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
2016	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
2017	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
2018	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
2019	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
2020	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
2021	{},
2022};
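
/*
 * A matching device-tree node might look like this (sketch; the
 * interrupt specifier is made up for illustration):
 *
 *	pmu {
 *		compatible = "arm,cortex-a9-pmu";
 *		interrupts = <0 120 4>;
 *	};
 */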
2023
2024static const struct pmu_probe_info armv7_pmu_probe_table[] = {
2025	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
2026	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
2027	{ /* sentinel value */ }
2028};
2029
2031static int armv7_pmu_device_probe(struct platform_device *pdev)
2032{
2033	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
2034				    armv7_pmu_probe_table);
2035}
2036
2037static struct platform_driver armv7_pmu_driver = {
2038	.driver		= {
2039		.name	= "armv7-pmu",
2040		.of_match_table = armv7_pmu_of_device_ids,
2041		.suppress_bind_attrs = true,
2042	},
2043	.probe		= armv7_pmu_device_probe,
2044};
2045
2046builtin_platform_driver(armv7_pmu_driver);
2047#endif	/* CONFIG_CPU_V7 */