v3.1
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \
    defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1)

#define M_CONFIG1_PC	(1 << 4)

#define M_PERFCTL_EXL			(1UL      <<  0)
#define M_PERFCTL_KERNEL		(1UL      <<  1)
#define M_PERFCTL_SUPERVISOR		(1UL      <<  2)
#define M_PERFCTL_USER			(1UL      <<  3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1UL      <<  4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff)  << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe)    << 16)
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#define    M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define    M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define    M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid)   << 22)
#define M_PERFCTL_WIDE			(1UL      << 30)
#define M_PERFCTL_MORE			(1UL      << 31)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
					M_PERFCTL_KERNEL |		\
					M_PERFCTL_USER |		\
					M_PERFCTL_SUPERVISOR |		\
					M_PERFCTL_INTERRUPT_ENABLE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif
#define M_PERFCTL_EVENT_MASK		0xfe0
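
/*
 * Worked example (illustrative, not part of the original source): a
 * control word that counts event 5 in user and kernel mode with the
 * overflow interrupt enabled would be
 *
 *	M_PERFCTL_EVENT(5) | M_PERFCTL_USER | M_PERFCTL_KERNEL |
 *		M_PERFCTL_INTERRUPT_ENABLE
 *	= (5 << 5) | (1 << 3) | (1 << 1) | (1 << 4) = 0xba
 *
 * Here the low five bits (covered by the non-MT M_PERFCTL_CONFIG_MASK,
 * 0x1f) select the modes and the interrupt, while the event number
 * lands in the bits covered by M_PERFCTL_EVENT_MASK (0xfe0).
 */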

#define M_COUNTER_OVERFLOW		(1UL      << 31)

#ifdef CONFIG_MIPS_MT_SMP
static int cpu_has_mipsmt_pertccounters;

/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#if defined(CONFIG_HW_PERF_EVENTS)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : smp_processor_id())
#else
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_data[smp_processor_id()].vpe_id)
#endif

/* Copied from op_model_mipsxx.c */
static inline unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}
#else /* !CONFIG_MIPS_MT_SMP */
#define vpe_id()	0

static inline unsigned int vpe_shift(void)
{
	return 0;
}
#endif /* CONFIG_MIPS_MT_SMP */

static inline unsigned int
counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

static inline unsigned int
counters_per_cpu_to_total(unsigned int counters)
{
	return counters << vpe_shift();
}
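
/*
 * Illustrative note: on a dual-VPE MT core without per-TC counters,
 * vpe_shift() is 1, so four physical counters are reported as
 * counters_total_to_per_cpu(4) == 2 counters per logical CPU;
 * counters_per_cpu_to_total() inverts that mapping.
 */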

#define __define_perf_accessors(r, n, np)				\
									\
static inline unsigned int r_c0_ ## r ## n(void)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static inline void w_c0_ ## r ## n(unsigned int value)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}									\

__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
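
/*
 * Illustrative note: with the pairings above, r_c0_perfcntr0() reads
 * perfcntr0 when vpe_id() is 0 but perfcntr2 when vpe_id() is 1, i.e.
 * each VPE sees "its" counters at indexes 0 and 1 while the sibling
 * VPE's registers sit two slots away.
 */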

static inline int __n_counters(void)
{
	if (!(read_c0_config1() & M_CONFIG1_PC))
		return 0;
	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
		return 1;
	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
		return 2;
	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
		return 3;

	return 4;
}

static inline int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;
	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(0);
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(0);
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(0);
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(0);
	}
}
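
/*
 * Illustrative note: the switch above deliberately falls through, so
 * calling reset_counters with counters == 4 clears all four
 * control/counter register pairs, while counters == 2 clears only
 * perfctrl1/perfcntr1 and perfctrl0/perfcntr0.
 */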

static inline u64
mipsxx_pmu_read_counter(unsigned int idx)
{
	switch (idx) {
	case 0:
		return r_c0_perfcntr0();
	case 1:
		return r_c0_perfcntr1();
	case 2:
		return r_c0_perfcntr2();
	case 3:
		return r_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static inline void
mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	switch (idx) {
	case 0:
		w_c0_perfcntr0(val);
		return;
	case 1:
		w_c0_perfcntr1(val);
		return;
	case 2:
		w_c0_perfcntr2(val);
		return;
	case 3:
		w_c0_perfcntr3(val);
		return;
	}
}

static inline unsigned int
mipsxx_pmu_read_control(unsigned int idx)
{
	switch (idx) {
	case 0:
		return r_c0_perfctrl0();
	case 1:
		return r_c0_perfctrl1();
	case 2:
		return r_c0_perfctrl2();
	case 3:
		return r_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static inline void
mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	switch (idx) {
	case 0:
		w_c0_perfctrl0(val);
		return;
	case 1:
		w_c0_perfctrl1(val);
		return;
	case 2:
		w_c0_perfctrl2(val);
		return;
	case 3:
		w_c0_perfctrl3(val);
		return;
	}
}

#ifdef CONFIG_MIPS_MT_SMP
static DEFINE_RWLOCK(pmuint_rwlock);
#endif

/* 24K/34K/1004K cores can share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};

/* 74K core has different branch event code. */
static const struct mips_perf_event mipsxx74Kcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};

/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};

/* 74K core has completely different cache event map. */
static const struct mips_perf_event mipsxx74Kcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	/* 74K core does not have specific DTLB events. */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};

#ifdef CONFIG_MIPS_MT_SMP
static void
check_and_calc_range(struct perf_event *event,
			const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void
check_and_calc_range(struct perf_event *event,
			const struct mips_perf_event *pev)
{
}
#endif

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Return the MIPS event descriptor for a generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu->map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow maximum flexibility in how each individual counter
	 * shared by the single CPU operates (the mode exclusion and the
	 * range).
	 */
	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;

	/* Calculate range bits and validate it. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= M_PERFCTL_USER;
	if (!attr->exclude_kernel) {
		hwc->config_base |= M_PERFCTL_KERNEL;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= M_PERFCTL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= M_PERFCTL_SUPERVISOR;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period  = MAX_PERIOD;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	event->destroy = hw_perf_event_destroy;

	return err;
}

static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int counters = mipspmu->num_counters;
	unsigned long flags;

	local_irq_save(flags);
	switch (counters) {
	case 4:
		cpuc->saved_ctrl[3] = r_c0_perfctrl3();
		w_c0_perfctrl3(cpuc->saved_ctrl[3] &
			~M_PERFCTL_COUNT_EVENT_WHENEVER);
	case 3:
		cpuc->saved_ctrl[2] = r_c0_perfctrl2();
		w_c0_perfctrl2(cpuc->saved_ctrl[2] &
			~M_PERFCTL_COUNT_EVENT_WHENEVER);
	case 2:
		cpuc->saved_ctrl[1] = r_c0_perfctrl1();
		w_c0_perfctrl1(cpuc->saved_ctrl[1] &
			~M_PERFCTL_COUNT_EVENT_WHENEVER);
	case 1:
		cpuc->saved_ctrl[0] = r_c0_perfctrl0();
		w_c0_perfctrl0(cpuc->saved_ctrl[0] &
			~M_PERFCTL_COUNT_EVENT_WHENEVER);
	}
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int counters = mipspmu->num_counters;
	unsigned long flags;

	local_irq_save(flags);
	switch (counters) {
	case 4:
		w_c0_perfctrl3(cpuc->saved_ctrl[3]);
	case 3:
		w_c0_perfctrl2(cpuc->saved_ctrl[2]);
	case 2:
		w_c0_perfctrl1(cpuc->saved_ctrl[1]);
	case 1:
		w_c0_perfctrl0(cpuc->saved_ctrl[0]);
	}
	local_irq_restore(flags);
}
 672
 673static int mipsxx_pmu_handle_shared_irq(void)
 674{
 675	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 676	struct perf_sample_data data;
 677	unsigned int counters = mipspmu->num_counters;
 678	unsigned int counter;
 679	int handled = IRQ_NONE;
 680	struct pt_regs *regs;
 681
 682	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
 683		return handled;
 684
 685	/*
 686	 * First we pause the local counters, so that when we are locked
 687	 * here, the counters are all paused. When it gets locked due to
 688	 * perf_disable(), the timer interrupt handler will be delayed.
 689	 *
 690	 * See also mipsxx_pmu_start().
 691	 */
 692	pause_local_counters();
 693#ifdef CONFIG_MIPS_MT_SMP
 694	read_lock(&pmuint_rwlock);
 695#endif
 696
 697	regs = get_irq_regs();
 698
 699	perf_sample_data_init(&data, 0);
 700
 701	switch (counters) {
 702#define HANDLE_COUNTER(n)						\
 703	case n + 1:							\
 704		if (test_bit(n, cpuc->used_mask)) {			\
 705			counter = r_c0_perfcntr ## n();			\
 706			if (counter & M_COUNTER_OVERFLOW) {		\
 707				w_c0_perfcntr ## n(counter &		\
 708						VALID_COUNT);		\
 709				if (test_and_change_bit(n, cpuc->msbs))	\
 710					handle_associated_event(cpuc,	\
 711						n, &data, regs);	\
 712				handled = IRQ_HANDLED;			\
 713			}						\
 714		}
 715	HANDLE_COUNTER(3)
 716	HANDLE_COUNTER(2)
 717	HANDLE_COUNTER(1)
 718	HANDLE_COUNTER(0)
 719	}
 720
 
 
 
 
 
 721	/*
 722	 * Do all the work for the pending perf events. We can do this
 723	 * in here because the performance counter interrupt is a regular
 724	 * interrupt, not NMI.
 725	 */
 726	if (handled == IRQ_HANDLED)
 727		irq_work_run();
 728
 729#ifdef CONFIG_MIPS_MT_SMP
 730	read_unlock(&pmuint_rwlock);
 731#endif
 732	resume_local_counters();
 733	return handled;
 734}
 735
 736static irqreturn_t
 737mipsxx_pmu_handle_irq(int irq, void *dev)
 738{
 739	return mipsxx_pmu_handle_shared_irq();
 740}
 741
 742static void mipsxx_pmu_start(void)
 743{
 744#ifdef CONFIG_MIPS_MT_SMP
 745	write_unlock(&pmuint_rwlock);
 746#endif
 747	resume_local_counters();
 748}
 749
 750/*
 751 * MIPS performance counters can be per-TC. The control registers can
 752 * not be directly accessed across CPUs. Hence if we want to do global
 753 * control, we need cross CPU calls. on_each_cpu() can help us, but we
 754 * can not make sure this function is called with interrupts enabled. So
 755 * here we pause local counters and then grab a rwlock and leave the
 756 * counters on other CPUs alone. If any counter interrupt raises while
 757 * we own the write lock, simply pause local counters on that CPU and
 758 * spin in the handler. Also we know we won't be switched to another
 759 * CPU after pausing local counters and before grabbing the lock.
 760 */
 761static void mipsxx_pmu_stop(void)
 762{
 763	pause_local_counters();
 764#ifdef CONFIG_MIPS_MT_SMP
 765	write_lock(&pmuint_rwlock);
 766#endif
 767}

static int
mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
			struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu->num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when the former kind of event takes the counter
		 * the latter kind of event wants to use, the "counter
		 * allocation" for the latter event will fail. If the two
		 * could be swapped dynamically, both would be satisfied,
		 * but we leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}
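
/*
 * Illustrative note: CNTR_EVEN | CNTR_ODD encodes to a cntr_mask of
 * 0xffff here, so e.g. the cycles event { 0x00, CNTR_EVEN | CNTR_ODD,
 * P } may take any free counter, while an odd-only event such as
 * branch misses { 0x02, CNTR_ODD, T } encodes to 0xaaaa and can only
 * land on counters 1 or 3.
 */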

static void
mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure interrupt enabled. */
		M_PERFCTL_INTERRUPT_ENABLE;
	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
	local_irq_restore(flags);
}

static void
mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}

/* 24K */
#define IS_UNSUPPORTED_24K_EVENT(r, b)					\
	((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 ||		\
	 (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 ||		\
	 (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 ||		\
	 (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) ||		\
	 ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_UNSUPPORTED_34K_EVENT(r, b)					\
	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 ||		\
	 (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) ||		\
	 ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_UNSUPPORTED_74K_EVENT(r, b)					\
	((r) == 5 || ((r) >= 135 && (r) <= 137) ||			\
	 ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 ||		\
	 (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) ||		\
	 (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 ||		\
	 (b) == 61 || (r) == 62 || (r) == 191 ||			\
	 ((b) >= 64 && (b) <= 127))
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_UNSUPPORTED_1004K_EVENT(r, b)				\
	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 ||		\
	 (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/*
 * Users can specify raw events 0-255, where 0-127 are for the events
 * of the even counters and 128-255 for the odd counters. That is, bit
 * 7 indicates the parity. So, for example, when the user wants event
 * number 15 on an odd counter (by referring to the user manual), 128
 * must be added to 15 as the event config, i.e. 143 (0x8F) is used.
 */
static const struct mips_perf_event *
mipsxx_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
		if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_1004K:
		if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	}

	return &raw_event;
}
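
/*
 * Illustrative usage note: with the encoding described above, counting
 * raw event 15 on an odd counter from user space would look something
 * like "perf stat -e r8f <cmd>", since 0x8f = 143 = 128 + 15.
 */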

static struct mips_pmu mipsxxcore_pmu = {
	.handle_irq = mipsxx_pmu_handle_irq,
	.handle_shared_irq = mipsxx_pmu_handle_shared_irq,
	.start = mipsxx_pmu_start,
	.stop = mipsxx_pmu_stop,
	.alloc_counter = mipsxx_pmu_alloc_counter,
	.read_counter = mipsxx_pmu_read_counter,
	.write_counter = mipsxx_pmu_write_counter,
	.enable_event = mipsxx_pmu_enable_event,
	.disable_event = mipsxx_pmu_disable_event,
	.map_raw_event = mipsxx_pmu_map_raw_event,
	.general_event_map = &mipsxxcore_event_map,
	.cache_event_map = &mipsxxcore_cache_map,
};

static struct mips_pmu mipsxx74Kcore_pmu = {
	.handle_irq = mipsxx_pmu_handle_irq,
	.handle_shared_irq = mipsxx_pmu_handle_shared_irq,
	.start = mipsxx_pmu_start,
	.stop = mipsxx_pmu_stop,
	.alloc_counter = mipsxx_pmu_alloc_counter,
	.read_counter = mipsxx_pmu_read_counter,
	.write_counter = mipsxx_pmu_write_counter,
	.enable_event = mipsxx_pmu_enable_event,
	.disable_event = mipsxx_pmu_disable_event,
	.map_raw_event = mipsxx_pmu_map_raw_event,
	.general_event_map = &mipsxx74Kcore_event_map,
	.cache_event_map = &mipsxx74Kcore_cache_map,
};

static int __init
init_hw_perf_events(void)
{
	int counters, irq;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_MT_SMP
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		/*
		 * Using platform specific interrupt controller defines.
		 */
		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
	} else {
#endif
		if (cp0_perfcount_irq >= 0)
			irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
		else
			irq = -1;
#ifdef MSC01E_INT_BASE
	}
#endif

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	switch (current_cpu_type()) {
	case CPU_24K:
		mipsxxcore_pmu.name = "mips/24K";
		mipsxxcore_pmu.num_counters = counters;
		mipsxxcore_pmu.irq = irq;
		mipspmu = &mipsxxcore_pmu;
		break;
	case CPU_34K:
		mipsxxcore_pmu.name = "mips/34K";
		mipsxxcore_pmu.num_counters = counters;
		mipsxxcore_pmu.irq = irq;
		mipspmu = &mipsxxcore_pmu;
		break;
	case CPU_74K:
		mipsxx74Kcore_pmu.name = "mips/74K";
		mipsxx74Kcore_pmu.num_counters = counters;
		mipsxx74Kcore_pmu.irq = irq;
		mipspmu = &mipsxx74Kcore_pmu;
		break;
	case CPU_1004K:
		mipsxxcore_pmu.name = "mips/1004K";
		mipsxxcore_pmu.num_counters = counters;
		mipsxxcore_pmu.irq = irq;
		mipspmu = &mipsxxcore_pmu;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
		return -ENODEV;
	}

	if (mipspmu)
		pr_cont("%s PMU enabled, %d counters available to each "
			"CPU, irq %d%s\n", mipspmu->name, counters, irq,
			irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);

#endif /* defined(CONFIG_CPU_MIPS32)... */
v4.17
/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance
	 * counter. MIPS CPUs vary in their performance counters, so they
	 * use this differently, and some may not use it at all.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
	#define CNTR_ALL	0xffffffff
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	int		irq;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static struct mips_pmu mipspmu;

#define M_PERFCTL_EVENT(event)		(((event) << MIPS_PERFCTRL_EVENT_S) & \
					 MIPS_PERFCTRL_EVENT)
#define M_PERFCTL_VPEID(vpe)		((vpe)	  << MIPS_PERFCTRL_VPEID_S)

#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter)		0
#else /* !CONFIG_CPU_BMIPS5000 */
#define M_PERFCTL_MT_EN(filter)		(filter)
#endif /* CONFIG_CPU_BMIPS5000 */

#define	   M_TC_EN_ALL			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
#define	   M_TC_EN_VPE			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
#define	   M_TC_EN_TC			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(MIPS_PERFCTRL_EXL |		\
					 MIPS_PERFCTRL_K |		\
					 MIPS_PERFCTRL_U |		\
					 MIPS_PERFCTRL_S |		\
					 MIPS_PERFCTRL_IE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif


#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static int cpu_has_mipsmt_pertccounters;

static DEFINE_RWLOCK(pmuint_rwlock);

#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : smp_processor_id())
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
#define vpe_id()	0

#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */

static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);

static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}
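
/*
 * Illustrative note: on VPE 1 the swizzle above exchanges counter
 * pairs, mapping indexes 0<->2 and 1<->3, so each VPE addresses its
 * own half of the four shared counters through the same 0-based idx.
 */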

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned, we must cast to truncate
		 * off the high bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}

static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when the former kind of event takes the counter
		 * the latter kind of event wants to use, the "counter
		 * allocation" for the latter event will fail. If the two
		 * could be swapped dynamically, both would be satisfied,
		 * but we leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure interrupt enabled. */
		MIPS_PERFCTRL_IE;
	if (IS_ENABLED(CONFIG_CPU_BMIPS5000))
		/* enable the counter for the calling thread */
		cpuc->saved_ctrl[idx] |=
			(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;

	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}

static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else	if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}
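
/*
 * Worked example (assuming the 32-bit counter configuration, where
 * init code elsewhere in this file sets mipspmu.overflow to 1ULL << 31
 * and max_period to (1ULL << 31) - 1): with sample_period = 1000000
 * and no accumulated underflow, the code above writes 0x80000000 -
 * 1000000 into the counter, so the overflow interrupt fires after
 * exactly one million occurrences of the event.
 */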

static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* To look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers can
 * not be directly accessed across CPUs. Hence if we want to do global
 * control, we need cross CPU calls. on_each_cpu() can help us, but we
 * can not make sure this function is called with interrupts enabled. So
 * here we pause local counters and then grab a rwlock and leave the
 * counters on other CPUs alone. If a counter interrupt is raised while
 * we own the write lock, we simply pause the local counters on that CPU
 * and spin in the handler. Also we know we won't be switched to another
 * CPU after pausing local counters and before grabbing the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_lock(&pmuint_rwlock);
#endif
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
				  IRQF_PERCPU | IRQF_NOBALANCING |
				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
				  IRQF_SHARED,
				  "mips_perf_pmu", &mipspmu);
		if (err) {
			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
				mipspmu.irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warn("The platform hasn't properly defined its interrupt controller\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, &mipspmu);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters; they
 * have specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= 0 && !cpu_online(event->cpu))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	return __hw_perf_event_init(event);
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};

static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}
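
/*
 * Worked example: encoding the 74K branch event { 0x27, CNTR_EVEN, T }
 * under CONFIG_MIPS_MT_SMP gives (T << 24) | (CNTR_EVEN & 0xffff00) |
 * 0x27 = 0x00555527, from which mipsxx_pmu_alloc_counter() later
 * recovers the counter mask as (event_base >> 8) & 0xffff = 0x5555.
 */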

static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);
	return &(*mipspmu.general_event_map)[idx];
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}
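
/*
 * Worked example: perf encodes HW_CACHE configs as type | (op << 8) |
 * (result << 16), so an L1D read miss is 0x00 | (0x00 << 8) |
 * (0x01 << 16) = 0x10000, which indexes
 * cache_event_map[L1D][OP_READ][RESULT_MISS] above.
 */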
 703
 704static int validate_group(struct perf_event *event)
 705{
 706	struct perf_event *sibling, *leader = event->group_leader;
 707	struct cpu_hw_events fake_cpuc;
 708
 709	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
 710
 711	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
 712		return -EINVAL;
 713
 714	for_each_sibling_event(sibling, leader) {
 715		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
 716			return -EINVAL;
 717	}
 718
 719	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
 720		return -EINVAL;
 721
 722	return 0;
 723}
 724
 725/* This is needed by specific irq handlers in perf_event_*.c */
 726static void handle_associated_event(struct cpu_hw_events *cpuc,
 727				    int idx, struct perf_sample_data *data,
 728				    struct pt_regs *regs)
 729{
 730	struct perf_event *event = cpuc->events[idx];
 731	struct hw_perf_event *hwc = &event->hw;
 732
 733	mipspmu_event_update(event, hwc, idx);
 734	data->period = event->hw.last_period;
 735	if (!mipspmu_event_set_period(event, hwc, idx))
 736		return;
 737
 738	if (perf_event_overflow(event, data, regs))
 739		mipsxx_pmu_disable_event(idx);
 740}
 741
 742
 743static int __n_counters(void)
 744{
 745	if (!cpu_has_perf)
 746		return 0;
 747	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
 748		return 1;
 749	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
 750		return 2;
 751	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
 752		return 3;
 753
 754	return 4;
 755}
 756
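/*
 * The pre-MIPS32/MIPS64 cores below predate the PerfCtl "M" probing
 * scheme, so their counter counts are hardwired.
 */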
 757static int n_counters(void)
 758{
 759	int counters;
 760
 761	switch (current_cpu_type()) {
 762	case CPU_R10000:
 763		counters = 2;
 764		break;
 765
 766	case CPU_R12000:
 767	case CPU_R14000:
 768	case CPU_R16000:
 769		counters = 4;
 770		break;
 771
 772	default:
 773		counters = __n_counters();
 774	}
 775
 776	return counters;
 777}
 778
 779static void reset_counters(void *arg)
 780{
 781	int counters = (int)(long)arg;
 782	switch (counters) {
 783	case 4:
 784		mipsxx_pmu_write_control(3, 0);
 785		mipspmu.write_counter(3, 0);	/* fall through */
 786	case 3:
 787		mipsxx_pmu_write_control(2, 0);
 788		mipspmu.write_counter(2, 0);	/* fall through */
 789	case 2:
 790		mipsxx_pmu_write_control(1, 0);
 791		mipspmu.write_counter(1, 0);	/* fall through */
 792	case 1:
 793		mipsxx_pmu_write_control(0, 0);
 794		mipspmu.write_counter(0, 0);
 795	}
 796}
 797
 798/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
 799static const struct mips_perf_event mipsxxcore_event_map
 800				[PERF_COUNT_HW_MAX] = {
 801	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
 802	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
 803	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
 804	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
 805};
 806
 807/* 74K/proAptiv core has different branch event code. */
 808static const struct mips_perf_event mipsxxcore_event_map2
 809				[PERF_COUNT_HW_MAX] = {
 810	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
 811	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
 812	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
 813	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
 814};
 815
 816static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
 817	[PERF_COUNT_HW_CPU_CYCLES]          = { 0x00, CNTR_EVEN | CNTR_ODD },
 818	[PERF_COUNT_HW_INSTRUCTIONS]        = { 0x01, CNTR_EVEN | CNTR_ODD },
 819	/* These only count dcache, not icache */
 820	[PERF_COUNT_HW_CACHE_REFERENCES]    = { 0x45, CNTR_EVEN | CNTR_ODD },
 821	[PERF_COUNT_HW_CACHE_MISSES]        = { 0x48, CNTR_EVEN | CNTR_ODD },
 822	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
 823	[PERF_COUNT_HW_BRANCH_MISSES]       = { 0x16, CNTR_EVEN | CNTR_ODD },
 824};
 825
 826static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = {
 827	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
 828	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
 829	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
 830	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
 831};
 832
 833static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
 834	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
 835	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
 836	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
 837	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
 838	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
 839	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
 840	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
 841};
 842
 843static const struct mips_perf_event bmips5000_event_map
 844				[PERF_COUNT_HW_MAX] = {
 845	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
 846	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
 847	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
 848};
 849
 850static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
 851	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
 852	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
 853	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
 854	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
 855	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
 856	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
 857};
 858
 859/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
 860static const struct mips_perf_event mipsxxcore_cache_map
 861				[PERF_COUNT_HW_CACHE_MAX]
 862				[PERF_COUNT_HW_CACHE_OP_MAX]
 863				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 864[C(L1D)] = {
 865	/*
 866	 * Like some other architectures (e.g. ARM), the performance
 867	 * counters don't differentiate between read and write
 868	 * accesses/misses, so this isn't strictly correct, but it's the
 869	 * best we can do. Writes and reads get combined.
 870	 */
 871	[C(OP_READ)] = {
 872		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
 873		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
 874	},
 875	[C(OP_WRITE)] = {
 876		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
 877		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
 878	},
 879},
 880[C(L1I)] = {
 881	[C(OP_READ)] = {
 882		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
 883		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
 884	},
 885	[C(OP_WRITE)] = {
 886		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
 887		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
 888	},
 889	[C(OP_PREFETCH)] = {
 890		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
 891		/*
 892		 * Note that MIPS has only "hit" events countable for
 893		 * the prefetch operation.
 894		 */
 895	},
 896},
 897[C(LL)] = {
 898	[C(OP_READ)] = {
 899		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
 900		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
 901	},
 902	[C(OP_WRITE)] = {
 903		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
 904		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
 905	},
 906},
 907[C(DTLB)] = {
 908	[C(OP_READ)] = {
 909		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
 910		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
 911	},
 912	[C(OP_WRITE)] = {
 913		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
 914		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
 915	},
 916},
 917[C(ITLB)] = {
 918	[C(OP_READ)] = {
 919		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
 920		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
 921	},
 922	[C(OP_WRITE)] = {
 923		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
 924		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
 925	},
 926},
 927[C(BPU)] = {
 928	/* Using the same code for *HW_BRANCH* */
 929	[C(OP_READ)] = {
 930		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
 931		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
 932	},
 933	[C(OP_WRITE)] = {
 934		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
 935		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
 936	},
 937},
 938};
 939
 940/* 74K/proAptiv core has completely different cache event map. */
 941static const struct mips_perf_event mipsxxcore_cache_map2
 942				[PERF_COUNT_HW_CACHE_MAX]
 943				[PERF_COUNT_HW_CACHE_OP_MAX]
 944				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 945[C(L1D)] = {
 946	/*
 947	 * Like some other architectures (e.g. ARM), the performance
 948	 * counters don't differentiate between read and write
 949	 * accesses/misses, so this isn't strictly correct, but it's the
 950	 * best we can do. Writes and reads get combined.
 951	 */
 952	[C(OP_READ)] = {
 953		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
 954		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
 955	},
 956	[C(OP_WRITE)] = {
 957		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
 958		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
 959	},
 960},
 961[C(L1I)] = {
 962	[C(OP_READ)] = {
 963		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
 964		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
 965	},
 966	[C(OP_WRITE)] = {
 967		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
 968		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
 969	},
 970	[C(OP_PREFETCH)] = {
 971		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
 972		/*
 973		 * Note that MIPS has only "hit" events countable for
 974		 * the prefetch operation.
 975		 */
 976	},
 977},
 978[C(LL)] = {
 979	[C(OP_READ)] = {
 980		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
 981		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
 982	},
 983	[C(OP_WRITE)] = {
 984		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
 985		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
 986	},
 987},
 988/*
 989 * 74K core does not have specific DTLB events. proAptiv core has
 990 * "speculative" DTLB events which are numbered 0x63 (even/odd) and
 991 * not included here. One can use raw events if really needed.
 992 */
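/*
 * Given the raw-event encoding described before mipsxx_pmu_map_raw_event()
 * below (bit 7 selects the odd counter bank), the proAptiv speculative
 * DTLB events would be reachable as raw configs 0x63 (even counter) and
 * 0xe3 (odd counter).
 */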
 993[C(ITLB)] = {
 994	[C(OP_READ)] = {
 995		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
 996		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
 997	},
 998	[C(OP_WRITE)] = {
 999		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
1000		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
1001	},
1002},
1003[C(BPU)] = {
1004	/* Using the same code for *HW_BRANCH* */
1005	[C(OP_READ)] = {
1006		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1007		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1008	},
1009	[C(OP_WRITE)] = {
1010		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1011		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1012	},
1013},
1014};
1015
1016static const struct mips_perf_event i6x00_cache_map
1017				[PERF_COUNT_HW_CACHE_MAX]
1018				[PERF_COUNT_HW_CACHE_OP_MAX]
1019				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1020[C(L1D)] = {
1021	[C(OP_READ)] = {
1022		[C(RESULT_ACCESS)]	= { 0x46, CNTR_EVEN | CNTR_ODD },
1023		[C(RESULT_MISS)]	= { 0x49, CNTR_EVEN | CNTR_ODD },
1024	},
1025	[C(OP_WRITE)] = {
1026		[C(RESULT_ACCESS)]	= { 0x47, CNTR_EVEN | CNTR_ODD },
1027		[C(RESULT_MISS)]	= { 0x4a, CNTR_EVEN | CNTR_ODD },
1028	},
1029},
1030[C(L1I)] = {
1031	[C(OP_READ)] = {
1032		[C(RESULT_ACCESS)]	= { 0x84, CNTR_EVEN | CNTR_ODD },
1033		[C(RESULT_MISS)]	= { 0x85, CNTR_EVEN | CNTR_ODD },
1034	},
1035},
1036[C(DTLB)] = {
1037	/* Can't distinguish read & write */
1038	[C(OP_READ)] = {
1039		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
1040		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
1041	},
1042	[C(OP_WRITE)] = {
1043		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
1044		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
1045	},
1046},
1047[C(BPU)] = {
1048	/* Conditional branches / mispredicted */
1049	[C(OP_READ)] = {
1050		[C(RESULT_ACCESS)]	= { 0x15, CNTR_EVEN | CNTR_ODD },
1051		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN | CNTR_ODD },
1052	},
1053},
1054};
1055
1056static const struct mips_perf_event loongson3_cache_map
1057				[PERF_COUNT_HW_CACHE_MAX]
1058				[PERF_COUNT_HW_CACHE_OP_MAX]
1059				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1060[C(L1D)] = {
1061	/*
1062	 * Like some other architectures (e.g. ARM), the performance
1063	 * counters don't differentiate between read and write
1064	 * accesses/misses, so this isn't strictly correct, but it's the
1065	 * best we can do. Writes and reads get combined.
1066	 */
1067	[C(OP_READ)] = {
1068		[C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
1069	},
1070	[C(OP_WRITE)] = {
1071		[C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
1072	},
1073},
1074[C(L1I)] = {
1075	[C(OP_READ)] = {
1076		[C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
1077	},
1078	[C(OP_WRITE)] = {
1079		[C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
1080	},
1081},
1082[C(DTLB)] = {
1083	[C(OP_READ)] = {
1084		[C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
1085	},
1086	[C(OP_WRITE)] = {
1087		[C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
1088	},
1089},
1090[C(ITLB)] = {
1091	[C(OP_READ)] = {
1092		[C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
1093	},
1094	[C(OP_WRITE)] = {
1095		[C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
1096	},
1097},
1098[C(BPU)] = {
1099	/* Using the same code for *HW_BRANCH* */
1100	[C(OP_READ)] = {
1101		[C(RESULT_ACCESS)]      = { 0x02, CNTR_EVEN },
1102		[C(RESULT_MISS)]        = { 0x02, CNTR_ODD },
1103	},
1104	[C(OP_WRITE)] = {
1105		[C(RESULT_ACCESS)]      = { 0x02, CNTR_EVEN },
1106		[C(RESULT_MISS)]        = { 0x02, CNTR_ODD },
1107	},
1108},
1109};
1110
1111/* BMIPS5000 */
1112static const struct mips_perf_event bmips5000_cache_map
1113				[PERF_COUNT_HW_CACHE_MAX]
1114				[PERF_COUNT_HW_CACHE_OP_MAX]
1115				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1116[C(L1D)] = {
1117	/*
1118	 * Like some other architectures (e.g. ARM), the performance
1119	 * counters don't differentiate between read and write
1120	 * accesses/misses, so this isn't strictly correct, but it's the
1121	 * best we can do. Writes and reads get combined.
1122	 */
1123	[C(OP_READ)] = {
1124		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1125		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1126	},
1127	[C(OP_WRITE)] = {
1128		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1129		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1130	},
1131},
1132[C(L1I)] = {
1133	[C(OP_READ)] = {
1134		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1135		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1136	},
1137	[C(OP_WRITE)] = {
1138		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1139		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1140	},
1141	[C(OP_PREFETCH)] = {
1142		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
1143		/*
1144		 * Note that MIPS has only "hit" events countable for
1145		 * the prefetch operation.
1146		 */
1147	},
1148},
1149[C(LL)] = {
1150	[C(OP_READ)] = {
1151		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1152		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1153	},
1154	[C(OP_WRITE)] = {
1155		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1156		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1157	},
1158},
1159[C(BPU)] = {
1160	/* Using the same code for *HW_BRANCH* */
1161	[C(OP_READ)] = {
1162		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1163	},
1164	[C(OP_WRITE)] = {
1165		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1166	},
1167},
1168};
1169
1170
1171static const struct mips_perf_event octeon_cache_map
1172				[PERF_COUNT_HW_CACHE_MAX]
1173				[PERF_COUNT_HW_CACHE_OP_MAX]
1174				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1175[C(L1D)] = {
1176	[C(OP_READ)] = {
1177		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
1178		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
1179	},
1180	[C(OP_WRITE)] = {
1181		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
1182	},
1183},
1184[C(L1I)] = {
1185	[C(OP_READ)] = {
1186		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
1187	},
1188	[C(OP_PREFETCH)] = {
1189		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
1190	},
1191},
1192[C(DTLB)] = {
1193	/*
1194	 * Only general DTLB misses are counted, so use the same event for
1195	 * read and write.
1196	 */
1197	[C(OP_READ)] = {
1198		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1199	},
1200	[C(OP_WRITE)] = {
1201		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1202	},
1203},
1204[C(ITLB)] = {
1205	[C(OP_READ)] = {
1206		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
1207	},
1208},
1209};
1210
1211static const struct mips_perf_event xlp_cache_map
1212				[PERF_COUNT_HW_CACHE_MAX]
1213				[PERF_COUNT_HW_CACHE_OP_MAX]
1214				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1215[C(L1D)] = {
1216	[C(OP_READ)] = {
1217		[C(RESULT_ACCESS)]	= { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
1218		[C(RESULT_MISS)]	= { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
1219	},
1220	[C(OP_WRITE)] = {
1221		[C(RESULT_ACCESS)]	= { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
1222		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
1223	},
1224},
1225[C(L1I)] = {
1226	[C(OP_READ)] = {
1227		[C(RESULT_ACCESS)]	= { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
1228		[C(RESULT_MISS)]	= { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
1229	},
1230},
1231[C(LL)] = {
1232	[C(OP_READ)] = {
1233		[C(RESULT_ACCESS)]	= { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
1234		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
1235	},
1236	[C(OP_WRITE)] = {
1237		[C(RESULT_ACCESS)]	= { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
1238		[C(RESULT_MISS)]	= { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
1239	},
1240},
1241[C(DTLB)] = {
1242	/*
1243	 * Only general DTLB misses are counted, so use the same event for
1244	 * read and write.
1245	 */
1246	[C(OP_READ)] = {
1247		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
1248	},
1249	[C(OP_WRITE)] = {
1250		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
1251	},
1252},
1253[C(ITLB)] = {
1254	[C(OP_READ)] = {
1255		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
1256	},
1257	[C(OP_WRITE)] = {
1258		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
1259	},
1260},
1261[C(BPU)] = {
1262	[C(OP_READ)] = {
1263		[C(RESULT_MISS)]	= { 0x25, CNTR_ALL },
1264	},
1265},
1266};
1267
1268#ifdef CONFIG_MIPS_MT_SMP
1269static void check_and_calc_range(struct perf_event *event,
1270				 const struct mips_perf_event *pev)
1271{
1272	struct hw_perf_event *hwc = &event->hw;
1273
1274	if (event->cpu >= 0) {
1275		if (pev->range > V) {
1276			/*
1277			 * The user selected an event that is processor
1278			 * wide, while expecting it to be VPE wide.
1279			 */
1280			hwc->config_base |= M_TC_EN_ALL;
1281		} else {
1282			/*
1283			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
1284			 * for both CPUs.
1285			 */
1286			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
1287			hwc->config_base |= M_TC_EN_VPE;
1288		}
1289	} else
1290		hwc->config_base |= M_TC_EN_ALL;
1291}
1292#else
1293static void check_and_calc_range(struct perf_event *event,
1294				 const struct mips_perf_event *pev)
1295{
1296}
1297#endif
1298
1299static int __hw_perf_event_init(struct perf_event *event)
1300{
1301	struct perf_event_attr *attr = &event->attr;
1302	struct hw_perf_event *hwc = &event->hw;
1303	const struct mips_perf_event *pev;
1304	int err;
1305
1306	/* Return the MIPS event descriptor for a generic perf event. */
1307	if (PERF_TYPE_HARDWARE == event->attr.type) {
1308		if (event->attr.config >= PERF_COUNT_HW_MAX)
1309			return -EINVAL;
1310		pev = mipspmu_map_general_event(event->attr.config);
1311	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
1312		pev = mipspmu_map_cache_event(event->attr.config);
1313	} else if (PERF_TYPE_RAW == event->attr.type) {
1314		/* We are working on the global raw event. */
1315		mutex_lock(&raw_event_mutex);
1316		pev = mipspmu.map_raw_event(event->attr.config);
1317	} else {
1318		/* The event type is not (yet) supported. */
1319		return -EOPNOTSUPP;
1320	}
1321
1322	if (IS_ERR(pev)) {
1323		if (PERF_TYPE_RAW == event->attr.type)
1324			mutex_unlock(&raw_event_mutex);
1325		return PTR_ERR(pev);
1326	}
1327
1328	/*
1329	 * We allow maximum flexibility in how each individual counter shared
1330	 * by a single CPU operates (the mode exclusion and the range).
1331	 */
1332	hwc->config_base = MIPS_PERFCTRL_IE;
1333
1334	/* Calculate range bits and validate it. */
1335	if (num_possible_cpus() > 1)
1336		check_and_calc_range(event, pev);
1337
1338	hwc->event_base = mipspmu_perf_event_encode(pev);
1339	if (PERF_TYPE_RAW == event->attr.type)
1340		mutex_unlock(&raw_event_mutex);
1341
1342	if (!attr->exclude_user)
1343		hwc->config_base |= MIPS_PERFCTRL_U;
1344	if (!attr->exclude_kernel) {
1345		hwc->config_base |= MIPS_PERFCTRL_K;
1346		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
1347		hwc->config_base |= MIPS_PERFCTRL_EXL;
1348	}
1349	if (!attr->exclude_hv)
1350		hwc->config_base |= MIPS_PERFCTRL_S;
1351
1352	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
1353	/*
1354	 * The event may belong to another CPU. We do not assign it a
1355	 * local counter for now.
1356	 */
1357	hwc->idx = -1;
1358	hwc->config = 0;
1359
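	/*
	 * Counting (non-sampling) events arrive with no period set; seed
	 * them with the maximum period so that overflow interrupts stay
	 * as rare as possible.
	 */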
1360	if (!hwc->sample_period) {
1361		hwc->sample_period  = mipspmu.max_period;
1362		hwc->last_period    = hwc->sample_period;
1363		local64_set(&hwc->period_left, hwc->sample_period);
1364	}
1365
1366	err = 0;
1367	if (event->group_leader != event)
1368		err = validate_group(event);
1369
1370	event->destroy = hw_perf_event_destroy;
1371
1372	if (err)
1373		event->destroy(event);
1374
1375	return err;
1376}
1377
1378static void pause_local_counters(void)
1379{
1380	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1381	int ctr = mipspmu.num_counters;
1382	unsigned long flags;
1383
1384	local_irq_save(flags);
1385	do {
1386		ctr--;
1387		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
1388		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
1389					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1390	} while (ctr > 0);
1391	local_irq_restore(flags);
1392}
1393
1394static void resume_local_counters(void)
1395{
1396	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1397	int ctr = mipspmu.num_counters;
1398
1399	do {
1400		ctr--;
1401		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
1402	} while (ctr > 0);
1403}
1404
1405static int mipsxx_pmu_handle_shared_irq(void)
1406{
1407	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1408	struct perf_sample_data data;
1409	unsigned int counters = mipspmu.num_counters;
1410	u64 counter;
1411	int handled = IRQ_NONE;
1412	struct pt_regs *regs;
1413
1414	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
1415		return handled;
1416	/*
1417	 * First we pause the local counters, so that while we are locked
1418	 * here all counters stay paused. When the lock is taken due to
1419	 * perf_disable(), the timer interrupt handler will be delayed.
1420	 *
1421	 * See also mipsxx_pmu_start().
1422	 */
1423	pause_local_counters();
1424#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1425	read_lock(&pmuint_rwlock);
1426#endif
1427
1428	regs = get_irq_regs();
1429
1430	perf_sample_data_init(&data, 0, 0);
1431
1432	switch (counters) {
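	/*
	 * The cases below fall through deliberately: entering at
	 * "case counters" checks every in-use counter from counters - 1
	 * down to 0.
	 */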
1433#define HANDLE_COUNTER(n)						\
1434	case n + 1:							\
1435		if (test_bit(n, cpuc->used_mask)) {			\
1436			counter = mipspmu.read_counter(n);		\
1437			if (counter & mipspmu.overflow) {		\
1438				handle_associated_event(cpuc, n, &data, regs); \
1439				handled = IRQ_HANDLED;			\
1440			}						\
1441		}
1442	HANDLE_COUNTER(3)
1443	HANDLE_COUNTER(2)
1444	HANDLE_COUNTER(1)
1445	HANDLE_COUNTER(0)
1446	}
1447
1448#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1449	read_unlock(&pmuint_rwlock);
1450#endif
1451	resume_local_counters();
1452
1453	/*
1454	 * Do all the work for the pending perf events. We can do this
1455	 * in here because the performance counter interrupt is a regular
1456	 * interrupt, not an NMI.
1457	 */
1458	if (handled == IRQ_HANDLED)
1459		irq_work_run();
1460
1461	return handled;
1462}
1463
1464static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1465{
1466	return mipsxx_pmu_handle_shared_irq();
1467}
1468
1469/* 24K */
1470#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
1471	((b) == 0 || (b) == 1 || (b) == 11)
1472
1473/* 34K */
1474#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
1475	((b) == 0 || (b) == 1 || (b) == 11)
1476#ifdef CONFIG_MIPS_MT_SMP
1477#define IS_RANGE_P_34K_EVENT(r, b)					\
1478	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1479	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
1480	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
1481	 ((b) >= 64 && (b) <= 67))
1482#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
1483#endif
1484
1485/* 74K */
1486#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
1487	((b) == 0 || (b) == 1)
1488
1489/* proAptiv */
1490#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
1491	((b) == 0 || (b) == 1)
1492/* P5600 */
1493#define IS_BOTH_COUNTERS_P5600_EVENT(b)					\
1494	((b) == 0 || (b) == 1)
1495
1496/* 1004K */
1497#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
1498	((b) == 0 || (b) == 1 || (b) == 11)
1499#ifdef CONFIG_MIPS_MT_SMP
1500#define IS_RANGE_P_1004K_EVENT(r, b)					\
1501	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1502	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
1503	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
1504	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
1505	 ((b) >= 64 && (b) <= 67))
1506#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
1507#endif
1508
1509/* interAptiv */
1510#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
1511	((b) == 0 || (b) == 1 || (b) == 11)
1512#ifdef CONFIG_MIPS_MT_SMP
1513/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
1514#define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
1515	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1516	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
1517	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
1518	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
1519	 ((b) >= 64 && (b) <= 67))
1520#define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
1521#endif
1522
1523/* BMIPS5000 */
1524#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
1525	((b) == 0 || (b) == 1)
1526
1527
1528/*
1529 * On most cores the user can use raw events 0-255: 0-127 select events
1530 * on the even counters and 128-255 select events on the odd counters,
1531 * i.e. bit 7 acts as the even/odd bank selector. For example, to count
1532 * event 15 (as numbered in the core's user manual) on an odd counter,
1533 * add 128 to 15 and pass 143 (0x8f) as the event config.
1534 *
1535 * Some newer cores have even more events; there the user can use raw
1536 * events 0-511, where 0-255 select events on the even counters and
1537 * 256-511 select events on the odd counters, with bit 8 acting as the
1538 * even/odd bank selector.
1539 */
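/*
 * Illustrative usage, assuming the perf tool's raw event syntax: on a
 * core with 7-bit event numbers, "perf stat -e r8f" requests event 15
 * on an odd counter, per the encoding above.
 */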
1540static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1541{
1542	/* currently most cores have 7-bit event numbers */
1543	unsigned int raw_id = config & 0xff;
1544	unsigned int base_id = raw_id & 0x7f;
1545
1546	switch (current_cpu_type()) {
1547	case CPU_24K:
1548		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
1549			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1550		else
1551			raw_event.cntr_mask =
1552				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1553#ifdef CONFIG_MIPS_MT_SMP
1554		/*
1555		 * This actually does nothing: non-multithreading
1556		 * CPUs never check or calculate the range.
1557		 */
1558		raw_event.range = P;
1559#endif
1560		break;
1561	case CPU_34K:
1562		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
1563			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1564		else
1565			raw_event.cntr_mask =
1566				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1567#ifdef CONFIG_MIPS_MT_SMP
1568		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
1569			raw_event.range = P;
1570		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
1571			raw_event.range = V;
1572		else
1573			raw_event.range = T;
1574#endif
1575		break;
1576	case CPU_74K:
1577	case CPU_1074K:
1578		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
1579			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1580		else
1581			raw_event.cntr_mask =
1582				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1583#ifdef CONFIG_MIPS_MT_SMP
1584		raw_event.range = P;
1585#endif
1586		break;
1587	case CPU_PROAPTIV:
1588		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
1589			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1590		else
1591			raw_event.cntr_mask =
1592				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1593#ifdef CONFIG_MIPS_MT_SMP
1594		raw_event.range = P;
1595#endif
1596		break;
1597	case CPU_P5600:
1598	case CPU_P6600:
1599		/* 8-bit event numbers */
1600		raw_id = config & 0x1ff;
1601		base_id = raw_id & 0xff;
1602		if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
1603			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1604		else
1605			raw_event.cntr_mask =
1606				raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
1607#ifdef CONFIG_MIPS_MT_SMP
1608		raw_event.range = P;
1609#endif
1610		break;
1611	case CPU_I6400:
1612	case CPU_I6500:
1613		/* 8-bit event numbers */
1614		base_id = config & 0xff;
1615		raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1616		break;
1617	case CPU_1004K:
1618		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
1619			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1620		else
1621			raw_event.cntr_mask =
1622				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1623#ifdef CONFIG_MIPS_MT_SMP
1624		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
1625			raw_event.range = P;
1626		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
1627			raw_event.range = V;
1628		else
1629			raw_event.range = T;
1630#endif
1631		break;
1632	case CPU_INTERAPTIV:
1633		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
1634			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1635		else
1636			raw_event.cntr_mask =
1637				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1638#ifdef CONFIG_MIPS_MT_SMP
1639		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
1640			raw_event.range = P;
1641		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
1642			raw_event.range = V;
1643		else
1644			raw_event.range = T;
1645#endif
1646		break;
1647	case CPU_BMIPS5000:
1648		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
1649			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1650		else
1651			raw_event.cntr_mask =
1652				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1653		break;
1654	case CPU_LOONGSON3:
1655		raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1656		break;
1657	}
1658
1659	raw_event.event_id = base_id;
1660
1661	return &raw_event;
1662}
1663
1664static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
1665{
1666	unsigned int raw_id = config & 0xff;
1667	unsigned int base_id = raw_id & 0x7f;
1668
1669
1670	raw_event.cntr_mask = CNTR_ALL;
1671	raw_event.event_id = base_id;
1672
1673	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
1674		if (base_id > 0x42)
1675			return ERR_PTR(-EOPNOTSUPP);
1676	} else {
1677		if (base_id > 0x3a)
1678			return ERR_PTR(-EOPNOTSUPP);
1679	}
1680
1681	switch (base_id) {
1682	case 0x00:
1683	case 0x0f:
1684	case 0x1e:
1685	case 0x1f:
1686	case 0x2f:
1687	case 0x34:
1688	case 0x3b ... 0x3f:
1689		return ERR_PTR(-EOPNOTSUPP);
1690	default:
1691		break;
1692	}
1693
1694	return &raw_event;
1695}
1696
1697static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
1698{
1699	unsigned int raw_id = config & 0xff;
1700
1701	/* Only 1-63 are defined */
1702	if ((raw_id < 0x01) || (raw_id > 0x3f))
1703		return ERR_PTR(-EOPNOTSUPP);
1704
1705	raw_event.cntr_mask = CNTR_ALL;
1706	raw_event.event_id = raw_id;
1707
1708	return &raw_event;
1709}
1710
1711static int __init
1712init_hw_perf_events(void)
1713{
1714	int counters, irq;
1715	int counter_bits;
1716
1717	pr_info("Performance counters: ");
1718
1719	counters = n_counters();
1720	if (counters == 0) {
1721		pr_cont("No available PMU.\n");
1722		return -ENODEV;
1723	}
1724
1725#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1726	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
1727	if (!cpu_has_mipsmt_pertccounters)
1728		counters = counters_total_to_per_cpu(counters);
1729#endif
1730
1731	if (get_c0_perfcount_int)
1732		irq = get_c0_perfcount_int();
1733	else if (cp0_perfcount_irq >= 0)
1734		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1735	else
1736		irq = -1;
1737
1738	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
1739
1740	switch (current_cpu_type()) {
1741	case CPU_24K:
1742		mipspmu.name = "mips/24K";
1743		mipspmu.general_event_map = &mipsxxcore_event_map;
1744		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1745		break;
1746	case CPU_34K:
1747		mipspmu.name = "mips/34K";
1748		mipspmu.general_event_map = &mipsxxcore_event_map;
1749		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1750		break;
1751	case CPU_74K:
1752		mipspmu.name = "mips/74K";
1753		mipspmu.general_event_map = &mipsxxcore_event_map2;
1754		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1755		break;
1756	case CPU_PROAPTIV:
1757		mipspmu.name = "mips/proAptiv";
1758		mipspmu.general_event_map = &mipsxxcore_event_map2;
1759		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1760		break;
1761	case CPU_P5600:
1762		mipspmu.name = "mips/P5600";
1763		mipspmu.general_event_map = &mipsxxcore_event_map2;
1764		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1765		break;
1766	case CPU_P6600:
1767		mipspmu.name = "mips/P6600";
1768		mipspmu.general_event_map = &mipsxxcore_event_map2;
1769		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1770		break;
1771	case CPU_I6400:
1772		mipspmu.name = "mips/I6400";
1773		mipspmu.general_event_map = &i6x00_event_map;
1774		mipspmu.cache_event_map = &i6x00_cache_map;
1775		break;
1776	case CPU_I6500:
1777		mipspmu.name = "mips/I6500";
1778		mipspmu.general_event_map = &i6x00_event_map;
1779		mipspmu.cache_event_map = &i6x00_cache_map;
1780		break;
1781	case CPU_1004K:
1782		mipspmu.name = "mips/1004K";
1783		mipspmu.general_event_map = &mipsxxcore_event_map;
1784		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1785		break;
1786	case CPU_1074K:
1787		mipspmu.name = "mips/1074K";
1788		mipspmu.general_event_map = &mipsxxcore_event_map;
1789		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1790		break;
1791	case CPU_INTERAPTIV:
1792		mipspmu.name = "mips/interAptiv";
1793		mipspmu.general_event_map = &mipsxxcore_event_map;
1794		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1795		break;
1796	case CPU_LOONGSON1:
1797		mipspmu.name = "mips/loongson1";
1798		mipspmu.general_event_map = &mipsxxcore_event_map;
1799		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1800		break;
1801	case CPU_LOONGSON3:
1802		mipspmu.name = "mips/loongson3";
1803		mipspmu.general_event_map = &loongson3_event_map;
1804		mipspmu.cache_event_map = &loongson3_cache_map;
1805		break;
1806	case CPU_CAVIUM_OCTEON:
1807	case CPU_CAVIUM_OCTEON_PLUS:
1808	case CPU_CAVIUM_OCTEON2:
1809		mipspmu.name = "octeon";
1810		mipspmu.general_event_map = &octeon_event_map;
1811		mipspmu.cache_event_map = &octeon_cache_map;
1812		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
1813		break;
1814	case CPU_BMIPS5000:
1815		mipspmu.name = "BMIPS5000";
1816		mipspmu.general_event_map = &bmips5000_event_map;
1817		mipspmu.cache_event_map = &bmips5000_cache_map;
1818		break;
1819	case CPU_XLP:
1820		mipspmu.name = "xlp";
1821		mipspmu.general_event_map = &xlp_event_map;
1822		mipspmu.cache_event_map = &xlp_cache_map;
1823		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
1824		break;
1825	default:
1826		pr_cont("Either hardware does not support performance "
1827			"counters, or not yet implemented.\n");
1828		return -ENODEV;
1829	}
1830
1831	mipspmu.num_counters = counters;
1832	mipspmu.irq = irq;
1833
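	/*
	 * The W bit in PerfCtl0 indicates that the counters are 64 bits
	 * wide; size max_period and the overflow bit accordingly.
	 */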
1834	if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
1835		mipspmu.max_period = (1ULL << 63) - 1;
1836		mipspmu.valid_count = (1ULL << 63) - 1;
1837		mipspmu.overflow = 1ULL << 63;
1838		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
1839		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
1840		counter_bits = 64;
1841	} else {
1842		mipspmu.max_period = (1ULL << 31) - 1;
1843		mipspmu.valid_count = (1ULL << 31) - 1;
1844		mipspmu.overflow = 1ULL << 31;
1845		mipspmu.read_counter = mipsxx_pmu_read_counter;
1846		mipspmu.write_counter = mipsxx_pmu_write_counter;
1847		counter_bits = 32;
1848	}
1849
1850	on_each_cpu(reset_counters, (void *)(long)counters, 1);
1851
1852	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
1853		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
1854		irq < 0 ? " (shared with timer interrupt)" : "");
1855
1856	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1857
1858	return 0;
1859}
1860early_initcall(init_hw_perf_events);