   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Linux performance counter support for MIPS.
   4 *
   5 * Copyright (C) 2010 MIPS Technologies, Inc.
   6 * Copyright (C) 2011 Cavium Networks, Inc.
   7 * Author: Deng-Cheng Zhu
   8 *
   9 * This code is based on the implementation for ARM, which is in turn
  10 * based on the sparc64 perf event code and the x86 code. Performance
  11 * counter access is based on the MIPS Oprofile code. And the callchain
  12 * support references the code of MIPS stacktrace.c.
  13 */
  14
  15#include <linux/cpumask.h>
  16#include <linux/interrupt.h>
  17#include <linux/smp.h>
  18#include <linux/kernel.h>
  19#include <linux/perf_event.h>
  20#include <linux/uaccess.h>
  21
  22#include <asm/irq.h>
  23#include <asm/irq_regs.h>
  24#include <asm/stacktrace.h>
  25#include <asm/time.h> /* For perf_irq */
  26
  27#define MIPS_MAX_HWEVENTS 4
  28#define MIPS_TCS_PER_COUNTER 2
  29#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
  30
  31struct cpu_hw_events {
  32	/* Array of events on this cpu. */
  33	struct perf_event	*events[MIPS_MAX_HWEVENTS];
  34
  35	/*
  36	 * The bit (indexed by the counter number) is set when the counter
  37	 * is in use for an event.
  38	 */
  39	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
  40
  41	/*
  42	 * Software copy of the control register for each performance counter.
  43	 * MIPS CPUs vary in their performance counters: different cores use
  44	 * this field differently, and some may not use it at all.
  45	 */
  46	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
  47};
  48DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
  49	.saved_ctrl = {0},
  50};
  51
  52/* The description of MIPS performance events. */
  53struct mips_perf_event {
  54	unsigned int event_id;
  55	/*
  56	 * MIPS performance counters are indexed starting from 0.
  57	 * CNTR_EVEN indicates that the event must be counted on an
  58	 * even-numbered counter; CNTR_ODD likewise selects odd counters.
  59	 */
  60	unsigned int cntr_mask;
  61	#define CNTR_EVEN	0x55555555
  62	#define CNTR_ODD	0xaaaaaaaa
  63	#define CNTR_ALL	0xffffffff
  64	enum {
  65		T  = 0,
  66		V  = 1,
  67		P  = 2,
  68	} range;
  69};
  70
  71static struct mips_perf_event raw_event;
  72static DEFINE_MUTEX(raw_event_mutex);
  73
  74#define C(x) PERF_COUNT_HW_CACHE_##x
  75
  76struct mips_pmu {
  77	u64		max_period;
  78	u64		valid_count;
  79	u64		overflow;
  80	const char	*name;
  81	int		irq;
  82	u64		(*read_counter)(unsigned int idx);
  83	void		(*write_counter)(unsigned int idx, u64 val);
  84	const struct mips_perf_event *(*map_raw_event)(u64 config);
  85	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
  86	const struct mips_perf_event (*cache_event_map)
  87				[PERF_COUNT_HW_CACHE_MAX]
  88				[PERF_COUNT_HW_CACHE_OP_MAX]
  89				[PERF_COUNT_HW_CACHE_RESULT_MAX];
  90	unsigned int	num_counters;
  91};
  92
  93static int counter_bits;
  94static struct mips_pmu mipspmu;
  95
  96#define M_PERFCTL_EVENT(event)		(((event) << MIPS_PERFCTRL_EVENT_S) & \
  97					 MIPS_PERFCTRL_EVENT)
  98#define M_PERFCTL_VPEID(vpe)		((vpe)	  << MIPS_PERFCTRL_VPEID_S)
  99
 100#ifdef CONFIG_CPU_BMIPS5000
 101#define M_PERFCTL_MT_EN(filter)		0
 102#else /* !CONFIG_CPU_BMIPS5000 */
 103#define M_PERFCTL_MT_EN(filter)		(filter)
 104#endif /* CONFIG_CPU_BMIPS5000 */
 105
 106#define	   M_TC_EN_ALL			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
 107#define	   M_TC_EN_VPE			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
 108#define	   M_TC_EN_TC			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)
 109
 110#define M_PERFCTL_COUNT_EVENT_WHENEVER	(MIPS_PERFCTRL_EXL |		\
 111					 MIPS_PERFCTRL_K |		\
 112					 MIPS_PERFCTRL_U |		\
 113					 MIPS_PERFCTRL_S |		\
 114					 MIPS_PERFCTRL_IE)
 115
 116#ifdef CONFIG_MIPS_MT_SMP
 117#define M_PERFCTL_CONFIG_MASK		0x3fff801f
 118#else
 119#define M_PERFCTL_CONFIG_MASK		0x1f
 120#endif
 121
 122#define CNTR_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
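/*
 * Illustrative sketch (editor's addition, not in the original source):
 * CNTR_BIT_MASK(32) == 0x00000000ffffffffULL and CNTR_BIT_MASK(48) ==
 * 0x0000ffffffffffffULL. The (n) == 64 special case matters because
 * shifting a 64-bit value by 64 is undefined behaviour in C, so ~0ULL
 * is returned directly instead of computing (1ULL << 64) - 1.
 */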
 123
 124#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
 125static DEFINE_RWLOCK(pmuint_rwlock);
 126
 127#if defined(CONFIG_CPU_BMIPS5000)
 128#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
 129			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
 130#else
 131#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
 132			 0 : cpu_vpe_id(&current_cpu_data))
 133#endif
 134
 135/* Copied from op_model_mipsxx.c */
 136static unsigned int vpe_shift(void)
 137{
 138	if (num_possible_cpus() > 1)
 139		return 1;
 140
 141	return 0;
 142}
 143
 144static unsigned int counters_total_to_per_cpu(unsigned int counters)
 145{
 146	return counters >> vpe_shift();
 147}
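/*
 * Illustrative example (editor's addition, not in the original source):
 * on a two-VPE MT system reporting 4 counters in total, vpe_shift()
 * returns 1 and counters_total_to_per_cpu(4) == 2, i.e. each CPU (VPE)
 * owns half of the shared counters.
 */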
 148
 149#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
 150#define vpe_id()	0
 151
 152#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
 153
 154static void resume_local_counters(void);
 155static void pause_local_counters(void);
 156static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
 157static int mipsxx_pmu_handle_shared_irq(void);
 158
 159/* 0: Not Loongson-3
 160 * 1: Loongson-3A1000/3B1000/3B1500
 161 * 2: Loongson-3A2000/3A3000
 162 * 3: Loongson-3A4000+
 163 */
 164
 165#define LOONGSON_PMU_TYPE0 0
 166#define LOONGSON_PMU_TYPE1 1
 167#define LOONGSON_PMU_TYPE2 2
 168#define LOONGSON_PMU_TYPE3 3
 169
 170static inline int get_loongson3_pmu_type(void)
 171{
 172	if (boot_cpu_type() != CPU_LOONGSON64)
 173		return LOONGSON_PMU_TYPE0;
 174	if ((boot_cpu_data.processor_id & PRID_COMP_MASK) == PRID_COMP_LEGACY)
 175		return LOONGSON_PMU_TYPE1;
 176	if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64C)
 177		return LOONGSON_PMU_TYPE2;
 178	if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G)
 179		return LOONGSON_PMU_TYPE3;
 180
 181	return LOONGSON_PMU_TYPE0;
 182}
 183
 184static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
 185{
 186	if (vpe_id() == 1)
 187		idx = (idx + 2) & 3;
 188	return idx;
 189}
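/*
 * Illustrative example (editor's addition, not in the original source):
 * with MIPS_TCS_PER_COUNTER == 2 and four counters, code running on
 * VPE 1 has its logical counter indexes rotated by two, so logical
 * 0/1/2/3 map to physical 2/3/0/1. VPE 0 sees the identity mapping.
 */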
 190
 191static u64 mipsxx_pmu_read_counter(unsigned int idx)
 192{
 193	idx = mipsxx_pmu_swizzle_perf_idx(idx);
 194
 195	switch (idx) {
 196	case 0:
 197		/*
 198		 * The counters are unsigned, we must cast to truncate
 199		 * off the high bits.
 200		 */
 201		return (u32)read_c0_perfcntr0();
 202	case 1:
 203		return (u32)read_c0_perfcntr1();
 204	case 2:
 205		return (u32)read_c0_perfcntr2();
 206	case 3:
 207		return (u32)read_c0_perfcntr3();
 208	default:
 209		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
 210		return 0;
 211	}
 212}
 213
 214static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
 215{
 216	u64 mask = CNTR_BIT_MASK(counter_bits);
 217	idx = mipsxx_pmu_swizzle_perf_idx(idx);
 218
 219	switch (idx) {
 220	case 0:
 221		return read_c0_perfcntr0_64() & mask;
 222	case 1:
 223		return read_c0_perfcntr1_64() & mask;
 224	case 2:
 225		return read_c0_perfcntr2_64() & mask;
 226	case 3:
 227		return read_c0_perfcntr3_64() & mask;
 228	default:
 229		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
 230		return 0;
 231	}
 232}
 233
 234static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
 235{
 236	idx = mipsxx_pmu_swizzle_perf_idx(idx);
 237
 238	switch (idx) {
 239	case 0:
 240		write_c0_perfcntr0(val);
 241		return;
 242	case 1:
 243		write_c0_perfcntr1(val);
 244		return;
 245	case 2:
 246		write_c0_perfcntr2(val);
 247		return;
 248	case 3:
 249		write_c0_perfcntr3(val);
 250		return;
 251	}
 252}
 253
 254static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
 255{
 256	val &= CNTR_BIT_MASK(counter_bits);
 257	idx = mipsxx_pmu_swizzle_perf_idx(idx);
 258
 259	switch (idx) {
 260	case 0:
 261		write_c0_perfcntr0_64(val);
 262		return;
 263	case 1:
 264		write_c0_perfcntr1_64(val);
 265		return;
 266	case 2:
 267		write_c0_perfcntr2_64(val);
 268		return;
 269	case 3:
 270		write_c0_perfcntr3_64(val);
 271		return;
 272	}
 273}
 274
 275static unsigned int mipsxx_pmu_read_control(unsigned int idx)
 276{
 277	idx = mipsxx_pmu_swizzle_perf_idx(idx);
 278
 279	switch (idx) {
 280	case 0:
 281		return read_c0_perfctrl0();
 282	case 1:
 283		return read_c0_perfctrl1();
 284	case 2:
 285		return read_c0_perfctrl2();
 286	case 3:
 287		return read_c0_perfctrl3();
 288	default:
 289		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
 290		return 0;
 291	}
 292}
 293
 294static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
 295{
 296	idx = mipsxx_pmu_swizzle_perf_idx(idx);
 297
 298	switch (idx) {
 299	case 0:
 300		write_c0_perfctrl0(val);
 301		return;
 302	case 1:
 303		write_c0_perfctrl1(val);
 304		return;
 305	case 2:
 306		write_c0_perfctrl2(val);
 307		return;
 308	case 3:
 309		write_c0_perfctrl3(val);
 310		return;
 311	}
 312}
 313
 314static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
 315				    struct hw_perf_event *hwc)
 316{
 317	int i;
 318	unsigned long cntr_mask;
 319
 320	/*
  321	 * We only need to care about the counter mask here; the range
  322	 * has already been checked.
 323	 */
 324	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
 325		cntr_mask = (hwc->event_base >> 10) & 0xffff;
 326	else
 327		cntr_mask = (hwc->event_base >> 8) & 0xffff;
 328
 329	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
 330		/*
  331		 * Note that some MIPS perf events can be counted by both
  332		 * even and odd counters, whereas many others can only be
  333		 * counted by even _or_ odd counters. This introduces an
  334		 * issue: when the former kind of event takes the counter
  335		 * that the latter kind of event wants to use, the "counter
  336		 * allocation" for the latter event fails, even though
  337		 * swapping the allocations dynamically would satisfy both.
  338		 * We leave this issue alone for now.
 339		 */
 340		if (test_bit(i, &cntr_mask) &&
 341			!test_and_set_bit(i, cpuc->used_mask))
 342			return i;
 343	}
 344
 345	return -EAGAIN;
 346}
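/*
 * Illustrative example (editor's addition, not in the original source):
 * for a non-Loongson event encoded by mipspmu_perf_event_encode() with
 * cntr_mask == CNTR_EVEN (0x55555555), (hwc->event_base >> 8) & 0xffff
 * yields 0x5555, so only even-numbered bits pass the test_bit() check
 * above and the event can only be placed on an even-numbered counter.
 */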
 347
 348static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
 349{
 350	struct perf_event *event = container_of(evt, struct perf_event, hw);
 351	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 352	unsigned int range = evt->event_base >> 24;
 353
 354	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
 355
 356	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
 357		cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0x3ff) |
 358			(evt->config_base & M_PERFCTL_CONFIG_MASK) |
  359			/* Make sure the interrupt is enabled. */
 360			MIPS_PERFCTRL_IE;
 361	else
 362		cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
 363			(evt->config_base & M_PERFCTL_CONFIG_MASK) |
  364			/* Make sure the interrupt is enabled. */
 365			MIPS_PERFCTRL_IE;
 366
 367	if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
 368		/* enable the counter for the calling thread */
 369		cpuc->saved_ctrl[idx] |=
 370			(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
 371	} else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
 372		/* The counter is processor wide. Set it up to count all TCs. */
 373		pr_debug("Enabling perf counter for all TCs\n");
 374		cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
 375	} else {
 376		unsigned int cpu, ctrl;
 377
 378		/*
 379		 * Set up the counter for a particular CPU when event->cpu is
 380		 * a valid CPU number. Otherwise set up the counter for the CPU
 381		 * scheduling this thread.
 382		 */
 383		cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
 384
 385		ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
 386		ctrl |= M_TC_EN_VPE;
 387		cpuc->saved_ctrl[idx] |= ctrl;
 388		pr_debug("Enabling perf counter for CPU%d\n", cpu);
 389	}
 390	/*
 391	 * We do not actually let the counter run. Leave it until start().
 392	 */
 393}
 394
 395static void mipsxx_pmu_disable_event(int idx)
 396{
 397	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 398	unsigned long flags;
 399
 400	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
 401
 402	local_irq_save(flags);
 403	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
 404		~M_PERFCTL_COUNT_EVENT_WHENEVER;
 405	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
 406	local_irq_restore(flags);
 407}
 408
 409static int mipspmu_event_set_period(struct perf_event *event,
 410				    struct hw_perf_event *hwc,
 411				    int idx)
 412{
 413	u64 left = local64_read(&hwc->period_left);
 414	u64 period = hwc->sample_period;
 415	int ret = 0;
 416
 417	if (unlikely((left + period) & (1ULL << 63))) {
 418		/* left underflowed by more than period. */
 419		left = period;
 420		local64_set(&hwc->period_left, left);
 421		hwc->last_period = period;
 422		ret = 1;
 423	} else	if (unlikely((left + period) <= period)) {
 424		/* left underflowed by less than period. */
 425		left += period;
 426		local64_set(&hwc->period_left, left);
 427		hwc->last_period = period;
 428		ret = 1;
 429	}
 430
 431	if (left > mipspmu.max_period) {
 432		left = mipspmu.max_period;
 433		local64_set(&hwc->period_left, left);
 434	}
 435
 436	local64_set(&hwc->prev_count, mipspmu.overflow - left);
 437
 438	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
 439		mipsxx_pmu_write_control(idx,
 440				M_PERFCTL_EVENT(hwc->event_base & 0x3ff));
 441
 442	mipspmu.write_counter(idx, mipspmu.overflow - left);
 443
 444	perf_event_update_userpage(event);
 445
 446	return ret;
 447}
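/*
 * Worked example (editor's addition, not in the original source): on a
 * PMU with 32-bit counters, mipspmu.overflow is typically 1ULL << 31.
 * With left == 0x1000 the counter is preloaded with 0x80000000 - 0x1000
 * == 0x7ffff000, so the overflow bit becomes set after exactly 0x1000
 * further events, which is when the next sample is taken.
 */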
 448
 449static void mipspmu_event_update(struct perf_event *event,
 450				 struct hw_perf_event *hwc,
 451				 int idx)
 452{
 453	u64 prev_raw_count, new_raw_count;
 454	u64 delta;
 455
 456again:
 457	prev_raw_count = local64_read(&hwc->prev_count);
 458	new_raw_count = mipspmu.read_counter(idx);
 459
 460	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 461				new_raw_count) != prev_raw_count)
 462		goto again;
 463
 464	delta = new_raw_count - prev_raw_count;
 465
 466	local64_add(delta, &event->count);
 467	local64_sub(delta, &hwc->period_left);
 468}
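/*
 * Illustrative note (editor's addition, not in the original source):
 * the cmpxchg loop makes this update safe against a concurrent overflow
 * interrupt: whichever path installs new_raw_count first wins, the
 * loser retries, and so delta = new_raw_count - prev_raw_count is never
 * accounted twice.
 */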
 469
 470static void mipspmu_start(struct perf_event *event, int flags)
 471{
 472	struct hw_perf_event *hwc = &event->hw;
 473
 474	if (flags & PERF_EF_RELOAD)
 475		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
 476
 477	hwc->state = 0;
 478
 479	/* Set the period for the event. */
 480	mipspmu_event_set_period(event, hwc, hwc->idx);
 481
 482	/* Enable the event. */
 483	mipsxx_pmu_enable_event(hwc, hwc->idx);
 484}
 485
 486static void mipspmu_stop(struct perf_event *event, int flags)
 487{
 488	struct hw_perf_event *hwc = &event->hw;
 489
 490	if (!(hwc->state & PERF_HES_STOPPED)) {
 491		/* We are working on a local event. */
 492		mipsxx_pmu_disable_event(hwc->idx);
 493		barrier();
 494		mipspmu_event_update(event, hwc, hwc->idx);
 495		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 496	}
 497}
 498
 499static int mipspmu_add(struct perf_event *event, int flags)
 500{
 501	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 502	struct hw_perf_event *hwc = &event->hw;
 503	int idx;
 504	int err = 0;
 505
 506	perf_pmu_disable(event->pmu);
 507
  508	/* Look for a free counter for this event. */
 509	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
 510	if (idx < 0) {
 511		err = idx;
 512		goto out;
 513	}
 514
 515	/*
 516	 * If there is an event in the counter we are going to use then
 517	 * make sure it is disabled.
 518	 */
 519	event->hw.idx = idx;
 520	mipsxx_pmu_disable_event(idx);
 521	cpuc->events[idx] = event;
 522
 523	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 524	if (flags & PERF_EF_START)
 525		mipspmu_start(event, PERF_EF_RELOAD);
 526
 527	/* Propagate our changes to the userspace mapping. */
 528	perf_event_update_userpage(event);
 529
 530out:
 531	perf_pmu_enable(event->pmu);
 532	return err;
 533}
 534
 535static void mipspmu_del(struct perf_event *event, int flags)
 536{
 537	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 538	struct hw_perf_event *hwc = &event->hw;
 539	int idx = hwc->idx;
 540
 541	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
 542
 543	mipspmu_stop(event, PERF_EF_UPDATE);
 544	cpuc->events[idx] = NULL;
 545	clear_bit(idx, cpuc->used_mask);
 546
 547	perf_event_update_userpage(event);
 548}
 549
 550static void mipspmu_read(struct perf_event *event)
 551{
 552	struct hw_perf_event *hwc = &event->hw;
 553
 554	/* Don't read disabled counters! */
 555	if (hwc->idx < 0)
 556		return;
 557
 558	mipspmu_event_update(event, hwc, hwc->idx);
 559}
 560
 561static void mipspmu_enable(struct pmu *pmu)
 562{
 563#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
 564	write_unlock(&pmuint_rwlock);
 565#endif
 566	resume_local_counters();
 567}
 568
 569/*
  570 * MIPS performance counters can be per-TC. The control registers
  571 * cannot be directly accessed across CPUs, so global control would
  572 * require cross-CPU calls. on_each_cpu() could help, but we cannot
  573 * guarantee that it is called with interrupts enabled. So instead we
  574 * pause the local counters, then grab a rwlock and leave the counters
  575 * on other CPUs alone. If a counter interrupt is raised while we own
  576 * the write lock, we simply pause the local counters on that CPU and
  577 * spin in the handler. Also, we know we won't be migrated to another
  578 * CPU after pausing the local counters and before grabbing the lock.
 579 */
 580static void mipspmu_disable(struct pmu *pmu)
 581{
 582	pause_local_counters();
 583#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
 584	write_lock(&pmuint_rwlock);
 585#endif
 586}
 587
 588static atomic_t active_events = ATOMIC_INIT(0);
 589static DEFINE_MUTEX(pmu_reserve_mutex);
 590static int (*save_perf_irq)(void);
 591
 592static int mipspmu_get_irq(void)
 593{
 594	int err;
 595
 596	if (mipspmu.irq >= 0) {
 597		/* Request my own irq handler. */
 598		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
 599				  IRQF_PERCPU | IRQF_NOBALANCING |
 600				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
 601				  IRQF_SHARED,
 602				  "mips_perf_pmu", &mipspmu);
 603		if (err) {
 604			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
 605				mipspmu.irq);
 606		}
 607	} else if (cp0_perfcount_irq < 0) {
 608		/*
 609		 * We are sharing the irq number with the timer interrupt.
 610		 */
 611		save_perf_irq = perf_irq;
 612		perf_irq = mipsxx_pmu_handle_shared_irq;
 613		err = 0;
 614	} else {
 615		pr_warn("The platform hasn't properly defined its interrupt controller\n");
 616		err = -ENOENT;
 617	}
 618
 619	return err;
 620}
 621
 622static void mipspmu_free_irq(void)
 623{
 624	if (mipspmu.irq >= 0)
 625		free_irq(mipspmu.irq, &mipspmu);
 626	else if (cp0_perfcount_irq < 0)
 627		perf_irq = save_perf_irq;
 628}
 629
 630/*
  631 * mipsxx/rm9000/loongson2 have different performance counters; each
  632 * has its own specific low-level init routine.
 633 */
 634static void reset_counters(void *arg);
 635static int __hw_perf_event_init(struct perf_event *event);
 636
 637static void hw_perf_event_destroy(struct perf_event *event)
 638{
 639	if (atomic_dec_and_mutex_lock(&active_events,
 640				&pmu_reserve_mutex)) {
 641		/*
 642		 * We must not call the destroy function with interrupts
 643		 * disabled.
 644		 */
 645		on_each_cpu(reset_counters,
 646			(void *)(long)mipspmu.num_counters, 1);
 647		mipspmu_free_irq();
 648		mutex_unlock(&pmu_reserve_mutex);
 649	}
 650}
 651
 652static int mipspmu_event_init(struct perf_event *event)
 653{
 654	int err = 0;
 655
 656	/* does not support taken branch sampling */
 657	if (has_branch_stack(event))
 658		return -EOPNOTSUPP;
 659
 660	switch (event->attr.type) {
 661	case PERF_TYPE_RAW:
 662	case PERF_TYPE_HARDWARE:
 663	case PERF_TYPE_HW_CACHE:
 664		break;
 665
 666	default:
 667		return -ENOENT;
 668	}
 669
 670	if (event->cpu >= 0 && !cpu_online(event->cpu))
 671		return -ENODEV;
 672
 673	if (!atomic_inc_not_zero(&active_events)) {
 674		mutex_lock(&pmu_reserve_mutex);
 675		if (atomic_read(&active_events) == 0)
 676			err = mipspmu_get_irq();
 677
 678		if (!err)
 679			atomic_inc(&active_events);
 680		mutex_unlock(&pmu_reserve_mutex);
 681	}
 682
 683	if (err)
 684		return err;
 685
 686	return __hw_perf_event_init(event);
 687}
 688
 689static struct pmu pmu = {
 690	.pmu_enable	= mipspmu_enable,
 691	.pmu_disable	= mipspmu_disable,
 692	.event_init	= mipspmu_event_init,
 693	.add		= mipspmu_add,
 694	.del		= mipspmu_del,
 695	.start		= mipspmu_start,
 696	.stop		= mipspmu_stop,
 697	.read		= mipspmu_read,
 698};
 699
 700static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 701{
 702/*
 703 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 704 * event_id.
 705 */
 706#ifdef CONFIG_MIPS_MT_SMP
 707	if (num_possible_cpus() > 1)
 708		return ((unsigned int)pev->range << 24) |
 709			(pev->cntr_mask & 0xffff00) |
 710			(pev->event_id & 0xff);
 711	else
 712#endif /* CONFIG_MIPS_MT_SMP */
 713	{
 714		if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
 715			return (pev->cntr_mask & 0xfffc00) |
 716				(pev->event_id & 0x3ff);
 717		else
 718			return (pev->cntr_mask & 0xffff00) |
 719				(pev->event_id & 0xff);
 720	}
 721}
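/*
 * Worked example (editor's addition, not in the original source): for
 * the generic cycles event on a 24K, pev is { 0x00, CNTR_EVEN |
 * CNTR_ODD, P }. CNTR_EVEN | CNTR_ODD == 0xffffffff, so the non-MT
 * encoding is (0xffffffff & 0xffff00) | (0x00 & 0xff) == 0xffff00;
 * with CONFIG_MIPS_MT_SMP and more than one CPU, the range P (2) is
 * also packed in: (2 << 24) | 0xffff00 == 0x02ffff00.
 */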
 722
 723static const struct mips_perf_event *mipspmu_map_general_event(int idx)
 724{
 725
 726	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
 727		return ERR_PTR(-EOPNOTSUPP);
 728	return &(*mipspmu.general_event_map)[idx];
 729}
 730
 731static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
 732{
 733	unsigned int cache_type, cache_op, cache_result;
 734	const struct mips_perf_event *pev;
 735
 736	cache_type = (config >> 0) & 0xff;
 737	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
 738		return ERR_PTR(-EINVAL);
 739
 740	cache_op = (config >> 8) & 0xff;
 741	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
 742		return ERR_PTR(-EINVAL);
 743
 744	cache_result = (config >> 16) & 0xff;
 745	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 746		return ERR_PTR(-EINVAL);
 747
 748	pev = &((*mipspmu.cache_event_map)
 749					[cache_type]
 750					[cache_op]
 751					[cache_result]);
 752
 753	if (pev->cntr_mask == 0)
 754		return ERR_PTR(-EOPNOTSUPP);
 755
 756	return pev;
 757
 758}
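/*
 * Illustrative example (editor's addition, not in the original source):
 * a PERF_TYPE_HW_CACHE config packs type, op and result into one byte
 * each: config = type | (op << 8) | (result << 16). An L1D read miss is
 * therefore C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16) ==
 * 0x10000, which indexes (*mipspmu.cache_event_map)[0][0][1].
 */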
 759
 760static int validate_group(struct perf_event *event)
 761{
 762	struct perf_event *sibling, *leader = event->group_leader;
 763	struct cpu_hw_events fake_cpuc;
 764
 765	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
 766
 767	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
 768		return -EINVAL;
 769
 770	for_each_sibling_event(sibling, leader) {
 771		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
 772			return -EINVAL;
 773	}
 774
 775	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
 776		return -EINVAL;
 777
 778	return 0;
 779}
 780
 781/* This is needed by specific irq handlers in perf_event_*.c */
 782static void handle_associated_event(struct cpu_hw_events *cpuc,
 783				    int idx, struct perf_sample_data *data,
 784				    struct pt_regs *regs)
 785{
 786	struct perf_event *event = cpuc->events[idx];
 787	struct hw_perf_event *hwc = &event->hw;
 788
 789	mipspmu_event_update(event, hwc, idx);
 790	data->period = event->hw.last_period;
 791	if (!mipspmu_event_set_period(event, hwc, idx))
 792		return;
 793
 794	if (perf_event_overflow(event, data, regs))
 795		mipsxx_pmu_disable_event(idx);
 796}
 797
 798
 799static int __n_counters(void)
 800{
 801	if (!cpu_has_perf)
 802		return 0;
 803	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
 804		return 1;
 805	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
 806		return 2;
 807	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
 808		return 3;
 809
 810	return 4;
 811}
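/*
 * Illustrative note (editor's addition, not in the original source):
 * each perf control register's M bit indicates that another counter/
 * control pair follows, so the probe above walks perfctrl0..2 and stops
 * at the first register with M clear, yielding 1 to 4 counters.
 */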
 812
 813static int n_counters(void)
 814{
 815	int counters;
 816
 817	switch (current_cpu_type()) {
 818	case CPU_R10000:
 819		counters = 2;
 820		break;
 821
 822	case CPU_R12000:
 823	case CPU_R14000:
 824	case CPU_R16000:
 825		counters = 4;
 826		break;
 827
 828	default:
 829		counters = __n_counters();
 830	}
 831
 832	return counters;
 833}
 834
 835static void loongson3_reset_counters(void *arg)
 836{
 837	int counters = (int)(long)arg;
 838
 839	switch (counters) {
 840	case 4:
 841		mipsxx_pmu_write_control(3, 0);
 842		mipspmu.write_counter(3, 0);
 843		mipsxx_pmu_write_control(3, 127<<5);
 844		mipspmu.write_counter(3, 0);
 845		mipsxx_pmu_write_control(3, 191<<5);
 846		mipspmu.write_counter(3, 0);
 847		mipsxx_pmu_write_control(3, 255<<5);
 848		mipspmu.write_counter(3, 0);
 849		mipsxx_pmu_write_control(3, 319<<5);
 850		mipspmu.write_counter(3, 0);
 851		mipsxx_pmu_write_control(3, 383<<5);
 852		mipspmu.write_counter(3, 0);
 853		mipsxx_pmu_write_control(3, 575<<5);
 854		mipspmu.write_counter(3, 0);
 855		fallthrough;
 856	case 3:
 857		mipsxx_pmu_write_control(2, 0);
 858		mipspmu.write_counter(2, 0);
 859		mipsxx_pmu_write_control(2, 127<<5);
 860		mipspmu.write_counter(2, 0);
 861		mipsxx_pmu_write_control(2, 191<<5);
 862		mipspmu.write_counter(2, 0);
 863		mipsxx_pmu_write_control(2, 255<<5);
 864		mipspmu.write_counter(2, 0);
 865		mipsxx_pmu_write_control(2, 319<<5);
 866		mipspmu.write_counter(2, 0);
 867		mipsxx_pmu_write_control(2, 383<<5);
 868		mipspmu.write_counter(2, 0);
 869		mipsxx_pmu_write_control(2, 575<<5);
 870		mipspmu.write_counter(2, 0);
 871		fallthrough;
 872	case 2:
 873		mipsxx_pmu_write_control(1, 0);
 874		mipspmu.write_counter(1, 0);
 875		mipsxx_pmu_write_control(1, 127<<5);
 876		mipspmu.write_counter(1, 0);
 877		mipsxx_pmu_write_control(1, 191<<5);
 878		mipspmu.write_counter(1, 0);
 879		mipsxx_pmu_write_control(1, 255<<5);
 880		mipspmu.write_counter(1, 0);
 881		mipsxx_pmu_write_control(1, 319<<5);
 882		mipspmu.write_counter(1, 0);
 883		mipsxx_pmu_write_control(1, 383<<5);
 884		mipspmu.write_counter(1, 0);
 885		mipsxx_pmu_write_control(1, 575<<5);
 886		mipspmu.write_counter(1, 0);
 887		fallthrough;
 888	case 1:
 889		mipsxx_pmu_write_control(0, 0);
 890		mipspmu.write_counter(0, 0);
 891		mipsxx_pmu_write_control(0, 127<<5);
 892		mipspmu.write_counter(0, 0);
 893		mipsxx_pmu_write_control(0, 191<<5);
 894		mipspmu.write_counter(0, 0);
 895		mipsxx_pmu_write_control(0, 255<<5);
 896		mipspmu.write_counter(0, 0);
 897		mipsxx_pmu_write_control(0, 319<<5);
 898		mipspmu.write_counter(0, 0);
 899		mipsxx_pmu_write_control(0, 383<<5);
 900		mipspmu.write_counter(0, 0);
 901		mipsxx_pmu_write_control(0, 575<<5);
 902		mipspmu.write_counter(0, 0);
 903		break;
 904	}
 905}
 906
 907static void reset_counters(void *arg)
 908{
 909	int counters = (int)(long)arg;
 910
 911	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
 912		loongson3_reset_counters(arg);
 913		return;
 914	}
 915
 916	switch (counters) {
 917	case 4:
 918		mipsxx_pmu_write_control(3, 0);
 919		mipspmu.write_counter(3, 0);
 920		fallthrough;
 921	case 3:
 922		mipsxx_pmu_write_control(2, 0);
 923		mipspmu.write_counter(2, 0);
 924		fallthrough;
 925	case 2:
 926		mipsxx_pmu_write_control(1, 0);
 927		mipspmu.write_counter(1, 0);
 928		fallthrough;
 929	case 1:
 930		mipsxx_pmu_write_control(0, 0);
 931		mipspmu.write_counter(0, 0);
 932		break;
 933	}
 934}
 935
 936/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
 937static const struct mips_perf_event mipsxxcore_event_map
 938				[PERF_COUNT_HW_MAX] = {
 939	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
 940	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
 941	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
 942	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
 943};
 944
 945/* 74K/proAptiv core has different branch event code. */
 946static const struct mips_perf_event mipsxxcore_event_map2
 947				[PERF_COUNT_HW_MAX] = {
 948	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
 949	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
 950	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
 951	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
 952};
 953
 954static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
 955	[PERF_COUNT_HW_CPU_CYCLES]          = { 0x00, CNTR_EVEN | CNTR_ODD },
 956	[PERF_COUNT_HW_INSTRUCTIONS]        = { 0x01, CNTR_EVEN | CNTR_ODD },
 957	/* These only count dcache, not icache */
 958	[PERF_COUNT_HW_CACHE_REFERENCES]    = { 0x45, CNTR_EVEN | CNTR_ODD },
 959	[PERF_COUNT_HW_CACHE_MISSES]        = { 0x48, CNTR_EVEN | CNTR_ODD },
 960	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
 961	[PERF_COUNT_HW_BRANCH_MISSES]       = { 0x16, CNTR_EVEN | CNTR_ODD },
 962};
 963
 964static const struct mips_perf_event loongson3_event_map1[PERF_COUNT_HW_MAX] = {
 965	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
 966	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
 967	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
 968	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
 969};
 970
 971static const struct mips_perf_event loongson3_event_map2[PERF_COUNT_HW_MAX] = {
 972	[PERF_COUNT_HW_CPU_CYCLES] = { 0x80, CNTR_ALL },
 973	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x81, CNTR_ALL },
 974	[PERF_COUNT_HW_CACHE_MISSES] = { 0x18, CNTR_ALL },
 975	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x94, CNTR_ALL },
 976	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x9c, CNTR_ALL },
 977};
 978
 979static const struct mips_perf_event loongson3_event_map3[PERF_COUNT_HW_MAX] = {
 980	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_ALL },
 981	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_ALL },
 982	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x1c, CNTR_ALL },
 983	[PERF_COUNT_HW_CACHE_MISSES] = { 0x1d, CNTR_ALL },
 984	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_ALL },
 985	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x08, CNTR_ALL },
 986};
 987
 988static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
 989	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
 990	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
 991	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
 992	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL	 },
 993	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
 994	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
 995	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
 996};
 997
 998static const struct mips_perf_event bmips5000_event_map
 999				[PERF_COUNT_HW_MAX] = {
1000	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
1001	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
1002	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
1003};
1004
1005/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
1006static const struct mips_perf_event mipsxxcore_cache_map
1007				[PERF_COUNT_HW_CACHE_MAX]
1008				[PERF_COUNT_HW_CACHE_OP_MAX]
1009				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1010[C(L1D)] = {
1011	/*
1012	 * Like some other architectures (e.g. ARM), the performance
1013	 * counters don't differentiate between read and write
1014	 * accesses/misses, so this isn't strictly correct, but it's the
1015	 * best we can do. Writes and reads get combined.
1016	 */
1017	[C(OP_READ)] = {
1018		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
1019		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
1020	},
1021	[C(OP_WRITE)] = {
1022		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
1023		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
1024	},
1025},
1026[C(L1I)] = {
1027	[C(OP_READ)] = {
1028		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
1029		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
1030	},
1031	[C(OP_WRITE)] = {
1032		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
1033		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
1034	},
1035	[C(OP_PREFETCH)] = {
1036		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
1037		/*
1038		 * Note that MIPS has only "hit" events countable for
1039		 * the prefetch operation.
1040		 */
1041	},
1042},
1043[C(LL)] = {
1044	[C(OP_READ)] = {
1045		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
1046		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
1047	},
1048	[C(OP_WRITE)] = {
1049		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
1050		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
1051	},
1052},
1053[C(DTLB)] = {
1054	[C(OP_READ)] = {
1055		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
1056		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
1057	},
1058	[C(OP_WRITE)] = {
1059		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
1060		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
1061	},
1062},
1063[C(ITLB)] = {
1064	[C(OP_READ)] = {
1065		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
1066		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
1067	},
1068	[C(OP_WRITE)] = {
1069		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
1070		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
1071	},
1072},
1073[C(BPU)] = {
1074	/* Using the same code for *HW_BRANCH* */
1075	[C(OP_READ)] = {
1076		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
1077		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1078	},
1079	[C(OP_WRITE)] = {
1080		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
1081		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1082	},
1083},
1084};
1085
1086/* 74K/proAptiv core has completely different cache event map. */
1087static const struct mips_perf_event mipsxxcore_cache_map2
1088				[PERF_COUNT_HW_CACHE_MAX]
1089				[PERF_COUNT_HW_CACHE_OP_MAX]
1090				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1091[C(L1D)] = {
1092	/*
1093	 * Like some other architectures (e.g. ARM), the performance
1094	 * counters don't differentiate between read and write
1095	 * accesses/misses, so this isn't strictly correct, but it's the
1096	 * best we can do. Writes and reads get combined.
1097	 */
1098	[C(OP_READ)] = {
1099		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
1100		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
1101	},
1102	[C(OP_WRITE)] = {
1103		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
1104		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
1105	},
1106},
1107[C(L1I)] = {
1108	[C(OP_READ)] = {
1109		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
1110		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
1111	},
1112	[C(OP_WRITE)] = {
1113		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
1114		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
1115	},
1116	[C(OP_PREFETCH)] = {
1117		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
1118		/*
1119		 * Note that MIPS has only "hit" events countable for
1120		 * the prefetch operation.
1121		 */
1122	},
1123},
1124[C(LL)] = {
1125	[C(OP_READ)] = {
1126		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
1127		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
1128	},
1129	[C(OP_WRITE)] = {
1130		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
1131		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
1132	},
1133},
1134/*
1135 * 74K core does not have specific DTLB events. proAptiv core has
1136 * "speculative" DTLB events which are numbered 0x63 (even/odd) and
1137 * not included here. One can use raw events if really needed.
1138 */
1139[C(ITLB)] = {
1140	[C(OP_READ)] = {
1141		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
1142		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
1143	},
1144	[C(OP_WRITE)] = {
1145		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
1146		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
1147	},
1148},
1149[C(BPU)] = {
1150	/* Using the same code for *HW_BRANCH* */
1151	[C(OP_READ)] = {
1152		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1153		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1154	},
1155	[C(OP_WRITE)] = {
1156		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1157		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1158	},
1159},
1160};
1161
1162static const struct mips_perf_event i6x00_cache_map
1163				[PERF_COUNT_HW_CACHE_MAX]
1164				[PERF_COUNT_HW_CACHE_OP_MAX]
1165				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1166[C(L1D)] = {
1167	[C(OP_READ)] = {
1168		[C(RESULT_ACCESS)]	= { 0x46, CNTR_EVEN | CNTR_ODD },
1169		[C(RESULT_MISS)]	= { 0x49, CNTR_EVEN | CNTR_ODD },
1170	},
1171	[C(OP_WRITE)] = {
1172		[C(RESULT_ACCESS)]	= { 0x47, CNTR_EVEN | CNTR_ODD },
1173		[C(RESULT_MISS)]	= { 0x4a, CNTR_EVEN | CNTR_ODD },
1174	},
1175},
1176[C(L1I)] = {
1177	[C(OP_READ)] = {
1178		[C(RESULT_ACCESS)]	= { 0x84, CNTR_EVEN | CNTR_ODD },
1179		[C(RESULT_MISS)]	= { 0x85, CNTR_EVEN | CNTR_ODD },
1180	},
1181},
1182[C(DTLB)] = {
1183	/* Can't distinguish read & write */
1184	[C(OP_READ)] = {
1185		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
1186		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
1187	},
1188	[C(OP_WRITE)] = {
1189		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
1190		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
1191	},
1192},
1193[C(BPU)] = {
1194	/* Conditional branches / mispredicted */
1195	[C(OP_READ)] = {
1196		[C(RESULT_ACCESS)]	= { 0x15, CNTR_EVEN | CNTR_ODD },
1197		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN | CNTR_ODD },
1198	},
1199},
1200};
1201
1202static const struct mips_perf_event loongson3_cache_map1
1203				[PERF_COUNT_HW_CACHE_MAX]
1204				[PERF_COUNT_HW_CACHE_OP_MAX]
1205				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1206[C(L1D)] = {
1207	/*
1208	 * Like some other architectures (e.g. ARM), the performance
1209	 * counters don't differentiate between read and write
1210	 * accesses/misses, so this isn't strictly correct, but it's the
1211	 * best we can do. Writes and reads get combined.
1212	 */
1213	[C(OP_READ)] = {
1214		[C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
1215	},
1216	[C(OP_WRITE)] = {
1217		[C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
1218	},
1219},
1220[C(L1I)] = {
1221	[C(OP_READ)] = {
1222		[C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
1223	},
1224	[C(OP_WRITE)] = {
1225		[C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
1226	},
1227},
1228[C(DTLB)] = {
1229	[C(OP_READ)] = {
1230		[C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
1231	},
1232	[C(OP_WRITE)] = {
1233		[C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
1234	},
1235},
1236[C(ITLB)] = {
1237	[C(OP_READ)] = {
1238		[C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
1239	},
1240	[C(OP_WRITE)] = {
1241		[C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
1242	},
1243},
1244[C(BPU)] = {
1245	/* Using the same code for *HW_BRANCH* */
1246	[C(OP_READ)] = {
1247		[C(RESULT_ACCESS)]      = { 0x01, CNTR_EVEN },
1248		[C(RESULT_MISS)]        = { 0x01, CNTR_ODD },
1249	},
1250	[C(OP_WRITE)] = {
1251		[C(RESULT_ACCESS)]      = { 0x01, CNTR_EVEN },
1252		[C(RESULT_MISS)]        = { 0x01, CNTR_ODD },
1253	},
1254},
1255};
1256
1257static const struct mips_perf_event loongson3_cache_map2
1258				[PERF_COUNT_HW_CACHE_MAX]
1259				[PERF_COUNT_HW_CACHE_OP_MAX]
1260				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1261[C(L1D)] = {
1262	/*
1263	 * Like some other architectures (e.g. ARM), the performance
1264	 * counters don't differentiate between read and write
1265	 * accesses/misses, so this isn't strictly correct, but it's the
1266	 * best we can do. Writes and reads get combined.
1267	 */
1268	[C(OP_READ)] = {
1269		[C(RESULT_ACCESS)]	= { 0x156, CNTR_ALL },
1270	},
1271	[C(OP_WRITE)] = {
1272		[C(RESULT_ACCESS)]	= { 0x155, CNTR_ALL },
1273		[C(RESULT_MISS)]        = { 0x153, CNTR_ALL },
1274	},
1275},
1276[C(L1I)] = {
1277	[C(OP_READ)] = {
1278		[C(RESULT_MISS)]	= { 0x18, CNTR_ALL },
1279	},
1280	[C(OP_WRITE)] = {
1281		[C(RESULT_MISS)]        = { 0x18, CNTR_ALL },
1282	},
1283},
1284[C(LL)] = {
1285	[C(OP_READ)] = {
1286		[C(RESULT_ACCESS)]	= { 0x1b6, CNTR_ALL },
1287	},
1288	[C(OP_WRITE)] = {
1289		[C(RESULT_ACCESS)]	= { 0x1b7, CNTR_ALL },
1290	},
1291	[C(OP_PREFETCH)] = {
1292		[C(RESULT_ACCESS)]	= { 0x1bf, CNTR_ALL },
1293	},
1294},
1295[C(DTLB)] = {
1296	[C(OP_READ)] = {
1297		[C(RESULT_MISS)]        = { 0x92, CNTR_ALL },
1298	},
1299	[C(OP_WRITE)] = {
1300		[C(RESULT_MISS)]        = { 0x92, CNTR_ALL },
1301	},
1302},
1303[C(ITLB)] = {
1304	[C(OP_READ)] = {
1305		[C(RESULT_MISS)]	= { 0x1a, CNTR_ALL },
1306	},
1307	[C(OP_WRITE)] = {
1308		[C(RESULT_MISS)]	= { 0x1a, CNTR_ALL },
1309	},
1310},
1311[C(BPU)] = {
1312	/* Using the same code for *HW_BRANCH* */
1313	[C(OP_READ)] = {
1314		[C(RESULT_ACCESS)]      = { 0x94, CNTR_ALL },
1315		[C(RESULT_MISS)]        = { 0x9c, CNTR_ALL },
1316	},
1317},
1318};
1319
1320static const struct mips_perf_event loongson3_cache_map3
1321				[PERF_COUNT_HW_CACHE_MAX]
1322				[PERF_COUNT_HW_CACHE_OP_MAX]
1323				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1324[C(L1D)] = {
1325	/*
1326	 * Like some other architectures (e.g. ARM), the performance
1327	 * counters don't differentiate between read and write
1328	 * accesses/misses, so this isn't strictly correct, but it's the
1329	 * best we can do. Writes and reads get combined.
1330	 */
1331	[C(OP_READ)] = {
1332		[C(RESULT_ACCESS)]      = { 0x1e, CNTR_ALL },
1333		[C(RESULT_MISS)]        = { 0x1f, CNTR_ALL },
1334	},
1335	[C(OP_PREFETCH)] = {
1336		[C(RESULT_ACCESS)]	= { 0xaa, CNTR_ALL },
1337		[C(RESULT_MISS)]	= { 0xa9, CNTR_ALL },
1338	},
1339},
1340[C(L1I)] = {
1341	[C(OP_READ)] = {
1342		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ALL },
1343		[C(RESULT_MISS)]	= { 0x1d, CNTR_ALL },
1344	},
1345},
1346[C(LL)] = {
1347	[C(OP_READ)] = {
1348		[C(RESULT_ACCESS)]	= { 0x2e, CNTR_ALL },
1349		[C(RESULT_MISS)]	= { 0x2f, CNTR_ALL },
1350	},
1351},
1352[C(DTLB)] = {
1353	[C(OP_READ)] = {
1354		[C(RESULT_ACCESS)]      = { 0x14, CNTR_ALL },
1355		[C(RESULT_MISS)]	= { 0x1b, CNTR_ALL },
1356	},
1357},
1358[C(ITLB)] = {
1359	[C(OP_READ)] = {
1360		[C(RESULT_MISS)]	= { 0x1a, CNTR_ALL },
1361	},
1362},
1363[C(BPU)] = {
1364	/* Using the same code for *HW_BRANCH* */
1365	[C(OP_READ)] = {
1366		[C(RESULT_ACCESS)]      = { 0x02, CNTR_ALL },
1367		[C(RESULT_MISS)]        = { 0x08, CNTR_ALL },
1368	},
1369},
1370};
1371
1372/* BMIPS5000 */
1373static const struct mips_perf_event bmips5000_cache_map
1374				[PERF_COUNT_HW_CACHE_MAX]
1375				[PERF_COUNT_HW_CACHE_OP_MAX]
1376				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1377[C(L1D)] = {
1378	/*
1379	 * Like some other architectures (e.g. ARM), the performance
1380	 * counters don't differentiate between read and write
1381	 * accesses/misses, so this isn't strictly correct, but it's the
1382	 * best we can do. Writes and reads get combined.
1383	 */
1384	[C(OP_READ)] = {
1385		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1386		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1387	},
1388	[C(OP_WRITE)] = {
1389		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1390		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1391	},
1392},
1393[C(L1I)] = {
1394	[C(OP_READ)] = {
1395		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1396		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1397	},
1398	[C(OP_WRITE)] = {
1399		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1400		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1401	},
1402	[C(OP_PREFETCH)] = {
1403		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
1404		/*
1405		 * Note that MIPS has only "hit" events countable for
1406		 * the prefetch operation.
1407		 */
1408	},
1409},
1410[C(LL)] = {
1411	[C(OP_READ)] = {
1412		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1413		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1414	},
1415	[C(OP_WRITE)] = {
1416		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1417		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1418	},
1419},
1420[C(BPU)] = {
1421	/* Using the same code for *HW_BRANCH* */
1422	[C(OP_READ)] = {
1423		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1424	},
1425	[C(OP_WRITE)] = {
1426		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1427	},
1428},
1429};
1430
1431static const struct mips_perf_event octeon_cache_map
1432				[PERF_COUNT_HW_CACHE_MAX]
1433				[PERF_COUNT_HW_CACHE_OP_MAX]
1434				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1435[C(L1D)] = {
1436	[C(OP_READ)] = {
1437		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
1438		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
1439	},
1440	[C(OP_WRITE)] = {
1441		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
1442	},
1443},
1444[C(L1I)] = {
1445	[C(OP_READ)] = {
1446		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
1447	},
1448	[C(OP_PREFETCH)] = {
1449		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
1450	},
1451},
1452[C(DTLB)] = {
1453	/*
 1454	 * Only general DTLB misses are counted; use the same event for
 1455	 * read and write.
1456	 */
1457	[C(OP_READ)] = {
1458		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1459	},
1460	[C(OP_WRITE)] = {
1461		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1462	},
1463},
1464[C(ITLB)] = {
1465	[C(OP_READ)] = {
1466		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
1467	},
1468},
1469};
1470
1471static int __hw_perf_event_init(struct perf_event *event)
1472{
1473	struct perf_event_attr *attr = &event->attr;
1474	struct hw_perf_event *hwc = &event->hw;
1475	const struct mips_perf_event *pev;
1476	int err;
1477
 1478	/* Return the MIPS event descriptor for a generic perf event. */
1479	if (PERF_TYPE_HARDWARE == event->attr.type) {
1480		if (event->attr.config >= PERF_COUNT_HW_MAX)
1481			return -EINVAL;
1482		pev = mipspmu_map_general_event(event->attr.config);
1483	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
1484		pev = mipspmu_map_cache_event(event->attr.config);
1485	} else if (PERF_TYPE_RAW == event->attr.type) {
1486		/* We are working on the global raw event. */
1487		mutex_lock(&raw_event_mutex);
1488		pev = mipspmu.map_raw_event(event->attr.config);
1489	} else {
1490		/* The event type is not (yet) supported. */
1491		return -EOPNOTSUPP;
1492	}
1493
1494	if (IS_ERR(pev)) {
1495		if (PERF_TYPE_RAW == event->attr.type)
1496			mutex_unlock(&raw_event_mutex);
1497		return PTR_ERR(pev);
1498	}
1499
1500	/*
 1501	 * We allow maximum flexibility in how each individual counter
 1502	 * shared by a single CPU operates (the mode exclusion and the range).
1503	 */
1504	hwc->config_base = MIPS_PERFCTRL_IE;
1505
1506	hwc->event_base = mipspmu_perf_event_encode(pev);
1507	if (PERF_TYPE_RAW == event->attr.type)
1508		mutex_unlock(&raw_event_mutex);
1509
1510	if (!attr->exclude_user)
1511		hwc->config_base |= MIPS_PERFCTRL_U;
1512	if (!attr->exclude_kernel) {
1513		hwc->config_base |= MIPS_PERFCTRL_K;
1514		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
1515		hwc->config_base |= MIPS_PERFCTRL_EXL;
1516	}
1517	if (!attr->exclude_hv)
1518		hwc->config_base |= MIPS_PERFCTRL_S;
1519
1520	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
1521	/*
1522	 * The event can belong to another cpu. We do not assign a local
1523	 * counter for it for now.
1524	 */
1525	hwc->idx = -1;
1526	hwc->config = 0;
1527
1528	if (!hwc->sample_period) {
1529		hwc->sample_period  = mipspmu.max_period;
1530		hwc->last_period    = hwc->sample_period;
1531		local64_set(&hwc->period_left, hwc->sample_period);
1532	}
1533
1534	err = 0;
1535	if (event->group_leader != event)
1536		err = validate_group(event);
1537
1538	event->destroy = hw_perf_event_destroy;
1539
1540	if (err)
1541		event->destroy(event);
1542
1543	return err;
1544}
1545
1546static void pause_local_counters(void)
1547{
1548	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1549	int ctr = mipspmu.num_counters;
1550	unsigned long flags;
1551
1552	local_irq_save(flags);
1553	do {
1554		ctr--;
1555		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
1556		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
1557					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1558	} while (ctr > 0);
1559	local_irq_restore(flags);
1560}
1561
1562static void resume_local_counters(void)
1563{
1564	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1565	int ctr = mipspmu.num_counters;
1566
1567	do {
1568		ctr--;
1569		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
1570	} while (ctr > 0);
1571}
1572
1573static int mipsxx_pmu_handle_shared_irq(void)
1574{
1575	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1576	struct perf_sample_data data;
1577	unsigned int counters = mipspmu.num_counters;
1578	u64 counter;
1579	int n, handled = IRQ_NONE;
1580	struct pt_regs *regs;
1581
1582	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
1583		return handled;
1584	/*
 1585	 * First we pause the local counters, so that by the time we block
 1586	 * on the lock here, the counters are all paused. While the lock is
 1587	 * held for perf_disable(), the timer interrupt handler is delayed.
1588	 *
1589	 * See also mipsxx_pmu_start().
1590	 */
1591	pause_local_counters();
1592#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1593	read_lock(&pmuint_rwlock);
1594#endif
1595
1596	regs = get_irq_regs();
1597
1598	perf_sample_data_init(&data, 0, 0);
1599
1600	for (n = counters - 1; n >= 0; n--) {
1601		if (!test_bit(n, cpuc->used_mask))
1602			continue;
1603
1604		counter = mipspmu.read_counter(n);
1605		if (!(counter & mipspmu.overflow))
1606			continue;
1607
1608		handle_associated_event(cpuc, n, &data, regs);
1609		handled = IRQ_HANDLED;
1610	}
1611
1612#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1613	read_unlock(&pmuint_rwlock);
1614#endif
1615	resume_local_counters();
1616
1617	/*
1618	 * Do all the work for the pending perf events. We can do this
1619	 * in here because the performance counter interrupt is a regular
 1620	 * interrupt, not an NMI.
1621	 */
1622	if (handled == IRQ_HANDLED)
1623		irq_work_run();
1624
1625	return handled;
1626}
1627
1628static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1629{
1630	return mipsxx_pmu_handle_shared_irq();
1631}
1632
1633/* 24K */
1634#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
1635	((b) == 0 || (b) == 1 || (b) == 11)
1636
1637/* 34K */
1638#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
1639	((b) == 0 || (b) == 1 || (b) == 11)
1640#ifdef CONFIG_MIPS_MT_SMP
1641#define IS_RANGE_P_34K_EVENT(r, b)					\
1642	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1643	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
1644	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
1645	 ((b) >= 64 && (b) <= 67))
1646#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
1647#endif
1648
1649/* 74K */
1650#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
1651	((b) == 0 || (b) == 1)
1652
1653/* proAptiv */
1654#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
1655	((b) == 0 || (b) == 1)
1656/* P5600 */
1657#define IS_BOTH_COUNTERS_P5600_EVENT(b)					\
1658	((b) == 0 || (b) == 1)
1659
1660/* 1004K */
1661#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
1662	((b) == 0 || (b) == 1 || (b) == 11)
1663#ifdef CONFIG_MIPS_MT_SMP
1664#define IS_RANGE_P_1004K_EVENT(r, b)					\
1665	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1666	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
1667	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
1668	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
1669	 ((b) >= 64 && (b) <= 67))
1670#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
1671#endif
1672
1673/* interAptiv */
1674#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
1675	((b) == 0 || (b) == 1 || (b) == 11)
1676#ifdef CONFIG_MIPS_MT_SMP
1677/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
1678#define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
1679	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1680	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
1681	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
1682	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
1683	 ((b) >= 64 && (b) <= 67))
1684#define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
1685#endif
1686
1687/* BMIPS5000 */
1688#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
1689	((b) == 0 || (b) == 1)
1690
1691
1692/*
 1693 * For most cores the user can use raw events 0-255, where 0-127 are for
 1694 * events on even counters and 128-255 for odd counters; bit 7 acts as
 1695 * the even/odd bank selector. So, for example, when the user wants event
 1696 * number 15 on an odd counter (by referring to the user manual), 128
 1697 * must be added to 15, i.e. 143 (0x8F) is the value to use for the event
 1698 * config.
 1699 *
 1700 * Some newer cores have even more events, in which case the user can use
 1701 * raw events 0-511, where 0-255 are for events on even counters and
 1702 * 256-511 for odd counters, with bit 8 as the even/odd bank selector.
1703 */
1704static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1705{
1706	/* currently most cores have 7-bit event numbers */
1707	int pmu_type;
1708	unsigned int raw_id = config & 0xff;
1709	unsigned int base_id = raw_id & 0x7f;
1710
1711	switch (current_cpu_type()) {
1712	case CPU_24K:
1713		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
1714			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1715		else
1716			raw_event.cntr_mask =
1717				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1718#ifdef CONFIG_MIPS_MT_SMP
1719		/*
 1720		 * This actually does nothing: non-multithreading
 1721		 * CPUs neither check nor use the range.
1722		 */
1723		raw_event.range = P;
1724#endif
1725		break;
1726	case CPU_34K:
1727		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
1728			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1729		else
1730			raw_event.cntr_mask =
1731				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1732#ifdef CONFIG_MIPS_MT_SMP
1733		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
1734			raw_event.range = P;
1735		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
1736			raw_event.range = V;
1737		else
1738			raw_event.range = T;
1739#endif
1740		break;
1741	case CPU_74K:
1742	case CPU_1074K:
1743		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
1744			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1745		else
1746			raw_event.cntr_mask =
1747				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1748#ifdef CONFIG_MIPS_MT_SMP
1749		raw_event.range = P;
1750#endif
1751		break;
1752	case CPU_PROAPTIV:
1753		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
1754			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1755		else
1756			raw_event.cntr_mask =
1757				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1758#ifdef CONFIG_MIPS_MT_SMP
1759		raw_event.range = P;
1760#endif
1761		break;
1762	case CPU_P5600:
1763	case CPU_P6600:
1764		/* 8-bit event numbers */
1765		raw_id = config & 0x1ff;
1766		base_id = raw_id & 0xff;
1767		if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
1768			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1769		else
1770			raw_event.cntr_mask =
1771				raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
1772#ifdef CONFIG_MIPS_MT_SMP
1773		raw_event.range = P;
1774#endif
1775		break;
1776	case CPU_I6400:
1777	case CPU_I6500:
1778		/* 8-bit event numbers */
1779		base_id = config & 0xff;
1780		raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1781		break;
1782	case CPU_1004K:
1783		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
1784			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1785		else
1786			raw_event.cntr_mask =
1787				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1788#ifdef CONFIG_MIPS_MT_SMP
1789		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
1790			raw_event.range = P;
1791		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
1792			raw_event.range = V;
1793		else
1794			raw_event.range = T;
1795#endif
1796		break;
1797	case CPU_INTERAPTIV:
1798		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
1799			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1800		else
1801			raw_event.cntr_mask =
1802				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1803#ifdef CONFIG_MIPS_MT_SMP
1804		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
1805			raw_event.range = P;
1806		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
1807			raw_event.range = V;
1808		else
1809			raw_event.range = T;
1810#endif
1811		break;
1812	case CPU_BMIPS5000:
1813		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
1814			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1815		else
1816			raw_event.cntr_mask =
1817				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1818		break;
1819	case CPU_LOONGSON64:
1820		pmu_type = get_loongson3_pmu_type();
1821
1822		switch (pmu_type) {
1823		case LOONGSON_PMU_TYPE1:
1824			raw_event.cntr_mask =
1825				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1826			break;
1827		case LOONGSON_PMU_TYPE2:
1828			base_id = config & 0x3ff;
1829			raw_event.cntr_mask = CNTR_ALL;
1830
1831			if ((base_id >= 1 && base_id < 28) ||
1832				(base_id >= 64 && base_id < 90) ||
1833				(base_id >= 128 && base_id < 164) ||
1834				(base_id >= 192 && base_id < 200) ||
1835				(base_id >= 256 && base_id < 275) ||
1836				(base_id >= 320 && base_id < 361) ||
1837				(base_id >= 384 && base_id < 574))
1838				break;
1839
1840			return ERR_PTR(-EOPNOTSUPP);
1841		case LOONGSON_PMU_TYPE3:
1842			base_id = raw_id;
1843			raw_event.cntr_mask = CNTR_ALL;
1844			break;
1845		}
1846		break;
1847	}
1848
1849	raw_event.event_id = base_id;
1850
1851	return &raw_event;
1852}
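/*
 * Usage sketch (editor's addition, not in the original source): on a
 * 24K, "perf stat -e r8F" requests raw event 0x8F == 143 == 128 + 15,
 * i.e. event number 15 restricted to the odd-counter bank, matching
 * the encoding described in the comment above.
 */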
1853
1854static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
1855{
1856	unsigned int base_id = config & 0x7f;
1857	unsigned int event_max;
1858
1859
1860	raw_event.cntr_mask = CNTR_ALL;
1861	raw_event.event_id = base_id;
1862
1863	if (current_cpu_type() == CPU_CAVIUM_OCTEON3)
1864		event_max = 0x5f;
1865	else if (current_cpu_type() == CPU_CAVIUM_OCTEON2)
1866		event_max = 0x42;
1867	else
1868		event_max = 0x3a;
1869
1870	if (base_id > event_max) {
1871		return ERR_PTR(-EOPNOTSUPP);
1872	}
1873
1874	switch (base_id) {
1875	case 0x00:
1876	case 0x0f:
1877	case 0x1e:
1878	case 0x1f:
1879	case 0x2f:
1880	case 0x34:
1881	case 0x3e ... 0x3f:
1882		return ERR_PTR(-EOPNOTSUPP);
1883	default:
1884		break;
1885	}
1886
1887	return &raw_event;
1888}
1889
1890static int __init
1891init_hw_perf_events(void)
1892{
1893	int counters, irq, pmu_type;
1894
1895	pr_info("Performance counters: ");
1896
1897	counters = n_counters();
1898	if (counters == 0) {
1899		pr_cont("No available PMU.\n");
1900		return -ENODEV;
1901	}
1902
1903#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1904	if (!cpu_has_mipsmt_pertccounters)
1905		counters = counters_total_to_per_cpu(counters);
1906#endif
1907
1908	if (get_c0_perfcount_int)
1909		irq = get_c0_perfcount_int();
1910	else if (cp0_perfcount_irq >= 0)
1911		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1912	else
1913		irq = -1;
1914
1915	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
1916
1917	switch (current_cpu_type()) {
1918	case CPU_24K:
1919		mipspmu.name = "mips/24K";
1920		mipspmu.general_event_map = &mipsxxcore_event_map;
1921		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1922		break;
1923	case CPU_34K:
1924		mipspmu.name = "mips/34K";
1925		mipspmu.general_event_map = &mipsxxcore_event_map;
1926		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1927		break;
1928	case CPU_74K:
1929		mipspmu.name = "mips/74K";
1930		mipspmu.general_event_map = &mipsxxcore_event_map2;
1931		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1932		break;
1933	case CPU_PROAPTIV:
1934		mipspmu.name = "mips/proAptiv";
1935		mipspmu.general_event_map = &mipsxxcore_event_map2;
1936		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1937		break;
1938	case CPU_P5600:
1939		mipspmu.name = "mips/P5600";
1940		mipspmu.general_event_map = &mipsxxcore_event_map2;
1941		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1942		break;
1943	case CPU_P6600:
1944		mipspmu.name = "mips/P6600";
1945		mipspmu.general_event_map = &mipsxxcore_event_map2;
1946		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1947		break;
1948	case CPU_I6400:
1949		mipspmu.name = "mips/I6400";
1950		mipspmu.general_event_map = &i6x00_event_map;
1951		mipspmu.cache_event_map = &i6x00_cache_map;
1952		break;
1953	case CPU_I6500:
1954		mipspmu.name = "mips/I6500";
1955		mipspmu.general_event_map = &i6x00_event_map;
1956		mipspmu.cache_event_map = &i6x00_cache_map;
1957		break;
1958	case CPU_1004K:
1959		mipspmu.name = "mips/1004K";
1960		mipspmu.general_event_map = &mipsxxcore_event_map;
1961		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1962		break;
1963	case CPU_1074K:
1964		mipspmu.name = "mips/1074K";
1965		mipspmu.general_event_map = &mipsxxcore_event_map;
1966		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1967		break;
1968	case CPU_INTERAPTIV:
1969		mipspmu.name = "mips/interAptiv";
1970		mipspmu.general_event_map = &mipsxxcore_event_map;
1971		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1972		break;
1973	case CPU_LOONGSON32:
1974		mipspmu.name = "mips/loongson1";
1975		mipspmu.general_event_map = &mipsxxcore_event_map;
1976		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1977		break;
1978	case CPU_LOONGSON64:
1979		mipspmu.name = "mips/loongson3";
1980		pmu_type = get_loongson3_pmu_type();
1981
1982		switch (pmu_type) {
1983		case LOONGSON_PMU_TYPE1:
1984			counters = 2;
1985			mipspmu.general_event_map = &loongson3_event_map1;
1986			mipspmu.cache_event_map = &loongson3_cache_map1;
1987			break;
1988		case LOONGSON_PMU_TYPE2:
1989			counters = 4;
1990			mipspmu.general_event_map = &loongson3_event_map2;
1991			mipspmu.cache_event_map = &loongson3_cache_map2;
1992			break;
1993		case LOONGSON_PMU_TYPE3:
1994			counters = 4;
1995			mipspmu.general_event_map = &loongson3_event_map3;
1996			mipspmu.cache_event_map = &loongson3_cache_map3;
1997			break;
1998		}
1999		break;
2000	case CPU_CAVIUM_OCTEON:
2001	case CPU_CAVIUM_OCTEON_PLUS:
2002	case CPU_CAVIUM_OCTEON2:
2003	case CPU_CAVIUM_OCTEON3:
2004		mipspmu.name = "octeon";
2005		mipspmu.general_event_map = &octeon_event_map;
2006		mipspmu.cache_event_map = &octeon_cache_map;
2007		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
2008		break;
2009	case CPU_BMIPS5000:
2010		mipspmu.name = "BMIPS5000";
2011		mipspmu.general_event_map = &bmips5000_event_map;
2012		mipspmu.cache_event_map = &bmips5000_cache_map;
2013		break;
2014	default:
2015		pr_cont("Either the hardware does not support performance "
2016			"counters, or support is not yet implemented.\n");
2017		return -ENODEV;
2018	}
2019
2020	mipspmu.num_counters = counters;
2021	mipspmu.irq = irq;
2022
2023	if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
2024		if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
2025			counter_bits = 48;
2026			mipspmu.max_period = (1ULL << 47) - 1;
2027			mipspmu.valid_count = (1ULL << 47) - 1;
2028			mipspmu.overflow = 1ULL << 47;
2029		} else {
2030			counter_bits = 64;
2031			mipspmu.max_period = (1ULL << 63) - 1;
2032			mipspmu.valid_count = (1ULL << 63) - 1;
2033			mipspmu.overflow = 1ULL << 63;
2034		}
2035		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
2036		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
2037	} else {
2038		counter_bits = 32;
2039		mipspmu.max_period = (1ULL << 31) - 1;
2040		mipspmu.valid_count = (1ULL << 31) - 1;
2041		mipspmu.overflow = 1ULL << 31;
2042		mipspmu.read_counter = mipsxx_pmu_read_counter;
2043		mipspmu.write_counter = mipsxx_pmu_write_counter;
2044	}
2045
2046	on_each_cpu(reset_counters, (void *)(long)counters, 1);
2047
2048	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
2049		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
2050		irq < 0 ? " (shared with timer interrupt)" : "");
2051
2052	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
2053
2054	return 0;
2055}
2056early_initcall(init_hw_perf_events);
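/*
 * Illustrative sketch (not part of the kernel file): the three
 * width-dependent blocks above follow one rule -- for an n-bit
 * up-counter whose top bit flags overflow, the usable period is
 * whatever fits in the low n-1 bits:
 */
#if 0
static u64 pmu_max_period(int bits)	{ return (1ULL << (bits - 1)) - 1; }
static u64 pmu_overflow_bit(int bits)	{ return 1ULL << (bits - 1); }
/* bits == 32: max_period 0x7fffffff, overflow 0x80000000 */
/* bits == 48: max_period (1ULL << 47) - 1, overflow 1ULL << 47 */
#endif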
v3.1
   1#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \
   2    defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1)
   3
   4#define M_CONFIG1_PC	(1 << 4)
   5
   6#define M_PERFCTL_EXL			(1UL      <<  0)
   7#define M_PERFCTL_KERNEL		(1UL      <<  1)
   8#define M_PERFCTL_SUPERVISOR		(1UL      <<  2)
   9#define M_PERFCTL_USER			(1UL      <<  3)
  10#define M_PERFCTL_INTERRUPT_ENABLE	(1UL      <<  4)
  11#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff)  << 5)
  12#define M_PERFCTL_VPEID(vpe)		((vpe)    << 16)
  13#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
  14#define    M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
  15#define    M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
  16#define    M_TC_EN_TC			M_PERFCTL_MT_EN(2)
  17#define M_PERFCTL_TCID(tcid)		((tcid)   << 22)
  18#define M_PERFCTL_WIDE			(1UL      << 30)
  19#define M_PERFCTL_MORE			(1UL      << 31)
  20
  21#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
  22					M_PERFCTL_KERNEL |		\
  23					M_PERFCTL_USER |		\
  24					M_PERFCTL_SUPERVISOR |		\
  25					M_PERFCTL_INTERRUPT_ENABLE)
  26
  27#ifdef CONFIG_MIPS_MT_SMP
  28#define M_PERFCTL_CONFIG_MASK		0x3fff801f
  29#else
  30#define M_PERFCTL_CONFIG_MASK		0x1f
  31#endif
  32#define M_PERFCTL_EVENT_MASK		0xfe0
  33
  34#define M_COUNTER_OVERFLOW		(1UL      << 31)
  35
  36#ifdef CONFIG_MIPS_MT_SMP
  37static int cpu_has_mipsmt_pertccounters;
  38
  39/*
  40 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
  41 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
  42 */
  43#if defined(CONFIG_HW_PERF_EVENTS)
  44#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
  45			0 : smp_processor_id())
  46#else
  47#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
  48			0 : cpu_data[smp_processor_id()].vpe_id)
  49#endif
  50
  51/* Copied from op_model_mipsxx.c */
  52static inline unsigned int vpe_shift(void)
  53{
  54	if (num_possible_cpus() > 1)
  55		return 1;
  56
  57	return 0;
  58}
  59#else /* !CONFIG_MIPS_MT_SMP */
  60#define vpe_id()	0
  61
  62static inline unsigned int vpe_shift(void)
  63{
  64	return 0;
  65}
  66#endif /* CONFIG_MIPS_MT_SMP */
  67
  68static inline unsigned int
  69counters_total_to_per_cpu(unsigned int counters)
  70{
  71	return counters >> vpe_shift();
  72}
  73
  74static inline unsigned int
  75counters_per_cpu_to_total(unsigned int counters)
  76{
  77	return counters << vpe_shift();
  78}
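/*
 * Illustrative example (not from the sources): on a dual-VPE core
 * vpe_shift() is 1, so four shared counters split evenly --
 * counters_total_to_per_cpu(4) == 2, counters_per_cpu_to_total(2) == 4.
 */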
  79
  80#define __define_perf_accessors(r, n, np)				\
  81									\
  82static inline unsigned int r_c0_ ## r ## n(void)			\
  83{									\
  84	unsigned int cpu = vpe_id();					\
  85									\
  86	switch (cpu) {							\
  87	case 0:								\
  88		return read_c0_ ## r ## n();				\
  89	case 1:								\
  90		return read_c0_ ## r ## np();				\
  91	default:							\
  92		BUG();							\
  93	}								\
  94	return 0;							\
  95}									\
  96									\
  97static inline void w_c0_ ## r ## n(unsigned int value)			\
  98{									\
  99	unsigned int cpu = vpe_id();					\
 100									\
 101	switch (cpu) {							\
 102	case 0:								\
 103		write_c0_ ## r ## n(value);				\
 104		return;							\
 105	case 1:								\
 106		write_c0_ ## r ## np(value);				\
 107		return;							\
 108	default:							\
 109		BUG();							\
 110	}								\
 111	return;								\
 112}									\
 113
 114__define_perf_accessors(perfcntr, 0, 2)
 115__define_perf_accessors(perfcntr, 1, 3)
 116__define_perf_accessors(perfcntr, 2, 0)
 117__define_perf_accessors(perfcntr, 3, 1)
 118
 119__define_perf_accessors(perfctrl, 0, 2)
 120__define_perf_accessors(perfctrl, 1, 3)
 121__define_perf_accessors(perfctrl, 2, 0)
 122__define_perf_accessors(perfctrl, 3, 1)
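/*
 * Illustrative note (not from the sources): each invocation pairs a
 * logical counter with its sibling-VPE register, so r_c0_perfcntr0()
 * reads perfcntr0 on VPE 0 but perfcntr2 on VPE 1, and w_c0_perfctrl1()
 * writes perfctrl1 on VPE 0 but perfctrl3 on VPE 1.
 */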
 123
 124static inline int __n_counters(void)
 125{
 126	if (!(read_c0_config1() & M_CONFIG1_PC))
 127		return 0;
 128	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
 129		return 1;
 130	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
 131		return 2;
 132	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
 133		return 3;
 134
 135	return 4;
 136}
 137
 138static inline int n_counters(void)
 139{
 140	int counters;
 141
 142	switch (current_cpu_type()) {
 143	case CPU_R10000:
 144		counters = 2;
 145		break;
 146
 147	case CPU_R12000:
 148	case CPU_R14000:
 149		counters = 4;
 150		break;
 151
 152	default:
 153		counters = __n_counters();
 154	}
 155
 156	return counters;
 157}
 158
 159static void reset_counters(void *arg)
 160{
 161	int counters = (int)(long)arg;
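	/* Deliberate fall-through: counter N and every counter below it are reset. */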
 162	switch (counters) {
 163	case 4:
 164		w_c0_perfctrl3(0);
 165		w_c0_perfcntr3(0);
 166	case 3:
 167		w_c0_perfctrl2(0);
 168		w_c0_perfcntr2(0);
 169	case 2:
 170		w_c0_perfctrl1(0);
 171		w_c0_perfcntr1(0);
 172	case 1:
 173		w_c0_perfctrl0(0);
 174		w_c0_perfcntr0(0);
 175	}
 176}
 177
 178static inline u64
 179mipsxx_pmu_read_counter(unsigned int idx)
 180{
 181	switch (idx) {
 182	case 0:
 183		return r_c0_perfcntr0();
 184	case 1:
 185		return r_c0_perfcntr1();
 186	case 2:
 187		return r_c0_perfcntr2();
 188	case 3:
 189		return r_c0_perfcntr3();
 190	default:
 191		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
 192		return 0;
 193	}
 194}
 195
 196static inline void
 197mipsxx_pmu_write_counter(unsigned int idx, u64 val)
 198{
 199	switch (idx) {
 200	case 0:
 201		w_c0_perfcntr0(val);
 202		return;
 203	case 1:
 204		w_c0_perfcntr1(val);
 205		return;
 206	case 2:
 207		w_c0_perfcntr2(val);
 208		return;
 209	case 3:
 210		w_c0_perfcntr3(val);
 211		return;
 212	}
 213}
 214
 215static inline unsigned int
 216mipsxx_pmu_read_control(unsigned int idx)
 217{
 218	switch (idx) {
 219	case 0:
 220		return r_c0_perfctrl0();
 221	case 1:
 222		return r_c0_perfctrl1();
 223	case 2:
 224		return r_c0_perfctrl2();
 225	case 3:
 226		return r_c0_perfctrl3();
 227	default:
 228		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
 229		return 0;
 230	}
 231}
 232
 233static inline void
 234mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
 235{
 236	switch (idx) {
 237	case 0:
 238		w_c0_perfctrl0(val);
 239		return;
 240	case 1:
 241		w_c0_perfctrl1(val);
 242		return;
 243	case 2:
 244		w_c0_perfctrl2(val);
 245		return;
 246	case 3:
 247		w_c0_perfctrl3(val);
 248		return;
 249	}
 250}
 251
 252#ifdef CONFIG_MIPS_MT_SMP
 253static DEFINE_RWLOCK(pmuint_rwlock);
 254#endif
 255
 256/* 24K/34K/1004K cores can share the same event map. */
 257static const struct mips_perf_event mipsxxcore_event_map
 258				[PERF_COUNT_HW_MAX] = {
 259	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
 260	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
 261	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
 262	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
 263	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
 264	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
 265	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
 266};
 267
 268/* The 74K core has a different branch event code. */
 269static const struct mips_perf_event mipsxx74Kcore_event_map
 270				[PERF_COUNT_HW_MAX] = {
 271	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
 272	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
 273	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
 274	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
 275	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
 276	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
 277	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
 278};
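/*
 * Illustrative reading of the two maps above (not from the sources):
 * the same generic event resolves to a different hardware event number
 * per core family, e.g. PERF_COUNT_HW_BRANCH_MISSES is event 0x02 on
 * the odd counters for 24K/34K/1004K but event 0x27 on the 74K.
 */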
 279
 280/* 24K/34K/1004K cores can share the same cache event map. */
 281static const struct mips_perf_event mipsxxcore_cache_map
 282				[PERF_COUNT_HW_CACHE_MAX]
 283				[PERF_COUNT_HW_CACHE_OP_MAX]
 284				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 285[C(L1D)] = {
 286	/*
 287	 * Like some other architectures (e.g. ARM), the performance
 288	 * counters don't differentiate between read and write
 289	 * accesses/misses, so this isn't strictly correct, but it's the
 290	 * best we can do. Writes and reads get combined.
 291	 */
 292	[C(OP_READ)] = {
 293		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
 294		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
 295	},
 296	[C(OP_WRITE)] = {
 297		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
 298		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
 299	},
 300	[C(OP_PREFETCH)] = {
 301		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 302		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 303	},
 304},
 305[C(L1I)] = {
 306	[C(OP_READ)] = {
 307		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
 308		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
 309	},
 310	[C(OP_WRITE)] = {
 311		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
 312		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
 313	},
 314	[C(OP_PREFETCH)] = {
 315		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
 316		/*
 317		 * Note that MIPS has only "hit" events countable for
 318		 * the prefetch operation.
 319		 */
 320		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 321	},
 322},
 323[C(LL)] = {
 324	[C(OP_READ)] = {
 325		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
 326		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
 327	},
 328	[C(OP_WRITE)] = {
 329		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
 330		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
 331	},
 332	[C(OP_PREFETCH)] = {
 333		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 334		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 335	},
 336},
 337[C(DTLB)] = {
 338	[C(OP_READ)] = {
 339		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
 340		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
 341	},
 342	[C(OP_WRITE)] = {
 343		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
 344		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
 345	},
 346	[C(OP_PREFETCH)] = {
 347		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 348		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 349	},
 350},
 351[C(ITLB)] = {
 352	[C(OP_READ)] = {
 353		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
 354		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
 355	},
 356	[C(OP_WRITE)] = {
 357		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
 358		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
 359	},
 360	[C(OP_PREFETCH)] = {
 361		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 362		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 363	},
 364},
 365[C(BPU)] = {
 366	/* Using the same code for *HW_BRANCH* */
 367	[C(OP_READ)] = {
 368		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
 369		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
 370	},
 371	[C(OP_WRITE)] = {
 372		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
 373		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
 374	},
 375	[C(OP_PREFETCH)] = {
 376		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 377		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 378	},
 379},
 380[C(NODE)] = {
 381	[C(OP_READ)] = {
 382		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 383		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 384	},
 385	[C(OP_WRITE)] = {
 386		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 387		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 388	},
 389	[C(OP_PREFETCH)] = {
 390		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 391		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 392	},
 393},
 394};
 395
 396/* The 74K core has a completely different cache event map. */
 397static const struct mips_perf_event mipsxx74Kcore_cache_map
 398				[PERF_COUNT_HW_CACHE_MAX]
 399				[PERF_COUNT_HW_CACHE_OP_MAX]
 400				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
 401[C(L1D)] = {
 402	/*
 403	 * Like some other architectures (e.g. ARM), the performance
 404	 * counters don't differentiate between read and write
 405	 * accesses/misses, so this isn't strictly correct, but it's the
 406	 * best we can do. Writes and reads get combined.
 407	 */
 408	[C(OP_READ)] = {
 409		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
 410		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
 411	},
 412	[C(OP_WRITE)] = {
 413		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
 414		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
 415	},
 416	[C(OP_PREFETCH)] = {
 417		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 418		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 419	},
 420},
 421[C(L1I)] = {
 422	[C(OP_READ)] = {
 423		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
 424		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
 425	},
 426	[C(OP_WRITE)] = {
 427		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
 428		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
 429	},
 430	[C(OP_PREFETCH)] = {
 431		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
 432		/*
 433		 * Note that MIPS has only "hit" events countable for
 434		 * the prefetch operation.
 435		 */
 436		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 437	},
 438},
 439[C(LL)] = {
 440	[C(OP_READ)] = {
 441		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
 442		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
 443	},
 444	[C(OP_WRITE)] = {
 445		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
 446		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
 447	},
 448	[C(OP_PREFETCH)] = {
 449		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 450		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 451	},
 452},
 453[C(DTLB)] = {
 454	/* 74K core does not have specific DTLB events. */
 455	[C(OP_READ)] = {
 456		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 457		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 458	},
 459	[C(OP_WRITE)] = {
 460		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 461		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 462	},
 463	[C(OP_PREFETCH)] = {
 464		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 465		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 466	},
 467},
 468[C(ITLB)] = {
 469	[C(OP_READ)] = {
 470		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
 471		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
 472	},
 473	[C(OP_WRITE)] = {
 474		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
 475		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
 476	},
 477	[C(OP_PREFETCH)] = {
 478		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 479		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 480	},
 481},
 482[C(BPU)] = {
 483	/* Using the same code for *HW_BRANCH* */
 484	[C(OP_READ)] = {
 485		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
 486		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
 487	},
 488	[C(OP_WRITE)] = {
 489		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
 490		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
 491	},
 492	[C(OP_PREFETCH)] = {
 493		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 494		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 495	},
 496},
 497[C(NODE)] = {
 498	[C(OP_READ)] = {
 499		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 500		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 501	},
 502	[C(OP_WRITE)] = {
 503		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 504		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 505	},
 506	[C(OP_PREFETCH)] = {
 507		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 508		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
 509	},
 510},
 511};
 512
 513#ifdef CONFIG_MIPS_MT_SMP
 514static void
 515check_and_calc_range(struct perf_event *event,
 516			const struct mips_perf_event *pev)
 517{
 518	struct hw_perf_event *hwc = &event->hw;
 519
 520	if (event->cpu >= 0) {
 521		if (pev->range > V) {
 522			/*
 523			 * The user selected an event that is processor
 524			 * wide, while expecting it to be VPE wide.
 525			 */
 526			hwc->config_base |= M_TC_EN_ALL;
 527		} else {
 528			/*
 529			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
 530			 * for both CPUs.
 531			 */
 532			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
 533			hwc->config_base |= M_TC_EN_VPE;
 534		}
 535	} else
 536		hwc->config_base |= M_TC_EN_ALL;
 537}
 538#else
 539static void
 540check_and_calc_range(struct perf_event *event,
 541			const struct mips_perf_event *pev)
 542{
 543}
 544#endif
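/*
 * Illustrative summary (not from the sources): for a CPU-bound event,
 * a processor-wide (range P) event counts across all TCs via
 * M_TC_EN_ALL, while a narrower range is pinned to its VPE via
 * M_PERFCTL_VPEID() plus M_TC_EN_VPE; events not bound to a CPU
 * always count across all TCs.
 */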
 545
 546static int __hw_perf_event_init(struct perf_event *event)
 547{
 548	struct perf_event_attr *attr = &event->attr;
 549	struct hw_perf_event *hwc = &event->hw;
 550	const struct mips_perf_event *pev;
 551	int err;
 552
 553	/* Return the MIPS event descriptor for a generic perf event. */
 554	if (PERF_TYPE_HARDWARE == event->attr.type) {
 555		if (event->attr.config >= PERF_COUNT_HW_MAX)
 556			return -EINVAL;
 557		pev = mipspmu_map_general_event(event->attr.config);
 558	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
 559		pev = mipspmu_map_cache_event(event->attr.config);
 560	} else if (PERF_TYPE_RAW == event->attr.type) {
 561		/* We are working on the global raw event. */
 562		mutex_lock(&raw_event_mutex);
 563		pev = mipspmu->map_raw_event(event->attr.config);
 564	} else {
 565		/* The event type is not (yet) supported. */
 566		return -EOPNOTSUPP;
 567	}
 568
 569	if (IS_ERR(pev)) {
 570		if (PERF_TYPE_RAW == event->attr.type)
 571			mutex_unlock(&raw_event_mutex);
 572		return PTR_ERR(pev);
 573	}
 574
 575	/*
 576	 * We allow maximum flexibility in how each individual counter
 577	 * shared by a single CPU operates (mode exclusion and range).
 578	 */
 579	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
 580
 581	/* Calculate range bits and validate it. */
 582	if (num_possible_cpus() > 1)
 583		check_and_calc_range(event, pev);
 584
 585	hwc->event_base = mipspmu_perf_event_encode(pev);
 586	if (PERF_TYPE_RAW == event->attr.type)
 587		mutex_unlock(&raw_event_mutex);
 588
 589	if (!attr->exclude_user)
 590		hwc->config_base |= M_PERFCTL_USER;
 591	if (!attr->exclude_kernel) {
 592		hwc->config_base |= M_PERFCTL_KERNEL;
 593		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
 594		hwc->config_base |= M_PERFCTL_EXL;
 595	}
 596	if (!attr->exclude_hv)
 597		hwc->config_base |= M_PERFCTL_SUPERVISOR;
 598
 599	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
 600	/*
 601	 * The event can belong to another cpu. We do not assign a local
 602	 * counter for it for now.
 603	 */
 604	hwc->idx = -1;
 605	hwc->config = 0;
 606
 607	if (!hwc->sample_period) {
 608		hwc->sample_period  = MAX_PERIOD;
 609		hwc->last_period    = hwc->sample_period;
 610		local64_set(&hwc->period_left, hwc->sample_period);
 611	}
 612
 613	err = 0;
 614	if (event->group_leader != event) {
 615		err = validate_group(event);
 616		if (err)
 617			return -EINVAL;
 618	}
 619
 620	event->destroy = hw_perf_event_destroy;
 621
 622	return err;
 623}
 624
 625static void pause_local_counters(void)
 626{
 627	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 628	int counters = mipspmu->num_counters;
 629	unsigned long flags;
 630
 631	local_irq_save(flags);
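	/* Deliberate fall-through: counter N and every counter below it are paused. */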
 632	switch (counters) {
 633	case 4:
 634		cpuc->saved_ctrl[3] = r_c0_perfctrl3();
 635		w_c0_perfctrl3(cpuc->saved_ctrl[3] &
 636			~M_PERFCTL_COUNT_EVENT_WHENEVER);
 637	case 3:
 638		cpuc->saved_ctrl[2] = r_c0_perfctrl2();
 639		w_c0_perfctrl2(cpuc->saved_ctrl[2] &
 640			~M_PERFCTL_COUNT_EVENT_WHENEVER);
 641	case 2:
 642		cpuc->saved_ctrl[1] = r_c0_perfctrl1();
 643		w_c0_perfctrl1(cpuc->saved_ctrl[1] &
 644			~M_PERFCTL_COUNT_EVENT_WHENEVER);
 645	case 1:
 646		cpuc->saved_ctrl[0] = r_c0_perfctrl0();
 647		w_c0_perfctrl0(cpuc->saved_ctrl[0] &
 648			~M_PERFCTL_COUNT_EVENT_WHENEVER);
 649	}
 650	local_irq_restore(flags);
 651}
 652
 653static void resume_local_counters(void)
 654{
 655	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 656	int counters = mipspmu->num_counters;
 657	unsigned long flags;
 658
 659	local_irq_save(flags);
 660	switch (counters) {
 661	case 4:
 662		w_c0_perfctrl3(cpuc->saved_ctrl[3]);
 663	case 3:
 664		w_c0_perfctrl2(cpuc->saved_ctrl[2]);
 665	case 2:
 666		w_c0_perfctrl1(cpuc->saved_ctrl[1]);
 667	case 1:
 668		w_c0_perfctrl0(cpuc->saved_ctrl[0]);
 669	}
 670	local_irq_restore(flags);
 671}
 672
 673static int mipsxx_pmu_handle_shared_irq(void)
 674{
 675	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 676	struct perf_sample_data data;
 677	unsigned int counters = mipspmu->num_counters;
 678	unsigned int counter;
 679	int handled = IRQ_NONE;
 680	struct pt_regs *regs;
 681
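	/* On R2 cores, Cause bit 26 (CAUSEF_PCI) flags a pending counter interrupt. */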
 682	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
 683		return handled;
 684
 685	/*
 686	 * First pause the local counters, so that if we spin on the
 687	 * lock here the counters are already paused. While the lock is
 688	 * held for perf_disable(), the timer interrupt handler is delayed.
 689	 *
 690	 * See also mipsxx_pmu_start().
 691	 */
 692	pause_local_counters();
 693#ifdef CONFIG_MIPS_MT_SMP
 694	read_lock(&pmuint_rwlock);
 695#endif
 696
 697	regs = get_irq_regs();
 698
 699	perf_sample_data_init(&data, 0);
 700
 701	switch (counters) {
 702#define HANDLE_COUNTER(n)						\
 703	case n + 1:							\
 704		if (test_bit(n, cpuc->used_mask)) {			\
 705			counter = r_c0_perfcntr ## n();			\
 706			if (counter & M_COUNTER_OVERFLOW) {		\
 707				w_c0_perfcntr ## n(counter &		\
 708						VALID_COUNT);		\
 709				if (test_and_change_bit(n, cpuc->msbs))	\
 710					handle_associated_event(cpuc,	\
 711						n, &data, regs);	\
 712				handled = IRQ_HANDLED;			\
 713			}						\
 714		}
 715	HANDLE_COUNTER(3)
 716	HANDLE_COUNTER(2)
 717	HANDLE_COUNTER(1)
 718	HANDLE_COUNTER(0)
 719	}
 720
 721	/*
 722	 * Do all the work for the pending perf events. We can do this
 723	 * in here because the performance counter interrupt is a regular
 724	 * interrupt, not an NMI.
 725	 */
 726	if (handled == IRQ_HANDLED)
 727		irq_work_run();
 728
 729#ifdef CONFIG_MIPS_MT_SMP
 730	read_unlock(&pmuint_rwlock);
 731#endif
 732	resume_local_counters();
 733	return handled;
 734}
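/*
 * Illustrative sketch (not from the sources): with bit 31 as the
 * overflow flag, a sampling period is typically armed by seeding the
 * counter just below the flag bit, so the bit sets after 'period'
 * events:
 */
#if 0
static void arm_counter0(u32 period)
{
	/* counts up; bit 31 (M_COUNTER_OVERFLOW) sets after 'period' events */
	w_c0_perfcntr0(0x80000000 - period);
}
#endif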
 735
 736static irqreturn_t
 737mipsxx_pmu_handle_irq(int irq, void *dev)
 738{
 739	return mipsxx_pmu_handle_shared_irq();
 740}
 741
 742static void mipsxx_pmu_start(void)
 743{
 744#ifdef CONFIG_MIPS_MT_SMP
 745	write_unlock(&pmuint_rwlock);
 746#endif
 747	resume_local_counters();
 748}
 749
 750/*
 751 * MIPS performance counters can be per-TC, and the control registers
 752 * cannot be directly accessed across CPUs, so global control would
 753 * need cross-CPU calls. on_each_cpu() could help, but we cannot
 754 * guarantee this function is called with interrupts enabled. So
 755 * instead we pause the local counters, grab a rwlock, and leave the
 756 * counters on other CPUs alone. If a counter interrupt is raised
 757 * while we hold the write lock, that CPU simply pauses its local
 758 * counters and spins in the handler. Note also that we cannot be
 759 * migrated to another CPU between pausing local counters and grabbing the lock.
 760 */
 761static void mipsxx_pmu_stop(void)
 762{
 763	pause_local_counters();
 764#ifdef CONFIG_MIPS_MT_SMP
 765	write_lock(&pmuint_rwlock);
 766#endif
 767}
 768
 769static int
 770mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
 771			struct hw_perf_event *hwc)
 772{
 773	int i;
 774
 775	/*
 776	 * We only need to check the counter mask here; the range has
 777	 * already been validated.
 778	 */
 779	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
 780
 781	for (i = mipspmu->num_counters - 1; i >= 0; i--) {
 782		/*
 783		 * Note that some MIPS perf events can be counted by both
 784		 * even and odd counters, whereas many others can use only
 785		 * even _or_ odd counters. This means that when an event of
 786		 * the former kind occupies the counter that an event of
 787		 * the latter kind needs, the "counter allocation" for the
 788		 * latter event fails. Dynamically swapping the two would
 789		 * let both succeed, but we leave this issue alone
 790		 * for now.
 791		 */
 792		if (test_bit(i, &cntr_mask) &&
 793			!test_and_set_bit(i, cpuc->used_mask))
 794			return i;
 795	}
 796
 797	return -EAGAIN;
 798}
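/*
 * Illustrative note (not from the sources): bits [23:8] of event_base
 * carry the counter mask, so after the shift CNTR_EVEN leaves bits 0
 * and 2 set on a four-counter core and CNTR_ODD bits 1 and 3; the
 * loop above then hands out the highest-numbered free counter first.
 */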
 799
 800static void
 801mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
 802{
 803	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 804	unsigned long flags;
 805
 806	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
 807
 808	local_irq_save(flags);
 809	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
 810		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
 811		/* Make sure the interrupt is enabled. */
 812		M_PERFCTL_INTERRUPT_ENABLE;
 813	/*
 814	 * We do not actually let the counter run. Leave it until start().
 815	 */
 816	local_irq_restore(flags);
 817}
 818
 819static void
 820mipsxx_pmu_disable_event(int idx)
 821{
 822	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 823	unsigned long flags;
 824
 825	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
 826
 827	local_irq_save(flags);
 828	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
 829		~M_PERFCTL_COUNT_EVENT_WHENEVER;
 830	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
 831	local_irq_restore(flags);
 832}
 833
 834/* 24K */
 835#define IS_UNSUPPORTED_24K_EVENT(r, b)					\
 836	((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 ||		\
 837	 (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 ||		\
 838	 (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 ||		\
 839	 (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) ||		\
 840	 ((b) >= 68 && (b) <= 127))
 841#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
 842	((b) == 0 || (b) == 1 || (b) == 11)
 843
 844/* 34K */
 845#define IS_UNSUPPORTED_34K_EVENT(r, b)					\
 846	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 ||		\
 847	 (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) ||		\
 848	 ((b) >= 68 && (b) <= 127))
 849#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
 850	((b) == 0 || (b) == 1 || (b) == 11)
 851#ifdef CONFIG_MIPS_MT_SMP
 852#define IS_RANGE_P_34K_EVENT(r, b)					\
 853	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
 854	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
 855	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
 856	 ((b) >= 64 && (b) <= 67))
 857#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
 858#endif
 859
 860/* 74K */
 861#define IS_UNSUPPORTED_74K_EVENT(r, b)					\
 862	((r) == 5 || ((r) >= 135 && (r) <= 137) ||			\
 863	 ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 ||		\
 864	 (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) ||		\
 865	 (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 ||		\
 866	 (b) == 61 || (r) == 62 || (r) == 191 ||			\
 867	 ((b) >= 64 && (b) <= 127))
 868#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
 869	((b) == 0 || (b) == 1)
 870
 871/* 1004K */
 872#define IS_UNSUPPORTED_1004K_EVENT(r, b)				\
 873	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 ||		\
 874	 (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
 875#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
 876	((b) == 0 || (b) == 1 || (b) == 11)
 877#ifdef CONFIG_MIPS_MT_SMP
 878#define IS_RANGE_P_1004K_EVENT(r, b)					\
 879	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
 880	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
 881	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
 882	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
 883	 ((b) >= 64 && (b) <= 67))
 884#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
 885#endif
 886
 887/*
 888 * Users can specify raw events 0-255: 0-127 select events on the
 889 * even counters, and 128-255 select events on the odd counters.
 890 * Bit 7 encodes the parity. So, for example, when the user wants
 891 * Event Num 15 (from the user manual) counted on the odd counters,
 892 * 128 must be added to 15 as the event config input,
 893 * i.e., 143 (0x8F) is used.
 894 */
 895static const struct mips_perf_event *
 896mipsxx_pmu_map_raw_event(u64 config)
 897{
 898	unsigned int raw_id = config & 0xff;
 899	unsigned int base_id = raw_id & 0x7f;
 900
 901	switch (current_cpu_type()) {
 902	case CPU_24K:
 903		if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
 904			return ERR_PTR(-EOPNOTSUPP);
 905		raw_event.event_id = base_id;
 906		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
 907			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
 908		else
 909			raw_event.cntr_mask =
 910				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
 911#ifdef CONFIG_MIPS_MT_SMP
 912		/*
 913		 * This actually does nothing here: non-multithreading
 914		 * CPUs never check or calculate the range.
 915		 */
 916		raw_event.range = P;
 917#endif
 918		break;
 919	case CPU_34K:
 920		if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
 921			return ERR_PTR(-EOPNOTSUPP);
 922		raw_event.event_id = base_id;
 923		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
 924			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
 925		else
 926			raw_event.cntr_mask =
 927				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
 928#ifdef CONFIG_MIPS_MT_SMP
 929		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
 930			raw_event.range = P;
 931		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
 932			raw_event.range = V;
 933		else
 934			raw_event.range = T;
 935#endif
 936		break;
 937	case CPU_74K:
 938		if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
 939			return ERR_PTR(-EOPNOTSUPP);
 940		raw_event.event_id = base_id;
 941		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
 942			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
 943		else
 944			raw_event.cntr_mask =
 945				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
 946#ifdef CONFIG_MIPS_MT_SMP
 947		raw_event.range = P;
 948#endif
 949		break;
 950	case CPU_1004K:
 951		if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
 952			return ERR_PTR(-EOPNOTSUPP);
 953		raw_event.event_id = base_id;
 954		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
 955			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
 956		else
 957			raw_event.cntr_mask =
 958				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
 959#ifdef CONFIG_MIPS_MT_SMP
 960		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
 961			raw_event.range = P;
 962		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
 963			raw_event.range = V;
 964		else
 965			raw_event.range = T;
 966#endif
 967		break;
 968	}
 969
 970	return &raw_event;
 971}
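/*
 * Illustrative userspace sketch (not part of this file), assuming the
 * parity encoding described above: count event 15 on the odd counters
 * of, say, a 34K. perf_event_open() has no glibc wrapper, hence the
 * raw syscall.
 */
#if 0
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_odd_event15(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x8f;		/* 15 | 0x80: odd counters */
	attr.disabled = 1;
	attr.exclude_kernel = 1;	/* drops M_PERFCTL_KERNEL/EXL above */

	return syscall(__NR_perf_event_open, &attr,
		       0, -1, -1, 0);	/* this task, any CPU, no group */
}
#endif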
 972
 973static struct mips_pmu mipsxxcore_pmu = {
 974	.handle_irq = mipsxx_pmu_handle_irq,
 975	.handle_shared_irq = mipsxx_pmu_handle_shared_irq,
 976	.start = mipsxx_pmu_start,
 977	.stop = mipsxx_pmu_stop,
 978	.alloc_counter = mipsxx_pmu_alloc_counter,
 979	.read_counter = mipsxx_pmu_read_counter,
 980	.write_counter = mipsxx_pmu_write_counter,
 981	.enable_event = mipsxx_pmu_enable_event,
 982	.disable_event = mipsxx_pmu_disable_event,
 983	.map_raw_event = mipsxx_pmu_map_raw_event,
 984	.general_event_map = &mipsxxcore_event_map,
 985	.cache_event_map = &mipsxxcore_cache_map,
 986};
 987
 988static struct mips_pmu mipsxx74Kcore_pmu = {
 989	.handle_irq = mipsxx_pmu_handle_irq,
 990	.handle_shared_irq = mipsxx_pmu_handle_shared_irq,
 991	.start = mipsxx_pmu_start,
 992	.stop = mipsxx_pmu_stop,
 993	.alloc_counter = mipsxx_pmu_alloc_counter,
 994	.read_counter = mipsxx_pmu_read_counter,
 995	.write_counter = mipsxx_pmu_write_counter,
 996	.enable_event = mipsxx_pmu_enable_event,
 997	.disable_event = mipsxx_pmu_disable_event,
 998	.map_raw_event = mipsxx_pmu_map_raw_event,
 999	.general_event_map = &mipsxx74Kcore_event_map,
1000	.cache_event_map = &mipsxx74Kcore_cache_map,
1001};
1002
1003static int __init
1004init_hw_perf_events(void)
1005{
1006	int counters, irq;
1007
1008	pr_info("Performance counters: ");
1009
1010	counters = n_counters();
1011	if (counters == 0) {
1012		pr_cont("No available PMU.\n");
1013		return -ENODEV;
1014	}
1015
1016#ifdef CONFIG_MIPS_MT_SMP
1017	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
1018	if (!cpu_has_mipsmt_pertccounters)
1019		counters = counters_total_to_per_cpu(counters);
1020#endif
1021
1022#ifdef MSC01E_INT_BASE
1023	if (cpu_has_veic) {
1024		/*
1025		 * Using platform specific interrupt controller defines.
1026		 */
1027		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
1028	} else {
1029#endif
1030		if (cp0_perfcount_irq >= 0)
1031			irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1032		else
1033			irq = -1;
1034#ifdef MSC01E_INT_BASE
1035	}
1036#endif
1037
1038	on_each_cpu(reset_counters, (void *)(long)counters, 1);
1039
1040	switch (current_cpu_type()) {
1041	case CPU_24K:
1042		mipsxxcore_pmu.name = "mips/24K";
1043		mipsxxcore_pmu.num_counters = counters;
1044		mipsxxcore_pmu.irq = irq;
1045		mipspmu = &mipsxxcore_pmu;
1046		break;
1047	case CPU_34K:
1048		mipsxxcore_pmu.name = "mips/34K";
1049		mipsxxcore_pmu.num_counters = counters;
1050		mipsxxcore_pmu.irq = irq;
1051		mipspmu = &mipsxxcore_pmu;
1052		break;
1053	case CPU_74K:
1054		mipsxx74Kcore_pmu.name = "mips/74K";
1055		mipsxx74Kcore_pmu.num_counters = counters;
1056		mipsxx74Kcore_pmu.irq = irq;
1057		mipspmu = &mipsxx74Kcore_pmu;
1058		break;
1059	case CPU_1004K:
1060		mipsxxcore_pmu.name = "mips/1004K";
1061		mipsxxcore_pmu.num_counters = counters;
1062		mipsxxcore_pmu.irq = irq;
1063		mipspmu = &mipsxxcore_pmu;
1064		break;
1065	default:
1066		pr_cont("Either the hardware does not support performance "
1067			"counters, or support is not yet implemented.\n");
1068		return -ENODEV;
1069	}
1070
1071	if (mipspmu)
1072		pr_cont("%s PMU enabled, %d counters available to each "
1073			"CPU, irq %d%s\n", mipspmu->name, counters, irq,
1074			irq < 0 ? " (shared with timer interrupt)" : "");
1075
1076	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1077
1078	return 0;
1079}
1080early_initcall(init_hw_perf_events);
1081
1082#endif /* defined(CONFIG_CPU_MIPS32)... */