   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Linux performance counter support for MIPS.
   4 *
   5 * Copyright (C) 2010 MIPS Technologies, Inc.
   6 * Copyright (C) 2011 Cavium Networks, Inc.
   7 * Author: Deng-Cheng Zhu
   8 *
   9 * This code is based on the implementation for ARM, which is in turn
  10 * based on the sparc64 perf event code and the x86 code. Performance
  11 * counter access is based on the MIPS Oprofile code. And the callchain
  12 * support references the code of MIPS stacktrace.c.
  13 */
  14
  15#include <linux/cpumask.h>
  16#include <linux/interrupt.h>
  17#include <linux/smp.h>
  18#include <linux/kernel.h>
  19#include <linux/perf_event.h>
  20#include <linux/uaccess.h>
  21
  22#include <asm/irq.h>
  23#include <asm/irq_regs.h>
  24#include <asm/stacktrace.h>
  25#include <asm/time.h> /* For perf_irq */
  26
  27#define MIPS_MAX_HWEVENTS 4
  28#define MIPS_TCS_PER_COUNTER 2
  29#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
  30
  31struct cpu_hw_events {
  32	/* Array of events on this cpu. */
  33	struct perf_event	*events[MIPS_MAX_HWEVENTS];
  34
  35	/*
  36	 * Set the bit (indexed by the counter number) when the counter
  37	 * is used for an event.
  38	 */
  39	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
  40
  41	/*
  42	 * Software copy of the control register for each performance counter.
   43	 * MIPS CPUs vary in their performance counters: different cores use
   44	 * this field differently, and some may not use it at all.
  45	 */
  46	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
  47};
  48DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
  49	.saved_ctrl = {0},
  50};
  51
  52/* The description of MIPS performance events. */
  53struct mips_perf_event {
  54	unsigned int event_id;
  55	/*
  56	 * MIPS performance counters are indexed starting from 0.
  57	 * CNTR_EVEN indicates the indexes of the counters to be used are
  58	 * even numbers.
  59	 */
  60	unsigned int cntr_mask;
  61	#define CNTR_EVEN	0x55555555
  62	#define CNTR_ODD	0xaaaaaaaa
  63	#define CNTR_ALL	0xffffffff
  64	enum {
  65		T  = 0,
  66		V  = 1,
  67		P  = 2,
  68	} range;
  69};
  70
  71static struct mips_perf_event raw_event;
  72static DEFINE_MUTEX(raw_event_mutex);
  73
  74#define C(x) PERF_COUNT_HW_CACHE_##x
  75
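/*
 * Description of the PMU for the core detected at init time: counter
 * read/write helpers, the raw/general/cache event maps and the number
 * of counters available to this CPU.
 */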
  76struct mips_pmu {
  77	u64		max_period;
  78	u64		valid_count;
  79	u64		overflow;
  80	const char	*name;
  81	int		irq;
  82	u64		(*read_counter)(unsigned int idx);
  83	void		(*write_counter)(unsigned int idx, u64 val);
  84	const struct mips_perf_event *(*map_raw_event)(u64 config);
  85	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
  86	const struct mips_perf_event (*cache_event_map)
  87				[PERF_COUNT_HW_CACHE_MAX]
  88				[PERF_COUNT_HW_CACHE_OP_MAX]
  89				[PERF_COUNT_HW_CACHE_RESULT_MAX];
  90	unsigned int	num_counters;
  91};
  92
  93static int counter_bits;
  94static struct mips_pmu mipspmu;
  95
  96#define M_PERFCTL_EVENT(event)		(((event) << MIPS_PERFCTRL_EVENT_S) & \
  97					 MIPS_PERFCTRL_EVENT)
  98#define M_PERFCTL_VPEID(vpe)		((vpe)	  << MIPS_PERFCTRL_VPEID_S)
  99
 100#ifdef CONFIG_CPU_BMIPS5000
 101#define M_PERFCTL_MT_EN(filter)		0
 102#else /* !CONFIG_CPU_BMIPS5000 */
 103#define M_PERFCTL_MT_EN(filter)		(filter)
 104#endif /* CONFIG_CPU_BMIPS5000 */
 105
 106#define	   M_TC_EN_ALL			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
 107#define	   M_TC_EN_VPE			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
 108#define	   M_TC_EN_TC			M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)
 109
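/*
 * All of the mode bits that allow a counter to count (EXL, kernel, user,
 * supervisor) plus the interrupt enable; clearing these bits in a control
 * register effectively pauses the counter (see pause_local_counters() and
 * mipsxx_pmu_disable_event() below).
 */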
 110#define M_PERFCTL_COUNT_EVENT_WHENEVER	(MIPS_PERFCTRL_EXL |		\
 111					 MIPS_PERFCTRL_K |		\
 112					 MIPS_PERFCTRL_U |		\
 113					 MIPS_PERFCTRL_S |		\
 114					 MIPS_PERFCTRL_IE)
 115
 116#ifdef CONFIG_MIPS_MT_SMP
 117#define M_PERFCTL_CONFIG_MASK		0x3fff801f
 118#else
 119#define M_PERFCTL_CONFIG_MASK		0x1f
 120#endif
 121
 122#define CNTR_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
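/* e.g. CNTR_BIT_MASK(32) == 0xffffffff and CNTR_BIT_MASK(64) == ~0ULL */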
 123
 124#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
 125static DEFINE_RWLOCK(pmuint_rwlock);
 126
 127#if defined(CONFIG_CPU_BMIPS5000)
 128#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
 129			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
 130#else
 131#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
 132			 0 : cpu_vpe_id(&current_cpu_data))
 133#endif
 134
 135/* Copied from op_model_mipsxx.c */
 136static unsigned int vpe_shift(void)
 137{
 138	if (num_possible_cpus() > 1)
 139		return 1;
 140
 141	return 0;
 142}
 143
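/*
 * With the counters shared between the VPEs of a core, each CPU (VPE)
 * effectively sees half of the total number of implemented counters.
 */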
 144static unsigned int counters_total_to_per_cpu(unsigned int counters)
 145{
 146	return counters >> vpe_shift();
 147}
 148
 149#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
 150#define vpe_id()	0
 151
 152#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
 153
 154static void resume_local_counters(void);
 155static void pause_local_counters(void);
 156static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
 157static int mipsxx_pmu_handle_shared_irq(void);
 158
 159/* 0: Not Loongson-3
 160 * 1: Loongson-3A1000/3B1000/3B1500
 161 * 2: Loongson-3A2000/3A3000
 162 * 3: Loongson-3A4000+
 163 */
 164
 165#define LOONGSON_PMU_TYPE0 0
 166#define LOONGSON_PMU_TYPE1 1
 167#define LOONGSON_PMU_TYPE2 2
 168#define LOONGSON_PMU_TYPE3 3
 169
 170static inline int get_loongson3_pmu_type(void)
 171{
 172	if (boot_cpu_type() != CPU_LOONGSON64)
 173		return LOONGSON_PMU_TYPE0;
 174	if ((boot_cpu_data.processor_id & PRID_COMP_MASK) == PRID_COMP_LEGACY)
 175		return LOONGSON_PMU_TYPE1;
 176	if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64C)
 177		return LOONGSON_PMU_TYPE2;
 178	if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G)
 179		return LOONGSON_PMU_TYPE3;
 180
 181	return LOONGSON_PMU_TYPE0;
 182}
 183
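/*
 * When two VPEs share the core's counters, VPE 1 uses the upper pair:
 * its logical counter index is rotated by two to reach the right
 * physical counter.
 */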
 184static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
 185{
 186	if (vpe_id() == 1)
 187		idx = (idx + 2) & 3;
 188	return idx;
 189}
 190
 191static u64 mipsxx_pmu_read_counter(unsigned int idx)
 192{
 193	idx = mipsxx_pmu_swizzle_perf_idx(idx);
 194
 195	switch (idx) {
 196	case 0:
 197		/*
 198		 * The counters are unsigned, we must cast to truncate
 199		 * off the high bits.
 200		 */
 201		return (u32)read_c0_perfcntr0();
 202	case 1:
 203		return (u32)read_c0_perfcntr1();
 204	case 2:
 205		return (u32)read_c0_perfcntr2();
 206	case 3:
 207		return (u32)read_c0_perfcntr3();
 208	default:
 209		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
 210		return 0;
 211	}
 212}
 213
 214static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
 215{
 216	u64 mask = CNTR_BIT_MASK(counter_bits);
 217	idx = mipsxx_pmu_swizzle_perf_idx(idx);
 218
 219	switch (idx) {
 220	case 0:
 221		return read_c0_perfcntr0_64() & mask;
 222	case 1:
 223		return read_c0_perfcntr1_64() & mask;
 224	case 2:
 225		return read_c0_perfcntr2_64() & mask;
 226	case 3:
 227		return read_c0_perfcntr3_64() & mask;
 228	default:
 229		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
 230		return 0;
 231	}
 232}
 233
 234static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
 235{
 236	idx = mipsxx_pmu_swizzle_perf_idx(idx);
 237
 238	switch (idx) {
 239	case 0:
 240		write_c0_perfcntr0(val);
 241		return;
 242	case 1:
 243		write_c0_perfcntr1(val);
 244		return;
 245	case 2:
 246		write_c0_perfcntr2(val);
 247		return;
 248	case 3:
 249		write_c0_perfcntr3(val);
 250		return;
 251	}
 252}
 253
 254static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
 255{
 256	val &= CNTR_BIT_MASK(counter_bits);
 257	idx = mipsxx_pmu_swizzle_perf_idx(idx);
 258
 259	switch (idx) {
 260	case 0:
 261		write_c0_perfcntr0_64(val);
 262		return;
 263	case 1:
 264		write_c0_perfcntr1_64(val);
 265		return;
 266	case 2:
 267		write_c0_perfcntr2_64(val);
 268		return;
 269	case 3:
 270		write_c0_perfcntr3_64(val);
 271		return;
 272	}
 273}
 274
 275static unsigned int mipsxx_pmu_read_control(unsigned int idx)
 276{
 277	idx = mipsxx_pmu_swizzle_perf_idx(idx);
 278
 279	switch (idx) {
 280	case 0:
 281		return read_c0_perfctrl0();
 282	case 1:
 283		return read_c0_perfctrl1();
 284	case 2:
 285		return read_c0_perfctrl2();
 286	case 3:
 287		return read_c0_perfctrl3();
 288	default:
 289		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
 290		return 0;
 291	}
 292}
 293
 294static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
 295{
 296	idx = mipsxx_pmu_swizzle_perf_idx(idx);
 297
 298	switch (idx) {
 299	case 0:
 300		write_c0_perfctrl0(val);
 301		return;
 302	case 1:
 303		write_c0_perfctrl1(val);
 304		return;
 305	case 2:
 306		write_c0_perfctrl2(val);
 307		return;
 308	case 3:
 309		write_c0_perfctrl3(val);
 310		return;
 311	}
 312}
 313
 314static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
 315				    struct hw_perf_event *hwc)
 316{
 317	int i;
 318	unsigned long cntr_mask;
 319
 320	/*
  321	 * We only need to care about the counter mask here; the range has
  322	 * already been checked.
 323	 */
 324	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
 325		cntr_mask = (hwc->event_base >> 10) & 0xffff;
 326	else
 327		cntr_mask = (hwc->event_base >> 8) & 0xffff;
 328
 329	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
 330		/*
  331	 * Note that some MIPS perf events can be counted by both
  332	 * even and odd counters, whereas many others can only be
  333	 * counted by even _or_ odd counters. This introduces an
  334	 * issue: when an event of the former kind occupies the
  335	 * counter that an event of the latter kind needs, "counter
  336	 * allocation" for the latter event fails. If the two could
  337	 * be swapped dynamically both would be satisfied, but we
  338	 * leave this issue alone for now.
 339		 */
 340		if (test_bit(i, &cntr_mask) &&
 341			!test_and_set_bit(i, cpuc->used_mask))
 342			return i;
 343	}
 344
 345	return -EAGAIN;
 346}
 347
 348static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
 349{
 350	struct perf_event *event = container_of(evt, struct perf_event, hw);
 351	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 352	unsigned int range = evt->event_base >> 24;
 353
 354	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
 355
 356	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
 357		cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0x3ff) |
 358			(evt->config_base & M_PERFCTL_CONFIG_MASK) |
  359			/* Make sure the interrupt is enabled. */
 360			MIPS_PERFCTRL_IE;
 361	else
 362		cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
 363			(evt->config_base & M_PERFCTL_CONFIG_MASK) |
  364			/* Make sure the interrupt is enabled. */
 365			MIPS_PERFCTRL_IE;
 366
 367	if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
 368		/* enable the counter for the calling thread */
 369		cpuc->saved_ctrl[idx] |=
 370			(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
 371	} else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
 372		/* The counter is processor wide. Set it up to count all TCs. */
 373		pr_debug("Enabling perf counter for all TCs\n");
 374		cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
 375	} else {
 376		unsigned int cpu, ctrl;
 377
 378		/*
 379		 * Set up the counter for a particular CPU when event->cpu is
 380		 * a valid CPU number. Otherwise set up the counter for the CPU
 381		 * scheduling this thread.
 382		 */
 383		cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
 384
 385		ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
 386		ctrl |= M_TC_EN_VPE;
 387		cpuc->saved_ctrl[idx] |= ctrl;
 388		pr_debug("Enabling perf counter for CPU%d\n", cpu);
 389	}
 390	/*
 391	 * We do not actually let the counter run. Leave it until start().
 392	 */
 393}
 394
 395static void mipsxx_pmu_disable_event(int idx)
 396{
 397	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 398	unsigned long flags;
 399
 400	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
 401
 402	local_irq_save(flags);
 403	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
 404		~M_PERFCTL_COUNT_EVENT_WHENEVER;
 405	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
 406	local_irq_restore(flags);
 407}
 408
 409static int mipspmu_event_set_period(struct perf_event *event,
 410				    struct hw_perf_event *hwc,
 411				    int idx)
 412{
 413	u64 left = local64_read(&hwc->period_left);
 414	u64 period = hwc->sample_period;
 415	int ret = 0;
 416
 417	if (unlikely((left + period) & (1ULL << 63))) {
 418		/* left underflowed by more than period. */
 419		left = period;
 420		local64_set(&hwc->period_left, left);
 421		hwc->last_period = period;
 422		ret = 1;
 423	} else	if (unlikely((left + period) <= period)) {
 424		/* left underflowed by less than period. */
 425		left += period;
 426		local64_set(&hwc->period_left, left);
 427		hwc->last_period = period;
 428		ret = 1;
 429	}
 430
 431	if (left > mipspmu.max_period) {
 432		left = mipspmu.max_period;
 433		local64_set(&hwc->period_left, left);
 434	}
 435
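	/*
	 * Program the counter to start at (overflow - left) so that it
	 * overflows, and raises the interrupt, after "left" more events.
	 */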
 436	local64_set(&hwc->prev_count, mipspmu.overflow - left);
 437
 438	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
 439		mipsxx_pmu_write_control(idx,
 440				M_PERFCTL_EVENT(hwc->event_base & 0x3ff));
 441
 442	mipspmu.write_counter(idx, mipspmu.overflow - left);
 443
 444	perf_event_update_userpage(event);
 445
 446	return ret;
 447}
 448
 449static void mipspmu_event_update(struct perf_event *event,
 450				 struct hw_perf_event *hwc,
 451				 int idx)
 452{
 453	u64 prev_raw_count, new_raw_count;
 454	u64 delta;
 455
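	/*
	 * Retry the update if prev_count changes under us, e.g. because the
	 * overflow interrupt updated this event concurrently.
	 */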
 456again:
 457	prev_raw_count = local64_read(&hwc->prev_count);
 458	new_raw_count = mipspmu.read_counter(idx);
 459
 460	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 461				new_raw_count) != prev_raw_count)
 462		goto again;
 463
 464	delta = new_raw_count - prev_raw_count;
 465
 466	local64_add(delta, &event->count);
 467	local64_sub(delta, &hwc->period_left);
 468}
 469
 470static void mipspmu_start(struct perf_event *event, int flags)
 471{
 472	struct hw_perf_event *hwc = &event->hw;
 473
 474	if (flags & PERF_EF_RELOAD)
 475		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
 476
 477	hwc->state = 0;
 478
 479	/* Set the period for the event. */
 480	mipspmu_event_set_period(event, hwc, hwc->idx);
 481
 482	/* Enable the event. */
 483	mipsxx_pmu_enable_event(hwc, hwc->idx);
 484}
 485
 486static void mipspmu_stop(struct perf_event *event, int flags)
 487{
 488	struct hw_perf_event *hwc = &event->hw;
 489
 490	if (!(hwc->state & PERF_HES_STOPPED)) {
 491		/* We are working on a local event. */
 492		mipsxx_pmu_disable_event(hwc->idx);
 493		barrier();
 494		mipspmu_event_update(event, hwc, hwc->idx);
 495		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 496	}
 497}
 498
 499static int mipspmu_add(struct perf_event *event, int flags)
 500{
 501	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 502	struct hw_perf_event *hwc = &event->hw;
 503	int idx;
 504	int err = 0;
 505
 506	perf_pmu_disable(event->pmu);
 507
  508	/* Look for a free counter for this event. */
 509	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
 510	if (idx < 0) {
 511		err = idx;
 512		goto out;
 513	}
 514
 515	/*
 516	 * If there is an event in the counter we are going to use then
 517	 * make sure it is disabled.
 518	 */
 519	event->hw.idx = idx;
 520	mipsxx_pmu_disable_event(idx);
 521	cpuc->events[idx] = event;
 522
 523	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 524	if (flags & PERF_EF_START)
 525		mipspmu_start(event, PERF_EF_RELOAD);
 526
 527	/* Propagate our changes to the userspace mapping. */
 528	perf_event_update_userpage(event);
 529
 530out:
 531	perf_pmu_enable(event->pmu);
 532	return err;
 533}
 534
 535static void mipspmu_del(struct perf_event *event, int flags)
 536{
 537	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 538	struct hw_perf_event *hwc = &event->hw;
 539	int idx = hwc->idx;
 540
 541	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
 542
 543	mipspmu_stop(event, PERF_EF_UPDATE);
 544	cpuc->events[idx] = NULL;
 545	clear_bit(idx, cpuc->used_mask);
 546
 547	perf_event_update_userpage(event);
 548}
 549
 550static void mipspmu_read(struct perf_event *event)
 551{
 552	struct hw_perf_event *hwc = &event->hw;
 553
 554	/* Don't read disabled counters! */
 555	if (hwc->idx < 0)
 556		return;
 557
 558	mipspmu_event_update(event, hwc, hwc->idx);
 559}
 560
 561static void mipspmu_enable(struct pmu *pmu)
 562{
 563#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
 564	write_unlock(&pmuint_rwlock);
 565#endif
 566	resume_local_counters();
 567}
 568
 569/*
  570 * MIPS performance counters can be per-TC. The control registers
  571 * cannot be accessed directly across CPUs, so global control would
  572 * require cross-CPU calls. on_each_cpu() could help, but we cannot
  573 * guarantee this function is called with interrupts enabled. Instead
  574 * we pause the local counters, grab a rwlock and leave the counters
  575 * on other CPUs alone. If a counter interrupt is raised while we hold
  576 * the write lock, the handler simply pauses the local counters on
  577 * that CPU and spins. We also know we cannot be migrated to another
  578 * CPU between pausing the local counters and grabbing the lock.
 579 */
 580static void mipspmu_disable(struct pmu *pmu)
 581{
 582	pause_local_counters();
 583#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
 584	write_lock(&pmuint_rwlock);
 585#endif
 586}
 587
 588static atomic_t active_events = ATOMIC_INIT(0);
 589static DEFINE_MUTEX(pmu_reserve_mutex);
 590static int (*save_perf_irq)(void);
 591
 592static int mipspmu_get_irq(void)
 593{
 594	int err;
 595
 596	if (mipspmu.irq >= 0) {
 597		/* Request my own irq handler. */
 598		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
 599				  IRQF_PERCPU | IRQF_NOBALANCING |
 600				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
 601				  IRQF_SHARED,
 602				  "mips_perf_pmu", &mipspmu);
 603		if (err) {
 604			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
 605				mipspmu.irq);
 606		}
 607	} else if (cp0_perfcount_irq < 0) {
 608		/*
 609		 * We are sharing the irq number with the timer interrupt.
 610		 */
 611		save_perf_irq = perf_irq;
 612		perf_irq = mipsxx_pmu_handle_shared_irq;
 613		err = 0;
 614	} else {
 615		pr_warn("The platform hasn't properly defined its interrupt controller\n");
 616		err = -ENOENT;
 617	}
 618
 619	return err;
 620}
 621
 622static void mipspmu_free_irq(void)
 623{
 624	if (mipspmu.irq >= 0)
 625		free_irq(mipspmu.irq, &mipspmu);
 626	else if (cp0_perfcount_irq < 0)
 627		perf_irq = save_perf_irq;
 628}
 629
 630/*
  631 * mipsxx/rm9000/loongson2 have different performance counters, so they
  632 * have specific low-level init routines.
 633 */
 634static void reset_counters(void *arg);
 635static int __hw_perf_event_init(struct perf_event *event);
 636
 637static void hw_perf_event_destroy(struct perf_event *event)
 638{
 639	if (atomic_dec_and_mutex_lock(&active_events,
 640				&pmu_reserve_mutex)) {
 641		/*
 642		 * We must not call the destroy function with interrupts
 643		 * disabled.
 644		 */
 645		on_each_cpu(reset_counters,
 646			(void *)(long)mipspmu.num_counters, 1);
 647		mipspmu_free_irq();
 648		mutex_unlock(&pmu_reserve_mutex);
 649	}
 650}
 651
 652static int mipspmu_event_init(struct perf_event *event)
 653{
 654	int err = 0;
 655
 656	/* does not support taken branch sampling */
 657	if (has_branch_stack(event))
 658		return -EOPNOTSUPP;
 659
 660	switch (event->attr.type) {
 661	case PERF_TYPE_RAW:
 662	case PERF_TYPE_HARDWARE:
 663	case PERF_TYPE_HW_CACHE:
 664		break;
 665
 666	default:
 667		return -ENOENT;
 668	}
 669
 670	if (event->cpu >= 0 && !cpu_online(event->cpu))
 671		return -ENODEV;
 672
 673	if (!atomic_inc_not_zero(&active_events)) {
 674		mutex_lock(&pmu_reserve_mutex);
 675		if (atomic_read(&active_events) == 0)
 676			err = mipspmu_get_irq();
 677
 678		if (!err)
 679			atomic_inc(&active_events);
 680		mutex_unlock(&pmu_reserve_mutex);
 681	}
 682
 683	if (err)
 684		return err;
 685
 686	return __hw_perf_event_init(event);
 687}
 688
 689static struct pmu pmu = {
 690	.pmu_enable	= mipspmu_enable,
 691	.pmu_disable	= mipspmu_disable,
 692	.event_init	= mipspmu_event_init,
 693	.add		= mipspmu_add,
 694	.del		= mipspmu_del,
 695	.start		= mipspmu_start,
 696	.stop		= mipspmu_stop,
 697	.read		= mipspmu_read,
 698};
 699
 700static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 701{
 702/*
 703 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 704 * event_id.
 705 */
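/*
 * For example, the generic { 0x02, CNTR_EVEN, T } branch event used by
 * 24K/34K class cores encodes to 0x555502 either way: the CNTR_EVEN mask
 * lands in bits 8-23, event id 0x02 in the low byte, and the T range (0)
 * leaves the top byte clear.
 */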
 706#ifdef CONFIG_MIPS_MT_SMP
 707	if (num_possible_cpus() > 1)
 708		return ((unsigned int)pev->range << 24) |
 709			(pev->cntr_mask & 0xffff00) |
 710			(pev->event_id & 0xff);
 711	else
 712#endif /* CONFIG_MIPS_MT_SMP */
 713	{
 714		if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
 715			return (pev->cntr_mask & 0xfffc00) |
 716				(pev->event_id & 0x3ff);
 717		else
 718			return (pev->cntr_mask & 0xffff00) |
 719				(pev->event_id & 0xff);
 720	}
 721}
 722
 723static const struct mips_perf_event *mipspmu_map_general_event(int idx)
 724{
 725
 726	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
 727		return ERR_PTR(-EOPNOTSUPP);
 728	return &(*mipspmu.general_event_map)[idx];
 729}
 730
 731static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
 732{
 733	unsigned int cache_type, cache_op, cache_result;
 734	const struct mips_perf_event *pev;
 735
 736	cache_type = (config >> 0) & 0xff;
 737	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
 738		return ERR_PTR(-EINVAL);
 739
 740	cache_op = (config >> 8) & 0xff;
 741	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
 742		return ERR_PTR(-EINVAL);
 743
 744	cache_result = (config >> 16) & 0xff;
 745	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 746		return ERR_PTR(-EINVAL);
 747
 748	pev = &((*mipspmu.cache_event_map)
 749					[cache_type]
 750					[cache_op]
 751					[cache_result]);
 752
 753	if (pev->cntr_mask == 0)
 754		return ERR_PTR(-EOPNOTSUPP);
 755
 756	return pev;
 757
 758}
 759
 760static int validate_group(struct perf_event *event)
 761{
 762	struct perf_event *sibling, *leader = event->group_leader;
 763	struct cpu_hw_events fake_cpuc;
 764
 765	memset(&fake_cpuc, 0, sizeof(fake_cpuc));
 766
 767	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
 768		return -EINVAL;
 769
 770	for_each_sibling_event(sibling, leader) {
 771		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
 772			return -EINVAL;
 773	}
 774
 775	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
 776		return -EINVAL;
 777
 778	return 0;
 779}
 780
 781/* This is needed by specific irq handlers in perf_event_*.c */
 782static void handle_associated_event(struct cpu_hw_events *cpuc,
 783				    int idx, struct perf_sample_data *data,
 784				    struct pt_regs *regs)
 785{
 786	struct perf_event *event = cpuc->events[idx];
 787	struct hw_perf_event *hwc = &event->hw;
 788
 789	mipspmu_event_update(event, hwc, idx);
 790	data->period = event->hw.last_period;
 791	if (!mipspmu_event_set_period(event, hwc, idx))
 792		return;
 793
 794	if (perf_event_overflow(event, data, regs))
 795		mipsxx_pmu_disable_event(idx);
 796}
 797
 798
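/*
 * Probe the number of implemented counters: the MIPS_PERFCTRL_M bit in
 * performance control register N indicates that counter N+1 exists.
 */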
 799static int __n_counters(void)
 800{
 801	if (!cpu_has_perf)
 802		return 0;
 803	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
 804		return 1;
 805	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
 806		return 2;
 807	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
 808		return 3;
 809
 810	return 4;
 811}
 812
 813static int n_counters(void)
 814{
 815	int counters;
 816
 817	switch (current_cpu_type()) {
 818	case CPU_R10000:
 819		counters = 2;
 820		break;
 821
 822	case CPU_R12000:
 823	case CPU_R14000:
 824	case CPU_R16000:
 825		counters = 4;
 826		break;
 827
 828	default:
 829		counters = __n_counters();
 830	}
 831
 832	return counters;
 833}
 834
 835static void loongson3_reset_counters(void *arg)
 836{
 837	int counters = (int)(long)arg;
 838
 839	switch (counters) {
 840	case 4:
 841		mipsxx_pmu_write_control(3, 0);
 842		mipspmu.write_counter(3, 0);
 843		mipsxx_pmu_write_control(3, 127<<5);
 844		mipspmu.write_counter(3, 0);
 845		mipsxx_pmu_write_control(3, 191<<5);
 846		mipspmu.write_counter(3, 0);
 847		mipsxx_pmu_write_control(3, 255<<5);
 848		mipspmu.write_counter(3, 0);
 849		mipsxx_pmu_write_control(3, 319<<5);
 850		mipspmu.write_counter(3, 0);
 851		mipsxx_pmu_write_control(3, 383<<5);
 852		mipspmu.write_counter(3, 0);
 853		mipsxx_pmu_write_control(3, 575<<5);
 854		mipspmu.write_counter(3, 0);
 855		fallthrough;
 856	case 3:
 857		mipsxx_pmu_write_control(2, 0);
 858		mipspmu.write_counter(2, 0);
 859		mipsxx_pmu_write_control(2, 127<<5);
 860		mipspmu.write_counter(2, 0);
 861		mipsxx_pmu_write_control(2, 191<<5);
 862		mipspmu.write_counter(2, 0);
 863		mipsxx_pmu_write_control(2, 255<<5);
 864		mipspmu.write_counter(2, 0);
 865		mipsxx_pmu_write_control(2, 319<<5);
 866		mipspmu.write_counter(2, 0);
 867		mipsxx_pmu_write_control(2, 383<<5);
 868		mipspmu.write_counter(2, 0);
 869		mipsxx_pmu_write_control(2, 575<<5);
 870		mipspmu.write_counter(2, 0);
 871		fallthrough;
 872	case 2:
 873		mipsxx_pmu_write_control(1, 0);
 874		mipspmu.write_counter(1, 0);
 875		mipsxx_pmu_write_control(1, 127<<5);
 876		mipspmu.write_counter(1, 0);
 877		mipsxx_pmu_write_control(1, 191<<5);
 878		mipspmu.write_counter(1, 0);
 879		mipsxx_pmu_write_control(1, 255<<5);
 880		mipspmu.write_counter(1, 0);
 881		mipsxx_pmu_write_control(1, 319<<5);
 882		mipspmu.write_counter(1, 0);
 883		mipsxx_pmu_write_control(1, 383<<5);
 884		mipspmu.write_counter(1, 0);
 885		mipsxx_pmu_write_control(1, 575<<5);
 886		mipspmu.write_counter(1, 0);
 887		fallthrough;
 888	case 1:
 889		mipsxx_pmu_write_control(0, 0);
 890		mipspmu.write_counter(0, 0);
 891		mipsxx_pmu_write_control(0, 127<<5);
 892		mipspmu.write_counter(0, 0);
 893		mipsxx_pmu_write_control(0, 191<<5);
 894		mipspmu.write_counter(0, 0);
 895		mipsxx_pmu_write_control(0, 255<<5);
 896		mipspmu.write_counter(0, 0);
 897		mipsxx_pmu_write_control(0, 319<<5);
 898		mipspmu.write_counter(0, 0);
 899		mipsxx_pmu_write_control(0, 383<<5);
 900		mipspmu.write_counter(0, 0);
 901		mipsxx_pmu_write_control(0, 575<<5);
 902		mipspmu.write_counter(0, 0);
 903		break;
 904	}
 905}
 906
 907static void reset_counters(void *arg)
 908{
 909	int counters = (int)(long)arg;
 910
 911	if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
 912		loongson3_reset_counters(arg);
 913		return;
 914	}
 915
 916	switch (counters) {
 917	case 4:
 918		mipsxx_pmu_write_control(3, 0);
 919		mipspmu.write_counter(3, 0);
 920		fallthrough;
 921	case 3:
 922		mipsxx_pmu_write_control(2, 0);
 923		mipspmu.write_counter(2, 0);
 924		fallthrough;
 925	case 2:
 926		mipsxx_pmu_write_control(1, 0);
 927		mipspmu.write_counter(1, 0);
 928		fallthrough;
 929	case 1:
 930		mipsxx_pmu_write_control(0, 0);
 931		mipspmu.write_counter(0, 0);
 932		break;
 933	}
 934}
 935
 936/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
 937static const struct mips_perf_event mipsxxcore_event_map
 938				[PERF_COUNT_HW_MAX] = {
 939	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
 940	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
 941	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
 942	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
 943};
 944
 945/* 74K/proAptiv core has different branch event code. */
 946static const struct mips_perf_event mipsxxcore_event_map2
 947				[PERF_COUNT_HW_MAX] = {
 948	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
 949	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
 950	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
 951	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
 952};
 953
 954static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
 955	[PERF_COUNT_HW_CPU_CYCLES]          = { 0x00, CNTR_EVEN | CNTR_ODD },
 956	[PERF_COUNT_HW_INSTRUCTIONS]        = { 0x01, CNTR_EVEN | CNTR_ODD },
 957	/* These only count dcache, not icache */
 958	[PERF_COUNT_HW_CACHE_REFERENCES]    = { 0x45, CNTR_EVEN | CNTR_ODD },
 959	[PERF_COUNT_HW_CACHE_MISSES]        = { 0x48, CNTR_EVEN | CNTR_ODD },
 960	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
 961	[PERF_COUNT_HW_BRANCH_MISSES]       = { 0x16, CNTR_EVEN | CNTR_ODD },
 962};
 963
 964static const struct mips_perf_event loongson3_event_map1[PERF_COUNT_HW_MAX] = {
 965	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
 966	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
 967	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
 968	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
 969};
 970
 971static const struct mips_perf_event loongson3_event_map2[PERF_COUNT_HW_MAX] = {
 972	[PERF_COUNT_HW_CPU_CYCLES] = { 0x80, CNTR_ALL },
 973	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x81, CNTR_ALL },
 974	[PERF_COUNT_HW_CACHE_MISSES] = { 0x18, CNTR_ALL },
 975	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x94, CNTR_ALL },
 976	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x9c, CNTR_ALL },
 977};
 978
 979static const struct mips_perf_event loongson3_event_map3[PERF_COUNT_HW_MAX] = {
 980	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_ALL },
 981	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_ALL },
 982	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x1c, CNTR_ALL },
 983	[PERF_COUNT_HW_CACHE_MISSES] = { 0x1d, CNTR_ALL },
 984	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_ALL },
 985	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x08, CNTR_ALL },
 986};
 987
 988static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
 989	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
 990	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
 991	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
 992	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL	 },
 993	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
 994	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
 995	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
 996};
 997
 998static const struct mips_perf_event bmips5000_event_map
 999				[PERF_COUNT_HW_MAX] = {
1000	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
1001	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
1002	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
1003};
1004
1005/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
1006static const struct mips_perf_event mipsxxcore_cache_map
1007				[PERF_COUNT_HW_CACHE_MAX]
1008				[PERF_COUNT_HW_CACHE_OP_MAX]
1009				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1010[C(L1D)] = {
1011	/*
1012	 * Like some other architectures (e.g. ARM), the performance
1013	 * counters don't differentiate between read and write
1014	 * accesses/misses, so this isn't strictly correct, but it's the
1015	 * best we can do. Writes and reads get combined.
1016	 */
1017	[C(OP_READ)] = {
1018		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
1019		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
1020	},
1021	[C(OP_WRITE)] = {
1022		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
1023		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
1024	},
1025},
1026[C(L1I)] = {
1027	[C(OP_READ)] = {
1028		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
1029		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
1030	},
1031	[C(OP_WRITE)] = {
1032		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
1033		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
1034	},
1035	[C(OP_PREFETCH)] = {
1036		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
1037		/*
1038		 * Note that MIPS has only "hit" events countable for
1039		 * the prefetch operation.
1040		 */
1041	},
1042},
1043[C(LL)] = {
1044	[C(OP_READ)] = {
1045		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
1046		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
1047	},
1048	[C(OP_WRITE)] = {
1049		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
1050		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
1051	},
1052},
1053[C(DTLB)] = {
1054	[C(OP_READ)] = {
1055		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
1056		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
1057	},
1058	[C(OP_WRITE)] = {
1059		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
1060		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
1061	},
1062},
1063[C(ITLB)] = {
1064	[C(OP_READ)] = {
1065		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
1066		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
1067	},
1068	[C(OP_WRITE)] = {
1069		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
1070		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
1071	},
1072},
1073[C(BPU)] = {
1074	/* Using the same code for *HW_BRANCH* */
1075	[C(OP_READ)] = {
1076		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
1077		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1078	},
1079	[C(OP_WRITE)] = {
1080		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
1081		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1082	},
1083},
1084};
1085
1086/* 74K/proAptiv core has completely different cache event map. */
1087static const struct mips_perf_event mipsxxcore_cache_map2
1088				[PERF_COUNT_HW_CACHE_MAX]
1089				[PERF_COUNT_HW_CACHE_OP_MAX]
1090				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1091[C(L1D)] = {
1092	/*
1093	 * Like some other architectures (e.g. ARM), the performance
1094	 * counters don't differentiate between read and write
1095	 * accesses/misses, so this isn't strictly correct, but it's the
1096	 * best we can do. Writes and reads get combined.
1097	 */
1098	[C(OP_READ)] = {
1099		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
1100		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
1101	},
1102	[C(OP_WRITE)] = {
1103		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
1104		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
1105	},
1106},
1107[C(L1I)] = {
1108	[C(OP_READ)] = {
1109		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
1110		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
1111	},
1112	[C(OP_WRITE)] = {
1113		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
1114		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
1115	},
1116	[C(OP_PREFETCH)] = {
1117		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
1118		/*
1119		 * Note that MIPS has only "hit" events countable for
1120		 * the prefetch operation.
1121		 */
1122	},
1123},
1124[C(LL)] = {
1125	[C(OP_READ)] = {
1126		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
1127		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
1128	},
1129	[C(OP_WRITE)] = {
1130		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
1131		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
1132	},
1133},
1134/*
1135 * 74K core does not have specific DTLB events. proAptiv core has
1136 * "speculative" DTLB events which are numbered 0x63 (even/odd) and
1137 * not included here. One can use raw events if really needed.
1138 */
1139[C(ITLB)] = {
1140	[C(OP_READ)] = {
1141		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
1142		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
1143	},
1144	[C(OP_WRITE)] = {
1145		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
1146		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
1147	},
1148},
1149[C(BPU)] = {
1150	/* Using the same code for *HW_BRANCH* */
1151	[C(OP_READ)] = {
1152		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1153		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1154	},
1155	[C(OP_WRITE)] = {
1156		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
1157		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
1158	},
1159},
1160};
1161
1162static const struct mips_perf_event i6x00_cache_map
1163				[PERF_COUNT_HW_CACHE_MAX]
1164				[PERF_COUNT_HW_CACHE_OP_MAX]
1165				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1166[C(L1D)] = {
1167	[C(OP_READ)] = {
1168		[C(RESULT_ACCESS)]	= { 0x46, CNTR_EVEN | CNTR_ODD },
1169		[C(RESULT_MISS)]	= { 0x49, CNTR_EVEN | CNTR_ODD },
1170	},
1171	[C(OP_WRITE)] = {
1172		[C(RESULT_ACCESS)]	= { 0x47, CNTR_EVEN | CNTR_ODD },
1173		[C(RESULT_MISS)]	= { 0x4a, CNTR_EVEN | CNTR_ODD },
1174	},
1175},
1176[C(L1I)] = {
1177	[C(OP_READ)] = {
1178		[C(RESULT_ACCESS)]	= { 0x84, CNTR_EVEN | CNTR_ODD },
1179		[C(RESULT_MISS)]	= { 0x85, CNTR_EVEN | CNTR_ODD },
1180	},
1181},
1182[C(DTLB)] = {
1183	/* Can't distinguish read & write */
1184	[C(OP_READ)] = {
1185		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
1186		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
1187	},
1188	[C(OP_WRITE)] = {
1189		[C(RESULT_ACCESS)]	= { 0x40, CNTR_EVEN | CNTR_ODD },
1190		[C(RESULT_MISS)]	= { 0x41, CNTR_EVEN | CNTR_ODD },
1191	},
1192},
1193[C(BPU)] = {
1194	/* Conditional branches / mispredicted */
1195	[C(OP_READ)] = {
1196		[C(RESULT_ACCESS)]	= { 0x15, CNTR_EVEN | CNTR_ODD },
1197		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN | CNTR_ODD },
1198	},
1199},
1200};
1201
1202static const struct mips_perf_event loongson3_cache_map1
1203				[PERF_COUNT_HW_CACHE_MAX]
1204				[PERF_COUNT_HW_CACHE_OP_MAX]
1205				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1206[C(L1D)] = {
1207	/*
1208	 * Like some other architectures (e.g. ARM), the performance
1209	 * counters don't differentiate between read and write
1210	 * accesses/misses, so this isn't strictly correct, but it's the
1211	 * best we can do. Writes and reads get combined.
1212	 */
1213	[C(OP_READ)] = {
1214		[C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
1215	},
1216	[C(OP_WRITE)] = {
1217		[C(RESULT_MISS)]        = { 0x04, CNTR_ODD },
1218	},
1219},
1220[C(L1I)] = {
1221	[C(OP_READ)] = {
1222		[C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
1223	},
1224	[C(OP_WRITE)] = {
1225		[C(RESULT_MISS)]        = { 0x04, CNTR_EVEN },
1226	},
1227},
1228[C(DTLB)] = {
1229	[C(OP_READ)] = {
1230		[C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
1231	},
1232	[C(OP_WRITE)] = {
1233		[C(RESULT_MISS)]        = { 0x09, CNTR_ODD },
1234	},
1235},
1236[C(ITLB)] = {
1237	[C(OP_READ)] = {
1238		[C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
1239	},
1240	[C(OP_WRITE)] = {
1241		[C(RESULT_MISS)]        = { 0x0c, CNTR_ODD },
1242	},
1243},
1244[C(BPU)] = {
1245	/* Using the same code for *HW_BRANCH* */
1246	[C(OP_READ)] = {
1247		[C(RESULT_ACCESS)]      = { 0x01, CNTR_EVEN },
1248		[C(RESULT_MISS)]        = { 0x01, CNTR_ODD },
1249	},
1250	[C(OP_WRITE)] = {
1251		[C(RESULT_ACCESS)]      = { 0x01, CNTR_EVEN },
1252		[C(RESULT_MISS)]        = { 0x01, CNTR_ODD },
1253	},
1254},
1255};
1256
1257static const struct mips_perf_event loongson3_cache_map2
1258				[PERF_COUNT_HW_CACHE_MAX]
1259				[PERF_COUNT_HW_CACHE_OP_MAX]
1260				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1261[C(L1D)] = {
1262	/*
1263	 * Like some other architectures (e.g. ARM), the performance
1264	 * counters don't differentiate between read and write
1265	 * accesses/misses, so this isn't strictly correct, but it's the
1266	 * best we can do. Writes and reads get combined.
1267	 */
1268	[C(OP_READ)] = {
1269		[C(RESULT_ACCESS)]	= { 0x156, CNTR_ALL },
1270	},
1271	[C(OP_WRITE)] = {
1272		[C(RESULT_ACCESS)]	= { 0x155, CNTR_ALL },
1273		[C(RESULT_MISS)]        = { 0x153, CNTR_ALL },
1274	},
1275},
1276[C(L1I)] = {
1277	[C(OP_READ)] = {
1278		[C(RESULT_MISS)]	= { 0x18, CNTR_ALL },
1279	},
1280	[C(OP_WRITE)] = {
1281		[C(RESULT_MISS)]        = { 0x18, CNTR_ALL },
1282	},
1283},
1284[C(LL)] = {
1285	[C(OP_READ)] = {
1286		[C(RESULT_ACCESS)]	= { 0x1b6, CNTR_ALL },
1287	},
1288	[C(OP_WRITE)] = {
1289		[C(RESULT_ACCESS)]	= { 0x1b7, CNTR_ALL },
1290	},
1291	[C(OP_PREFETCH)] = {
1292		[C(RESULT_ACCESS)]	= { 0x1bf, CNTR_ALL },
1293	},
1294},
1295[C(DTLB)] = {
1296	[C(OP_READ)] = {
1297		[C(RESULT_MISS)]        = { 0x92, CNTR_ALL },
1298	},
1299	[C(OP_WRITE)] = {
1300		[C(RESULT_MISS)]        = { 0x92, CNTR_ALL },
1301	},
1302},
1303[C(ITLB)] = {
1304	[C(OP_READ)] = {
1305		[C(RESULT_MISS)]	= { 0x1a, CNTR_ALL },
1306	},
1307	[C(OP_WRITE)] = {
1308		[C(RESULT_MISS)]	= { 0x1a, CNTR_ALL },
1309	},
1310},
1311[C(BPU)] = {
1312	/* Using the same code for *HW_BRANCH* */
1313	[C(OP_READ)] = {
1314		[C(RESULT_ACCESS)]      = { 0x94, CNTR_ALL },
1315		[C(RESULT_MISS)]        = { 0x9c, CNTR_ALL },
1316	},
1317},
1318};
1319
1320static const struct mips_perf_event loongson3_cache_map3
1321				[PERF_COUNT_HW_CACHE_MAX]
1322				[PERF_COUNT_HW_CACHE_OP_MAX]
1323				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1324[C(L1D)] = {
1325	/*
1326	 * Like some other architectures (e.g. ARM), the performance
1327	 * counters don't differentiate between read and write
1328	 * accesses/misses, so this isn't strictly correct, but it's the
1329	 * best we can do. Writes and reads get combined.
1330	 */
1331	[C(OP_READ)] = {
1332		[C(RESULT_ACCESS)]      = { 0x1e, CNTR_ALL },
1333		[C(RESULT_MISS)]        = { 0x1f, CNTR_ALL },
1334	},
1335	[C(OP_PREFETCH)] = {
1336		[C(RESULT_ACCESS)]	= { 0xaa, CNTR_ALL },
1337		[C(RESULT_MISS)]	= { 0xa9, CNTR_ALL },
1338	},
1339},
1340[C(L1I)] = {
1341	[C(OP_READ)] = {
1342		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ALL },
1343		[C(RESULT_MISS)]	= { 0x1d, CNTR_ALL },
1344	},
1345},
1346[C(LL)] = {
1347	[C(OP_READ)] = {
1348		[C(RESULT_ACCESS)]	= { 0x2e, CNTR_ALL },
1349		[C(RESULT_MISS)]	= { 0x2f, CNTR_ALL },
1350	},
1351},
1352[C(DTLB)] = {
1353	[C(OP_READ)] = {
1354		[C(RESULT_ACCESS)]      = { 0x14, CNTR_ALL },
1355		[C(RESULT_MISS)]	= { 0x1b, CNTR_ALL },
1356	},
1357},
1358[C(ITLB)] = {
1359	[C(OP_READ)] = {
1360		[C(RESULT_MISS)]	= { 0x1a, CNTR_ALL },
1361	},
1362},
1363[C(BPU)] = {
1364	/* Using the same code for *HW_BRANCH* */
1365	[C(OP_READ)] = {
1366		[C(RESULT_ACCESS)]      = { 0x02, CNTR_ALL },
1367		[C(RESULT_MISS)]        = { 0x08, CNTR_ALL },
1368	},
1369},
1370};
1371
1372/* BMIPS5000 */
1373static const struct mips_perf_event bmips5000_cache_map
1374				[PERF_COUNT_HW_CACHE_MAX]
1375				[PERF_COUNT_HW_CACHE_OP_MAX]
1376				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1377[C(L1D)] = {
1378	/*
1379	 * Like some other architectures (e.g. ARM), the performance
1380	 * counters don't differentiate between read and write
1381	 * accesses/misses, so this isn't strictly correct, but it's the
1382	 * best we can do. Writes and reads get combined.
1383	 */
1384	[C(OP_READ)] = {
1385		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1386		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1387	},
1388	[C(OP_WRITE)] = {
1389		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
1390		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
1391	},
1392},
1393[C(L1I)] = {
1394	[C(OP_READ)] = {
1395		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1396		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1397	},
1398	[C(OP_WRITE)] = {
1399		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
1400		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
1401	},
1402	[C(OP_PREFETCH)] = {
1403		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
1404		/*
1405		 * Note that MIPS has only "hit" events countable for
1406		 * the prefetch operation.
1407		 */
1408	},
1409},
1410[C(LL)] = {
1411	[C(OP_READ)] = {
1412		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1413		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1414	},
1415	[C(OP_WRITE)] = {
1416		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
1417		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
1418	},
1419},
1420[C(BPU)] = {
1421	/* Using the same code for *HW_BRANCH* */
1422	[C(OP_READ)] = {
1423		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1424	},
1425	[C(OP_WRITE)] = {
1426		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
1427	},
1428},
1429};
1430
1431static const struct mips_perf_event octeon_cache_map
1432				[PERF_COUNT_HW_CACHE_MAX]
1433				[PERF_COUNT_HW_CACHE_OP_MAX]
1434				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1435[C(L1D)] = {
1436	[C(OP_READ)] = {
1437		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
1438		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
1439	},
1440	[C(OP_WRITE)] = {
1441		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
1442	},
1443},
1444[C(L1I)] = {
1445	[C(OP_READ)] = {
1446		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
1447	},
1448	[C(OP_PREFETCH)] = {
1449		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
1450	},
1451},
1452[C(DTLB)] = {
1453	/*
 1454	 * Only general DTLB misses are counted; use the same event for
 1455	 * read and write.
1456	 */
1457	[C(OP_READ)] = {
1458		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1459	},
1460	[C(OP_WRITE)] = {
1461		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
1462	},
1463},
1464[C(ITLB)] = {
1465	[C(OP_READ)] = {
1466		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
1467	},
1468},
1469};
1470
1471static int __hw_perf_event_init(struct perf_event *event)
1472{
1473	struct perf_event_attr *attr = &event->attr;
1474	struct hw_perf_event *hwc = &event->hw;
1475	const struct mips_perf_event *pev;
1476	int err;
1477
 1478	/* Return the MIPS event descriptor for a generic perf event. */
1479	if (PERF_TYPE_HARDWARE == event->attr.type) {
1480		if (event->attr.config >= PERF_COUNT_HW_MAX)
1481			return -EINVAL;
1482		pev = mipspmu_map_general_event(event->attr.config);
1483	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
1484		pev = mipspmu_map_cache_event(event->attr.config);
1485	} else if (PERF_TYPE_RAW == event->attr.type) {
1486		/* We are working on the global raw event. */
1487		mutex_lock(&raw_event_mutex);
1488		pev = mipspmu.map_raw_event(event->attr.config);
1489	} else {
1490		/* The event type is not (yet) supported. */
1491		return -EOPNOTSUPP;
1492	}
1493
1494	if (IS_ERR(pev)) {
1495		if (PERF_TYPE_RAW == event->attr.type)
1496			mutex_unlock(&raw_event_mutex);
1497		return PTR_ERR(pev);
1498	}
1499
1500	/*
 1501	 * We allow maximum flexibility in how each individual counter shared
 1502	 * by a single CPU operates (the mode exclusion and the range).
1503	 */
1504	hwc->config_base = MIPS_PERFCTRL_IE;
1505
1506	hwc->event_base = mipspmu_perf_event_encode(pev);
1507	if (PERF_TYPE_RAW == event->attr.type)
1508		mutex_unlock(&raw_event_mutex);
1509
1510	if (!attr->exclude_user)
1511		hwc->config_base |= MIPS_PERFCTRL_U;
1512	if (!attr->exclude_kernel) {
1513		hwc->config_base |= MIPS_PERFCTRL_K;
1514		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
1515		hwc->config_base |= MIPS_PERFCTRL_EXL;
1516	}
1517	if (!attr->exclude_hv)
1518		hwc->config_base |= MIPS_PERFCTRL_S;
1519
1520	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
1521	/*
1522	 * The event can belong to another cpu. We do not assign a local
1523	 * counter for it for now.
1524	 */
1525	hwc->idx = -1;
1526	hwc->config = 0;
1527
1528	if (!hwc->sample_period) {
1529		hwc->sample_period  = mipspmu.max_period;
1530		hwc->last_period    = hwc->sample_period;
1531		local64_set(&hwc->period_left, hwc->sample_period);
1532	}
1533
1534	err = 0;
1535	if (event->group_leader != event)
1536		err = validate_group(event);
1537
1538	event->destroy = hw_perf_event_destroy;
1539
1540	if (err)
1541		event->destroy(event);
1542
1543	return err;
1544}
1545
1546static void pause_local_counters(void)
1547{
1548	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1549	int ctr = mipspmu.num_counters;
1550	unsigned long flags;
1551
1552	local_irq_save(flags);
1553	do {
1554		ctr--;
1555		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
1556		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
1557					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1558	} while (ctr > 0);
1559	local_irq_restore(flags);
1560}
1561
1562static void resume_local_counters(void)
1563{
1564	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1565	int ctr = mipspmu.num_counters;
1566
1567	do {
1568		ctr--;
1569		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
1570	} while (ctr > 0);
1571}
1572
1573static int mipsxx_pmu_handle_shared_irq(void)
1574{
1575	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1576	struct perf_sample_data data;
1577	unsigned int counters = mipspmu.num_counters;
1578	u64 counter;
1579	int n, handled = IRQ_NONE;
1580	struct pt_regs *regs;
1581
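	/*
	 * On cores with the Cause.PCI bit, cheaply dismiss interrupts that
	 * were not raised by the performance counters (the IRQ line may be
	 * shared, e.g. with the timer).
	 */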
1582	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
1583		return handled;
1584	/*
 1585	 * First we pause the local counters, so that by the time we block
 1586	 * on the lock here the counters are already paused. If the lock is
 1587	 * held due to perf_disable(), the timer interrupt handler is delayed.
 1588	 *
 1589	 * See also mipspmu_start().
1590	 */
1591	pause_local_counters();
1592#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1593	read_lock(&pmuint_rwlock);
1594#endif
1595
1596	regs = get_irq_regs();
1597
1598	perf_sample_data_init(&data, 0, 0);
1599
1600	for (n = counters - 1; n >= 0; n--) {
1601		if (!test_bit(n, cpuc->used_mask))
1602			continue;
1603
1604		counter = mipspmu.read_counter(n);
1605		if (!(counter & mipspmu.overflow))
1606			continue;
1607
1608		handle_associated_event(cpuc, n, &data, regs);
1609		handled = IRQ_HANDLED;
1610	}
1611
1612#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1613	read_unlock(&pmuint_rwlock);
1614#endif
1615	resume_local_counters();
1616
1617	/*
1618	 * Do all the work for the pending perf events. We can do this
1619	 * in here because the performance counter interrupt is a regular
1620	 * interrupt, not NMI.
1621	 */
1622	if (handled == IRQ_HANDLED)
1623		irq_work_run();
1624
1625	return handled;
1626}
1627
1628static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1629{
1630	return mipsxx_pmu_handle_shared_irq();
1631}
1632
1633/* 24K */
1634#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
1635	((b) == 0 || (b) == 1 || (b) == 11)
1636
1637/* 34K */
1638#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
1639	((b) == 0 || (b) == 1 || (b) == 11)
1640#ifdef CONFIG_MIPS_MT_SMP
1641#define IS_RANGE_P_34K_EVENT(r, b)					\
1642	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1643	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
1644	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
1645	 ((b) >= 64 && (b) <= 67))
1646#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
1647#endif
1648
1649/* 74K */
1650#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
1651	((b) == 0 || (b) == 1)
1652
1653/* proAptiv */
1654#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
1655	((b) == 0 || (b) == 1)
1656/* P5600 */
1657#define IS_BOTH_COUNTERS_P5600_EVENT(b)					\
1658	((b) == 0 || (b) == 1)
1659
1660/* 1004K */
1661#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
1662	((b) == 0 || (b) == 1 || (b) == 11)
1663#ifdef CONFIG_MIPS_MT_SMP
1664#define IS_RANGE_P_1004K_EVENT(r, b)					\
1665	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1666	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
1667	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
1668	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
1669	 ((b) >= 64 && (b) <= 67))
1670#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
1671#endif
1672
1673/* interAptiv */
1674#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
1675	((b) == 0 || (b) == 1 || (b) == 11)
1676#ifdef CONFIG_MIPS_MT_SMP
1677/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
1678#define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
1679	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
1680	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
1681	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
1682	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
1683	 ((b) >= 64 && (b) <= 67))
1684#define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
1685#endif
1686
1687/* BMIPS5000 */
1688#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
1689	((b) == 0 || (b) == 1)
1690
1691
1692/*
 1693 * For most cores the user can use raw events 0-255, where 0-127 select
 1694 * events on the even counters and 128-255 select events on the odd
 1695 * counters; bit 7 acts as the even/odd bank selector. For example, to
 1696 * count event number 15 (taken from the core's user manual) on an odd
 1697 * counter, 128 needs to be added to 15, i.e. 143 (0x8F) is used as the
 1698 * event config.
1699 *
1700 * Some newer cores have even more events, in which case the user can use raw
1701 * events 0-511, where 0-255 are for the events of even counters, and 256-511
1702 * are for odd counters, so bit 8 is used to indicate the even/odd bank selector.
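 *
 * As a worked example of how mipsxx_pmu_map_raw_event() below handles this,
 * a config of 0x8f on a 24K gives raw_id = 0x8f and base_id = 0x0f, and
 * since raw_id > 127 the event is restricted to the odd counters (CNTR_ODD).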
1703 */
1704static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1705{
1706	/* currently most cores have 7-bit event numbers */
1707	int pmu_type;
1708	unsigned int raw_id = config & 0xff;
1709	unsigned int base_id = raw_id & 0x7f;
1710
1711	switch (current_cpu_type()) {
1712	case CPU_24K:
1713		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
1714			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1715		else
1716			raw_event.cntr_mask =
1717				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1718#ifdef CONFIG_MIPS_MT_SMP
1719		/*
1720		 * This is actually doing nothing. Non-multithreading
1721		 * CPUs will not check and calculate the range.
1722		 */
1723		raw_event.range = P;
1724#endif
1725		break;
1726	case CPU_34K:
1727		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
1728			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1729		else
1730			raw_event.cntr_mask =
1731				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1732#ifdef CONFIG_MIPS_MT_SMP
1733		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
1734			raw_event.range = P;
1735		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
1736			raw_event.range = V;
1737		else
1738			raw_event.range = T;
1739#endif
1740		break;
1741	case CPU_74K:
1742	case CPU_1074K:
1743		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
1744			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1745		else
1746			raw_event.cntr_mask =
1747				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1748#ifdef CONFIG_MIPS_MT_SMP
1749		raw_event.range = P;
1750#endif
1751		break;
1752	case CPU_PROAPTIV:
1753		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
1754			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1755		else
1756			raw_event.cntr_mask =
1757				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1758#ifdef CONFIG_MIPS_MT_SMP
1759		raw_event.range = P;
1760#endif
1761		break;
1762	case CPU_P5600:
1763	case CPU_P6600:
1764		/* 8-bit event numbers */
1765		raw_id = config & 0x1ff;
1766		base_id = raw_id & 0xff;
1767		if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
1768			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1769		else
1770			raw_event.cntr_mask =
1771				raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
1772#ifdef CONFIG_MIPS_MT_SMP
1773		raw_event.range = P;
1774#endif
1775		break;
1776	case CPU_I6400:
1777	case CPU_I6500:
1778		/* 8-bit event numbers */
1779		base_id = config & 0xff;
1780		raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1781		break;
1782	case CPU_1004K:
1783		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
1784			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1785		else
1786			raw_event.cntr_mask =
1787				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1788#ifdef CONFIG_MIPS_MT_SMP
1789		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
1790			raw_event.range = P;
1791		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
1792			raw_event.range = V;
1793		else
1794			raw_event.range = T;
1795#endif
1796		break;
1797	case CPU_INTERAPTIV:
1798		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
1799			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1800		else
1801			raw_event.cntr_mask =
1802				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1803#ifdef CONFIG_MIPS_MT_SMP
1804		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
1805			raw_event.range = P;
1806		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
1807			raw_event.range = V;
1808		else
1809			raw_event.range = T;
1810#endif
1811		break;
1812	case CPU_BMIPS5000:
1813		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
1814			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1815		else
1816			raw_event.cntr_mask =
1817				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1818		break;
1819	case CPU_LOONGSON64:
1820		pmu_type = get_loongson3_pmu_type();
1821
1822		switch (pmu_type) {
1823		case LOONGSON_PMU_TYPE1:
1824			raw_event.cntr_mask =
1825				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1826			break;
1827		case LOONGSON_PMU_TYPE2:
1828			base_id = config & 0x3ff;
1829			raw_event.cntr_mask = CNTR_ALL;
1830
1831			if ((base_id >= 1 && base_id < 28) ||
1832				(base_id >= 64 && base_id < 90) ||
1833				(base_id >= 128 && base_id < 164) ||
1834				(base_id >= 192 && base_id < 200) ||
1835				(base_id >= 256 && base_id < 275) ||
1836				(base_id >= 320 && base_id < 361) ||
1837				(base_id >= 384 && base_id < 574))
1838				break;
1839
1840			return ERR_PTR(-EOPNOTSUPP);
1841		case LOONGSON_PMU_TYPE3:
1842			base_id = raw_id;
1843			raw_event.cntr_mask = CNTR_ALL;
1844			break;
1845		}
1846		break;
1847	}
1848
1849	raw_event.event_id = base_id;
1850
1851	return &raw_event;
1852}
1853
1854static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
1855{
1856	unsigned int base_id = config & 0x7f;
1857	unsigned int event_max;
1858
1859
1860	raw_event.cntr_mask = CNTR_ALL;
1861	raw_event.event_id = base_id;
1862
1863	if (current_cpu_type() == CPU_CAVIUM_OCTEON3)
1864		event_max = 0x5f;
1865	else if (current_cpu_type() == CPU_CAVIUM_OCTEON2)
1866		event_max = 0x42;
1867	else
1868		event_max = 0x3a;
1869
1870	if (base_id > event_max) {
1871		return ERR_PTR(-EOPNOTSUPP);
1872	}
1873
1874	switch (base_id) {
1875	case 0x00:
1876	case 0x0f:
1877	case 0x1e:
1878	case 0x1f:
1879	case 0x2f:
1880	case 0x34:
1881	case 0x3e ... 0x3f:
1882		return ERR_PTR(-EOPNOTSUPP);
1883	default:
1884		break;
1885	}
1886
1887	return &raw_event;
1888}
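
/*
 * Editor's illustration, not part of the original source: Octeon raw
 * events are not split between even and odd counters, so any valid event
 * id may be scheduled on any counter (CNTR_ALL). On OCTEON2, for example,
 * event_max is 0x42, so a config of 0x2b is accepted, while 0x50, or one
 * of the reserved ids rejected above (0x00, 0x0f, ...), returns
 * ERR_PTR(-EOPNOTSUPP) and event creation fails.
 */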
1889
1890static int __init
1891init_hw_perf_events(void)
1892{
1893	int counters, irq, pmu_type;
1894
1895	pr_info("Performance counters: ");
1896
1897	counters = n_counters();
1898	if (counters == 0) {
1899		pr_cont("No available PMU.\n");
1900		return -ENODEV;
1901	}
1902
1903#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1904	if (!cpu_has_mipsmt_pertccounters)
1905		counters = counters_total_to_per_cpu(counters);
1906#endif
1907
1908	if (get_c0_perfcount_int)
1909		irq = get_c0_perfcount_int();
1910	else if (cp0_perfcount_irq >= 0)
1911		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1912	else
1913		irq = -1;
1914
1915	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
1916
1917	switch (current_cpu_type()) {
1918	case CPU_24K:
1919		mipspmu.name = "mips/24K";
1920		mipspmu.general_event_map = &mipsxxcore_event_map;
1921		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1922		break;
1923	case CPU_34K:
1924		mipspmu.name = "mips/34K";
1925		mipspmu.general_event_map = &mipsxxcore_event_map;
1926		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1927		break;
1928	case CPU_74K:
1929		mipspmu.name = "mips/74K";
1930		mipspmu.general_event_map = &mipsxxcore_event_map2;
1931		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1932		break;
1933	case CPU_PROAPTIV:
1934		mipspmu.name = "mips/proAptiv";
1935		mipspmu.general_event_map = &mipsxxcore_event_map2;
1936		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1937		break;
1938	case CPU_P5600:
1939		mipspmu.name = "mips/P5600";
1940		mipspmu.general_event_map = &mipsxxcore_event_map2;
1941		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1942		break;
1943	case CPU_P6600:
1944		mipspmu.name = "mips/P6600";
1945		mipspmu.general_event_map = &mipsxxcore_event_map2;
1946		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1947		break;
1948	case CPU_I6400:
1949		mipspmu.name = "mips/I6400";
1950		mipspmu.general_event_map = &i6x00_event_map;
1951		mipspmu.cache_event_map = &i6x00_cache_map;
1952		break;
1953	case CPU_I6500:
1954		mipspmu.name = "mips/I6500";
1955		mipspmu.general_event_map = &i6x00_event_map;
1956		mipspmu.cache_event_map = &i6x00_cache_map;
1957		break;
1958	case CPU_1004K:
1959		mipspmu.name = "mips/1004K";
1960		mipspmu.general_event_map = &mipsxxcore_event_map;
1961		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1962		break;
1963	case CPU_1074K:
1964		mipspmu.name = "mips/1074K";
1965		mipspmu.general_event_map = &mipsxxcore_event_map;
1966		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1967		break;
1968	case CPU_INTERAPTIV:
1969		mipspmu.name = "mips/interAptiv";
1970		mipspmu.general_event_map = &mipsxxcore_event_map;
1971		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1972		break;
1973	case CPU_LOONGSON32:
1974		mipspmu.name = "mips/loongson1";
1975		mipspmu.general_event_map = &mipsxxcore_event_map;
1976		mipspmu.cache_event_map = &mipsxxcore_cache_map;
1977		break;
1978	case CPU_LOONGSON64:
1979		mipspmu.name = "mips/loongson3";
1980		pmu_type = get_loongson3_pmu_type();
1981
1982		switch (pmu_type) {
1983		case LOONGSON_PMU_TYPE1:
1984			counters = 2;
1985			mipspmu.general_event_map = &loongson3_event_map1;
1986			mipspmu.cache_event_map = &loongson3_cache_map1;
1987			break;
1988		case LOONGSON_PMU_TYPE2:
1989			counters = 4;
1990			mipspmu.general_event_map = &loongson3_event_map2;
1991			mipspmu.cache_event_map = &loongson3_cache_map2;
1992			break;
1993		case LOONGSON_PMU_TYPE3:
1994			counters = 4;
1995			mipspmu.general_event_map = &loongson3_event_map3;
1996			mipspmu.cache_event_map = &loongson3_cache_map3;
1997			break;
1998		}
1999		break;
2000	case CPU_CAVIUM_OCTEON:
2001	case CPU_CAVIUM_OCTEON_PLUS:
2002	case CPU_CAVIUM_OCTEON2:
2003	case CPU_CAVIUM_OCTEON3:
2004		mipspmu.name = "octeon";
2005		mipspmu.general_event_map = &octeon_event_map;
2006		mipspmu.cache_event_map = &octeon_cache_map;
2007		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
2008		break;
2009	case CPU_BMIPS5000:
2010		mipspmu.name = "BMIPS5000";
2011		mipspmu.general_event_map = &bmips5000_event_map;
2012		mipspmu.cache_event_map = &bmips5000_cache_map;
2013		break;
2014	default:
2015		pr_cont("Either hardware does not support performance "
2016			"counters, or not yet implemented.\n");
2017		return -ENODEV;
2018	}
2019
2020	mipspmu.num_counters = counters;
2021	mipspmu.irq = irq;
2022
2023	if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
2024		if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
2025			counter_bits = 48;
2026			mipspmu.max_period = (1ULL << 47) - 1;
2027			mipspmu.valid_count = (1ULL << 47) - 1;
2028			mipspmu.overflow = 1ULL << 47;
2029		} else {
2030			counter_bits = 64;
2031			mipspmu.max_period = (1ULL << 63) - 1;
2032			mipspmu.valid_count = (1ULL << 63) - 1;
2033			mipspmu.overflow = 1ULL << 63;
2034		}
2035		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
2036		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
2037	} else {
2038		counter_bits = 32;
2039		mipspmu.max_period = (1ULL << 31) - 1;
2040		mipspmu.valid_count = (1ULL << 31) - 1;
2041		mipspmu.overflow = 1ULL << 31;
2042		mipspmu.read_counter = mipsxx_pmu_read_counter;
2043		mipspmu.write_counter = mipsxx_pmu_write_counter;
2044	}
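
/*
 * Editor's sketch, not part of the original source: the counter width
 * chosen above bounds the sampling period. With 32-bit counters,
 * max_period is 0x7fffffff and overflow is bit 31, so a counter armed for
 * 'left' events is preloaded with (overflow - left); a period of 1000000,
 * for instance, writes 0x80000000 - 0xf4240 = 0x7ff0bdc0 and the overflow
 * interrupt fires after one million increments.
 */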
2045
2046	on_each_cpu(reset_counters, (void *)(long)counters, 1);
2047
2048	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
2049		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
2050		irq < 0 ? " (share with timer interrupt)" : "");
2051
2052	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
2053
2054	return 0;
2055}
2056early_initcall(init_hw_perf_events);