   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
   3 */
   4#include <linux/acpi.h>
   5#include <linux/bitops.h>
   6#include <linux/bug.h>
   7#include <linux/cpuhotplug.h>
   8#include <linux/cpumask.h>
   9#include <linux/device.h>
  10#include <linux/errno.h>
  11#include <linux/interrupt.h>
  12#include <linux/irq.h>
  13#include <linux/kernel.h>
  14#include <linux/list.h>
  15#include <linux/percpu.h>
  16#include <linux/perf_event.h>
  17#include <linux/platform_device.h>
  18#include <linux/smp.h>
  19#include <linux/spinlock.h>
  20#include <linux/sysfs.h>
  21#include <linux/types.h>
  22
  23#include <asm/barrier.h>
  24#include <asm/local64.h>
  25#include <asm/sysreg.h>
  26
  27#define MAX_L2_CTRS             9
  28
  29#define L2PMCR_NUM_EV_SHIFT     11
  30#define L2PMCR_NUM_EV_MASK      0x1F
  31
  32#define L2PMCR                  0x400
  33#define L2PMCNTENCLR            0x403
  34#define L2PMCNTENSET            0x404
  35#define L2PMINTENCLR            0x405
  36#define L2PMINTENSET            0x406
  37#define L2PMOVSCLR              0x407
  38#define L2PMOVSSET              0x408
  39#define L2PMCCNTCR              0x409
  40#define L2PMCCNTR               0x40A
  41#define L2PMCCNTSR              0x40C
  42#define L2PMRESR                0x410
  43#define IA_L2PMXEVCNTCR_BASE    0x420
  44#define IA_L2PMXEVCNTR_BASE     0x421
  45#define IA_L2PMXEVFILTER_BASE   0x423
  46#define IA_L2PMXEVTYPER_BASE    0x424
  47
  48#define IA_L2_REG_OFFSET        0x10
  49
  50#define L2PMXEVFILTER_SUFILTER_ALL      0x000E0000
  51#define L2PMXEVFILTER_ORGFILTER_IDINDEP 0x00000004
  52#define L2PMXEVFILTER_ORGFILTER_ALL     0x00000003
  53
  54#define L2EVTYPER_REG_SHIFT     3
  55
  56#define L2PMRESR_GROUP_BITS     8
  57#define L2PMRESR_GROUP_MASK     GENMASK(7, 0)
  58
  59#define L2CYCLE_CTR_BIT         31
  60#define L2CYCLE_CTR_RAW_CODE    0xFE
  61
  62#define L2PMCR_RESET_ALL        0x6
  63#define L2PMCR_COUNTERS_ENABLE  0x1
  64#define L2PMCR_COUNTERS_DISABLE 0x0
  65
  66#define L2PMRESR_EN             BIT_ULL(63)
  67
  68#define L2_EVT_MASK             0x00000FFF
  69#define L2_EVT_CODE_MASK        0x00000FF0
  70#define L2_EVT_GRP_MASK         0x0000000F
  71#define L2_EVT_CODE_SHIFT       4
  72#define L2_EVT_GRP_SHIFT        0
  73
  74#define L2_EVT_CODE(event)   (((event) & L2_EVT_CODE_MASK) >> L2_EVT_CODE_SHIFT)
  75#define L2_EVT_GROUP(event)  (((event) & L2_EVT_GRP_MASK) >> L2_EVT_GRP_SHIFT)
  76
  77#define L2_EVT_GROUP_MAX        7
  78
  79#define L2_COUNTER_RELOAD       BIT_ULL(31)
  80#define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63)
  81
  82#define L2CPUSRSELR_EL1         sys_reg(3, 3, 15, 0, 6)
  83#define L2CPUSRDR_EL1           sys_reg(3, 3, 15, 0, 7)
  84
  85#define reg_idx(reg, i)         (((i) * IA_L2_REG_OFFSET) + reg##_BASE)
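
/*
 * Worked example (illustrative): reg_idx(IA_L2PMXEVCNTR, 2) expands to
 * ((2) * IA_L2_REG_OFFSET) + IA_L2PMXEVCNTR_BASE == (2 * 0x10) + 0x421 == 0x441,
 * i.e. the per-counter IA_* registers for counter index i sit at BASE + i * 0x10.
 */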
  86
  87/*
  88 * Events
  89 */
  90#define L2_EVENT_CYCLES                    0xfe
  91#define L2_EVENT_DCACHE_OPS                0x400
  92#define L2_EVENT_ICACHE_OPS                0x401
  93#define L2_EVENT_TLBI                      0x402
  94#define L2_EVENT_BARRIERS                  0x403
  95#define L2_EVENT_TOTAL_READS               0x405
  96#define L2_EVENT_TOTAL_WRITES              0x406
  97#define L2_EVENT_TOTAL_REQUESTS            0x407
  98#define L2_EVENT_LDREX                     0x420
  99#define L2_EVENT_STREX                     0x421
 100#define L2_EVENT_CLREX                     0x422
 101
 102static DEFINE_RAW_SPINLOCK(l2_access_lock);
 103
 104/**
 105 * set_l2_indirect_reg: write value to an L2 register
 106 * @reg: Address of L2 register.
  107 * @val: Value to be written to the register.
 108 *
 109 * Use architecturally required barriers for ordering between system register
  110 * accesses.
 111 */
 112static void set_l2_indirect_reg(u64 reg, u64 val)
 113{
 114	unsigned long flags;
 115
 116	raw_spin_lock_irqsave(&l2_access_lock, flags);
 117	write_sysreg_s(reg, L2CPUSRSELR_EL1);
 118	isb();
 119	write_sysreg_s(val, L2CPUSRDR_EL1);
 120	isb();
 121	raw_spin_unlock_irqrestore(&l2_access_lock, flags);
 122}
 123
 124/**
 125 * get_l2_indirect_reg: read an L2 register value
 126 * @reg: Address of L2 register.
 127 *
 128 * Use architecturally required barriers for ordering between system register
  129 * accesses.
 130 */
 131static u64 get_l2_indirect_reg(u64 reg)
 132{
 133	u64 val;
 134	unsigned long flags;
 135
 136	raw_spin_lock_irqsave(&l2_access_lock, flags);
 137	write_sysreg_s(reg, L2CPUSRSELR_EL1);
 138	isb();
 139	val = read_sysreg_s(L2CPUSRDR_EL1);
 140	raw_spin_unlock_irqrestore(&l2_access_lock, flags);
 141
 142	return val;
 143}
 144
 145struct cluster_pmu;
 146
 147/*
 148 * Aggregate PMU. Implements the core pmu functions and manages
 149 * the hardware PMUs.
 150 */
 151struct l2cache_pmu {
 152	struct hlist_node node;
 153	u32 num_pmus;
 154	struct pmu pmu;
 155	int num_counters;
 156	cpumask_t cpumask;
 157	struct platform_device *pdev;
 158	struct cluster_pmu * __percpu *pmu_cluster;
 159	struct list_head clusters;
 160};
 161
 162/*
  163 * The cache is made up of one or more clusters; each cluster has its own PMU.
 164 * Each cluster is associated with one or more CPUs.
 165 * This structure represents one of the hardware PMUs.
 166 *
 167 * Events can be envisioned as a 2-dimensional array. Each column represents
 168 * a group of events. There are 8 groups. Only one entry from each
 169 * group can be in use at a time.
 170 *
 171 * Events are specified as 0xCCG, where CC is 2 hex digits specifying
 172 * the code (array row) and G specifies the group (column).
 173 *
 174 * In addition there is a cycle counter event specified by L2CYCLE_CTR_RAW_CODE
 175 * which is outside the above scheme.
 176 */
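/*
 * Worked example (illustrative): L2_EVENT_TOTAL_READS (0x405) selects
 * code 0x40 from group 5, i.e. L2_EVT_CODE(0x405) == 0x40 and
 * L2_EVT_GROUP(0x405) == 5.
 */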
 177struct cluster_pmu {
 178	struct list_head next;
 179	struct perf_event *events[MAX_L2_CTRS];
 180	struct l2cache_pmu *l2cache_pmu;
 181	DECLARE_BITMAP(used_counters, MAX_L2_CTRS);
 182	DECLARE_BITMAP(used_groups, L2_EVT_GROUP_MAX + 1);
 183	int irq;
 184	int cluster_id;
 185	/* The CPU that is used for collecting events on this cluster */
 186	int on_cpu;
 187	/* All the CPUs associated with this cluster */
 188	cpumask_t cluster_cpus;
 189	spinlock_t pmu_lock;
 190};
 191
 192#define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu))
 193
 194static u32 l2_cycle_ctr_idx;
 195static u32 l2_counter_present_mask;
 196
 197static inline u32 idx_to_reg_bit(u32 idx)
 198{
 199	if (idx == l2_cycle_ctr_idx)
 200		return BIT(L2CYCLE_CTR_BIT);
 201
 202	return BIT(idx);
 203}
 204
 205static inline struct cluster_pmu *get_cluster_pmu(
 206	struct l2cache_pmu *l2cache_pmu, int cpu)
 207{
 208	return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu);
 209}
 210
 211static void cluster_pmu_reset(void)
 212{
 213	/* Reset all counters */
 214	set_l2_indirect_reg(L2PMCR, L2PMCR_RESET_ALL);
 215	set_l2_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask);
 216	set_l2_indirect_reg(L2PMINTENCLR, l2_counter_present_mask);
 217	set_l2_indirect_reg(L2PMOVSCLR, l2_counter_present_mask);
 218}
 219
 220static inline void cluster_pmu_enable(void)
 221{
 222	set_l2_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE);
 223}
 224
 225static inline void cluster_pmu_disable(void)
 226{
 227	set_l2_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE);
 228}
 229
 230static inline void cluster_pmu_counter_set_value(u32 idx, u64 value)
 231{
 232	if (idx == l2_cycle_ctr_idx)
 233		set_l2_indirect_reg(L2PMCCNTR, value);
 234	else
 235		set_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value);
 236}
 237
 238static inline u64 cluster_pmu_counter_get_value(u32 idx)
 239{
 240	u64 value;
 241
 242	if (idx == l2_cycle_ctr_idx)
 243		value = get_l2_indirect_reg(L2PMCCNTR);
 244	else
 245		value = get_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx));
 246
 247	return value;
 248}
 249
 250static inline void cluster_pmu_counter_enable(u32 idx)
 251{
 252	set_l2_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx));
 253}
 254
 255static inline void cluster_pmu_counter_disable(u32 idx)
 256{
 257	set_l2_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx));
 258}
 259
 260static inline void cluster_pmu_counter_enable_interrupt(u32 idx)
 261{
 262	set_l2_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx));
 263}
 264
 265static inline void cluster_pmu_counter_disable_interrupt(u32 idx)
 266{
 267	set_l2_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx));
 268}
 269
 270static inline void cluster_pmu_set_evccntcr(u32 val)
 271{
 272	set_l2_indirect_reg(L2PMCCNTCR, val);
 273}
 274
 275static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val)
 276{
 277	set_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val);
 278}
 279
 280static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val)
 281{
 282	set_l2_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val);
 283}
 284
 285static void cluster_pmu_set_resr(struct cluster_pmu *cluster,
 286			       u32 event_group, u32 event_cc)
 287{
 288	u64 field;
 289	u64 resr_val;
 290	u32 shift;
 291	unsigned long flags;
 292
 293	shift = L2PMRESR_GROUP_BITS * event_group;
 294	field = ((u64)(event_cc & L2PMRESR_GROUP_MASK) << shift);
 295
 296	spin_lock_irqsave(&cluster->pmu_lock, flags);
 297
 298	resr_val = get_l2_indirect_reg(L2PMRESR);
 299	resr_val &= ~(L2PMRESR_GROUP_MASK << shift);
 300	resr_val |= field;
 301	resr_val |= L2PMRESR_EN;
 302	set_l2_indirect_reg(L2PMRESR, resr_val);
 303
 304	spin_unlock_irqrestore(&cluster->pmu_lock, flags);
 305}
 306
 307/*
 308 * Hardware allows filtering of events based on the originating
 309 * CPU. Turn this off by setting filter bits to allow events from
  310 * all CPUs, subunits and ID-independent events in this cluster.
 311 */
 312static inline void cluster_pmu_set_evfilter_sys_mode(u32 ctr)
 313{
 314	u32 val =  L2PMXEVFILTER_SUFILTER_ALL |
 315		   L2PMXEVFILTER_ORGFILTER_IDINDEP |
 316		   L2PMXEVFILTER_ORGFILTER_ALL;
 317
 318	set_l2_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val);
 319}
 320
 321static inline u32 cluster_pmu_getreset_ovsr(void)
 322{
 323	u32 result = get_l2_indirect_reg(L2PMOVSSET);
 324
 325	set_l2_indirect_reg(L2PMOVSCLR, result);
 326	return result;
 327}
 328
 329static inline bool cluster_pmu_has_overflowed(u32 ovsr)
 330{
 331	return !!(ovsr & l2_counter_present_mask);
 332}
 333
 334static inline bool cluster_pmu_counter_has_overflowed(u32 ovsr, u32 idx)
 335{
 336	return !!(ovsr & idx_to_reg_bit(idx));
 337}
 338
 339static void l2_cache_event_update(struct perf_event *event)
 340{
 341	struct hw_perf_event *hwc = &event->hw;
 342	u64 delta, prev, now;
 343	u32 idx = hwc->idx;
 344
 345	do {
 346		prev = local64_read(&hwc->prev_count);
 347		now = cluster_pmu_counter_get_value(idx);
 348	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
 349
 350	/*
 351	 * The cycle counter is 64-bit, but all other counters are
 352	 * 32-bit, and we must handle 32-bit overflow explicitly.
 353	 */
 354	delta = now - prev;
 355	if (idx != l2_cycle_ctr_idx)
 356		delta &= 0xffffffff;
 357
 358	local64_add(delta, &event->count);
 359}
 360
 361static void l2_cache_cluster_set_period(struct cluster_pmu *cluster,
 362				       struct hw_perf_event *hwc)
 363{
 364	u32 idx = hwc->idx;
 365	u64 new;
 366
 367	/*
 368	 * We limit the max period to half the max counter value so
 369	 * that even in the case of extreme interrupt latency the
 370	 * counter will (hopefully) not wrap past its initial value.
 371	 */
 372	if (idx == l2_cycle_ctr_idx)
 373		new = L2_CYCLE_COUNTER_RELOAD;
 374	else
 375		new = L2_COUNTER_RELOAD;
 376
 377	local64_set(&hwc->prev_count, new);
 378	cluster_pmu_counter_set_value(idx, new);
 379}
 380
 381static int l2_cache_get_event_idx(struct cluster_pmu *cluster,
 382				   struct perf_event *event)
 383{
 384	struct hw_perf_event *hwc = &event->hw;
 385	int idx;
 386	int num_ctrs = cluster->l2cache_pmu->num_counters - 1;
 387	unsigned int group;
 388
 389	if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
 390		if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters))
 391			return -EAGAIN;
 392
 393		return l2_cycle_ctr_idx;
 394	}
 395
 396	idx = find_first_zero_bit(cluster->used_counters, num_ctrs);
 397	if (idx == num_ctrs)
 398		/* The counters are all in use. */
 399		return -EAGAIN;
 400
 401	/*
 402	 * Check for column exclusion: event column already in use by another
 403	 * event. This is for events which are not in the same group.
 404	 * Conflicting events in the same group are detected in event_init.
 405	 */
 406	group = L2_EVT_GROUP(hwc->config_base);
 407	if (test_bit(group, cluster->used_groups))
 408		return -EAGAIN;
 409
 410	set_bit(idx, cluster->used_counters);
 411	set_bit(group, cluster->used_groups);
 412
 413	return idx;
 414}
 415
 416static void l2_cache_clear_event_idx(struct cluster_pmu *cluster,
 417				      struct perf_event *event)
 418{
 419	struct hw_perf_event *hwc = &event->hw;
 420	int idx = hwc->idx;
 421
 422	clear_bit(idx, cluster->used_counters);
 423	if (hwc->config_base != L2CYCLE_CTR_RAW_CODE)
 424		clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups);
 425}
 426
 427static irqreturn_t l2_cache_handle_irq(int irq_num, void *data)
 428{
 429	struct cluster_pmu *cluster = data;
 430	int num_counters = cluster->l2cache_pmu->num_counters;
 431	u32 ovsr;
 432	int idx;
 433
 434	ovsr = cluster_pmu_getreset_ovsr();
 435	if (!cluster_pmu_has_overflowed(ovsr))
 436		return IRQ_NONE;
 437
 438	for_each_set_bit(idx, cluster->used_counters, num_counters) {
 439		struct perf_event *event = cluster->events[idx];
 440		struct hw_perf_event *hwc;
 441
 442		if (WARN_ON_ONCE(!event))
 443			continue;
 444
 445		if (!cluster_pmu_counter_has_overflowed(ovsr, idx))
 446			continue;
 447
 448		l2_cache_event_update(event);
 449		hwc = &event->hw;
 450
 451		l2_cache_cluster_set_period(cluster, hwc);
 452	}
 453
 454	return IRQ_HANDLED;
 455}
 456
 457/*
 458 * Implementation of abstract pmu functionality required by
 459 * the core perf events code.
 460 */
 461
 462static void l2_cache_pmu_enable(struct pmu *pmu)
 463{
 464	/*
 465	 * Although there is only one PMU (per socket) controlling multiple
  466	 * physical PMUs (per cluster), because we do not support per-task mode,
 467	 * each event is associated with a CPU. Each event has pmu_enable
 468	 * called on its CPU, so here it is only necessary to enable the
 469	 * counters for the current CPU.
 470	 */
 471
 472	cluster_pmu_enable();
 473}
 474
 475static void l2_cache_pmu_disable(struct pmu *pmu)
 476{
 477	cluster_pmu_disable();
 478}
 479
 480static int l2_cache_event_init(struct perf_event *event)
 481{
 482	struct hw_perf_event *hwc = &event->hw;
 483	struct cluster_pmu *cluster;
 484	struct perf_event *sibling;
 485	struct l2cache_pmu *l2cache_pmu;
 486
 487	if (event->attr.type != event->pmu->type)
 488		return -ENOENT;
 489
 490	l2cache_pmu = to_l2cache_pmu(event->pmu);
 491
 492	if (hwc->sample_period) {
 493		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
 494				    "Sampling not supported\n");
 495		return -EOPNOTSUPP;
 496	}
 497
 498	if (event->cpu < 0) {
 499		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
 500				    "Per-task mode not supported\n");
 501		return -EOPNOTSUPP;
 502	}
 503
 504	if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) ||
 505	     ((event->attr.config & ~L2_EVT_MASK) != 0)) &&
 506	    (event->attr.config != L2CYCLE_CTR_RAW_CODE)) {
 507		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
 508				    "Invalid config %llx\n",
 509				    event->attr.config);
 510		return -EINVAL;
 511	}
 512
 513	/* Don't allow groups with mixed PMUs, except for s/w events */
 514	if (event->group_leader->pmu != event->pmu &&
 515	    !is_software_event(event->group_leader)) {
 516		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
 517			 "Can't create mixed PMU group\n");
 518		return -EINVAL;
 519	}
 520
 521	for_each_sibling_event(sibling, event->group_leader) {
 522		if (sibling->pmu != event->pmu &&
 523		    !is_software_event(sibling)) {
 524			dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
 525				 "Can't create mixed PMU group\n");
 526			return -EINVAL;
 527		}
 528	}
 529
 530	cluster = get_cluster_pmu(l2cache_pmu, event->cpu);
 531	if (!cluster) {
 532		/* CPU has not been initialised */
 533		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
 534			"CPU%d not associated with L2 cluster\n", event->cpu);
 535		return -EINVAL;
 536	}
 537
 538	/* Ensure all events in a group are on the same cpu */
 539	if ((event->group_leader != event) &&
 540	    (cluster->on_cpu != event->group_leader->cpu)) {
 541		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
  542			 "Can't create group on CPUs %d and %d\n",
 543			 event->cpu, event->group_leader->cpu);
 544		return -EINVAL;
 545	}
 546
 547	if ((event != event->group_leader) &&
 548	    !is_software_event(event->group_leader) &&
 549	    (L2_EVT_GROUP(event->group_leader->attr.config) ==
 550	     L2_EVT_GROUP(event->attr.config))) {
 551		dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
 552			 "Column exclusion: conflicting events %llx %llx\n",
 553		       event->group_leader->attr.config,
 554		       event->attr.config);
 555		return -EINVAL;
 556	}
 557
 558	for_each_sibling_event(sibling, event->group_leader) {
 559		if ((sibling != event) &&
 560		    !is_software_event(sibling) &&
 561		    (L2_EVT_GROUP(sibling->attr.config) ==
 562		     L2_EVT_GROUP(event->attr.config))) {
 563			dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
 564			     "Column exclusion: conflicting events %llx %llx\n",
 565					    sibling->attr.config,
 566					    event->attr.config);
 567			return -EINVAL;
 568		}
 569	}
 570
 571	hwc->idx = -1;
 572	hwc->config_base = event->attr.config;
 573
 574	/*
 575	 * Ensure all events are on the same cpu so all events are in the
 576	 * same cpu context, to avoid races on pmu_enable etc.
 577	 */
 578	event->cpu = cluster->on_cpu;
 579
 580	return 0;
 581}
 582
 583static void l2_cache_event_start(struct perf_event *event, int flags)
 584{
 585	struct cluster_pmu *cluster;
 586	struct hw_perf_event *hwc = &event->hw;
 587	int idx = hwc->idx;
 588	u32 config;
 589	u32 event_cc, event_group;
 590
 591	hwc->state = 0;
 592
 593	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
 594
 595	l2_cache_cluster_set_period(cluster, hwc);
 596
 597	if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) {
 598		cluster_pmu_set_evccntcr(0);
 599	} else {
 600		config = hwc->config_base;
 601		event_cc    = L2_EVT_CODE(config);
 602		event_group = L2_EVT_GROUP(config);
 603
 604		cluster_pmu_set_evcntcr(idx, 0);
 605		cluster_pmu_set_evtyper(idx, event_group);
 606		cluster_pmu_set_resr(cluster, event_group, event_cc);
 607		cluster_pmu_set_evfilter_sys_mode(idx);
 608	}
 609
 610	cluster_pmu_counter_enable_interrupt(idx);
 611	cluster_pmu_counter_enable(idx);
 612}
 613
 614static void l2_cache_event_stop(struct perf_event *event, int flags)
 615{
 616	struct hw_perf_event *hwc = &event->hw;
 617	int idx = hwc->idx;
 618
 619	if (hwc->state & PERF_HES_STOPPED)
 620		return;
 621
 622	cluster_pmu_counter_disable_interrupt(idx);
 623	cluster_pmu_counter_disable(idx);
 624
 625	if (flags & PERF_EF_UPDATE)
 626		l2_cache_event_update(event);
 627	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 628}
 629
 630static int l2_cache_event_add(struct perf_event *event, int flags)
 631{
 632	struct hw_perf_event *hwc = &event->hw;
 633	int idx;
 634	int err = 0;
 635	struct cluster_pmu *cluster;
 636
 637	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
 638
 639	idx = l2_cache_get_event_idx(cluster, event);
 640	if (idx < 0)
 641		return idx;
 642
 643	hwc->idx = idx;
 644	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 645	cluster->events[idx] = event;
 646	local64_set(&hwc->prev_count, 0);
 647
 648	if (flags & PERF_EF_START)
 649		l2_cache_event_start(event, flags);
 650
 651	/* Propagate changes to the userspace mapping. */
 652	perf_event_update_userpage(event);
 653
 654	return err;
 655}
 656
 657static void l2_cache_event_del(struct perf_event *event, int flags)
 658{
 659	struct hw_perf_event *hwc = &event->hw;
 660	struct cluster_pmu *cluster;
 661	int idx = hwc->idx;
 662
 663	cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu);
 664
 665	l2_cache_event_stop(event, flags | PERF_EF_UPDATE);
 666	cluster->events[idx] = NULL;
 667	l2_cache_clear_event_idx(cluster, event);
 668
 669	perf_event_update_userpage(event);
 670}
 671
 672static void l2_cache_event_read(struct perf_event *event)
 673{
 674	l2_cache_event_update(event);
 675}
 676
 677static ssize_t l2_cache_pmu_cpumask_show(struct device *dev,
 678					 struct device_attribute *attr,
 679					 char *buf)
 680{
 681	struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev));
 682
 683	return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask);
 684}
 685
 686static struct device_attribute l2_cache_pmu_cpumask_attr =
 687		__ATTR(cpumask, S_IRUGO, l2_cache_pmu_cpumask_show, NULL);
 688
 689static struct attribute *l2_cache_pmu_cpumask_attrs[] = {
 690	&l2_cache_pmu_cpumask_attr.attr,
 691	NULL,
 692};
 693
 694static struct attribute_group l2_cache_pmu_cpumask_group = {
 695	.attrs = l2_cache_pmu_cpumask_attrs,
 696};
 697
 698/* CCG format for perf RAW codes. */
 699PMU_FORMAT_ATTR(l2_code,   "config:4-11");
 700PMU_FORMAT_ATTR(l2_group,  "config:0-3");
 701PMU_FORMAT_ATTR(event,     "config:0-11");
 702
 703static struct attribute *l2_cache_pmu_formats[] = {
 704	&format_attr_l2_code.attr,
 705	&format_attr_l2_group.attr,
 706	&format_attr_event.attr,
 707	NULL,
 708};
 709
 710static struct attribute_group l2_cache_pmu_format_group = {
 711	.name = "format",
 712	.attrs = l2_cache_pmu_formats,
 713};
 714
 715static ssize_t l2cache_pmu_event_show(struct device *dev,
 716				      struct device_attribute *attr, char *page)
 717{
 718	struct perf_pmu_events_attr *pmu_attr;
 719
 720	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
 721	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
 722}
 723
 724#define L2CACHE_EVENT_ATTR(_name, _id)					     \
 725	(&((struct perf_pmu_events_attr[]) {				     \
 726		{ .attr = __ATTR(_name, 0444, l2cache_pmu_event_show, NULL), \
 727		  .id = _id, }						     \
 728	})[0].attr.attr)
 729
 730static struct attribute *l2_cache_pmu_events[] = {
 731	L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES),
 732	L2CACHE_EVENT_ATTR(dcache-ops, L2_EVENT_DCACHE_OPS),
 733	L2CACHE_EVENT_ATTR(icache-ops, L2_EVENT_ICACHE_OPS),
 734	L2CACHE_EVENT_ATTR(tlbi, L2_EVENT_TLBI),
 735	L2CACHE_EVENT_ATTR(barriers, L2_EVENT_BARRIERS),
 736	L2CACHE_EVENT_ATTR(total-reads, L2_EVENT_TOTAL_READS),
 737	L2CACHE_EVENT_ATTR(total-writes, L2_EVENT_TOTAL_WRITES),
 738	L2CACHE_EVENT_ATTR(total-requests, L2_EVENT_TOTAL_REQUESTS),
 739	L2CACHE_EVENT_ATTR(ldrex, L2_EVENT_LDREX),
 740	L2CACHE_EVENT_ATTR(strex, L2_EVENT_STREX),
 741	L2CACHE_EVENT_ATTR(clrex, L2_EVENT_CLREX),
 742	NULL
 743};
 744
 745static struct attribute_group l2_cache_pmu_events_group = {
 746	.name = "events",
 747	.attrs = l2_cache_pmu_events,
 748};
 749
 750static const struct attribute_group *l2_cache_pmu_attr_grps[] = {
 751	&l2_cache_pmu_format_group,
 752	&l2_cache_pmu_cpumask_group,
 753	&l2_cache_pmu_events_group,
 754	NULL,
 755};
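
/*
 * Illustrative usage, assuming the perf tool and the "l2cache_0" name
 * registered below: the attribute groups above expose the named events and
 * the 0xCCG "event" format, so counting can be requested system-wide, e.g.
 *
 *   perf stat -e l2cache_0/cycles/,l2cache_0/total-reads/ -a sleep 1
 *
 * or with a raw code, where event=0x405 is equivalent to total-reads:
 *
 *   perf stat -e l2cache_0/event=0x405/ -a sleep 1
 *
 * Per-task counting is rejected in l2_cache_event_init(), so a CPU-wide
 * invocation (-a or -C) is required.
 */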
 756
 757/*
 758 * Generic device handlers
 759 */
 760
 761static const struct acpi_device_id l2_cache_pmu_acpi_match[] = {
 762	{ "QCOM8130", },
 763	{ }
 764};
 765
 766static int get_num_counters(void)
 767{
 768	int val;
 769
 770	val = get_l2_indirect_reg(L2PMCR);
 771
 772	/*
 773	 * Read number of counters from L2PMCR and add 1
 774	 * for the cycle counter.
 775	 */
 776	return ((val >> L2PMCR_NUM_EV_SHIFT) & L2PMCR_NUM_EV_MASK) + 1;
 777}
 778
 779static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
 780	struct l2cache_pmu *l2cache_pmu, int cpu)
 781{
 782	u64 mpidr;
 783	int cpu_cluster_id;
 784	struct cluster_pmu *cluster = NULL;
 785
 786	/*
 787	 * This assumes that the cluster_id is in MPIDR[aff1] for
 788	 * single-threaded cores, and MPIDR[aff2] for multi-threaded
 789	 * cores. This logic will have to be updated if this changes.
 790	 */
 791	mpidr = read_cpuid_mpidr();
 792	if (mpidr & MPIDR_MT_BITMASK)
 793		cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
 794	else
 795		cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 796
 797	list_for_each_entry(cluster, &l2cache_pmu->clusters, next) {
 798		if (cluster->cluster_id != cpu_cluster_id)
 799			continue;
 800
 801		dev_info(&l2cache_pmu->pdev->dev,
 802			 "CPU%d associated with cluster %d\n", cpu,
 803			 cluster->cluster_id);
 804		cpumask_set_cpu(cpu, &cluster->cluster_cpus);
 805		*per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
 806		break;
 807	}
 808
 809	return cluster;
 810}
 811
 812static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
 813{
 814	struct cluster_pmu *cluster;
 815	struct l2cache_pmu *l2cache_pmu;
 816
 817	l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
 818	cluster = get_cluster_pmu(l2cache_pmu, cpu);
 819	if (!cluster) {
 820		/* First time this CPU has come online */
 821		cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu);
 822		if (!cluster) {
 823			/* Only if broken firmware doesn't list every cluster */
 824			WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu);
 825			return 0;
 826		}
 827	}
 828
 829	/* If another CPU is managing this cluster, we're done */
 830	if (cluster->on_cpu != -1)
 831		return 0;
 832
 833	/*
  834	 * All CPUs on this cluster were down; use this one.
  835	 * Reset to put it into a sane state.
 836	 */
 837	cluster->on_cpu = cpu;
 838	cpumask_set_cpu(cpu, &l2cache_pmu->cpumask);
 839	cluster_pmu_reset();
 840
 841	WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu)));
 842	enable_irq(cluster->irq);
 843
 844	return 0;
 845}
 846
 847static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 848{
 849	struct cluster_pmu *cluster;
 850	struct l2cache_pmu *l2cache_pmu;
 851	cpumask_t cluster_online_cpus;
 852	unsigned int target;
 853
 854	l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
 855	cluster = get_cluster_pmu(l2cache_pmu, cpu);
 856	if (!cluster)
 857		return 0;
 858
 859	/* If this CPU is not managing the cluster, we're done */
 860	if (cluster->on_cpu != cpu)
 861		return 0;
 862
 863	/* Give up ownership of cluster */
 864	cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask);
 865	cluster->on_cpu = -1;
 866
 867	/* Any other CPU for this cluster which is still online */
 868	cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
 869		    cpu_online_mask);
 870	target = cpumask_any_but(&cluster_online_cpus, cpu);
 871	if (target >= nr_cpu_ids) {
 872		disable_irq(cluster->irq);
 873		return 0;
 874	}
 875
 876	perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target);
 877	cluster->on_cpu = target;
 878	cpumask_set_cpu(target, &l2cache_pmu->cpumask);
 879	WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target)));
 880
 881	return 0;
 882}
 883
 884static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
 885{
 886	struct platform_device *pdev = to_platform_device(dev->parent);
 887	struct platform_device *sdev = to_platform_device(dev);
 888	struct l2cache_pmu *l2cache_pmu = data;
 889	struct cluster_pmu *cluster;
 890	struct acpi_device *device;
 891	unsigned long fw_cluster_id;
 892	int err;
 893	int irq;
 894
 895	if (acpi_bus_get_device(ACPI_HANDLE(dev), &device))
 896		return -ENODEV;
 897
 898	if (kstrtoul(device->pnp.unique_id, 10, &fw_cluster_id) < 0) {
 899		dev_err(&pdev->dev, "unable to read ACPI uid\n");
 900		return -ENODEV;
 901	}
 902
 903	cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL);
 904	if (!cluster)
 905		return -ENOMEM;
 906
 907	INIT_LIST_HEAD(&cluster->next);
 908	list_add(&cluster->next, &l2cache_pmu->clusters);
 909	cluster->cluster_id = fw_cluster_id;
 910
 911	irq = platform_get_irq(sdev, 0);
 912	if (irq < 0)
 913		return irq;
 914	irq_set_status_flags(irq, IRQ_NOAUTOEN);
 915	cluster->irq = irq;
 916
 917	cluster->l2cache_pmu = l2cache_pmu;
 918	cluster->on_cpu = -1;
 919
 920	err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq,
 921			       IRQF_NOBALANCING | IRQF_NO_THREAD,
 922			       "l2-cache-pmu", cluster);
 923	if (err) {
 924		dev_err(&pdev->dev,
 925			"Unable to request IRQ%d for L2 PMU counters\n", irq);
 926		return err;
 927	}
 928
 929	dev_info(&pdev->dev,
  930		"Registered L2 cache PMU cluster %lu\n", fw_cluster_id);
 931
 932	spin_lock_init(&cluster->pmu_lock);
 933
 934	l2cache_pmu->num_pmus++;
 935
 936	return 0;
 937}
 938
 939static int l2_cache_pmu_probe(struct platform_device *pdev)
 940{
 941	int err;
 942	struct l2cache_pmu *l2cache_pmu;
 943
 944	l2cache_pmu =
 945		devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL);
 946	if (!l2cache_pmu)
 947		return -ENOMEM;
 948
 949	INIT_LIST_HEAD(&l2cache_pmu->clusters);
 950
 951	platform_set_drvdata(pdev, l2cache_pmu);
 952	l2cache_pmu->pmu = (struct pmu) {
 953		/* suffix is instance id for future use with multiple sockets */
 954		.name		= "l2cache_0",
 955		.task_ctx_nr    = perf_invalid_context,
 956		.pmu_enable	= l2_cache_pmu_enable,
 957		.pmu_disable	= l2_cache_pmu_disable,
 958		.event_init	= l2_cache_event_init,
 959		.add		= l2_cache_event_add,
 960		.del		= l2_cache_event_del,
 961		.start		= l2_cache_event_start,
 962		.stop		= l2_cache_event_stop,
 963		.read		= l2_cache_event_read,
 964		.attr_groups	= l2_cache_pmu_attr_grps,
 965		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
 966	};
 967
 968	l2cache_pmu->num_counters = get_num_counters();
 969	l2cache_pmu->pdev = pdev;
 970	l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev,
 971						     struct cluster_pmu *);
 972	if (!l2cache_pmu->pmu_cluster)
 973		return -ENOMEM;
 974
 975	l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1;
 976	l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) |
 977		BIT(L2CYCLE_CTR_BIT);
 978
 979	cpumask_clear(&l2cache_pmu->cpumask);
 980
 981	/* Read cluster info and initialize each cluster */
 982	err = device_for_each_child(&pdev->dev, l2cache_pmu,
 983				    l2_cache_pmu_probe_cluster);
 984	if (err)
 985		return err;
 986
 987	if (l2cache_pmu->num_pmus == 0) {
 988		dev_err(&pdev->dev, "No hardware L2 cache PMUs found\n");
 989		return -ENODEV;
 990	}
 991
 992	err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
 993				       &l2cache_pmu->node);
 994	if (err) {
  995		dev_err(&pdev->dev, "Error %d registering hotplug\n", err);
 996		return err;
 997	}
 998
 999	err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1);
1000	if (err) {
1001		dev_err(&pdev->dev, "Error %d registering L2 cache PMU\n", err);
1002		goto out_unregister;
1003	}
1004
1005	dev_info(&pdev->dev, "Registered L2 cache PMU using %d HW PMUs\n",
1006		 l2cache_pmu->num_pmus);
1007
1008	return err;
1009
1010out_unregister:
1011	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
1012				    &l2cache_pmu->node);
1013	return err;
1014}
1015
1016static int l2_cache_pmu_remove(struct platform_device *pdev)
1017{
1018	struct l2cache_pmu *l2cache_pmu =
1019		to_l2cache_pmu(platform_get_drvdata(pdev));
1020
1021	perf_pmu_unregister(&l2cache_pmu->pmu);
1022	cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
1023				    &l2cache_pmu->node);
1024	return 0;
1025}
1026
1027static struct platform_driver l2_cache_pmu_driver = {
1028	.driver = {
1029		.name = "qcom-l2cache-pmu",
1030		.acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match),
1031	},
1032	.probe = l2_cache_pmu_probe,
1033	.remove = l2_cache_pmu_remove,
1034};
1035
1036static int __init register_l2_cache_pmu_driver(void)
1037{
1038	int err;
1039
1040	err = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
1041				      "AP_PERF_ARM_QCOM_L2_ONLINE",
1042				      l2cache_pmu_online_cpu,
1043				      l2cache_pmu_offline_cpu);
1044	if (err)
1045		return err;
1046
1047	return platform_driver_register(&l2_cache_pmu_driver);
1048}
1049device_initcall(register_l2_cache_pmu_driver);