// SPDX-License-Identifier: GPL-2.0

/*
 * This driver adds support for perf events to use the Performance
 * Monitor Counter Groups (PMCG) associated with an SMMUv3 node
 * to monitor that node.
 *
 * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where
 * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped
 * to 4K boundary. For example, the PMCG at 0xff88840000 is named
 * smmuv3_pmcg_ff88840
 *
 * Filtering by stream id is done by specifying filtering parameters
 * with the event. options are:
 *   filter_enable    - 0 = no filtering, 1 = filtering enabled
 *   filter_span      - 0 = exact match, 1 = pattern match
 *   filter_stream_id - pattern to filter against
 *
 * To match a partial StreamID where the X most-significant bits must match
 * but the Y least-significant bits might differ, STREAMID is programmed
 * with a value that contains:
 *   STREAMID[Y - 1] == 0.
 *   STREAMID[Y - 2:0] == 1 (where Y > 1).
 * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards)
 * contain a value to match from the corresponding bits of event StreamID.
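 *
 * For instance, following the encoding above, the eight StreamIDs
 * 0x40-0x47 (Y = 3) can be matched by programming filter_span=1 and
 * filter_stream_id=0x43: bits [1:0] are 1, bit [2] is 0, and the
 * remaining bits hold 0x40 >> 3.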
 *
 * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,
 * filter_span=1,filter_stream_id=0x42/ -a netperf
 * Applies filter pattern 0x42 to transaction events, which means events
 * matching stream ids 0x42 and 0x43 are counted. Further filtering
 * information is available in the SMMU documentation.
 *
 * SMMU events are not attributable to a CPU, so task mode and sampling
 * are not supported.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#define SMMU_PMCG_EVCNTR0               0x0
#define SMMU_PMCG_EVCNTR(n, stride)     (SMMU_PMCG_EVCNTR0 + (n) * (stride))
#define SMMU_PMCG_EVTYPER0              0x400
#define SMMU_PMCG_EVTYPER(n)            (SMMU_PMCG_EVTYPER0 + (n) * 4)
#define SMMU_PMCG_SID_SPAN_SHIFT        29
#define SMMU_PMCG_SMR0                  0xA00
#define SMMU_PMCG_SMR(n)                (SMMU_PMCG_SMR0 + (n) * 4)
#define SMMU_PMCG_CNTENSET0             0xC00
#define SMMU_PMCG_CNTENCLR0             0xC20
#define SMMU_PMCG_INTENSET0             0xC40
#define SMMU_PMCG_INTENCLR0             0xC60
#define SMMU_PMCG_OVSCLR0               0xC80
#define SMMU_PMCG_OVSSET0               0xCC0
#define SMMU_PMCG_CFGR                  0xE00
#define SMMU_PMCG_CFGR_SID_FILTER_TYPE  BIT(23)
#define SMMU_PMCG_CFGR_MSI              BIT(21)
#define SMMU_PMCG_CFGR_RELOC_CTRS       BIT(20)
#define SMMU_PMCG_CFGR_SIZE             GENMASK(13, 8)
#define SMMU_PMCG_CFGR_NCTR             GENMASK(5, 0)
#define SMMU_PMCG_CR                    0xE04
#define SMMU_PMCG_CR_ENABLE             BIT(0)
#define SMMU_PMCG_IIDR                  0xE08
#define SMMU_PMCG_IIDR_PRODUCTID        GENMASK(31, 20)
#define SMMU_PMCG_IIDR_VARIANT          GENMASK(19, 16)
#define SMMU_PMCG_IIDR_REVISION         GENMASK(15, 12)
#define SMMU_PMCG_IIDR_IMPLEMENTER      GENMASK(11, 0)
#define SMMU_PMCG_CEID0                 0xE20
#define SMMU_PMCG_CEID1                 0xE28
#define SMMU_PMCG_IRQ_CTRL              0xE50
#define SMMU_PMCG_IRQ_CTRL_IRQEN        BIT(0)
#define SMMU_PMCG_IRQ_CFG0              0xE58
#define SMMU_PMCG_IRQ_CFG1              0xE60
#define SMMU_PMCG_IRQ_CFG2              0xE64

/* IMP-DEF ID registers */
#define SMMU_PMCG_PIDR0                 0xFE0
#define SMMU_PMCG_PIDR0_PART_0          GENMASK(7, 0)
#define SMMU_PMCG_PIDR1                 0xFE4
#define SMMU_PMCG_PIDR1_DES_0           GENMASK(7, 4)
#define SMMU_PMCG_PIDR1_PART_1          GENMASK(3, 0)
#define SMMU_PMCG_PIDR2                 0xFE8
#define SMMU_PMCG_PIDR2_REVISION        GENMASK(7, 4)
#define SMMU_PMCG_PIDR2_DES_1           GENMASK(2, 0)
#define SMMU_PMCG_PIDR3                 0xFEC
#define SMMU_PMCG_PIDR3_REVAND          GENMASK(7, 4)
#define SMMU_PMCG_PIDR4                 0xFD0
#define SMMU_PMCG_PIDR4_DES_2           GENMASK(3, 0)

/* MSI config fields */
#define MSI_CFG0_ADDR_MASK              GENMASK_ULL(51, 2)
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE   0x1

#define SMMU_PMCG_DEFAULT_FILTER_SPAN   1
#define SMMU_PMCG_DEFAULT_FILTER_SID    GENMASK(31, 0)

#define SMMU_PMCG_MAX_COUNTERS          64
#define SMMU_PMCG_ARCH_MAX_EVENTS       128

#define SMMU_PMCG_PA_SHIFT              12

#define SMMU_PMCG_EVCNTR_RDONLY         BIT(0)

static int cpuhp_state_num;

struct smmu_pmu {
	struct hlist_node node;
	struct perf_event *events[SMMU_PMCG_MAX_COUNTERS];
	DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS);
	DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS);
	unsigned int irq;
	unsigned int on_cpu;
	struct pmu pmu;
	unsigned int num_counters;
	struct device *dev;
	void __iomem *reg_base;
	void __iomem *reloc_base;
	u64 counter_mask;
	u32 options;
	u32 iidr;
	bool global_filter;
};

#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))

#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end)	\
	static inline u32 get_##_name(struct perf_event *event)	\
	{								\
		return FIELD_GET(GENMASK_ULL(_end, _start),		\
				 event->attr._config);			\
	}								\

SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32);
SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33);

static inline void smmu_pmu_enable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(SMMU_PMCG_IRQ_CTRL_IRQEN,
	       smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
	writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR);
}

static inline void smmu_pmu_disable(struct pmu *pmu)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu);

	writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR);
	writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL);
}

static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu,
					      u32 idx, u64 value)
{
	if (smmu_pmu->counter_mask & BIT(32))
		writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));
}

static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx)
{
	u64 value;

	if (smmu_pmu->counter_mask & BIT(32))
		value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8));
	else
		value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4));

	return value;
}

static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0);
}

static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
}

static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0);
}

static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu,
					      u32 idx)
{
	writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
}

static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx,
					u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx));
}

static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val)
{
	writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx));
}

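/*
 * Read the hardware counter and fold the difference since the last read
 * into the perf event count. The cmpxchg loop guards against a racing
 * update of prev_count (for example from the overflow interrupt), and
 * masking the delta to the implemented counter width keeps the result
 * correct if a narrower counter has wrapped.
 */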
static void smmu_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u64 delta, prev, now;
	u32 idx = hwc->idx;

	do {
		prev = local64_read(&hwc->prev_count);
		now = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	/* handle overflow. */
	delta = now - prev;
	delta &= smmu_pmu->counter_mask;

	local64_add(delta, &event->count);
}

static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu,
				struct hw_perf_event *hwc)
{
	u32 idx = hwc->idx;
	u64 new;

	if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) {
		/*
		 * On platforms that require this quirk, if the counter starts
		 * at < half_counter value and wraps, the current logic of
		 * handling the overflow may not work. It is expected that,
		 * those platforms will have full 64 counter bits implemented
		 * so that such a possibility is remote(eg: HiSilicon HIP08).
		 */
		new = smmu_pmu_counter_get_value(smmu_pmu, idx);
	} else {
		/*
		 * We limit the max period to half the max counter value
		 * of the counter size, so that even in the case of extreme
		 * interrupt latency the counter will (hopefully) not wrap
		 * past its initial value.
		 */
		new = smmu_pmu->counter_mask >> 1;
		smmu_pmu_counter_set_value(smmu_pmu, idx, new);
	}

	local64_set(&hwc->prev_count, new);
}

static void smmu_pmu_set_event_filter(struct perf_event *event,
				      int idx, u32 span, u32 sid)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	u32 evtyper;

	evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT;
	smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper);
	smmu_pmu_set_smr(smmu_pmu, idx, sid);
}

static bool smmu_pmu_check_global_filter(struct perf_event *curr,
					 struct perf_event *new)
{
	if (get_filter_enable(new) != get_filter_enable(curr))
		return false;

	if (!get_filter_enable(new))
		return true;

	return get_filter_span(new) == get_filter_span(curr) &&
	       get_filter_stream_id(new) == get_filter_stream_id(curr);
}

static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
				       struct perf_event *event, int idx)
{
	u32 span, sid;
	unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters;
	bool filter_en = !!get_filter_enable(event);

	span = filter_en ? get_filter_span(event) :
			   SMMU_PMCG_DEFAULT_FILTER_SPAN;
	sid = filter_en ? get_filter_stream_id(event) :
			  SMMU_PMCG_DEFAULT_FILTER_SID;

	cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
	/*
	 * Per-counter filtering, or scheduling the first globally-filtered
	 * event into an empty PMU so idx == 0 and it works out equivalent.
	 */
	if (!smmu_pmu->global_filter || cur_idx == num_ctrs) {
		smmu_pmu_set_event_filter(event, idx, span, sid);
		return 0;
	}

	/* Otherwise, must match whatever's currently scheduled */
	if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) {
		smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event));
		return 0;
	}

	return -EAGAIN;
}

static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
				  struct perf_event *event)
{
	int idx, err;
	unsigned int num_ctrs = smmu_pmu->num_counters;

	idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs);
	if (idx == num_ctrs)
		/* The counters are all in use. */
		return -EAGAIN;

	err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx);
	if (err)
		return err;

	set_bit(idx, smmu_pmu->used_counters);

	return idx;
}

static bool smmu_pmu_events_compatible(struct perf_event *curr,
				       struct perf_event *new)
{
	if (new->pmu != curr->pmu)
		return false;

	if (to_smmu_pmu(new->pmu)->global_filter &&
	    !smmu_pmu_check_global_filter(curr, new))
		return false;

	return true;
}

/*
 * Implementation of abstract pmu functionality required by
 * the core perf events code.
 */

static int smmu_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct device *dev = smmu_pmu->dev;
	struct perf_event *sibling;
	int group_num_events = 1;
	u16 event_id;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (hwc->sample_period) {
		dev_dbg(dev, "Sampling not supported\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0) {
		dev_dbg(dev, "Per-task mode not supported\n");
		return -EOPNOTSUPP;
	}

	/* Verify specified event is supported on this PMU */
	event_id = get_event(event);
	if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS &&
	    (!test_bit(event_id, smmu_pmu->supported_events))) {
		dev_dbg(dev, "Invalid event %d for this PMU\n", event_id);
		return -EINVAL;
	}

	/* Don't allow groups with mixed PMUs, except for s/w events */
	if (!is_software_event(event->group_leader)) {
		if (!smmu_pmu_events_compatible(event->group_leader, event))
			return -EINVAL;

		if (++group_num_events > smmu_pmu->num_counters)
			return -EINVAL;
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (is_software_event(sibling))
			continue;

		if (!smmu_pmu_events_compatible(sibling, event))
			return -EINVAL;

		if (++group_num_events > smmu_pmu->num_counters)
			return -EINVAL;
	}

	hwc->idx = -1;

	/*
	 * Ensure all events are on the same cpu so all events are in the
	 * same cpu context, to avoid races on pmu_enable etc.
	 */
	event->cpu = smmu_pmu->on_cpu;

	return 0;
}

static void smmu_pmu_event_start(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	hwc->state = 0;

	smmu_pmu_set_period(smmu_pmu, hwc);

	smmu_pmu_counter_enable(smmu_pmu, idx);
}

static void smmu_pmu_event_stop(struct perf_event *event, int flags)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	smmu_pmu_counter_disable(smmu_pmu, idx);
	/* As the counter gets updated on _start, ignore PERF_EF_UPDATE */
	smmu_pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int smmu_pmu_event_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);

	idx = smmu_pmu_get_event_idx(smmu_pmu, event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	smmu_pmu->events[idx] = event;
	local64_set(&hwc->prev_count, 0);

	smmu_pmu_interrupt_enable(smmu_pmu, idx);

	if (flags & PERF_EF_START)
		smmu_pmu_event_start(event, flags);

	/* Propagate changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void smmu_pmu_event_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
	int idx = hwc->idx;

	smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE);
	smmu_pmu_interrupt_disable(smmu_pmu, idx);
	smmu_pmu->events[idx] = NULL;
	clear_bit(idx, smmu_pmu->used_counters);

	perf_event_update_userpage(event);
}

static void smmu_pmu_event_read(struct perf_event *event)
{
	smmu_pmu_event_update(event);
}

/* cpumask */

static ssize_t smmu_pmu_cpumask_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu));
}

static struct device_attribute smmu_pmu_cpumask_attr =
	__ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL);

static struct attribute *smmu_pmu_cpumask_attrs[] = {
	&smmu_pmu_cpumask_attr.attr,
	NULL
};

static const struct attribute_group smmu_pmu_cpumask_group = {
	.attrs = smmu_pmu_cpumask_attrs,
};

/* Events */

static ssize_t smmu_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define SMMU_EVENT_ATTR(name, config)					\
	PMU_EVENT_ATTR_ID(name, smmu_pmu_event_show, config)

static struct attribute *smmu_pmu_events[] = {
	SMMU_EVENT_ATTR(cycles, 0),
	SMMU_EVENT_ATTR(transaction, 1),
	SMMU_EVENT_ATTR(tlb_miss, 2),
	SMMU_EVENT_ATTR(config_cache_miss, 3),
	SMMU_EVENT_ATTR(trans_table_walk_access, 4),
	SMMU_EVENT_ATTR(config_struct_access, 5),
	SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6),
	SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7),
	NULL
};

static umode_t smmu_pmu_event_is_visible(struct kobject *kobj,
					 struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (test_bit(pmu_attr->id, smmu_pmu->supported_events))
		return attr->mode;

	return 0;
}

static const struct attribute_group smmu_pmu_events_group = {
	.name = "events",
	.attrs = smmu_pmu_events,
	.is_visible = smmu_pmu_event_is_visible,
};

static ssize_t smmu_pmu_identifier_attr_show(struct device *dev,
					     struct device_attribute *attr,
					     char *page)
{
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	return sysfs_emit(page, "0x%08x\n", smmu_pmu->iidr);
}

static umode_t smmu_pmu_identifier_attr_visible(struct kobject *kobj,
						struct attribute *attr,
						int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev));

	if (!smmu_pmu->iidr)
		return 0;
	return attr->mode;
}

static struct device_attribute smmu_pmu_identifier_attr =
	__ATTR(identifier, 0444, smmu_pmu_identifier_attr_show, NULL);

static struct attribute *smmu_pmu_identifier_attrs[] = {
	&smmu_pmu_identifier_attr.attr,
	NULL
};

static const struct attribute_group smmu_pmu_identifier_group = {
	.attrs = smmu_pmu_identifier_attrs,
	.is_visible = smmu_pmu_identifier_attr_visible,
};

/* Formats */
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31");
PMU_FORMAT_ATTR(filter_span, "config1:32");
PMU_FORMAT_ATTR(filter_enable, "config1:33");

static struct attribute *smmu_pmu_formats[] = {
	&format_attr_event.attr,
	&format_attr_filter_stream_id.attr,
	&format_attr_filter_span.attr,
	&format_attr_filter_enable.attr,
	NULL
};

static const struct attribute_group smmu_pmu_format_group = {
	.name = "format",
	.attrs = smmu_pmu_formats,
};

static const struct attribute_group *smmu_pmu_attr_grps[] = {
	&smmu_pmu_cpumask_group,
	&smmu_pmu_events_group,
	&smmu_pmu_format_group,
	&smmu_pmu_identifier_group,
	NULL
};

/*
 * Generic device handlers
 */

static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct smmu_pmu *smmu_pmu;
	unsigned int target;

	smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node);
	if (cpu != smmu_pmu->on_cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target);
	smmu_pmu->on_cpu = target;
	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(target)));

	return 0;
}

static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
{
	struct smmu_pmu *smmu_pmu = data;
	DECLARE_BITMAP(ovs, BITS_PER_TYPE(u64));
	u64 ovsr;
	unsigned int idx;

	ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0);
	if (!ovsr)
		return IRQ_NONE;

	writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);

	bitmap_from_u64(ovs, ovsr);
	for_each_set_bit(idx, ovs, smmu_pmu->num_counters) {
		struct perf_event *event = smmu_pmu->events[idx];
		struct hw_perf_event *hwc;

		if (WARN_ON_ONCE(!event))
			continue;

		smmu_pmu_event_update(event);
		hwc = &event->hw;

		smmu_pmu_set_period(smmu_pmu, hwc);
	}

	return IRQ_HANDLED;
}

static void smmu_pmu_free_msis(void *data)
{
	struct device *dev = data;

	platform_msi_domain_free_irqs(dev);
}

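/*
 * Program the PMCG's MSI doorbell with the address and data composed by
 * the MSI layer, requesting Device-nGnRE attributes for the generated
 * write.
 */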
static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	phys_addr_t doorbell;
	struct device *dev = msi_desc_to_dev(desc);
	struct smmu_pmu *pmu = dev_get_drvdata(dev);

	doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
	doorbell &= MSI_CFG0_ADDR_MASK;

	writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);
	writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1);
	writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE,
		       pmu->reg_base + SMMU_PMCG_IRQ_CFG2);
}

static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
{
	struct device *dev = pmu->dev;
	int ret;

	/* Clear MSI address reg */
	writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0);

	/* MSI supported or not */
	if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
		return;

	ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
	if (ret) {
		dev_warn(dev, "failed to allocate MSIs\n");
		return;
	}

	pmu->irq = msi_get_virq(dev, 0);

	/* Add callback to free MSIs on teardown */
	devm_add_action(dev, smmu_pmu_free_msis, dev);
}

static int smmu_pmu_setup_irq(struct smmu_pmu *pmu)
{
	unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD;
	int irq, ret = -ENXIO;

	smmu_pmu_setup_msi(pmu);

	irq = pmu->irq;
	if (irq)
		ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq,
				       flags, "smmuv3-pmu", pmu);
	return ret;
}

static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu)
{
	u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0);

	smmu_pmu_disable(&smmu_pmu->pmu);

	/* Disable counter and interrupt */
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0);
	writeq_relaxed(counter_present_mask,
		       smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
}

static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu)
{
	u32 model;

	model = *(u32 *)dev_get_platdata(smmu_pmu->dev);

	switch (model) {
	case IORT_SMMU_V3_PMCG_HISI_HIP08:
		/* HiSilicon Erratum 162001800 */
		smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY;
		break;
	}

	dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options);
}

static bool smmu_pmu_coresight_id_regs(struct smmu_pmu *smmu_pmu)
{
	return of_device_is_compatible(smmu_pmu->dev->of_node,
				       "arm,mmu-600-pmcg");
}

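/*
 * If SMMU_PMCG_IIDR reads as zero on an MMU-600 PMCG, synthesise an
 * equivalent value from its CoreSight-style Peripheral ID registers
 * instead.
 */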
static void smmu_pmu_get_iidr(struct smmu_pmu *smmu_pmu)
{
	u32 iidr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_IIDR);

	if (!iidr && smmu_pmu_coresight_id_regs(smmu_pmu)) {
		u32 pidr0 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR0);
		u32 pidr1 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR1);
		u32 pidr2 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR2);
		u32 pidr3 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR3);
		u32 pidr4 = readl(smmu_pmu->reg_base + SMMU_PMCG_PIDR4);

		u32 productid = FIELD_GET(SMMU_PMCG_PIDR0_PART_0, pidr0) |
				(FIELD_GET(SMMU_PMCG_PIDR1_PART_1, pidr1) << 8);
		u32 variant = FIELD_GET(SMMU_PMCG_PIDR2_REVISION, pidr2);
		u32 revision = FIELD_GET(SMMU_PMCG_PIDR3_REVAND, pidr3);
		u32 implementer =
			FIELD_GET(SMMU_PMCG_PIDR1_DES_0, pidr1) |
			(FIELD_GET(SMMU_PMCG_PIDR2_DES_1, pidr2) << 4) |
			(FIELD_GET(SMMU_PMCG_PIDR4_DES_2, pidr4) << 8);

		iidr = FIELD_PREP(SMMU_PMCG_IIDR_PRODUCTID, productid) |
		       FIELD_PREP(SMMU_PMCG_IIDR_VARIANT, variant) |
		       FIELD_PREP(SMMU_PMCG_IIDR_REVISION, revision) |
		       FIELD_PREP(SMMU_PMCG_IIDR_IMPLEMENTER, implementer);
	}

	smmu_pmu->iidr = iidr;
}

static int smmu_pmu_probe(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu;
	struct resource *res_0;
	u32 cfgr, reg_size;
	u64 ceid_64[2];
	int irq, err;
	char *name;
	struct device *dev = &pdev->dev;

	smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL);
	if (!smmu_pmu)
		return -ENOMEM;

	smmu_pmu->dev = dev;
	platform_set_drvdata(pdev, smmu_pmu);

	smmu_pmu->pmu = (struct pmu) {
		.module = THIS_MODULE,
		.task_ctx_nr = perf_invalid_context,
		.pmu_enable = smmu_pmu_enable,
		.pmu_disable = smmu_pmu_disable,
		.event_init = smmu_pmu_event_init,
		.add = smmu_pmu_event_add,
		.del = smmu_pmu_event_del,
		.start = smmu_pmu_event_start,
		.stop = smmu_pmu_event_stop,
		.read = smmu_pmu_event_read,
		.attr_groups = smmu_pmu_attr_grps,
		.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
	};

	smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0);
	if (IS_ERR(smmu_pmu->reg_base))
		return PTR_ERR(smmu_pmu->reg_base);

	cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR);

	/* Determine if page 1 is present */
	if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) {
		smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(smmu_pmu->reloc_base))
			return PTR_ERR(smmu_pmu->reloc_base);
	} else {
		smmu_pmu->reloc_base = smmu_pmu->reg_base;
	}

	irq = platform_get_irq_optional(pdev, 0);
	if (irq > 0)
		smmu_pmu->irq = irq;

	ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0);
	ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1);
	bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64,
			  SMMU_PMCG_ARCH_MAX_EVENTS);

	smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1;

	smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE);

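	/* CFGR.SIZE encodes the implemented counter width minus one */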
	reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr);
	smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0);

	smmu_pmu_reset(smmu_pmu);

	err = smmu_pmu_setup_irq(smmu_pmu);
	if (err) {
		dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start);
		return err;
	}

	smmu_pmu_get_iidr(smmu_pmu);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx",
			      (res_0->start) >> SMMU_PMCG_PA_SHIFT);
	if (!name) {
		dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start);
		return -EINVAL;
	}

	if (!dev->of_node)
		smmu_pmu_get_acpi_options(smmu_pmu);

	/* Pick one CPU to be the preferred one to use */
	smmu_pmu->on_cpu = raw_smp_processor_id();
	WARN_ON(irq_set_affinity(smmu_pmu->irq, cpumask_of(smmu_pmu->on_cpu)));

	err = cpuhp_state_add_instance_nocalls(cpuhp_state_num,
					       &smmu_pmu->node);
	if (err) {
		dev_err(dev, "Error %d registering hotplug, PMU @%pa\n",
			err, &res_0->start);
		return err;
	}

	err = perf_pmu_register(&smmu_pmu->pmu, name, -1);
	if (err) {
		dev_err(dev, "Error %d registering PMU @%pa\n",
			err, &res_0->start);
		goto out_unregister;
	}

	dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n",
		 &res_0->start, smmu_pmu->num_counters,
		 smmu_pmu->global_filter ? "Global(Counter0)" :
		 "Individual");

	return 0;

out_unregister:
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
	return err;
}

static int smmu_pmu_remove(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&smmu_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);

	return 0;
}

static void smmu_pmu_shutdown(struct platform_device *pdev)
{
	struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);

	smmu_pmu_disable(&smmu_pmu->pmu);
}

#ifdef CONFIG_OF
static const struct of_device_id smmu_pmu_of_match[] = {
	{ .compatible = "arm,smmu-v3-pmcg" },
	{}
};
MODULE_DEVICE_TABLE(of, smmu_pmu_of_match);
#endif

static struct platform_driver smmu_pmu_driver = {
	.driver = {
		.name = "arm-smmu-v3-pmcg",
		.of_match_table = of_match_ptr(smmu_pmu_of_match),
		.suppress_bind_attrs = true,
	},
	.probe = smmu_pmu_probe,
	.remove = smmu_pmu_remove,
	.shutdown = smmu_pmu_shutdown,
};

static int __init arm_smmu_pmu_init(void)
{
	int ret;

	cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
						  "perf/arm/pmcg:online",
						  NULL,
						  smmu_pmu_offline_cpu);
	if (cpuhp_state_num < 0)
		return cpuhp_state_num;

	ret = platform_driver_register(&smmu_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(cpuhp_state_num);

	return ret;
}
module_init(arm_smmu_pmu_init);

static void __exit arm_smmu_pmu_exit(void)
{
	platform_driver_unregister(&smmu_pmu_driver);
	cpuhp_remove_multi_state(cpuhp_state_num);
}

module_exit(arm_smmu_pmu_exit);

MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension");
MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>");
MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>");
MODULE_LICENSE("GPL v2");