// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 NXP
 * Copyright 2016 Freescale Semiconductor, Inc.
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define COUNTER_CNTL		0x0
#define COUNTER_READ		0x20

#define COUNTER_DPCR1		0x30
#define COUNTER_MUX_CNTL	0x50
#define COUNTER_MASK_COMP	0x54

#define CNTL_OVER		0x1
#define CNTL_CLEAR		0x2
#define CNTL_EN			0x4
#define CNTL_EN_MASK		0xFFFFFFFB
#define CNTL_CLEAR_MASK		0xFFFFFFFD
#define CNTL_OVER_MASK		0xFFFFFFFE

#define CNTL_CP_SHIFT		16
#define CNTL_CP_MASK		(0xFF << CNTL_CP_SHIFT)
#define CNTL_CSV_SHIFT		24
#define CNTL_CSV_MASK		(0xFFU << CNTL_CSV_SHIFT)

#define READ_PORT_SHIFT		0
#define READ_PORT_MASK		(0x7 << READ_PORT_SHIFT)
#define READ_CHANNEL_REVERT	0x00000008	/* bit 3 for read channel select */
#define WRITE_PORT_SHIFT	8
#define WRITE_PORT_MASK		(0x7 << WRITE_PORT_SHIFT)
#define WRITE_CHANNEL_REVERT	0x00000800	/* bit 11 for write channel select */

#define EVENT_CYCLES_ID		0
#define EVENT_CYCLES_COUNTER	0
#define NUM_COUNTERS		4

/* For removing bias if cycle counter CNTL.CP is set to 0xf0 */
#define CYCLES_COUNTER_MASK	0x0FFFFFFF
#define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */

#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME	"imx8_ddr"
#define DDR_CPUHP_CB_NAME	DDR_PERF_DEV_NAME "_perf_pmu"

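/*
 * Each probed PMU instance gets an id from this IDA; the id is appended to
 * DDR_PERF_DEV_NAME to form the perf device name, e.g. "imx8_ddr0" for the
 * first instance.
 */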
static DEFINE_IDA(ddr_ida);

/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER			0x1	/* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED		0x3	/* support enhanced AXI ID filter */
#define DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER	0x4	/* support AXI ID PORT CHANNEL filter */

struct fsl_ddr_devtype_data {
	unsigned int quirks;	/* quirks needed for different DDR Perf core */
	const char *identifier;	/* system PMU identifier for userspace */
};

static const struct fsl_ddr_devtype_data imx8_devtype_data;

static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
};

static const struct fsl_ddr_devtype_data imx8mq_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
	.identifier = "i.MX8MQ",
};

static const struct fsl_ddr_devtype_data imx8mm_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
	.identifier = "i.MX8MM",
};

static const struct fsl_ddr_devtype_data imx8mn_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
	.identifier = "i.MX8MN",
};

static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
	.identifier = "i.MX8MP",
};

static const struct fsl_ddr_devtype_data imx8dxl_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER,
	.identifier = "i.MX8DXL",
};

static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
	{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
	{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
	{ .compatible = "fsl,imx8mq-ddr-pmu", .data = &imx8mq_devtype_data},
	{ .compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data},
	{ .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data},
	{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
	{ .compatible = "fsl,imx8dxl-ddr-pmu", .data = &imx8dxl_devtype_data},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);

struct ddr_pmu {
	struct pmu pmu;
	void __iomem *base;
	unsigned int cpu;
	struct hlist_node node;
	struct device *dev;
	struct perf_event *events[NUM_COUNTERS];
	enum cpuhp_state cpuhp_state;
	const struct fsl_ddr_devtype_data *devtype_data;
	int irq;
	int id;
	int active_counter;
};

static ssize_t ddr_perf_identifier_show(struct device *dev,
					struct device_attribute *attr,
					char *page)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);
}

static umode_t ddr_perf_identifier_attr_visible(struct kobject *kobj,
						struct attribute *attr,
						int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	if (!pmu->devtype_data->identifier)
		return 0;
	return attr->mode;
}

static struct device_attribute ddr_perf_identifier_attr =
	__ATTR(identifier, 0444, ddr_perf_identifier_show, NULL);

static struct attribute *ddr_perf_identifier_attrs[] = {
	&ddr_perf_identifier_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_identifier_attr_group = {
	.attrs = ddr_perf_identifier_attrs,
	.is_visible = ddr_perf_identifier_attr_visible,
};

enum ddr_perf_filter_capabilities {
	PERF_CAP_AXI_ID_FILTER = 0,
	PERF_CAP_AXI_ID_FILTER_ENHANCED,
	PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER,
	PERF_CAP_AXI_ID_FEAT_MAX,
};

static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
{
	u32 quirks = pmu->devtype_data->quirks;

	switch (cap) {
	case PERF_CAP_AXI_ID_FILTER:
		return !!(quirks & DDR_CAP_AXI_ID_FILTER);
	case PERF_CAP_AXI_ID_FILTER_ENHANCED:
		quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
		return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
	case PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER:
		return !!(quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER);
	default:
		WARN(1, "unknown filter cap %d\n", cap);
	}

	return 0;
}

static ssize_t ddr_perf_filter_cap_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	int cap = (long)ea->var;

	return sysfs_emit(buf, "%u\n", ddr_perf_filter_cap_get(pmu, cap));
}

#define PERF_EXT_ATTR_ENTRY(_name, _func, _var)			\
	(&((struct dev_ext_attribute) {					\
		__ATTR(_name, 0444, _func, NULL), (void *)_var		\
	}).attr.attr)

#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var)			\
	PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var)

static struct attribute *ddr_perf_filter_cap_attr[] = {
	PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
	PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
	PERF_FILTER_EXT_ATTR_ENTRY(super_filter, PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER),
	NULL,
};

static const struct attribute_group ddr_perf_filter_cap_attr_group = {
	.name = "caps",
	.attrs = ddr_perf_filter_cap_attr,
};

static ssize_t ddr_perf_cpumask_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static const struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};

static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
		   char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)				\
	PMU_EVENT_ATTR_ID(_name, ddr_pmu_event_show, _id)

static struct attribute *ddr_perf_events_attrs[] = {
	IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
	IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
	IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
	IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
	IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
	IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
	IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
	IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
	IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
	IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
	IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
	IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
	IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
	IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
	IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
	IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
	IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
	IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
	IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
	IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
	IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
	IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
	IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
	IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
	IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
	IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
	IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
	IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
	IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
	IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
	NULL,
};

static const struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};

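/*
 * Userspace event encoding: the event number goes in config, the AXI ID
 * filter value and mask in config1, and the port/channel selection in
 * config2, as described by the format attributes below. For example,
 * assuming the first probed instance is named imx8_ddr0:
 *
 *   perf stat -a -e imx8_ddr0/read-cycles/ sleep 1
 *   perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xffff,axi_id=0x10/ sleep 1
 */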
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");
PMU_FORMAT_ATTR(axi_port, "config2:0-2");
PMU_FORMAT_ATTR(axi_channel, "config2:3-3");

static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	&format_attr_axi_mask.attr,
	&format_attr_axi_port.attr,
	&format_attr_axi_channel.attr,
	NULL,
};

static const struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_filter_cap_attr_group,
	&ddr_perf_identifier_attr_group,
	NULL,
};

static bool ddr_perf_is_filtered(struct perf_event *event)
{
	return event->attr.config == 0x41 || event->attr.config == 0x42;
}

static u32 ddr_perf_filter_val(struct perf_event *event)
{
	return event->attr.config1;
}

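/*
 * The AXI ID filter value is programmed into a single shared register
 * (COUNTER_DPCR1), so two filtered events can only be scheduled together if
 * they use the same axi_id/axi_mask value; unfiltered events are always
 * compatible.
 */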
static bool ddr_perf_filters_compatible(struct perf_event *a,
					struct perf_event *b)
{
	if (!ddr_perf_is_filtered(a))
		return true;
	if (!ddr_perf_is_filtered(b))
		return true;
	return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
}

static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
{
	unsigned int filt;
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
	return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
		ddr_perf_is_filtered(event);
}

static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
	int i;

	/*
	 * Always map the cycle event to counter 0.
	 * The cycles counter is dedicated to the cycle event and can't be
	 * used for any other event.
	 */
	if (event == EVENT_CYCLES_ID) {
		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
			return EVENT_CYCLES_COUNTER;
		else
			return -ENOENT;
	}

	for (i = 1; i < NUM_COUNTERS; i++) {
		if (pmu->events[i] == NULL)
			return i;
	}

	return -ENOENT;
}

static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	struct perf_event *event = pmu->events[counter];
	void __iomem *base = pmu->base;

	/*
	 * Return bytes instead of bursts from the DDR transaction for the
	 * axid-read and axid-write events if the PMU core supports the
	 * enhanced filter.
	 */
	base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
						       COUNTER_READ;
	return readl_relaxed(base + counter * 4);
}

static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable (for example to create a CCN group
	 * periodically read when a hrtimer aka cpu-clock leader triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		if (!ddr_perf_filters_compatible(event, event->group_leader))
			return -EINVAL;
		for_each_sibling_event(sibling, event->group_leader) {
			if (!ddr_perf_filters_compatible(event, sibling))
				return -EINVAL;
		}
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
		    !is_software_event(sibling))
			return -EINVAL;
	}

	event->cpu = pmu->cpu;
	hwc->idx = -1;

	return 0;
}

static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
				    int counter, bool enable)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	if (enable) {
		/*
		 * The cycle counter is special: it must have 0 and then 1
		 * written to its CLEAR bit to be cleared. Other counters only
		 * need 0 written to the CLEAR bit, which hardware then sets
		 * back to 1. The enable flow below is harmless for all
		 * counters.
		 */
		writel(0, pmu->base + reg);
		val = CNTL_EN | CNTL_CLEAR;
		val |= FIELD_PREP(CNTL_CSV_MASK, config);

		/*
		 * On i.MX8MP we need to bias the cycle counter to overflow
		 * more often. We do this by initializing bits [23:16] of the
		 * counter value via the COUNTER_CNTL Counter Parameter (CP)
		 * field.
		 */
		if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
			if (counter == EVENT_CYCLES_COUNTER)
				val |= FIELD_PREP(CNTL_CP_MASK, 0xf0);
		}

		writel(val, pmu->base + reg);
	} else {
		/* Disable counter */
		val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
		writel(val, pmu->base + reg);
	}
}

static bool ddr_perf_counter_overflow(struct ddr_pmu *pmu, int counter)
{
	int val;

	val = readl_relaxed(pmu->base + counter * 4 + COUNTER_CNTL);

	return val & CNTL_OVER;
}

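/*
 * Clear a counter by writing 0 and then 1 to its CLEAR bit; this sequence
 * clears the cycle counter and is harmless for the event counters (see the
 * comment in ddr_perf_counter_enable()).
 */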
static void ddr_perf_counter_clear(struct ddr_pmu *pmu, int counter)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	val = readl_relaxed(pmu->base + reg);
	val &= ~CNTL_CLEAR;
	writel(val, pmu->base + reg);

	val |= CNTL_CLEAR;
	writel(val, pmu->base + reg);
}

static void ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 new_raw_count;
	int counter = hwc->idx;
	int ret;

	new_raw_count = ddr_perf_read_counter(pmu, counter);
	/* Remove the bias applied in ddr_perf_counter_enable(). */
	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED) {
		if (counter == EVENT_CYCLES_COUNTER)
			new_raw_count &= CYCLES_COUNTER_MASK;
	}

	local64_add(new_raw_count, &event->count);

	/*
	 * On legacy SoCs, an event counter keeps counting after it overflows,
	 * so there is no need to clear it. On newer SoCs, an event counter
	 * stops counting when it overflows and must be cleared before it can
	 * count again.
	 */
	if (counter != EVENT_CYCLES_COUNTER) {
		ret = ddr_perf_counter_overflow(pmu, counter);
		if (ret)
			dev_warn_ratelimited(pmu->dev, "events lost due to counter overflow (config 0x%llx)\n",
					     event->attr.config);
	}

	/* clear counter every time for both cycle counter and event counter */
	ddr_perf_counter_clear(pmu, counter);
}

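/*
 * All counters stop when the cycle counter is disabled, so the cycle counter
 * is enabled when the first event is started and disabled again once the
 * last active event is stopped (tracked by active_counter).
 */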
static void ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

	if (!pmu->active_counter++)
		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
					EVENT_CYCLES_COUNTER, true);

	hwc->state = 0;
}

static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter;
	int cfg = event->attr.config;
	int cfg1 = event->attr.config1;
	int cfg2 = event->attr.config2;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		int i;

		for (i = 1; i < NUM_COUNTERS; i++) {
			if (pmu->events[i] &&
			    !ddr_perf_filters_compatible(event, pmu->events[i]))
				return -EINVAL;
		}

		if (ddr_perf_is_filtered(event)) {
			/* revert axi id masking (axi_mask) value */
			cfg1 ^= AXI_MASKING_REVERT;
			writel(cfg1, pmu->base + COUNTER_DPCR1);
		}
	}

	counter = ddr_perf_alloc_counter(pmu, cfg);
	if (counter < 0) {
		dev_dbg(pmu->dev, "There are not enough counters\n");
		return -EOPNOTSUPP;
	}

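	/*
	 * The port/channel filter (DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER, e.g.
	 * i.MX8DXL) uses per-counter mask/compare and mux registers laid out
	 * at a 16-byte stride; (counter - 1) indexes relative to the first
	 * event counter, since counter 0 is dedicated to cycles.
	 */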
	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER) {
		if (ddr_perf_is_filtered(event)) {
			/* revert axi id masking (axi_mask) value */
			cfg1 ^= AXI_MASKING_REVERT;
			writel(cfg1, pmu->base + COUNTER_MASK_COMP + ((counter - 1) << 4));

			if (cfg == 0x41) {
				/* revert axi read channel (axi_channel) value */
				cfg2 ^= READ_CHANNEL_REVERT;
				cfg2 |= FIELD_PREP(READ_PORT_MASK, cfg2);
			} else {
				/* revert axi write channel (axi_channel) value */
				cfg2 ^= WRITE_CHANNEL_REVERT;
				cfg2 |= FIELD_PREP(WRITE_PORT_MASK, cfg2);
			}

			writel(cfg2, pmu->base + COUNTER_MUX_CNTL + ((counter - 1) << 4));
		}
	}

	pmu->events[counter] = event;
	hwc->idx = counter;

	hwc->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}

static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	if (!--pmu->active_counter)
		ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID,
					EVENT_CYCLES_COUNTER, false);

	hwc->state |= PERF_HES_STOPPED;
}

static void ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_event_stop(event, PERF_EF_UPDATE);

	ddr_perf_free_counter(pmu, counter);
	hwc->idx = -1;
}

static void ddr_perf_pmu_enable(struct pmu *pmu)
{
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
}

static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
			 struct device *dev)
{
	*pmu = (struct ddr_pmu) {
		.pmu = (struct pmu) {
			.module       = THIS_MODULE,
			.parent       = dev,
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr  = perf_invalid_context,
			.attr_groups  = attr_groups,
			.event_init   = ddr_perf_event_init,
			.add          = ddr_perf_event_add,
			.del          = ddr_perf_event_del,
			.start        = ddr_perf_event_start,
			.stop         = ddr_perf_event_stop,
			.read         = ddr_perf_event_update,
			.pmu_enable   = ddr_perf_pmu_enable,
			.pmu_disable  = ddr_perf_pmu_disable,
		},
		.base = base,
		.dev = dev,
	};

	pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL);
	return pmu->id;
}

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	int i;
	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
	struct perf_event *event;

	/* All counters will stop if the cycle counter is disabled. */
	ddr_perf_counter_enable(pmu,
				EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER,
				false);
	/*
	 * When the cycle counter overflows, all counters are stopped and an
	 * IRQ is raised. If any other counter overflows, it continues
	 * counting and no IRQ is raised. On newer SoCs such as i.MX8MP,
	 * however, an event counter stops counting when it overflows, so the
	 * cycle counter interrupt is used to update and clear the event
	 * counters before an overflow causes events to be lost.
	 *
	 * Cycles occur at least 4 times as often as other events, so we can
	 * update all events on a cycle counter overflow and not lose events.
	 */
	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!pmu->events[i])
			continue;

		event = pmu->events[i];

		ddr_perf_event_update(event);
	}

	ddr_perf_counter_enable(pmu,
				EVENT_CYCLES_ID,
				EVENT_CYCLES_COUNTER,
				true);

	return IRQ_HANDLED;
}

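/*
 * CPU hotplug callback: when the CPU that owns this PMU goes offline, migrate
 * the perf context and the counter IRQ affinity to another online CPU.
 */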
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)));

	return 0;
}

static int ddr_perf_probe(struct platform_device *pdev)
{
	struct ddr_pmu *pmu;
	struct device_node *np;
	void __iomem *base;
	char *name;
	int num;
	int ret;
	int irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	np = pdev->dev.of_node;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	num = ddr_perf_init(pmu, base, &pdev->dev);

	platform_set_drvdata(pdev, pmu);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
			      num);
	if (!name) {
		ret = -ENOMEM;
		goto cpuhp_state_err;
	}

	pmu->devtype_data = of_device_get_match_data(&pdev->dev);

	pmu->cpu = raw_smp_processor_id();
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      DDR_CPUHP_CB_NAME,
				      NULL,
				      ddr_perf_offline_cpu);

	if (ret < 0) {
		dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
		goto cpuhp_state_err;
	}

	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		goto cpuhp_instance_err;
	}

	/* Request irq */
	irq = of_irq_get(np, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq: %d\n", irq);
		ret = irq;
		goto ddr_perf_err;
	}

	ret = devm_request_irq(&pdev->dev, irq,
			       ddr_perf_irq_handler,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       DDR_CPUHP_CB_NAME,
			       pmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Request irq failed: %d\n", ret);
		goto ddr_perf_err;
	}

	pmu->irq = irq;
	ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu));
	if (ret) {
		dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
		goto ddr_perf_err;
	}

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto ddr_perf_err;

	return 0;

ddr_perf_err:
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
	cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
	ida_free(&ddr_ida, pmu->id);
	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
	return ret;
}

static void ddr_perf_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	cpuhp_remove_multi_state(pmu->cpuhp_state);

	perf_pmu_unregister(&pmu->pmu);

	ida_free(&ddr_ida, pmu->id);
}

static struct platform_driver imx_ddr_pmu_driver = {
	.driver         = {
		.name           = "imx-ddr-pmu",
		.of_match_table = imx_ddr_pmu_dt_ids,
		.suppress_bind_attrs = true,
	},
	.probe          = ddr_perf_probe,
	.remove         = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);
MODULE_DESCRIPTION("Freescale i.MX8 DDR Performance Monitor Driver");
MODULE_LICENSE("GPL v2");