1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
4 */
5
6#include <linux/kernel.h>
7#include <linux/moduleparam.h>
8#include <linux/init.h>
9#include <linux/types.h>
10#include <linux/device.h>
11#include <linux/io.h>
12#include <linux/err.h>
13#include <linux/fs.h>
14#include <linux/slab.h>
15#include <linux/delay.h>
16#include <linux/smp.h>
17#include <linux/sysfs.h>
18#include <linux/stat.h>
19#include <linux/clk.h>
20#include <linux/cpu.h>
21#include <linux/cpu_pm.h>
22#include <linux/coresight.h>
23#include <linux/coresight-pmu.h>
24#include <linux/pm_wakeup.h>
25#include <linux/amba/bus.h>
26#include <linux/seq_file.h>
27#include <linux/uaccess.h>
28#include <linux/perf_event.h>
29#include <linux/pm_runtime.h>
30#include <linux/property.h>
31#include <asm/sections.h>
32#include <asm/local.h>
33#include <asm/virt.h>
34
35#include "coresight-etm4x.h"
36#include "coresight-etm-perf.h"
37
38static int boot_enable;
39module_param(boot_enable, int, 0444);
40MODULE_PARM_DESC(boot_enable, "Enable tracing on boot");
41
42#define PARAM_PM_SAVE_FIRMWARE 0 /* save self-hosted state as per firmware */
43#define PARAM_PM_SAVE_NEVER 1 /* never save any state */
44#define PARAM_PM_SAVE_SELF_HOSTED 2 /* save self-hosted state only */
45
46static int pm_save_enable = PARAM_PM_SAVE_FIRMWARE;
47module_param(pm_save_enable, int, 0444);
48MODULE_PARM_DESC(pm_save_enable,
49 "Save/restore state on power down: 1 = never, 2 = self-hosted");
50
/* The number of ETMv4 instances currently registered */
52static int etm4_count;
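/* Per-CPU driver data, indexed by logical CPU number */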
53static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
54static void etm4_set_default_config(struct etmv4_config *config);
55static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
56 struct perf_event *event);
57
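/* Dynamic CPU hotplug state used for the "online" callbacks */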
58static enum cpuhp_state hp_online;
59
60static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
61{
62 /* Writing 0 to TRCOSLAR unlocks the trace registers */
63 writel_relaxed(0x0, drvdata->base + TRCOSLAR);
64 drvdata->os_unlock = true;
65 isb();
66}
67
68static void etm4_os_lock(struct etmv4_drvdata *drvdata)
69{
70 /* Writing 0x1 to TRCOSLAR locks the trace registers */
71 writel_relaxed(0x1, drvdata->base + TRCOSLAR);
72 drvdata->os_unlock = false;
73 isb();
74}
75
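/* drvdata->arch holds TRCIDR1.TRCARCHMAJ in bits[7:4] and TRCARCHMIN in bits[3:0] */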
76static bool etm4_arch_supported(u8 arch)
77{
78 /* Mask out the minor version number */
79 switch (arch & 0xf0) {
80 case ETM_ARCH_V4:
81 break;
82 default:
83 return false;
84 }
85 return true;
86}
87
88static int etm4_cpu_id(struct coresight_device *csdev)
89{
90 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
91
92 return drvdata->cpu;
93}
94
95static int etm4_trace_id(struct coresight_device *csdev)
96{
97 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
98
99 return drvdata->trcid;
100}
101
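/*
 * Bundles the driver data with a return code so that the result of
 * etm4_enable_hw(), run via smp_call_function_single(), can be
 * reported back to the calling CPU.
 */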
102struct etm4_enable_arg {
103 struct etmv4_drvdata *drvdata;
104 int rc;
105};
106
107static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
108{
109 int i, rc;
110 struct etmv4_config *config = &drvdata->config;
111 struct device *etm_dev = &drvdata->csdev->dev;
112
113 CS_UNLOCK(drvdata->base);
114
115 etm4_os_unlock(drvdata);
116
117 rc = coresight_claim_device_unlocked(drvdata->base);
118 if (rc)
119 goto done;
120
121 /* Disable the trace unit before programming trace registers */
122 writel_relaxed(0, drvdata->base + TRCPRGCTLR);
123
124 /* wait for TRCSTATR.IDLE to go up */
125 if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
126 dev_err(etm_dev,
127 "timeout while waiting for Idle Trace Status\n");
128
129 writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
130 writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
131 /* nothing specific implemented */
132 writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
133 writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R);
134 writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R);
135 writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
136 writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR);
137 writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR);
138 writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR);
139 writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR);
140 writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
141 writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR);
142 writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR);
143 writel_relaxed(config->vissctlr,
144 drvdata->base + TRCVISSCTLR);
145 writel_relaxed(config->vipcssctlr,
146 drvdata->base + TRCVIPCSSCTLR);
147 for (i = 0; i < drvdata->nrseqstate - 1; i++)
148 writel_relaxed(config->seq_ctrl[i],
149 drvdata->base + TRCSEQEVRn(i));
150 writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR);
151 writel_relaxed(config->seq_state, drvdata->base + TRCSEQSTR);
152 writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR);
153 for (i = 0; i < drvdata->nr_cntr; i++) {
154 writel_relaxed(config->cntrldvr[i],
155 drvdata->base + TRCCNTRLDVRn(i));
156 writel_relaxed(config->cntr_ctrl[i],
157 drvdata->base + TRCCNTCTLRn(i));
158 writel_relaxed(config->cntr_val[i],
159 drvdata->base + TRCCNTVRn(i));
160 }
161
162 /*
163 * Resource selector pair 0 is always implemented and reserved. As
164 * such start at 2.
165 */
166 for (i = 2; i < drvdata->nr_resource * 2; i++)
167 writel_relaxed(config->res_ctrl[i],
168 drvdata->base + TRCRSCTLRn(i));
169
170 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
171 /* always clear status bit on restart if using single-shot */
172 if (config->ss_ctrl[i] || config->ss_pe_cmp[i])
173 config->ss_status[i] &= ~BIT(31);
174 writel_relaxed(config->ss_ctrl[i],
175 drvdata->base + TRCSSCCRn(i));
176 writel_relaxed(config->ss_status[i],
177 drvdata->base + TRCSSCSRn(i));
178 writel_relaxed(config->ss_pe_cmp[i],
179 drvdata->base + TRCSSPCICRn(i));
180 }
181 for (i = 0; i < drvdata->nr_addr_cmp; i++) {
182 writeq_relaxed(config->addr_val[i],
183 drvdata->base + TRCACVRn(i));
184 writeq_relaxed(config->addr_acc[i],
185 drvdata->base + TRCACATRn(i));
186 }
187 for (i = 0; i < drvdata->numcidc; i++)
188 writeq_relaxed(config->ctxid_pid[i],
189 drvdata->base + TRCCIDCVRn(i));
190 writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
191 writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
192
193 for (i = 0; i < drvdata->numvmidc; i++)
194 writeq_relaxed(config->vmid_val[i],
195 drvdata->base + TRCVMIDCVRn(i));
196 writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
197 writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
198
199 if (!drvdata->skip_power_up) {
200 /*
201 * Request to keep the trace unit powered and also
202 * emulation of powerdown
203 */
204 writel_relaxed(readl_relaxed(drvdata->base + TRCPDCR) |
205 TRCPDCR_PU, drvdata->base + TRCPDCR);
206 }
207
208 /* Enable the trace unit */
209 writel_relaxed(1, drvdata->base + TRCPRGCTLR);
210
211 /* wait for TRCSTATR.IDLE to go back down to '0' */
212 if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
213 dev_err(etm_dev,
214 "timeout while waiting for Idle Trace Status\n");
215
216 /*
217 * As recommended by section 4.3.7 ("Synchronization when using the
218 * memory-mapped interface") of ARM IHI 0064D
219 */
220 dsb(sy);
221 isb();
222
223done:
224 CS_LOCK(drvdata->base);
225
226 dev_dbg(etm_dev, "cpu: %d enable smp call done: %d\n",
227 drvdata->cpu, rc);
228 return rc;
229}
230
231static void etm4_enable_hw_smp_call(void *info)
232{
233 struct etm4_enable_arg *arg = info;
234
235 if (WARN_ON(!arg))
236 return;
237 arg->rc = etm4_enable_hw(arg->drvdata);
238}
239
240/*
241 * The goal of function etm4_config_timestamp_event() is to configure a
242 * counter that will tell the tracer to emit a timestamp packet when it
 * reaches zero. This is done in order to get a finer-grained idea
244 * of when instructions are executed so that they can be correlated
245 * with execution on other CPUs.
246 *
247 * To do this the counter itself is configured to self reload and
 * TRCRSCTLR1 (always true) is used to get the counter to decrement. From
249 * there a resource selector is configured with the counter and the
250 * timestamp control register to use the resource selector to trigger the
251 * event that will insert a timestamp packet in the stream.
252 */
253static int etm4_config_timestamp_event(struct etmv4_drvdata *drvdata)
254{
255 int ctridx, ret = -EINVAL;
256 int counter, rselector;
257 u32 val = 0;
258 struct etmv4_config *config = &drvdata->config;
259
260 /* No point in trying if we don't have at least one counter */
261 if (!drvdata->nr_cntr)
262 goto out;
263
264 /* Find a counter that hasn't been initialised */
265 for (ctridx = 0; ctridx < drvdata->nr_cntr; ctridx++)
266 if (config->cntr_val[ctridx] == 0)
267 break;
268
269 /* All the counters have been configured already, bail out */
270 if (ctridx == drvdata->nr_cntr) {
271 pr_debug("%s: no available counter found\n", __func__);
272 ret = -ENOSPC;
273 goto out;
274 }
275
276 /*
277 * Searching for an available resource selector to use, starting at
	 * '2' since every implementation has at least 2 resource selectors.
279 * ETMIDR4 gives the number of resource selector _pairs_,
280 * hence multiply by 2.
281 */
282 for (rselector = 2; rselector < drvdata->nr_resource * 2; rselector++)
283 if (!config->res_ctrl[rselector])
284 break;
285
286 if (rselector == drvdata->nr_resource * 2) {
287 pr_debug("%s: no available resource selector found\n",
288 __func__);
289 ret = -ENOSPC;
290 goto out;
291 }
292
293 /* Remember what counter we used */
294 counter = 1 << ctridx;
295
296 /*
297 * Initialise original and reload counter value to the smallest
298 * possible value in order to get as much precision as we can.
299 */
300 config->cntr_val[ctridx] = 1;
301 config->cntrldvr[ctridx] = 1;
302
303 /* Set the trace counter control register */
304 val = 0x1 << 16 | /* Bit 16, reload counter automatically */
305 0x0 << 7 | /* Select single resource selector */
306 0x1; /* Resource selector 1, i.e always true */
307
308 config->cntr_ctrl[ctridx] = val;
309
310 val = 0x2 << 16 | /* Group 0b0010 - Counter and sequencers */
311 counter << 0; /* Counter to use */
312
313 config->res_ctrl[rselector] = val;
314
315 val = 0x0 << 7 | /* Select single resource selector */
316 rselector; /* Resource selector */
317
318 config->ts_ctrl = val;
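	/*
	 * Example: if counter 0 and resource selector 2 were picked above,
	 * this amounts to cntr_ctrl[0] = 0x10001, res_ctrl[2] = 0x20001 and
	 * ts_ctrl = 0x2.
	 */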
319
320 ret = 0;
321out:
322 return ret;
323}
324
325static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
326 struct perf_event *event)
327{
328 int ret = 0;
329 struct etmv4_config *config = &drvdata->config;
330 struct perf_event_attr *attr = &event->attr;
331
332 if (!attr) {
333 ret = -EINVAL;
334 goto out;
335 }
336
337 /* Clear configuration from previous run */
338 memset(config, 0, sizeof(struct etmv4_config));
339
340 if (attr->exclude_kernel)
341 config->mode = ETM_MODE_EXCL_KERN;
342
343 if (attr->exclude_user)
344 config->mode = ETM_MODE_EXCL_USER;
345
346 /* Always start from the default config */
347 etm4_set_default_config(config);
348
349 /* Configure filters specified on the perf cmd line, if any. */
350 ret = etm4_set_event_filters(drvdata, event);
351 if (ret)
352 goto out;
353
354 /* Go from generic option to ETMv4 specifics */
355 if (attr->config & BIT(ETM_OPT_CYCACC)) {
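		/* bit[4], Cycle counting instruction tracing bit */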
356 config->cfg |= BIT(4);
357 /* TRM: Must program this for cycacc to work */
358 config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
359 }
360 if (attr->config & BIT(ETM_OPT_TS)) {
361 /*
362 * Configure timestamps to be emitted at regular intervals in
363 * order to correlate instructions executed on different CPUs
364 * (CPU-wide trace scenarios).
365 */
366 ret = etm4_config_timestamp_event(drvdata);
367
368 /*
369 * No need to go further if timestamp intervals can't
370 * be configured.
371 */
372 if (ret)
373 goto out;
374
375 /* bit[11], Global timestamp tracing bit */
376 config->cfg |= BIT(11);
377 }
378
379 if (attr->config & BIT(ETM_OPT_CTXTID))
380 /* bit[6], Context ID tracing bit */
381 config->cfg |= BIT(ETM4_CFG_BIT_CTXTID);
382
383 /* return stack - enable if selected and supported */
384 if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
385 /* bit[12], Return stack enable bit */
386 config->cfg |= BIT(12);
387
388out:
389 return ret;
390}
391
392static int etm4_enable_perf(struct coresight_device *csdev,
393 struct perf_event *event)
394{
395 int ret = 0;
396 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
397
398 if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) {
399 ret = -EINVAL;
400 goto out;
401 }
402
403 /* Configure the tracer based on the session's specifics */
404 ret = etm4_parse_event_config(drvdata, event);
405 if (ret)
406 goto out;
407 /* And enable it */
408 ret = etm4_enable_hw(drvdata);
409
410out:
411 return ret;
412}
413
414static int etm4_enable_sysfs(struct coresight_device *csdev)
415{
416 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
417 struct etm4_enable_arg arg = { };
418 int ret;
419
420 spin_lock(&drvdata->spinlock);
421
422 /*
423 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
424 * ensures that register writes occur when cpu is powered.
425 */
426 arg.drvdata = drvdata;
427 ret = smp_call_function_single(drvdata->cpu,
428 etm4_enable_hw_smp_call, &arg, 1);
429 if (!ret)
430 ret = arg.rc;
431 if (!ret)
432 drvdata->sticky_enable = true;
433 spin_unlock(&drvdata->spinlock);
434
435 if (!ret)
436 dev_dbg(&csdev->dev, "ETM tracing enabled\n");
437 return ret;
438}
439
440static int etm4_enable(struct coresight_device *csdev,
441 struct perf_event *event, u32 mode)
442{
443 int ret;
444 u32 val;
445 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
446
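	/* Atomically claim the tracer so only one session can use it at a time */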
447 val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
448
449 /* Someone is already using the tracer */
450 if (val)
451 return -EBUSY;
452
453 switch (mode) {
454 case CS_MODE_SYSFS:
455 ret = etm4_enable_sysfs(csdev);
456 break;
457 case CS_MODE_PERF:
458 ret = etm4_enable_perf(csdev, event);
459 break;
460 default:
461 ret = -EINVAL;
462 }
463
464 /* The tracer didn't start */
465 if (ret)
466 local_set(&drvdata->mode, CS_MODE_DISABLED);
467
468 return ret;
469}
470
471static void etm4_disable_hw(void *info)
472{
473 u32 control;
474 struct etmv4_drvdata *drvdata = info;
475 struct etmv4_config *config = &drvdata->config;
476 struct device *etm_dev = &drvdata->csdev->dev;
477 int i;
478
479 CS_UNLOCK(drvdata->base);
480
481 if (!drvdata->skip_power_up) {
482 /* power can be removed from the trace unit now */
483 control = readl_relaxed(drvdata->base + TRCPDCR);
484 control &= ~TRCPDCR_PU;
485 writel_relaxed(control, drvdata->base + TRCPDCR);
486 }
487
488 control = readl_relaxed(drvdata->base + TRCPRGCTLR);
489
490 /* EN, bit[0] Trace unit enable bit */
491 control &= ~0x1;
492
493 /*
494 * Make sure everything completes before disabling, as recommended
495 * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
496 * SSTATUS") of ARM IHI 0064D
497 */
498 dsb(sy);
499 isb();
500 writel_relaxed(control, drvdata->base + TRCPRGCTLR);
501
502 /* wait for TRCSTATR.PMSTABLE to go to '1' */
503 if (coresight_timeout(drvdata->base, TRCSTATR,
504 TRCSTATR_PMSTABLE_BIT, 1))
505 dev_err(etm_dev,
506 "timeout while waiting for PM stable Trace Status\n");
507
508 /* read the status of the single shot comparators */
509 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
510 config->ss_status[i] =
511 readl_relaxed(drvdata->base + TRCSSCSRn(i));
512 }
513
514 /* read back the current counter values */
515 for (i = 0; i < drvdata->nr_cntr; i++) {
516 config->cntr_val[i] =
517 readl_relaxed(drvdata->base + TRCCNTVRn(i));
518 }
519
520 coresight_disclaim_device_unlocked(drvdata->base);
521
522 CS_LOCK(drvdata->base);
523
524 dev_dbg(&drvdata->csdev->dev,
525 "cpu: %d disable smp call done\n", drvdata->cpu);
526}
527
528static int etm4_disable_perf(struct coresight_device *csdev,
529 struct perf_event *event)
530{
531 u32 control;
532 struct etm_filters *filters = event->hw.addr_filters;
533 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
534
535 if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
536 return -EINVAL;
537
538 etm4_disable_hw(drvdata);
539
540 /*
541 * Check if the start/stop logic was active when the unit was stopped.
542 * That way we can re-enable the start/stop logic when the process is
543 * scheduled again. Configuration of the start/stop logic happens in
544 * function etm4_set_event_filters().
545 */
546 control = readl_relaxed(drvdata->base + TRCVICTLR);
547 /* TRCVICTLR::SSSTATUS, bit[9] */
548 filters->ssstatus = (control & BIT(9));
549
550 return 0;
551}
552
553static void etm4_disable_sysfs(struct coresight_device *csdev)
554{
555 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
556
557 /*
558 * Taking hotplug lock here protects from clocks getting disabled
559 * with tracing being left on (crash scenario) if user disable occurs
560 * after cpu online mask indicates the cpu is offline but before the
561 * DYING hotplug callback is serviced by the ETM driver.
562 */
563 cpus_read_lock();
564 spin_lock(&drvdata->spinlock);
565
566 /*
567 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
568 * ensures that register writes occur when cpu is powered.
569 */
570 smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
571
572 spin_unlock(&drvdata->spinlock);
573 cpus_read_unlock();
574
575 dev_dbg(&csdev->dev, "ETM tracing disabled\n");
576}
577
578static void etm4_disable(struct coresight_device *csdev,
579 struct perf_event *event)
580{
581 u32 mode;
582 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
583
584 /*
585 * For as long as the tracer isn't disabled another entity can't
586 * change its status. As such we can read the status here without
587 * fearing it will change under us.
588 */
589 mode = local_read(&drvdata->mode);
590
591 switch (mode) {
592 case CS_MODE_DISABLED:
593 break;
594 case CS_MODE_SYSFS:
595 etm4_disable_sysfs(csdev);
596 break;
597 case CS_MODE_PERF:
598 etm4_disable_perf(csdev, event);
599 break;
600 }
601
602 if (mode)
603 local_set(&drvdata->mode, CS_MODE_DISABLED);
604}
605
606static const struct coresight_ops_source etm4_source_ops = {
607 .cpu_id = etm4_cpu_id,
608 .trace_id = etm4_trace_id,
609 .enable = etm4_enable,
610 .disable = etm4_disable,
611};
612
613static const struct coresight_ops etm4_cs_ops = {
614 .source_ops = &etm4_source_ops,
615};
616
617static void etm4_init_arch_data(void *info)
618{
619 u32 etmidr0;
620 u32 etmidr1;
621 u32 etmidr2;
622 u32 etmidr3;
623 u32 etmidr4;
624 u32 etmidr5;
625 struct etmv4_drvdata *drvdata = info;
626 int i;
627
628 /* Make sure all registers are accessible */
629 etm4_os_unlock(drvdata);
630
631 CS_UNLOCK(drvdata->base);
632
633 /* find all capabilities of the tracing unit */
634 etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);
635
636 /* INSTP0, bits[2:1] P0 tracing support field */
637 if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
638 drvdata->instrp0 = true;
639 else
640 drvdata->instrp0 = false;
641
642 /* TRCBB, bit[5] Branch broadcast tracing support bit */
643 if (BMVAL(etmidr0, 5, 5))
644 drvdata->trcbb = true;
645 else
646 drvdata->trcbb = false;
647
648 /* TRCCOND, bit[6] Conditional instruction tracing support bit */
649 if (BMVAL(etmidr0, 6, 6))
650 drvdata->trccond = true;
651 else
652 drvdata->trccond = false;
653
654 /* TRCCCI, bit[7] Cycle counting instruction bit */
655 if (BMVAL(etmidr0, 7, 7))
656 drvdata->trccci = true;
657 else
658 drvdata->trccci = false;
659
660 /* RETSTACK, bit[9] Return stack bit */
661 if (BMVAL(etmidr0, 9, 9))
662 drvdata->retstack = true;
663 else
664 drvdata->retstack = false;
665
666 /* NUMEVENT, bits[11:10] Number of events field */
667 drvdata->nr_event = BMVAL(etmidr0, 10, 11);
668 /* QSUPP, bits[16:15] Q element support field */
669 drvdata->q_support = BMVAL(etmidr0, 15, 16);
670 /* TSSIZE, bits[28:24] Global timestamp size field */
671 drvdata->ts_size = BMVAL(etmidr0, 24, 28);
672
673 /* base architecture of trace unit */
674 etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
675 /*
	 * TRCARCHMIN, bits[7:4] architecture minor version number
	 * TRCARCHMAJ, bits[11:8] architecture major version number
678 */
679 drvdata->arch = BMVAL(etmidr1, 4, 11);
680 drvdata->config.arch = drvdata->arch;
681
682 /* maximum size of resources */
683 etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
684 /* CIDSIZE, bits[9:5] Indicates the Context ID size */
685 drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
686 /* VMIDSIZE, bits[14:10] Indicates the VMID size */
687 drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
688 /* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
689 drvdata->ccsize = BMVAL(etmidr2, 25, 28);
690
691 etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
692 /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
693 drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
694 /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
695 drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
696 /* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
697 drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
698
699 /*
700 * TRCERR, bit[24] whether a trace unit can trace a
701 * system error exception.
702 */
703 if (BMVAL(etmidr3, 24, 24))
704 drvdata->trc_error = true;
705 else
706 drvdata->trc_error = false;
707
708 /* SYNCPR, bit[25] implementation has a fixed synchronization period? */
709 if (BMVAL(etmidr3, 25, 25))
710 drvdata->syncpr = true;
711 else
712 drvdata->syncpr = false;
713
714 /* STALLCTL, bit[26] is stall control implemented? */
715 if (BMVAL(etmidr3, 26, 26))
716 drvdata->stallctl = true;
717 else
718 drvdata->stallctl = false;
719
720 /* SYSSTALL, bit[27] implementation can support stall control? */
721 if (BMVAL(etmidr3, 27, 27))
722 drvdata->sysstall = true;
723 else
724 drvdata->sysstall = false;
725
726 /* NUMPROC, bits[30:28] the number of PEs available for tracing */
727 drvdata->nr_pe = BMVAL(etmidr3, 28, 30);
728
729 /* NOOVERFLOW, bit[31] is trace overflow prevention supported */
730 if (BMVAL(etmidr3, 31, 31))
731 drvdata->nooverflow = true;
732 else
733 drvdata->nooverflow = false;
734
735 /* number of resources trace unit supports */
736 etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
	/* NUMACPAIRS, bits[3:0] number of addr comparator pairs for tracing */
738 drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
739 /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
740 drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
741 /*
742 * NUMRSPAIR, bits[19:16]
	 * The number of resource pairs conveyed by the HW starts at 0, i.e. a
	 * value of 0x0 indicates 1 resource pair, 0x1 indicates two, and so on.
	 * As such, add 1 to the value of NUMRSPAIR for a better representation.
746 */
747 drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
748 /*
749 * NUMSSCC, bits[23:20] the number of single-shot
	 * comparator controls for tracing. Read any status registers as these
751 * also contain RO capability data.
752 */
753 drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
754 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
755 drvdata->config.ss_status[i] =
756 readl_relaxed(drvdata->base + TRCSSCSRn(i));
757 }
758 /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
759 drvdata->numcidc = BMVAL(etmidr4, 24, 27);
760 /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
761 drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
762
763 etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
764 /* NUMEXTIN, bits[8:0] number of external inputs implemented */
765 drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
766 /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
767 drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
768 /* ATBTRIG, bit[22] implementation can support ATB triggers? */
769 if (BMVAL(etmidr5, 22, 22))
770 drvdata->atbtrig = true;
771 else
772 drvdata->atbtrig = false;
773 /*
774 * LPOVERRIDE, bit[23] implementation supports
775 * low-power state override
776 */
777 if (BMVAL(etmidr5, 23, 23))
778 drvdata->lpoverride = true;
779 else
780 drvdata->lpoverride = false;
781 /* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
782 drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
783 /* NUMCNTR, bits[30:28] number of counters available for tracing */
784 drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
785 CS_LOCK(drvdata->base);
786}
787
788static void etm4_set_default_config(struct etmv4_config *config)
789{
790 /* disable all events tracing */
791 config->eventctrl0 = 0x0;
792 config->eventctrl1 = 0x0;
793
794 /* disable stalling */
795 config->stall_ctrl = 0x0;
796
797 /* enable trace synchronization every 4096 bytes, if available */
798 config->syncfreq = 0xC;
799
800 /* disable timestamp event */
801 config->ts_ctrl = 0x0;
802
803 /* TRCVICTLR::EVENT = 0x01, select the always on logic */
804 config->vinst_ctrl = BIT(0);
805}
806
807static u64 etm4_get_ns_access_type(struct etmv4_config *config)
808{
809 u64 access_type = 0;
810
811 /*
812 * EXLEVEL_NS, bits[15:12]
813 * The Exception levels are:
814 * Bit[12] Exception level 0 - Application
815 * Bit[13] Exception level 1 - OS
816 * Bit[14] Exception level 2 - Hypervisor
817 * Bit[15] Never implemented
818 */
819 if (!is_kernel_in_hyp_mode()) {
820 /* Stay away from hypervisor mode for non-VHE */
821 access_type = ETM_EXLEVEL_NS_HYP;
822 if (config->mode & ETM_MODE_EXCL_KERN)
823 access_type |= ETM_EXLEVEL_NS_OS;
824 } else if (config->mode & ETM_MODE_EXCL_KERN) {
825 access_type = ETM_EXLEVEL_NS_HYP;
826 }
827
828 if (config->mode & ETM_MODE_EXCL_USER)
829 access_type |= ETM_EXLEVEL_NS_APP;
830
831 return access_type;
832}
833
834static u64 etm4_get_access_type(struct etmv4_config *config)
835{
836 u64 access_type = etm4_get_ns_access_type(config);
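	/* Secure EL2 exclusion only applies from trace unit arch v4.4 (minor >= 4) */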
837 u64 s_hyp = (config->arch & 0x0f) >= 0x4 ? ETM_EXLEVEL_S_HYP : 0;
838
839 /*
840 * EXLEVEL_S, bits[11:8], don't trace anything happening
841 * in secure state.
842 */
843 access_type |= (ETM_EXLEVEL_S_APP |
844 ETM_EXLEVEL_S_OS |
845 s_hyp |
846 ETM_EXLEVEL_S_MON);
847
848 return access_type;
849}
850
851static void etm4_set_comparator_filter(struct etmv4_config *config,
852 u64 start, u64 stop, int comparator)
853{
854 u64 access_type = etm4_get_access_type(config);
855
856 /* First half of default address comparator */
857 config->addr_val[comparator] = start;
858 config->addr_acc[comparator] = access_type;
859 config->addr_type[comparator] = ETM_ADDR_TYPE_RANGE;
860
861 /* Second half of default address comparator */
862 config->addr_val[comparator + 1] = stop;
863 config->addr_acc[comparator + 1] = access_type;
864 config->addr_type[comparator + 1] = ETM_ADDR_TYPE_RANGE;
865
866 /*
867 * Configure the ViewInst function to include this address range
868 * comparator.
869 *
870 * @comparator is divided by two since it is the index in the
871 * etmv4_config::addr_val array but register TRCVIIECTLR deals with
872 * address range comparator _pairs_.
873 *
874 * Therefore:
	 * index 0 -> comparator pair 0
876 * index 2 -> comparator pair 1
877 * index 4 -> comparator pair 2
878 * ...
879 * index 14 -> comparator pair 7
880 */
881 config->viiectlr |= BIT(comparator / 2);
882}
883
884static void etm4_set_start_stop_filter(struct etmv4_config *config,
885 u64 address, int comparator,
886 enum etm_addr_type type)
887{
888 int shift;
889 u64 access_type = etm4_get_access_type(config);
890
891 /* Configure the comparator */
892 config->addr_val[comparator] = address;
893 config->addr_acc[comparator] = access_type;
894 config->addr_type[comparator] = type;
895
896 /*
897 * Configure ViewInst Start-Stop control register.
898 * Addresses configured to start tracing go from bit 0 to n-1,
	 * while those configured to stop tracing occupy bits 16 to 16 + n-1.
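	 * For example, a start address in comparator 2 sets bit 2, while a
	 * stop address in the same comparator sets bit 18.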
900 */
901 shift = (type == ETM_ADDR_TYPE_START ? 0 : 16);
902 config->vissctlr |= BIT(shift + comparator);
903}
904
905static void etm4_set_default_filter(struct etmv4_config *config)
906{
907 /* Trace everything 'default' filter achieved by no filtering */
908 config->viiectlr = 0x0;
909
910 /*
911 * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
912 * in the started state
913 */
914 config->vinst_ctrl |= BIT(9);
915 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
916
917 /* No start-stop filtering for ViewInst */
918 config->vissctlr = 0x0;
919}
920
921static void etm4_set_default(struct etmv4_config *config)
922{
923 if (WARN_ON_ONCE(!config))
924 return;
925
926 /*
927 * Make default initialisation trace everything
928 *
929 * This is done by a minimum default config sufficient to enable
930 * full instruction trace - with a default filter for trace all
931 * achieved by having no filtering.
932 */
933 etm4_set_default_config(config);
934 etm4_set_default_filter(config);
935}
936
937static int etm4_get_next_comparator(struct etmv4_drvdata *drvdata, u32 type)
938{
939 int nr_comparator, index = 0;
940 struct etmv4_config *config = &drvdata->config;
941
942 /*
	 * nr_addr_cmp holds the number of comparator _pairs_, so multiply
	 * by 2 for the total number of comparators.
945 */
946 nr_comparator = drvdata->nr_addr_cmp * 2;
947
948 /* Go through the tally of comparators looking for a free one. */
949 while (index < nr_comparator) {
950 switch (type) {
951 case ETM_ADDR_TYPE_RANGE:
952 if (config->addr_type[index] == ETM_ADDR_TYPE_NONE &&
953 config->addr_type[index + 1] == ETM_ADDR_TYPE_NONE)
954 return index;
955
956 /* Address range comparators go in pairs */
957 index += 2;
958 break;
959 case ETM_ADDR_TYPE_START:
960 case ETM_ADDR_TYPE_STOP:
961 if (config->addr_type[index] == ETM_ADDR_TYPE_NONE)
962 return index;
963
964 /* Start/stop address can have odd indexes */
965 index += 1;
966 break;
967 default:
968 return -EINVAL;
969 }
970 }
971
972 /* If we are here all the comparators have been used. */
973 return -ENOSPC;
974}
975
976static int etm4_set_event_filters(struct etmv4_drvdata *drvdata,
977 struct perf_event *event)
978{
979 int i, comparator, ret = 0;
980 u64 address;
981 struct etmv4_config *config = &drvdata->config;
982 struct etm_filters *filters = event->hw.addr_filters;
983
984 if (!filters)
985 goto default_filter;
986
987 /* Sync events with what Perf got */
988 perf_event_addr_filters_sync(event);
989
990 /*
991 * If there are no filters to deal with simply go ahead with
992 * the default filter, i.e the entire address range.
993 */
994 if (!filters->nr_filters)
995 goto default_filter;
996
997 for (i = 0; i < filters->nr_filters; i++) {
998 struct etm_filter *filter = &filters->etm_filter[i];
999 enum etm_addr_type type = filter->type;
1000
1001 /* See if a comparator is free. */
1002 comparator = etm4_get_next_comparator(drvdata, type);
1003 if (comparator < 0) {
1004 ret = comparator;
1005 goto out;
1006 }
1007
1008 switch (type) {
1009 case ETM_ADDR_TYPE_RANGE:
1010 etm4_set_comparator_filter(config,
1011 filter->start_addr,
1012 filter->stop_addr,
1013 comparator);
1014 /*
1015 * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
1016 * in the started state
1017 */
1018 config->vinst_ctrl |= BIT(9);
1019
1020 /* No start-stop filtering for ViewInst */
1021 config->vissctlr = 0x0;
1022 break;
1023 case ETM_ADDR_TYPE_START:
1024 case ETM_ADDR_TYPE_STOP:
1025 /* Get the right start or stop address */
1026 address = (type == ETM_ADDR_TYPE_START ?
1027 filter->start_addr :
1028 filter->stop_addr);
1029
1030 /* Configure comparator */
1031 etm4_set_start_stop_filter(config, address,
1032 comparator, type);
1033
1034 /*
1035 * If filters::ssstatus == 1, trace acquisition was
			 * started but the process was yanked away before the
			 * stop address was hit. As such the start/stop
			 * logic needs to be re-started so that tracing can
			 * resume where it left off.
1040 *
1041 * The start/stop logic status when a process is
1042 * scheduled out is checked in function
1043 * etm4_disable_perf().
1044 */
1045 if (filters->ssstatus)
1046 config->vinst_ctrl |= BIT(9);
1047
1048 /* No include/exclude filtering for ViewInst */
1049 config->viiectlr = 0x0;
1050 break;
1051 default:
1052 ret = -EINVAL;
1053 goto out;
1054 }
1055 }
1056
1057 goto out;
1058
1059
1060default_filter:
1061 etm4_set_default_filter(config);
1062
1063out:
1064 return ret;
1065}
1066
1067void etm4_config_trace_mode(struct etmv4_config *config)
1068{
1069 u32 addr_acc, mode;
1070
1071 mode = config->mode;
1072 mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
1073
1074 /* excluding kernel AND user space doesn't make sense */
1075 WARN_ON_ONCE(mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER));
1076
1077 /* nothing to do if neither flags are set */
1078 if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
1079 return;
1080
1081 addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
1082 /* clear default config */
1083 addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS |
1084 ETM_EXLEVEL_NS_HYP);
1085
1086 addr_acc |= etm4_get_ns_access_type(config);
1087
1088 config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
1089 config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
1090}
1091
1092static int etm4_online_cpu(unsigned int cpu)
1093{
1094 if (!etmdrvdata[cpu])
1095 return 0;
1096
1097 if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
1098 coresight_enable(etmdrvdata[cpu]->csdev);
1099 return 0;
1100}
1101
1102static int etm4_starting_cpu(unsigned int cpu)
1103{
1104 if (!etmdrvdata[cpu])
1105 return 0;
1106
1107 spin_lock(&etmdrvdata[cpu]->spinlock);
1108 if (!etmdrvdata[cpu]->os_unlock)
1109 etm4_os_unlock(etmdrvdata[cpu]);
1110
1111 if (local_read(&etmdrvdata[cpu]->mode))
1112 etm4_enable_hw(etmdrvdata[cpu]);
1113 spin_unlock(&etmdrvdata[cpu]->spinlock);
1114 return 0;
1115}
1116
1117static int etm4_dying_cpu(unsigned int cpu)
1118{
1119 if (!etmdrvdata[cpu])
1120 return 0;
1121
1122 spin_lock(&etmdrvdata[cpu]->spinlock);
1123 if (local_read(&etmdrvdata[cpu]->mode))
1124 etm4_disable_hw(etmdrvdata[cpu]);
1125 spin_unlock(&etmdrvdata[cpu]->spinlock);
1126 return 0;
1127}
1128
1129static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
1130{
1131 drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
1132}
1133
1134static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
1135{
1136 int i, ret = 0;
1137 struct etmv4_save_state *state;
1138 struct device *etm_dev = &drvdata->csdev->dev;
1139
1140 /*
	 * As recommended by section 3.4.1 ("The procedure when powering down
	 * the PE") of ARM IHI 0064D
1143 */
1144 dsb(sy);
1145 isb();
1146
1147 CS_UNLOCK(drvdata->base);
1148
1149 /* Lock the OS lock to disable trace and external debugger access */
1150 etm4_os_lock(drvdata);
1151
1152 /* wait for TRCSTATR.PMSTABLE to go up */
1153 if (coresight_timeout(drvdata->base, TRCSTATR,
1154 TRCSTATR_PMSTABLE_BIT, 1)) {
1155 dev_err(etm_dev,
1156 "timeout while waiting for PM Stable Status\n");
1157 etm4_os_unlock(drvdata);
1158 ret = -EBUSY;
1159 goto out;
1160 }
1161
1162 state = drvdata->save_state;
1163
1164 state->trcprgctlr = readl(drvdata->base + TRCPRGCTLR);
1165 state->trcprocselr = readl(drvdata->base + TRCPROCSELR);
1166 state->trcconfigr = readl(drvdata->base + TRCCONFIGR);
1167 state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR);
1168 state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R);
1169 state->trceventctl1r = readl(drvdata->base + TRCEVENTCTL1R);
1170 state->trcstallctlr = readl(drvdata->base + TRCSTALLCTLR);
1171 state->trctsctlr = readl(drvdata->base + TRCTSCTLR);
1172 state->trcsyncpr = readl(drvdata->base + TRCSYNCPR);
1173 state->trcccctlr = readl(drvdata->base + TRCCCCTLR);
1174 state->trcbbctlr = readl(drvdata->base + TRCBBCTLR);
1175 state->trctraceidr = readl(drvdata->base + TRCTRACEIDR);
1176 state->trcqctlr = readl(drvdata->base + TRCQCTLR);
1177
1178 state->trcvictlr = readl(drvdata->base + TRCVICTLR);
1179 state->trcviiectlr = readl(drvdata->base + TRCVIIECTLR);
1180 state->trcvissctlr = readl(drvdata->base + TRCVISSCTLR);
1181 state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR);
1182 state->trcvdctlr = readl(drvdata->base + TRCVDCTLR);
1183 state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR);
1184 state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR);
1185
1186 for (i = 0; i < drvdata->nrseqstate; i++)
1187 state->trcseqevr[i] = readl(drvdata->base + TRCSEQEVRn(i));
1188
1189 state->trcseqrstevr = readl(drvdata->base + TRCSEQRSTEVR);
1190 state->trcseqstr = readl(drvdata->base + TRCSEQSTR);
1191 state->trcextinselr = readl(drvdata->base + TRCEXTINSELR);
1192
1193 for (i = 0; i < drvdata->nr_cntr; i++) {
1194 state->trccntrldvr[i] = readl(drvdata->base + TRCCNTRLDVRn(i));
1195 state->trccntctlr[i] = readl(drvdata->base + TRCCNTCTLRn(i));
1196 state->trccntvr[i] = readl(drvdata->base + TRCCNTVRn(i));
1197 }
1198
1199 for (i = 0; i < drvdata->nr_resource * 2; i++)
1200 state->trcrsctlr[i] = readl(drvdata->base + TRCRSCTLRn(i));
1201
1202 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
1203 state->trcssccr[i] = readl(drvdata->base + TRCSSCCRn(i));
1204 state->trcsscsr[i] = readl(drvdata->base + TRCSSCSRn(i));
1205 state->trcsspcicr[i] = readl(drvdata->base + TRCSSPCICRn(i));
1206 }
1207
1208 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
1209 state->trcacvr[i] = readq(drvdata->base + TRCACVRn(i));
1210 state->trcacatr[i] = readq(drvdata->base + TRCACATRn(i));
1211 }
1212
1213 /*
1214 * Data trace stream is architecturally prohibited for A profile cores
1215 * so we don't save (or later restore) trcdvcvr and trcdvcmr - As per
1216 * section 1.3.4 ("Possible functional configurations of an ETMv4 trace
1217 * unit") of ARM IHI 0064D.
1218 */
1219
1220 for (i = 0; i < drvdata->numcidc; i++)
1221 state->trccidcvr[i] = readq(drvdata->base + TRCCIDCVRn(i));
1222
1223 for (i = 0; i < drvdata->numvmidc; i++)
1224 state->trcvmidcvr[i] = readq(drvdata->base + TRCVMIDCVRn(i));
1225
1226 state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0);
1227 state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1);
1228
1229 state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0);
	state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1);
1231
1232 state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR);
1233
1234 state->trcpdcr = readl(drvdata->base + TRCPDCR);
1235
1236 /* wait for TRCSTATR.IDLE to go up */
1237 if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) {
1238 dev_err(etm_dev,
1239 "timeout while waiting for Idle Trace Status\n");
1240 etm4_os_unlock(drvdata);
1241 ret = -EBUSY;
1242 goto out;
1243 }
1244
1245 drvdata->state_needs_restore = true;
1246
1247 /*
1248 * Power can be removed from the trace unit now. We do this to
1249 * potentially save power on systems that respect the TRCPDCR_PU
1250 * despite requesting software to save/restore state.
1251 */
1252 writel_relaxed((state->trcpdcr & ~TRCPDCR_PU),
1253 drvdata->base + TRCPDCR);
1254
1255out:
1256 CS_LOCK(drvdata->base);
1257 return ret;
1258}
1259
1260static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
1261{
1262 int i;
1263 struct etmv4_save_state *state = drvdata->save_state;
1264
1265 CS_UNLOCK(drvdata->base);
1266
1267 writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
1268
1269 writel_relaxed(state->trcprgctlr, drvdata->base + TRCPRGCTLR);
1270 writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR);
1271 writel_relaxed(state->trcconfigr, drvdata->base + TRCCONFIGR);
1272 writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR);
1273 writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R);
1274 writel_relaxed(state->trceventctl1r, drvdata->base + TRCEVENTCTL1R);
1275 writel_relaxed(state->trcstallctlr, drvdata->base + TRCSTALLCTLR);
1276 writel_relaxed(state->trctsctlr, drvdata->base + TRCTSCTLR);
1277 writel_relaxed(state->trcsyncpr, drvdata->base + TRCSYNCPR);
1278 writel_relaxed(state->trcccctlr, drvdata->base + TRCCCCTLR);
1279 writel_relaxed(state->trcbbctlr, drvdata->base + TRCBBCTLR);
1280 writel_relaxed(state->trctraceidr, drvdata->base + TRCTRACEIDR);
1281 writel_relaxed(state->trcqctlr, drvdata->base + TRCQCTLR);
1282
1283 writel_relaxed(state->trcvictlr, drvdata->base + TRCVICTLR);
1284 writel_relaxed(state->trcviiectlr, drvdata->base + TRCVIIECTLR);
1285 writel_relaxed(state->trcvissctlr, drvdata->base + TRCVISSCTLR);
1286 writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR);
1287 writel_relaxed(state->trcvdctlr, drvdata->base + TRCVDCTLR);
1288 writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR);
1289 writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR);
1290
1291 for (i = 0; i < drvdata->nrseqstate; i++)
1292 writel_relaxed(state->trcseqevr[i],
1293 drvdata->base + TRCSEQEVRn(i));
1294
1295 writel_relaxed(state->trcseqrstevr, drvdata->base + TRCSEQRSTEVR);
1296 writel_relaxed(state->trcseqstr, drvdata->base + TRCSEQSTR);
1297 writel_relaxed(state->trcextinselr, drvdata->base + TRCEXTINSELR);
1298
1299 for (i = 0; i < drvdata->nr_cntr; i++) {
1300 writel_relaxed(state->trccntrldvr[i],
1301 drvdata->base + TRCCNTRLDVRn(i));
1302 writel_relaxed(state->trccntctlr[i],
1303 drvdata->base + TRCCNTCTLRn(i));
1304 writel_relaxed(state->trccntvr[i],
1305 drvdata->base + TRCCNTVRn(i));
1306 }
1307
1308 for (i = 0; i < drvdata->nr_resource * 2; i++)
1309 writel_relaxed(state->trcrsctlr[i],
1310 drvdata->base + TRCRSCTLRn(i));
1311
1312 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
1313 writel_relaxed(state->trcssccr[i],
1314 drvdata->base + TRCSSCCRn(i));
1315 writel_relaxed(state->trcsscsr[i],
1316 drvdata->base + TRCSSCSRn(i));
1317 writel_relaxed(state->trcsspcicr[i],
1318 drvdata->base + TRCSSPCICRn(i));
1319 }
1320
1321 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
1322 writeq_relaxed(state->trcacvr[i],
1323 drvdata->base + TRCACVRn(i));
1324 writeq_relaxed(state->trcacatr[i],
1325 drvdata->base + TRCACATRn(i));
1326 }
1327
1328 for (i = 0; i < drvdata->numcidc; i++)
1329 writeq_relaxed(state->trccidcvr[i],
1330 drvdata->base + TRCCIDCVRn(i));
1331
1332 for (i = 0; i < drvdata->numvmidc; i++)
1333 writeq_relaxed(state->trcvmidcvr[i],
1334 drvdata->base + TRCVMIDCVRn(i));
1335
1336 writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0);
1337 writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1);
1338
1339 writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0);
	writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1);
1341
1342 writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET);
1343
1344 writel_relaxed(state->trcpdcr, drvdata->base + TRCPDCR);
1345
1346 drvdata->state_needs_restore = false;
1347
1348 /*
1349 * As recommended by section 4.3.7 ("Synchronization when using the
1350 * memory-mapped interface") of ARM IHI 0064D
1351 */
1352 dsb(sy);
1353 isb();
1354
1355 /* Unlock the OS lock to re-enable trace and external debug access */
1356 etm4_os_unlock(drvdata);
1357 CS_LOCK(drvdata->base);
1358}
1359
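/*
 * CPU PM notifier: called on the CPU that is entering or leaving a
 * low-power state, hence the direct use of smp_processor_id().
 */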
1360static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
1361 void *v)
1362{
1363 struct etmv4_drvdata *drvdata;
1364 unsigned int cpu = smp_processor_id();
1365
1366 if (!etmdrvdata[cpu])
1367 return NOTIFY_OK;
1368
1369 drvdata = etmdrvdata[cpu];
1370
1371 if (!drvdata->save_state)
1372 return NOTIFY_OK;
1373
1374 if (WARN_ON_ONCE(drvdata->cpu != cpu))
1375 return NOTIFY_BAD;
1376
1377 switch (cmd) {
1378 case CPU_PM_ENTER:
1379 /* save the state if self-hosted coresight is in use */
1380 if (local_read(&drvdata->mode))
1381 if (etm4_cpu_save(drvdata))
1382 return NOTIFY_BAD;
1383 break;
1384 case CPU_PM_EXIT:
1385 case CPU_PM_ENTER_FAILED:
1386 if (drvdata->state_needs_restore)
1387 etm4_cpu_restore(drvdata);
1388 break;
1389 default:
1390 return NOTIFY_DONE;
1391 }
1392
1393 return NOTIFY_OK;
1394}
1395
1396static struct notifier_block etm4_cpu_pm_nb = {
1397 .notifier_call = etm4_cpu_pm_notify,
1398};
1399
1400/* Setup PM. Called with cpus locked. Deals with error conditions and counts */
1401static int etm4_pm_setup_cpuslocked(void)
1402{
1403 int ret;
1404
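	/* The PM notifier and hotplug states are shared by all ETM instances */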
1405 if (etm4_count++)
1406 return 0;
1407
1408 ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb);
1409 if (ret)
1410 goto reduce_count;
1411
1412 ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
1413 "arm/coresight4:starting",
1414 etm4_starting_cpu, etm4_dying_cpu);
1415
1416 if (ret)
1417 goto unregister_notifier;
1418
1419 ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
1420 "arm/coresight4:online",
1421 etm4_online_cpu, NULL);
1422
1423 /* HP dyn state ID returned in ret on success */
1424 if (ret > 0) {
1425 hp_online = ret;
1426 return 0;
1427 }
1428
1429 /* failed dyn state - remove others */
1430 cpuhp_remove_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING);
1431
1432unregister_notifier:
1433 cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
1434
1435reduce_count:
1436 --etm4_count;
1437 return ret;
1438}
1439
1440static void etm4_pm_clear(void)
1441{
1442 if (--etm4_count != 0)
1443 return;
1444
1445 cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
1446 cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
1447 if (hp_online) {
1448 cpuhp_remove_state_nocalls(hp_online);
1449 hp_online = 0;
1450 }
1451}
1452
1453static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
1454{
1455 int ret;
1456 void __iomem *base;
1457 struct device *dev = &adev->dev;
1458 struct coresight_platform_data *pdata = NULL;
1459 struct etmv4_drvdata *drvdata;
1460 struct resource *res = &adev->res;
1461 struct coresight_desc desc = { 0 };
1462
1463 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
1464 if (!drvdata)
1465 return -ENOMEM;
1466
1467 dev_set_drvdata(dev, drvdata);
1468
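	/*
	 * When left to the firmware default, only do self-hosted save/restore
	 * if the platform reports that CPU power down loses the ETM context.
	 */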
1469 if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
1470 pm_save_enable = coresight_loses_context_with_cpu(dev) ?
1471 PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
1472
1473 if (pm_save_enable != PARAM_PM_SAVE_NEVER) {
1474 drvdata->save_state = devm_kmalloc(dev,
1475 sizeof(struct etmv4_save_state), GFP_KERNEL);
1476 if (!drvdata->save_state)
1477 return -ENOMEM;
1478 }
1479
1480 if (fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
1481 drvdata->skip_power_up = true;
1482
1483 /* Validity for the resource is already checked by the AMBA core */
1484 base = devm_ioremap_resource(dev, res);
1485 if (IS_ERR(base))
1486 return PTR_ERR(base);
1487
1488 drvdata->base = base;
1489
1490 spin_lock_init(&drvdata->spinlock);
1491
1492 drvdata->cpu = coresight_get_cpu(dev);
1493 if (drvdata->cpu < 0)
1494 return drvdata->cpu;
1495
1496 desc.name = devm_kasprintf(dev, GFP_KERNEL, "etm%d", drvdata->cpu);
1497 if (!desc.name)
1498 return -ENOMEM;
1499
1500 cpus_read_lock();
1501 etmdrvdata[drvdata->cpu] = drvdata;
1502
1503 if (smp_call_function_single(drvdata->cpu,
1504 etm4_init_arch_data, drvdata, 1))
1505 dev_err(dev, "ETM arch init failed\n");
1506
1507 ret = etm4_pm_setup_cpuslocked();
1508 cpus_read_unlock();
1509
1510 /* etm4_pm_setup_cpuslocked() does its own cleanup - exit on error */
1511 if (ret) {
1512 etmdrvdata[drvdata->cpu] = NULL;
1513 return ret;
1514 }
1515
	if (!etm4_arch_supported(drvdata->arch)) {
1517 ret = -EINVAL;
1518 goto err_arch_supported;
1519 }
1520
1521 etm4_init_trace_id(drvdata);
1522 etm4_set_default(&drvdata->config);
1523
1524 pdata = coresight_get_platform_data(dev);
1525 if (IS_ERR(pdata)) {
1526 ret = PTR_ERR(pdata);
1527 goto err_arch_supported;
1528 }
1529 adev->dev.platform_data = pdata;
1530
1531 desc.type = CORESIGHT_DEV_TYPE_SOURCE;
1532 desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
1533 desc.ops = &etm4_cs_ops;
1534 desc.pdata = pdata;
1535 desc.dev = dev;
1536 desc.groups = coresight_etmv4_groups;
1537 drvdata->csdev = coresight_register(&desc);
1538 if (IS_ERR(drvdata->csdev)) {
1539 ret = PTR_ERR(drvdata->csdev);
1540 goto err_arch_supported;
1541 }
1542
1543 ret = etm_perf_symlink(drvdata->csdev, true);
1544 if (ret) {
1545 coresight_unregister(drvdata->csdev);
1546 goto err_arch_supported;
1547 }
1548
1549 pm_runtime_put(&adev->dev);
1550 dev_info(&drvdata->csdev->dev, "CPU%d: ETM v%d.%d initialized\n",
1551 drvdata->cpu, drvdata->arch >> 4, drvdata->arch & 0xf);
1552
1553 if (boot_enable) {
1554 coresight_enable(drvdata->csdev);
1555 drvdata->boot_enable = true;
1556 }
1557
1558 return 0;
1559
1560err_arch_supported:
1561 etmdrvdata[drvdata->cpu] = NULL;
1562 etm4_pm_clear();
1563 return ret;
1564}
1565
1566static struct amba_cs_uci_id uci_id_etm4[] = {
1567 {
1568 /* ETMv4 UCI data */
1569 .devarch = 0x47704a13,
1570 .devarch_mask = 0xfff0ffff,
1571 .devtype = 0x00000013,
1572 }
1573};
1574
1575static const struct amba_id etm4_ids[] = {
1576 CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */
1577 CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */
1578 CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
1579 CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
1580 CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4),/* Cortex-A35 */
1581 CS_AMBA_UCI_ID(0x000bbd0c, uci_id_etm4),/* Neoverse N1 */
1582 CS_AMBA_UCI_ID(0x000f0205, uci_id_etm4),/* Qualcomm Kryo */
1583 CS_AMBA_UCI_ID(0x000f0211, uci_id_etm4),/* Qualcomm Kryo */
1584 CS_AMBA_UCI_ID(0x000bb802, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A55 */
1585 CS_AMBA_UCI_ID(0x000bb803, uci_id_etm4),/* Qualcomm Kryo 385 Cortex-A75 */
1586 CS_AMBA_UCI_ID(0x000bb805, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A55 */
1587 CS_AMBA_UCI_ID(0x000bb804, uci_id_etm4),/* Qualcomm Kryo 4XX Cortex-A76 */
1588 CS_AMBA_UCI_ID(0x000cc0af, uci_id_etm4),/* Marvell ThunderX2 */
1589 {},
1590};
1591
1592static struct amba_driver etm4x_driver = {
1593 .drv = {
1594 .name = "coresight-etm4x",
1595 .suppress_bind_attrs = true,
1596 },
1597 .probe = etm4_probe,
1598 .id_table = etm4_ids,
1599};
1600builtin_amba_driver(etm4x_driver);