/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Trace Memory Controller driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>

#include "coresight-priv.h"

#define TMC_RSZ			0x004
#define TMC_STS			0x00c
#define TMC_RRD			0x010
#define TMC_RRP			0x014
#define TMC_RWP			0x018
#define TMC_TRG			0x01c
#define TMC_CTL			0x020
#define TMC_RWD			0x024
#define TMC_MODE		0x028
#define TMC_LBUFLEVEL		0x02c
#define TMC_CBUFLEVEL		0x030
#define TMC_BUFWM		0x034
#define TMC_RRPHI		0x038
#define TMC_RWPHI		0x03c
#define TMC_AXICTL		0x110
#define TMC_DBALO		0x118
#define TMC_DBAHI		0x11c
#define TMC_FFSR		0x300
#define TMC_FFCR		0x304
#define TMC_PSCR		0x308
#define TMC_ITMISCOP0		0xee0
#define TMC_ITTRFLIN		0xee8
#define TMC_ITATBDATA0		0xeec
#define TMC_ITATBCTR2		0xef0
#define TMC_ITATBCTR1		0xef4
#define TMC_ITATBCTR0		0xef8

/* register description */
/* TMC_CTL - 0x020 */
#define TMC_CTL_CAPT_EN		BIT(0)
/* TMC_STS - 0x00C */
#define TMC_STS_TRIGGERED	BIT(1)
/* TMC_AXICTL - 0x110 */
#define TMC_AXICTL_PROT_CTL_B0	BIT(0)
#define TMC_AXICTL_PROT_CTL_B1	BIT(1)
#define TMC_AXICTL_SCT_GAT_MODE	BIT(7)
#define TMC_AXICTL_WR_BURST_LEN	0xF00
/* TMC_FFCR - 0x304 */
#define TMC_FFCR_EN_FMT		BIT(0)
#define TMC_FFCR_EN_TI		BIT(1)
#define TMC_FFCR_FON_FLIN	BIT(4)
#define TMC_FFCR_FON_TRIG_EVT	BIT(5)
#define TMC_FFCR_FLUSHMAN	BIT(6)
#define TMC_FFCR_TRIGON_TRIGIN	BIT(8)
#define TMC_FFCR_STOP_ON_FLUSH	BIT(12)

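/*
 * Bit positions, not masks: coresight_timeout() polls a single bit, given
 * by its position. Bit 2 of TMC_STS is the TMCReady flag and bit 6 of
 * TMC_FFCR is FlushMan.
 */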
#define TMC_STS_TRIGGERED_BIT	2
#define TMC_FFCR_FLUSHMAN_BIT	6

enum tmc_config_type {
	TMC_CONFIG_TYPE_ETB,
	TMC_CONFIG_TYPE_ETR,
	TMC_CONFIG_TYPE_ETF,
};

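/*
 * Values follow the MODE field encoding of the TMC_MODE register:
 * 0 = circular buffer, 1 = software FIFO, 2 = hardware FIFO.
 */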
enum tmc_mode {
	TMC_MODE_CIRCULAR_BUFFER,
	TMC_MODE_SOFTWARE_FIFO,
	TMC_MODE_HARDWARE_FIFO,
};

enum tmc_mem_intf_width {
	TMC_MEM_INTF_WIDTH_32BITS	= 0x2,
	TMC_MEM_INTF_WIDTH_64BITS	= 0x3,
	TMC_MEM_INTF_WIDTH_128BITS	= 0x4,
	TMC_MEM_INTF_WIDTH_256BITS	= 0x5,
};

/**
 * struct tmc_drvdata - specifics associated to a TMC component
 * @base:	memory mapped base address for this component.
 * @dev:	the device entity associated to this component.
 * @csdev:	component vitals needed by the framework.
 * @miscdev:	specifics to handle "/dev/xyz.tmc" entry.
 * @spinlock:	only one at a time pls.
 * @read_count:	manages preparation of buffer for reading.
 * @reading:	whether the buffer is currently open for reading.
 * @buf:	area of memory where trace data get sent.
 * @paddr:	DMA start location in RAM.
 * @vaddr:	virtual representation of @paddr.
 * @size:	@buf size.
 * @enable:	this TMC is being used.
 * @config_type: TMC variant, must be of type @tmc_config_type.
 * @trigger_cntr: amount of words to store after a trigger.
 */
struct tmc_drvdata {
	void __iomem		*base;
	struct device		*dev;
	struct coresight_device	*csdev;
	struct miscdevice	miscdev;
	spinlock_t		spinlock;
	int			read_count;
	bool			reading;
	char			*buf;
	dma_addr_t		paddr;
	void			*vaddr;
	u32			size;
	bool			enable;
	enum tmc_config_type	config_type;
	u32			trigger_cntr;
};

static void tmc_wait_for_ready(struct tmc_drvdata *drvdata)
{
	/* Ensure formatter, unformatter and hardware fifo are empty */
	if (coresight_timeout(drvdata->base,
			      TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) {
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TMC_STS);
	}
}

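/*
 * Stop capture via a manual flush: set StopOnFl so the TMC stops once the
 * flush completes, request a manual flush (FlushMan), wait for the FlushMan
 * bit to clear and then for the TMC to report ready.
 */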
static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
	u32 ffcr;

	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_FLUSHMAN;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Ensure flush completes */
	if (coresight_timeout(drvdata->base,
			      TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TMC_FFCR);
	}

	tmc_wait_for_ready(drvdata);
}

static void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}

static void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(0x0, drvdata->base + TMC_CTL);
}

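/*
 * Configure the TMC as an ETB sink: circular buffer mode, formatter and
 * trigger insertion enabled, flush on FLUSHIN and trigger events, trigger
 * on TRIGIN, then start capture with the programmed trigger counter.
 */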
static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	/* Zero out the memory to help with debug */
	memset(drvdata->buf, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

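/*
 * Configure the TMC as an ETR sink writing to system memory: program the
 * buffer size in 32-bit words (TMC_RSZ), select circular buffer mode, set
 * the AXI write burst length, non-secure and non-scatter-gather transfers,
 * point TMC_DBALO/DBAHI at the DMA buffer and start capture.
 */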
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl |= TMC_AXICTL_WR_BURST_LEN;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	axictl = (axictl &
		  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
		  TMC_AXICTL_PROT_CTL_B1;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);

	writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
	writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

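/*
 * Configure the TMC as an ETF link: hardware FIFO mode with formatting and
 * trigger insertion enabled, so trace streams through to the next component
 * in the path rather than being stored.
 */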
static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
{
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_enable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_enable_hw(drvdata);
	} else {
		if (mode == TMC_MODE_CIRCULAR_BUFFER)
			tmc_etb_enable_hw(drvdata);
		else
			tmc_etf_enable_hw(drvdata);
	}
	drvdata->enable = true;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC enabled\n");
	return 0;
}

static int tmc_enable_sink(struct coresight_device *csdev, u32 mode)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
}

static int tmc_enable_link(struct coresight_device *csdev, int inport,
			   int outport)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO);
}

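/*
 * Drain the internal trace RAM into drvdata->buf by reading the RAM Read
 * Data register one memory-interface word at a time; the TMC returns
 * 0xFFFFFFFF once no more data is available. The memory interface width
 * is taken from DEVID[10:8].
 */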
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	enum tmc_mem_intf_width memwidth;
	u8 memwords;
	char *bufp;
	u32 read_data;
	int i;

	memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10);
	if (memwidth == TMC_MEM_INTF_WIDTH_32BITS)
		memwords = 1;
	else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS)
		memwords = 2;
	else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS)
		memwords = 4;
	else
		memwords = 8;

	bufp = drvdata->buf;
	while (1) {
		for (i = 0; i < memwords; i++) {
			read_data = readl_relaxed(drvdata->base + TMC_RRD);
			if (read_data == 0xFFFFFFFF)
				return;
			memcpy(bufp, &read_data, 4);
			bufp += 4;
		}
	}
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

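/*
 * Work out where valid trace data starts in the DMA buffer: if the buffer
 * has wrapped (STS Full bit set) the oldest data begins at the current
 * write pointer, otherwise it begins at the start of the buffer.
 */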
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
	u32 rwp, val;

	rwp = readl_relaxed(drvdata->base + TMC_RWP);
	val = readl_relaxed(drvdata->base + TMC_STS);

	/* How much memory do we still have */
	if (val & BIT(0))
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
	else
		drvdata->buf = drvdata->vaddr;
}

static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
{
	unsigned long flags;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_disable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_disable_hw(drvdata);
	} else {
		if (mode == TMC_MODE_CIRCULAR_BUFFER)
			tmc_etb_disable_hw(drvdata);
		else
			tmc_etf_disable_hw(drvdata);
	}
out:
	drvdata->enable = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC disabled\n");
}

static void tmc_disable_sink(struct coresight_device *csdev)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
}

static void tmc_disable_link(struct coresight_device *csdev, int inport,
			     int outport)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
}

static const struct coresight_ops_sink tmc_sink_ops = {
	.enable		= tmc_enable_sink,
	.disable	= tmc_disable_sink,
};

static const struct coresight_ops_link tmc_link_ops = {
	.enable		= tmc_enable_link,
	.disable	= tmc_disable_link,
};

static const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_sink_ops,
};

static const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_sink_ops,
};

static const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_sink_ops,
	.link_ops	= &tmc_link_ops,
};

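/*
 * Prepare the buffer for a user space read: if the TMC is currently
 * capturing, stop it (which also dumps the trace RAM) and mark the device
 * as being read so it cannot be re-enabled until tmc_read_unprepare().
 */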
static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
	int ret;
	unsigned long flags;
	enum tmc_mode mode;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->enable)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_disable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_disable_hw(drvdata);
	} else {
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode == TMC_MODE_CIRCULAR_BUFFER) {
			tmc_etb_disable_hw(drvdata);
		} else {
			ret = -ENODEV;
			goto err;
		}
	}
out:
	drvdata->reading = true;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC read start\n");
	return 0;
err:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	return ret;
}

static void tmc_read_unprepare(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	enum tmc_mode mode;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->enable)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		tmc_etb_enable_hw(drvdata);
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		tmc_etr_enable_hw(drvdata);
	} else {
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode == TMC_MODE_CIRCULAR_BUFFER)
			tmc_etb_enable_hw(drvdata);
	}
out:
	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC read end\n");
}

static int tmc_open(struct inode *inode, struct file *file)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);
	int ret = 0;

	if (drvdata->read_count++)
		goto out;

	ret = tmc_read_prepare(drvdata);
	if (ret)
		return ret;
out:
	nonseekable_open(inode, file);

	dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
	return 0;
}

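/*
 * Copy trace data to user space. For an ETR the buffer is circular and
 * drvdata->buf points at the oldest data, so reads that run past the end
 * of the DMA buffer wrap back to its start and are truncated at the buffer
 * boundary; user space issues another read() to continue.
 */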
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);
	char *bufp = drvdata->buf + *ppos;

	if (*ppos + len > drvdata->size)
		len = drvdata->size - *ppos;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		if (bufp == (char *)(drvdata->vaddr + drvdata->size))
			bufp = drvdata->vaddr;
		else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
			bufp -= drvdata->size;
		if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
			len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
	}

	if (copy_to_user(data, bufp, len)) {
		dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += len;

	dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
		__func__, len, (int)(drvdata->size - *ppos));
	return len;
}

static int tmc_release(struct inode *inode, struct file *file)
{
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	if (--drvdata->read_count) {
		if (drvdata->read_count < 0) {
			dev_err(drvdata->dev, "mismatched close\n");
			drvdata->read_count = 0;
		}
		goto out;
	}

	tmc_read_unprepare(drvdata);
out:
	dev_dbg(drvdata->dev, "%s: released\n", __func__);
	return 0;
}

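/*
 * The misc device registered at probe time exposes the captured trace as a
 * character device named after the component's platform data. A minimal
 * user space sketch (assuming a sink the device tree names "etf0"; the
 * actual node name is platform specific):
 *
 *	echo 1 > /sys/bus/coresight/devices/<source>/enable_source
 *	dd if=/dev/etf0 of=trace.bin bs=4096
 */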
static const struct file_operations tmc_fops = {
	.owner		= THIS_MODULE,
	.open		= tmc_open,
	.read		= tmc_read,
	.release	= tmc_release,
	.llseek		= no_llseek,
};

static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	unsigned long flags;
	u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg;
	u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr;
	u32 devid;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ);
	tmc_sts = readl_relaxed(drvdata->base + TMC_STS);
	tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP);
	tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP);
	tmc_trg = readl_relaxed(drvdata->base + TMC_TRG);
	tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL);
	tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR);
	tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	tmc_mode = readl_relaxed(drvdata->base + TMC_MODE);
	tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR);
	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return sprintf(buf,
		       "Depth:\t\t0x%x\n"
		       "Status:\t\t0x%x\n"
		       "RAM read ptr:\t0x%x\n"
		       "RAM wrt ptr:\t0x%x\n"
		       "Trigger cnt:\t0x%x\n"
		       "Control:\t0x%x\n"
		       "Flush status:\t0x%x\n"
		       "Flush ctrl:\t0x%x\n"
		       "Mode:\t\t0x%x\n"
		       "PSCR:\t\t0x%x\n"
		       "DEVID:\t\t0x%x\n",
		       tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg,
		       tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid);
}
static DEVICE_ATTR_RO(status);

static ssize_t trigger_cntr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val = drvdata->trigger_cntr;

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->trigger_cntr = val;
	return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

static struct attribute *coresight_etb_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_status.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etb);

static struct attribute *coresight_etr_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_status.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etr);

static struct attribute *coresight_etf_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_status.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etf);

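/*
 * Probe: map the device, read DEVID[7:6] to discover whether this TMC is
 * an ETB, ETR or ETF, size the trace buffer accordingly (a DMA-coherent
 * buffer of "arm,buffer-size" bytes for an ETR, the internal RAM size
 * reported by TMC_RSZ otherwise), then register it with the CoreSight
 * framework and as a misc character device for trace retrieval.
 */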
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	u32 devid;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct tmc_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
	drvdata->config_type = BMVAL(devid, 6, 7);

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		if (np)
			ret = of_property_read_u32(np,
						   "arm,buffer-size",
						   &drvdata->size);
		if (ret)
			drvdata->size = SZ_1M;
	} else {
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
	}

	pm_runtime_put(&adev->dev);

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
						    &drvdata->paddr, GFP_KERNEL);
		if (!drvdata->vaddr)
			return -ENOMEM;

		memset(drvdata->vaddr, 0, drvdata->size);
		drvdata->buf = drvdata->vaddr;
	} else {
		drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
		if (!drvdata->buf)
			return -ENOMEM;
	}

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto err_devm_kzalloc;
	}

	desc->pdata = pdata;
	desc->dev = dev;
	desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
		desc->type = CORESIGHT_DEV_TYPE_SINK;
		desc->ops = &tmc_etb_cs_ops;
		desc->groups = coresight_etb_groups;
	} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		desc->type = CORESIGHT_DEV_TYPE_SINK;
		desc->ops = &tmc_etr_cs_ops;
		desc->groups = coresight_etr_groups;
	} else {
		desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
		desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
		desc->ops = &tmc_etf_cs_ops;
		desc->groups = coresight_etf_groups;
	}

	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_devm_kzalloc;
	}

	drvdata->miscdev.name = pdata->name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &tmc_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		goto err_misc_register;

	dev_info(dev, "TMC initialized\n");
	return 0;

err_misc_register:
	coresight_unregister(drvdata->csdev);
err_devm_kzalloc:
	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		dma_free_coherent(dev, drvdata->size,
				  drvdata->vaddr, drvdata->paddr);
	return ret;
}

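/* Part number 0x961 with JEP106 designer code 0x3b (ARM): the CoreSight TMC */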
static struct amba_id tmc_ids[] = {
	{
		.id	= 0x0003b961,
		.mask	= 0x0003ffff,
	},
	{ 0, 0},
};

static struct amba_driver tmc_driver = {
	.drv = {
		.name   = "coresight-tmc",
		.owner  = THIS_MODULE,
		.suppress_bind_attrs = true,
	},
	.probe		= tmc_probe,
	.id_table	= tmc_ids,
};
builtin_amba_driver(tmc_driver);