// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Trace Memory Controller driver
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>

#include "coresight-priv.h"
#include "coresight-tmc.h"

DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");

void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
	/* Ensure formatter, unformatter and hardware fifo are empty */
	if (coresight_timeout(drvdata->base,
			      TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
		dev_err(&drvdata->csdev->dev,
			"timeout while waiting for TMC to be Ready\n");
	}
}

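/*
 * Stop collection: set StopOnFlush, request a manual flush so any buffered
 * trace drains to memory, then wait for the flush to complete and for the
 * TMC to report ready.
 */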
void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
	u32 ffcr;

	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Ensure flush completes */
	if (coresight_timeout(drvdata->base,
			      TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(&drvdata->csdev->dev,
			"timeout while waiting for completion of Manual Flush\n");
	}

	tmc_wait_for_tmcready(drvdata);
}

void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}

void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(0x0, drvdata->base + TMC_CTL);
}

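/*
 * Mask applied when advancing RRP or a buffer offset so that the result
 * stays aligned as required by the trace memory databus width (see below).
 */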
u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
{
	u32 mask = 0;

	/*
	 * When moving RRP or an offset address forward, the new values must
	 * be byte-address aligned to the width of the trace memory databus
	 * _and_ to a frame boundary (16 byte), whichever is the biggest. For
	 * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
	 * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
	 * be 0s.
	 */
	switch (drvdata->memwidth) {
	case TMC_MEM_INTF_WIDTH_32BITS:
	/* fallthrough */
	case TMC_MEM_INTF_WIDTH_64BITS:
	/* fallthrough */
	case TMC_MEM_INTF_WIDTH_128BITS:
		mask = GENMASK(31, 4);
		break;
	case TMC_MEM_INTF_WIDTH_256BITS:
		mask = GENMASK(31, 5);
		break;
	}

	return mask;
}

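/*
 * Dispatch read preparation to the ETB/ETF or ETR specific handler so the
 * captured trace can be exposed through the misc device below.
 */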
static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
	int ret = 0;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		ret = tmc_read_prepare_etb(drvdata);
		break;
	case TMC_CONFIG_TYPE_ETR:
		ret = tmc_read_prepare_etr(drvdata);
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		dev_dbg(&drvdata->csdev->dev, "TMC read start\n");

	return ret;
}

static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
{
	int ret = 0;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		ret = tmc_read_unprepare_etb(drvdata);
		break;
	case TMC_CONFIG_TYPE_ETR:
		ret = tmc_read_unprepare_etr(drvdata);
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		dev_dbg(&drvdata->csdev->dev, "TMC read end\n");

	return ret;
}

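/*
 * File operations backing the /dev/tmc_etb<N>, /dev/tmc_etf<N> and
 * /dev/tmc_etr<N> nodes registered in tmc_probe(); opening a node prepares
 * the TMC for reading and releasing it undoes that.
 */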
static int tmc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	ret = tmc_read_prepare(drvdata);
	if (ret)
		return ret;

	nonseekable_open(inode, file);

	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
	return 0;
}

static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
					  loff_t pos, size_t len, char **bufpp)
{
	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
	case TMC_CONFIG_TYPE_ETR:
		return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
	}

	return -EINVAL;
}

static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	char *bufp;
	ssize_t actual;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);
	actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
	if (actual <= 0)
		return 0;

	if (copy_to_user(data, bufp, actual)) {
		dev_dbg(&drvdata->csdev->dev,
			"%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += actual;
	dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);

	return actual;
}

static int tmc_release(struct inode *inode, struct file *file)
{
	int ret;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	ret = tmc_read_unprepare(drvdata);
	if (ret)
		return ret;

	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
	return 0;
}

static const struct file_operations tmc_fops = {
	.owner		= THIS_MODULE,
	.open		= tmc_open,
	.read		= tmc_read,
	.release	= tmc_release,
	.llseek		= no_llseek,
};

static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
{
	enum tmc_mem_intf_width memwidth;

	/*
	 * Excerpt from the TRM:
	 *
	 * DEVID::MEMWIDTH[10:8]
	 * 0x2 Memory interface databus is 32 bits wide.
	 * 0x3 Memory interface databus is 64 bits wide.
	 * 0x4 Memory interface databus is 128 bits wide.
	 * 0x5 Memory interface databus is 256 bits wide.
	 */
	switch (BMVAL(devid, 8, 10)) {
	case 0x2:
		memwidth = TMC_MEM_INTF_WIDTH_32BITS;
		break;
	case 0x3:
		memwidth = TMC_MEM_INTF_WIDTH_64BITS;
		break;
	case 0x4:
		memwidth = TMC_MEM_INTF_WIDTH_128BITS;
		break;
	case 0x5:
		memwidth = TMC_MEM_INTF_WIDTH_256BITS;
		break;
	default:
		memwidth = 0;
	}

	return memwidth;
}

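/* Register dumps exposed through the "mgmt" sysfs group for debugging. */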
#define coresight_tmc_reg(name, offset)			\
	coresight_simple_reg32(struct tmc_drvdata, name, offset)
#define coresight_tmc_reg64(name, lo_off, hi_off)	\
	coresight_simple_reg64(struct tmc_drvdata, name, lo_off, hi_off)

coresight_tmc_reg(rsz, TMC_RSZ);
coresight_tmc_reg(sts, TMC_STS);
coresight_tmc_reg(trg, TMC_TRG);
coresight_tmc_reg(ctl, TMC_CTL);
coresight_tmc_reg(ffsr, TMC_FFSR);
coresight_tmc_reg(ffcr, TMC_FFCR);
coresight_tmc_reg(mode, TMC_MODE);
coresight_tmc_reg(pscr, TMC_PSCR);
coresight_tmc_reg(axictl, TMC_AXICTL);
coresight_tmc_reg(authstatus, TMC_AUTHSTATUS);
coresight_tmc_reg(devid, CORESIGHT_DEVID);
coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);

static struct attribute *coresight_tmc_mgmt_attrs[] = {
	&dev_attr_rsz.attr,
	&dev_attr_sts.attr,
	&dev_attr_rrp.attr,
	&dev_attr_rwp.attr,
	&dev_attr_trg.attr,
	&dev_attr_ctl.attr,
	&dev_attr_ffsr.attr,
	&dev_attr_ffcr.attr,
	&dev_attr_mode.attr,
	&dev_attr_pscr.attr,
	&dev_attr_devid.attr,
	&dev_attr_dba.attr,
	&dev_attr_axictl.attr,
	&dev_attr_authstatus.attr,
	NULL,
};

static ssize_t trigger_cntr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val = drvdata->trigger_cntr;

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->trigger_cntr = val;
	return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

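/*
 * buffer_size selects the size of the trace buffer used by a TMC-ETR;
 * writes are rejected for ETB/ETF and the value must be page aligned.
 */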
static ssize_t buffer_size_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	return sprintf(buf, "%#x\n", drvdata->size);
}

static ssize_t buffer_size_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/* Only permitted for TMC-ETRs */
	if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
		return -EPERM;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;
	/* The buffer size should be page aligned */
	if (val & (PAGE_SIZE - 1))
		return -EINVAL;
	drvdata->size = val;
	return size;
}

static DEVICE_ATTR_RW(buffer_size);

static struct attribute *coresight_tmc_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_buffer_size.attr,
	NULL,
};

static const struct attribute_group coresight_tmc_group = {
	.attrs = coresight_tmc_attrs,
};

static const struct attribute_group coresight_tmc_mgmt_group = {
	.attrs = coresight_tmc_mgmt_attrs,
	.name = "mgmt",
};

const struct attribute_group *coresight_tmc_groups[] = {
	&coresight_tmc_group,
	&coresight_tmc_mgmt_group,
	NULL,
};

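/*
 * The "arm,scatter-gather" firmware property opts an ETR into scatter-gather
 * mode when the hardware also advertises support for it, see
 * tmc_etr_setup_caps() below.
 */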
static inline bool tmc_etr_can_use_sg(struct device *dev)
{
	return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
}

static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
{
	u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);

	return (auth & TMC_AUTH_NSID_MASK) == 0x3;
}

/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
{
	int rc;
	u32 dma_mask = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(parent);

	if (!tmc_etr_has_non_secure_access(drvdata))
		return -EACCES;

	/* Set the unadvertised capabilities */
	tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);

	if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
		tmc_etr_set_cap(drvdata, TMC_ETR_SG);

	/* Check if the AXI address width is available */
	if (devid & TMC_DEVID_AXIAW_VALID)
		dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
				TMC_DEVID_AXIAW_MASK);

	/*
	 * Unless specified in the device configuration, ETR uses a 40-bit
	 * AXI master in place of the embedded SRAM of ETB/ETF.
	 */
	switch (dma_mask) {
	case 32:
	case 40:
	case 44:
	case 48:
	case 52:
		dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
		break;
	default:
		dma_mask = 40;
	}

	rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
	if (rc)
		dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
	return rc;
}

static u32 tmc_etr_get_default_buffer_size(struct device *dev)
{
	u32 size;

	if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
		size = SZ_1M;
	return size;
}

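/*
 * Probe: map the register space, derive the configuration type (ETB, ETF or
 * ETR) and memory width from DEVID, pick a buffer size, then register both a
 * coresight sink/link device and a misc device for reading out the trace.
 */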
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	u32 devid;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct tmc_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc desc = { 0 };
	struct coresight_dev_list *dev_list = NULL;

	ret = -ENOMEM;
	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		goto out;

	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out;
	}

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
	drvdata->config_type = BMVAL(devid, 6, 7);
	drvdata->memwidth = tmc_get_memwidth(devid);
	/* This device is not associated with a session */
	drvdata->pid = -1;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		drvdata->size = tmc_etr_get_default_buffer_size(dev);
	else
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;

	desc.dev = dev;
	desc.groups = coresight_tmc_groups;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.ops = &tmc_etb_cs_ops;
		dev_list = &etb_devs;
		break;
	case TMC_CONFIG_TYPE_ETR:
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.ops = &tmc_etr_cs_ops;
		ret = tmc_etr_setup_caps(dev, devid,
					 coresight_get_uci_data(id));
		if (ret)
			goto out;
		idr_init(&drvdata->idr);
		mutex_init(&drvdata->idr_mutex);
		dev_list = &etr_devs;
		break;
	case TMC_CONFIG_TYPE_ETF:
		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
		desc.ops = &tmc_etf_cs_ops;
		dev_list = &etf_devs;
		break;
	default:
		pr_err("%s: Unsupported TMC config\n", desc.name);
		ret = -EINVAL;
		goto out;
	}

	desc.name = coresight_alloc_device_name(dev_list, dev);
	if (!desc.name) {
		ret = -ENOMEM;
		goto out;
	}

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto out;
	}
	adev->dev.platform_data = pdata;
	desc.pdata = pdata;

	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto out;
	}

	drvdata->miscdev.name = desc.name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &tmc_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		coresight_unregister(drvdata->csdev);
	else
		pm_runtime_put(&adev->dev);
out:
	return ret;
}

static const struct amba_id tmc_ids[] = {
	CS_AMBA_ID(0x000bb961),
	/* Coresight SoC 600 TMC-ETR/ETS */
	CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
	/* Coresight SoC 600 TMC-ETB */
	CS_AMBA_ID(0x000bb9e9),
	/* Coresight SoC 600 TMC-ETF */
	CS_AMBA_ID(0x000bb9ea),
	{ 0, 0},
};

static struct amba_driver tmc_driver = {
	.drv = {
		.name   = "coresight-tmc",
		.owner  = THIS_MODULE,
		.suppress_bind_attrs = true,
	},
	.probe		= tmc_probe,
	.id_table	= tmc_ids,
};
builtin_amba_driver(tmc_driver);