// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Trace Memory Controller driver
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>

#include "coresight-priv.h"
#include "coresight-tmc.h"

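/*
 * Each TMC configuration keeps its own name list so devices are enumerated
 * per type (e.g. tmc_etb0, tmc_etf0, tmc_etr0). The allocated name is later
 * reused for the misc character device that exposes the trace data.
 */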
DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");

void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
	/* Ensure formatter, unformatter and hardware fifo are empty */
	if (coresight_timeout(drvdata->base,
			      TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
		dev_err(&drvdata->csdev->dev,
			"timeout while waiting for TMC to be Ready\n");
	}
}

void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
	u32 ffcr;

	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Ensure flush completes */
	if (coresight_timeout(drvdata->base,
			      TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(&drvdata->csdev->dev,
			"timeout while waiting for completion of Manual Flush\n");
	}

	tmc_wait_for_tmcready(drvdata);
}

void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}

void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(0x0, drvdata->base + TMC_CTL);
}

u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
{
	u32 mask = 0;

	/*
	 * When moving RRP or an offset address forward, the new values must
	 * be byte-address aligned to the width of the trace memory databus
	 * _and_ to a frame boundary (16 byte), whichever is the biggest. For
	 * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
	 * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
	 * be 0s.
	 */
	switch (drvdata->memwidth) {
	case TMC_MEM_INTF_WIDTH_32BITS:
	case TMC_MEM_INTF_WIDTH_64BITS:
	case TMC_MEM_INTF_WIDTH_128BITS:
		mask = GENMASK(31, 4);
		break;
	case TMC_MEM_INTF_WIDTH_256BITS:
		mask = GENMASK(31, 5);
		break;
	}

	return mask;
}

static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
	int ret = 0;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		ret = tmc_read_prepare_etb(drvdata);
		break;
	case TMC_CONFIG_TYPE_ETR:
		ret = tmc_read_prepare_etr(drvdata);
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		dev_dbg(&drvdata->csdev->dev, "TMC read start\n");

	return ret;
}

static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
{
	int ret = 0;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		ret = tmc_read_unprepare_etb(drvdata);
		break;
	case TMC_CONFIG_TYPE_ETR:
		ret = tmc_read_unprepare_etr(drvdata);
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		dev_dbg(&drvdata->csdev->dev, "TMC read end\n");

	return ret;
}

static int tmc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	ret = tmc_read_prepare(drvdata);
	if (ret)
		return ret;

	nonseekable_open(inode, file);

	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
	return 0;
}

static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
					  loff_t pos, size_t len, char **bufpp)
{
	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
	case TMC_CONFIG_TYPE_ETR:
		return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
	}

	return -EINVAL;
}

static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	char *bufp;
	ssize_t actual;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
	if (actual <= 0)
		return 0;

	if (copy_to_user(data, bufp, actual)) {
		dev_dbg(&drvdata->csdev->dev,
			"%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += actual;
	dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);

	return actual;
}

static int tmc_release(struct inode *inode, struct file *file)
{
	int ret;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	ret = tmc_read_unprepare(drvdata);
	if (ret)
		return ret;

	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
	return 0;
}

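/*
 * Trace data collected in sysfs mode is retrieved through this misc
 * character device (registered in tmc_probe() under the coresight device
 * name). A hedged usage sketch, assuming the node is named tmc_etf0:
 *
 *	dd if=/dev/tmc_etf0 of=trace.bin
 *
 * open() prepares the TMC for reading, read() streams the captured buffer
 * and release() restores the previous hardware state.
 */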
static const struct file_operations tmc_fops = {
	.owner		= THIS_MODULE,
	.open		= tmc_open,
	.read		= tmc_read,
	.release	= tmc_release,
	.llseek		= no_llseek,
};

static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
{
	enum tmc_mem_intf_width memwidth;

	/*
	 * Excerpt from the TRM:
	 *
	 * DEVID::MEMWIDTH[10:8]
	 * 0x2 Memory interface databus is 32 bits wide.
	 * 0x3 Memory interface databus is 64 bits wide.
	 * 0x4 Memory interface databus is 128 bits wide.
	 * 0x5 Memory interface databus is 256 bits wide.
	 */
	switch (BMVAL(devid, 8, 10)) {
	case 0x2:
		memwidth = TMC_MEM_INTF_WIDTH_32BITS;
		break;
	case 0x3:
		memwidth = TMC_MEM_INTF_WIDTH_64BITS;
		break;
	case 0x4:
		memwidth = TMC_MEM_INTF_WIDTH_128BITS;
		break;
	case 0x5:
		memwidth = TMC_MEM_INTF_WIDTH_256BITS;
		break;
	default:
		memwidth = 0;
	}

	return memwidth;
}

#define coresight_tmc_reg(name, offset)			\
	coresight_simple_reg32(struct tmc_drvdata, name, offset)
#define coresight_tmc_reg64(name, lo_off, hi_off)	\
	coresight_simple_reg64(struct tmc_drvdata, name, lo_off, hi_off)

coresight_tmc_reg(rsz, TMC_RSZ);
coresight_tmc_reg(sts, TMC_STS);
coresight_tmc_reg(trg, TMC_TRG);
coresight_tmc_reg(ctl, TMC_CTL);
coresight_tmc_reg(ffsr, TMC_FFSR);
coresight_tmc_reg(ffcr, TMC_FFCR);
coresight_tmc_reg(mode, TMC_MODE);
coresight_tmc_reg(pscr, TMC_PSCR);
coresight_tmc_reg(axictl, TMC_AXICTL);
coresight_tmc_reg(authstatus, TMC_AUTHSTATUS);
coresight_tmc_reg(devid, CORESIGHT_DEVID);
coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI);
coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI);
coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI);

static struct attribute *coresight_tmc_mgmt_attrs[] = {
	&dev_attr_rsz.attr,
	&dev_attr_sts.attr,
	&dev_attr_rrp.attr,
	&dev_attr_rwp.attr,
	&dev_attr_trg.attr,
	&dev_attr_ctl.attr,
	&dev_attr_ffsr.attr,
	&dev_attr_ffcr.attr,
	&dev_attr_mode.attr,
	&dev_attr_pscr.attr,
	&dev_attr_devid.attr,
	&dev_attr_dba.attr,
	&dev_attr_axictl.attr,
	&dev_attr_authstatus.attr,
	NULL,
};

static ssize_t trigger_cntr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val = drvdata->trigger_cntr;

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->trigger_cntr = val;
	return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

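/*
 * For TMC-ETRs the size of the system memory trace buffer can be tuned from
 * sysfs before a session is started. A hedged usage sketch, assuming the
 * device is named tmc_etr0 (the value must be page aligned):
 *
 *	echo 0x1000000 > /sys/bus/coresight/devices/tmc_etr0/buffer_size
 */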
static ssize_t buffer_size_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	return sprintf(buf, "%#x\n", drvdata->size);
}

static ssize_t buffer_size_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/* Only permitted for TMC-ETRs */
	if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
		return -EPERM;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;
	/* The buffer size should be page aligned */
	if (val & (PAGE_SIZE - 1))
		return -EINVAL;
	drvdata->size = val;
	return size;
}

static DEVICE_ATTR_RW(buffer_size);

static struct attribute *coresight_tmc_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_buffer_size.attr,
	NULL,
};

static const struct attribute_group coresight_tmc_group = {
	.attrs = coresight_tmc_attrs,
};

static const struct attribute_group coresight_tmc_mgmt_group = {
	.attrs = coresight_tmc_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group *coresight_tmc_groups[] = {
	&coresight_tmc_group,
	&coresight_tmc_mgmt_group,
	NULL,
};

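/*
 * The "arm,scatter-gather" firmware property advertises that it is safe for
 * the ETR to use its scatter-gather mode, i.e. to walk page tables held in
 * system memory rather than requiring one physically contiguous buffer.
 */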
static inline bool tmc_etr_can_use_sg(struct device *dev)
{
	return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
}

static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
{
	u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);

	return (auth & TMC_AUTH_NSID_MASK) == 0x3;
}

/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
{
	int rc;
	u32 dma_mask = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(parent);

	if (!tmc_etr_has_non_secure_access(drvdata))
		return -EACCES;

	/* Set the unadvertised capabilities */
	tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);

	if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
		tmc_etr_set_cap(drvdata, TMC_ETR_SG);

	/* Check if the AXI address width is available */
	if (devid & TMC_DEVID_AXIAW_VALID)
		dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
			    TMC_DEVID_AXIAW_MASK);

	/*
	 * Unless specified in the device configuration, ETR uses a 40-bit
	 * AXI master in place of the embedded SRAM of ETB/ETF.
	 */
	switch (dma_mask) {
	case 32:
	case 40:
	case 44:
	case 48:
	case 52:
		dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
		break;
	default:
		dma_mask = 40;
	}

	rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
	if (rc)
		dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
	return rc;
}

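/*
 * Default size of the ETR system memory buffer: taken from the optional
 * "arm,buffer-size" firmware property, falling back to 1MB when the
 * property is absent.
 */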
static u32 tmc_etr_get_default_buffer_size(struct device *dev)
{
	u32 size;

	if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
		size = SZ_1M;
	return size;
}

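/*
 * One driver handles all three TMC configurations. DEVID[7:6] tells us
 * whether this instance is an ETB, ETF or ETR; probe registers the matching
 * coresight sink/link operations and a misc character device for retrieving
 * trace data collected in sysfs mode.
 */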
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	u32 devid;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct tmc_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc desc = { 0 };
	struct coresight_dev_list *dev_list = NULL;

	ret = -ENOMEM;
	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		goto out;

	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out;
	}

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
	drvdata->config_type = BMVAL(devid, 6, 7);
	drvdata->memwidth = tmc_get_memwidth(devid);
	/* This device is not associated with a session */
	drvdata->pid = -1;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		drvdata->size = tmc_etr_get_default_buffer_size(dev);
	else
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;

	desc.dev = dev;
	desc.groups = coresight_tmc_groups;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.ops = &tmc_etb_cs_ops;
		dev_list = &etb_devs;
		break;
	case TMC_CONFIG_TYPE_ETR:
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
		desc.ops = &tmc_etr_cs_ops;
		ret = tmc_etr_setup_caps(dev, devid,
					 coresight_get_uci_data(id));
		if (ret)
			goto out;
		idr_init(&drvdata->idr);
		mutex_init(&drvdata->idr_mutex);
		dev_list = &etr_devs;
		break;
	case TMC_CONFIG_TYPE_ETF:
		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
		desc.ops = &tmc_etf_cs_ops;
		dev_list = &etf_devs;
		break;
	default:
		pr_err("%s: Unsupported TMC config\n", desc.name);
		ret = -EINVAL;
		goto out;
	}

	desc.name = coresight_alloc_device_name(dev_list, dev);
	if (!desc.name) {
		ret = -ENOMEM;
		goto out;
	}

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto out;
	}
	adev->dev.platform_data = pdata;
	desc.pdata = pdata;

	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto out;
	}

	drvdata->miscdev.name = desc.name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &tmc_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		coresight_unregister(drvdata->csdev);
	else
		pm_runtime_put(&adev->dev);
out:
	return ret;
}

static void tmc_shutdown(struct amba_device *adev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = amba_get_drvdata(adev);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->mode == CS_MODE_DISABLED)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		tmc_etr_disable_hw(drvdata);

	/*
	 * We do not care about coresight unregister here unlike remove
	 * callback which is required for making coresight modular since
	 * the system is going down after this.
	 */
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
}

static const struct amba_id tmc_ids[] = {
	CS_AMBA_ID(0x000bb961),
	/* Coresight SoC 600 TMC-ETR/ETS */
	CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
	/* Coresight SoC 600 TMC-ETB */
	CS_AMBA_ID(0x000bb9e9),
	/* Coresight SoC 600 TMC-ETF */
	CS_AMBA_ID(0x000bb9ea),
	{ 0, 0},
};

static struct amba_driver tmc_driver = {
	.drv = {
		.name   = "coresight-tmc",
		.owner  = THIS_MODULE,
		.suppress_bind_attrs = true,
	},
	.probe		= tmc_probe,
	.shutdown	= tmc_shutdown,
	.id_table	= tmc_ids,
};
builtin_amba_driver(tmc_driver);