v6.2: drivers/hwtracing/coresight/coresight-tmc-core.c
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  3 *
  4 * Description: CoreSight Trace Memory Controller driver
  5 */
  6
  7#include <linux/kernel.h>
  8#include <linux/init.h>
  9#include <linux/types.h>
 10#include <linux/device.h>
 11#include <linux/idr.h>
 12#include <linux/io.h>
 13#include <linux/err.h>
 14#include <linux/fs.h>
 15#include <linux/miscdevice.h>
 16#include <linux/mutex.h>
 17#include <linux/property.h>
 18#include <linux/uaccess.h>
 19#include <linux/slab.h>
 20#include <linux/dma-mapping.h>
 21#include <linux/spinlock.h>
 22#include <linux/pm_runtime.h>
 23#include <linux/of.h>
 24#include <linux/coresight.h>
 25#include <linux/amba/bus.h>
 26
 27#include "coresight-priv.h"
 28#include "coresight-tmc.h"
 29
 30DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
 31DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
 32DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
 33
 34void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
 35{
 36	struct coresight_device *csdev = drvdata->csdev;
 37	struct csdev_access *csa = &csdev->access;
 38
 39	/* Ensure formatter, unformatter and hardware fifo are empty */
 40	if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
 41		dev_err(&csdev->dev,
 42			"timeout while waiting for TMC to be Ready\n");
 43	}
 44}
 45
 46void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
 47{
 48	struct coresight_device *csdev = drvdata->csdev;
 49	struct csdev_access *csa = &csdev->access;
 50	u32 ffcr;
 51
 52	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
 53	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
 54	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
 55	ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
 56	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
 57	/* Ensure flush completes */
 58	if (coresight_timeout(csa, TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
 59		dev_err(&csdev->dev,
 60		"timeout while waiting for completion of Manual Flush\n");
 61	}
 62
 63	tmc_wait_for_tmcready(drvdata);
 64}
 65
 66void tmc_enable_hw(struct tmc_drvdata *drvdata)
 67{
 68	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
 69}
 70
 71void tmc_disable_hw(struct tmc_drvdata *drvdata)
 72{
 73	writel_relaxed(0x0, drvdata->base + TMC_CTL);
 74}
 75
 76u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
 77{
 78	u32 mask = 0;
 79
 80	/*
 81	 * When moving RRP or an offset address forward, the new values must
 82	 * be byte-address aligned to the width of the trace memory databus
 83	 * _and_ to a frame boundary (16 byte), whichever is the biggest. For
 84	 * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
 85	 * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
 86	 * be 0s.
 87	 */
 88	switch (drvdata->memwidth) {
 89	case TMC_MEM_INTF_WIDTH_32BITS:
 90	case TMC_MEM_INTF_WIDTH_64BITS:
 91	case TMC_MEM_INTF_WIDTH_128BITS:
 92		mask = GENMASK(31, 4);
 93		break;
 94	case TMC_MEM_INTF_WIDTH_256BITS:
 95		mask = GENMASK(31, 5);
 96		break;
 97	}
 98
 99	return mask;
100}
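
The mask returned by tmc_get_memwidth_mask() feeds the pointer-alignment rule described in the comment above; the actual users live in the ETB/ETF and ETR buffer handling files, not in this one. A minimal sketch of the intended usage, with a hypothetical helper name chosen here for illustration only:

/*
 * Illustrative only (not part of the kernel source): align a candidate
 * read pointer the way the comment above requires, i.e. down to the
 * trace memory databus width and the 16-byte frame boundary.
 */
static u32 tmc_align_read_ptr(struct tmc_drvdata *drvdata, u32 read_ptr)
{
	return read_ptr & tmc_get_memwidth_mask(drvdata);
}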
101
102static int tmc_read_prepare(struct tmc_drvdata *drvdata)
103{
104	int ret = 0;
105
106	switch (drvdata->config_type) {
107	case TMC_CONFIG_TYPE_ETB:
108	case TMC_CONFIG_TYPE_ETF:
109		ret = tmc_read_prepare_etb(drvdata);
110		break;
111	case TMC_CONFIG_TYPE_ETR:
112		ret = tmc_read_prepare_etr(drvdata);
113		break;
114	default:
115		ret = -EINVAL;
116	}
117
118	if (!ret)
119		dev_dbg(&drvdata->csdev->dev, "TMC read start\n");
120
121	return ret;
122}
123
124static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
125{
126	int ret = 0;
127
128	switch (drvdata->config_type) {
129	case TMC_CONFIG_TYPE_ETB:
130	case TMC_CONFIG_TYPE_ETF:
131		ret = tmc_read_unprepare_etb(drvdata);
132		break;
133	case TMC_CONFIG_TYPE_ETR:
134		ret = tmc_read_unprepare_etr(drvdata);
135		break;
136	default:
137		ret = -EINVAL;
138	}
139
140	if (!ret)
141		dev_dbg(&drvdata->csdev->dev, "TMC read end\n");
142
143	return ret;
144}
145
146static int tmc_open(struct inode *inode, struct file *file)
147{
148	int ret;
149	struct tmc_drvdata *drvdata = container_of(file->private_data,
150						   struct tmc_drvdata, miscdev);
151
152	ret = tmc_read_prepare(drvdata);
153	if (ret)
154		return ret;
155
156	nonseekable_open(inode, file);
157
158	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
159	return 0;
160}
161
162static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
163					  loff_t pos, size_t len, char **bufpp)
164{
165	switch (drvdata->config_type) {
166	case TMC_CONFIG_TYPE_ETB:
167	case TMC_CONFIG_TYPE_ETF:
168		return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
169	case TMC_CONFIG_TYPE_ETR:
170		return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
171	}
172
173	return -EINVAL;
174}
175
176static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
177			loff_t *ppos)
178{
179	char *bufp;
180	ssize_t actual;
181	struct tmc_drvdata *drvdata = container_of(file->private_data,
182						   struct tmc_drvdata, miscdev);
183	actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
184	if (actual <= 0)
185		return 0;
186
187	if (copy_to_user(data, bufp, actual)) {
188		dev_dbg(&drvdata->csdev->dev,
189			"%s: copy_to_user failed\n", __func__);
190		return -EFAULT;
191	}
192
193	*ppos += actual;
194	dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);
195
196	return actual;
197}
198
199static int tmc_release(struct inode *inode, struct file *file)
200{
201	int ret;
202	struct tmc_drvdata *drvdata = container_of(file->private_data,
203						   struct tmc_drvdata, miscdev);
204
205	ret = tmc_read_unprepare(drvdata);
206	if (ret)
207		return ret;
208
209	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
210	return 0;
211}
212
213static const struct file_operations tmc_fops = {
214	.owner		= THIS_MODULE,
215	.open		= tmc_open,
216	.read		= tmc_read,
217	.release	= tmc_release,
218	.llseek		= no_llseek,
219};
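
These file operations back the misc character device registered at the end of tmc_probe(), so captured trace data can be pulled out from userspace; opening the node runs tmc_read_prepare() so the buffer can be read back, and releasing it runs tmc_read_unprepare(). A hedged sketch of a userspace reader follows, assuming a hypothetical node name such as /dev/tmc_etf0 (the real name comes from coresight_alloc_device_name() in tmc_probe()):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/tmc_etf0", O_RDONLY);	/* hypothetical node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* tmc_read() returns 0 once the captured trace is exhausted */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}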
220
221static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
222{
223	enum tmc_mem_intf_width memwidth;
224
225	/*
226	 * Excerpt from the TRM:
227	 *
228	 * DEVID::MEMWIDTH[10:8]
229	 * 0x2 Memory interface databus is 32 bits wide.
230	 * 0x3 Memory interface databus is 64 bits wide.
231	 * 0x4 Memory interface databus is 128 bits wide.
232	 * 0x5 Memory interface databus is 256 bits wide.
233	 */
234	switch (BMVAL(devid, 8, 10)) {
235	case 0x2:
236		memwidth = TMC_MEM_INTF_WIDTH_32BITS;
237		break;
238	case 0x3:
239		memwidth = TMC_MEM_INTF_WIDTH_64BITS;
240		break;
241	case 0x4:
242		memwidth = TMC_MEM_INTF_WIDTH_128BITS;
243		break;
244	case 0x5:
245		memwidth = TMC_MEM_INTF_WIDTH_256BITS;
246		break;
247	default:
248		memwidth = 0;
249	}
250
251	return memwidth;
252}
253
254static struct attribute *coresight_tmc_mgmt_attrs[] = {
255	coresight_simple_reg32(rsz, TMC_RSZ),
256	coresight_simple_reg32(sts, TMC_STS),
257	coresight_simple_reg64(rrp, TMC_RRP, TMC_RRPHI),
258	coresight_simple_reg64(rwp, TMC_RWP, TMC_RWPHI),
259	coresight_simple_reg32(trg, TMC_TRG),
260	coresight_simple_reg32(ctl, TMC_CTL),
261	coresight_simple_reg32(ffsr, TMC_FFSR),
262	coresight_simple_reg32(ffcr, TMC_FFCR),
263	coresight_simple_reg32(mode, TMC_MODE),
264	coresight_simple_reg32(pscr, TMC_PSCR),
265	coresight_simple_reg32(devid, CORESIGHT_DEVID),
266	coresight_simple_reg64(dba, TMC_DBALO, TMC_DBAHI),
267	coresight_simple_reg32(axictl, TMC_AXICTL),
268	coresight_simple_reg32(authstatus, TMC_AUTHSTATUS),
269	NULL,
270};
271
272static ssize_t trigger_cntr_show(struct device *dev,
273				 struct device_attribute *attr, char *buf)
274{
275	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
276	unsigned long val = drvdata->trigger_cntr;
277
278	return sprintf(buf, "%#lx\n", val);
279}
280
281static ssize_t trigger_cntr_store(struct device *dev,
282			     struct device_attribute *attr,
283			     const char *buf, size_t size)
284{
285	int ret;
286	unsigned long val;
287	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
288
289	ret = kstrtoul(buf, 16, &val);
290	if (ret)
291		return ret;
292
293	drvdata->trigger_cntr = val;
294	return size;
295}
296static DEVICE_ATTR_RW(trigger_cntr);
297
298static ssize_t buffer_size_show(struct device *dev,
299				struct device_attribute *attr, char *buf)
300{
301	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
302
303	return sprintf(buf, "%#x\n", drvdata->size);
304}
305
306static ssize_t buffer_size_store(struct device *dev,
307				 struct device_attribute *attr,
308				 const char *buf, size_t size)
309{
310	int ret;
311	unsigned long val;
312	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
313
314	/* Only permitted for TMC-ETRs */
315	if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
316		return -EPERM;
317
318	ret = kstrtoul(buf, 0, &val);
319	if (ret)
320		return ret;
321	/* The buffer size should be page aligned */
322	if (val & (PAGE_SIZE - 1))
323		return -EINVAL;
324	drvdata->size = val;
325	return size;
326}
327
328static DEVICE_ATTR_RW(buffer_size);
329
330static struct attribute *coresight_tmc_attrs[] = {
331	&dev_attr_trigger_cntr.attr,
332	&dev_attr_buffer_size.attr,
333	NULL,
334};
335
336static const struct attribute_group coresight_tmc_group = {
337	.attrs = coresight_tmc_attrs,
338};
339
340static const struct attribute_group coresight_tmc_mgmt_group = {
341	.attrs = coresight_tmc_mgmt_attrs,
342	.name = "mgmt",
343};
344
345static const struct attribute_group *coresight_tmc_groups[] = {
346	&coresight_tmc_group,
347	&coresight_tmc_mgmt_group,
348	NULL,
349};
350
351static inline bool tmc_etr_can_use_sg(struct device *dev)
352{
353	return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
354}
355
356static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
357{
358	u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);
359
360	return (auth & TMC_AUTH_NSID_MASK) == 0x3;
361}
362
363/* Detect and initialise the capabilities of a TMC ETR */
364static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
365{
366	int rc;
367	u32 dma_mask = 0;
368	struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
369
370	if (!tmc_etr_has_non_secure_access(drvdata))
371		return -EACCES;
372
373	/* Set the unadvertised capabilities */
374	tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
375
376	if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
377		tmc_etr_set_cap(drvdata, TMC_ETR_SG);
378
379	/* Check if the AXI address width is available */
380	if (devid & TMC_DEVID_AXIAW_VALID)
381		dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
382				TMC_DEVID_AXIAW_MASK);
383
384	/*
385	 * Unless specified in the device configuration, ETR uses a 40-bit
386	 * AXI master in place of the embedded SRAM of ETB/ETF.
387	 */
388	switch (dma_mask) {
389	case 32:
390	case 40:
391	case 44:
392	case 48:
393	case 52:
394		dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
395		break;
396	default:
397		dma_mask = 40;
398	}
399
400	rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
401	if (rc)
402		dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
403	return rc;
404}
405
406static u32 tmc_etr_get_default_buffer_size(struct device *dev)
407{
408	u32 size;
409
410	if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
411		size = SZ_1M;
412	return size;
413}
414
415static u32 tmc_etr_get_max_burst_size(struct device *dev)
416{
417	u32 burst_size;
418
419	if (fwnode_property_read_u32(dev->fwnode, "arm,max-burst-size",
420				     &burst_size))
421		return TMC_AXICTL_WR_BURST_16;
422
423	/* Only permissible values are 0 to 15 */
424	if (burst_size > 0xF)
425		burst_size = TMC_AXICTL_WR_BURST_16;
426
427	return burst_size;
428}
429
430static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
431{
432	int ret = 0;
433	u32 devid;
434	void __iomem *base;
435	struct device *dev = &adev->dev;
436	struct coresight_platform_data *pdata = NULL;
437	struct tmc_drvdata *drvdata;
438	struct resource *res = &adev->res;
439	struct coresight_desc desc = { 0 };
440	struct coresight_dev_list *dev_list = NULL;
441
442	ret = -ENOMEM;
443	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
444	if (!drvdata)
445		goto out;
446
447	dev_set_drvdata(dev, drvdata);
448
449	/* Validity for the resource is already checked by the AMBA core */
450	base = devm_ioremap_resource(dev, res);
451	if (IS_ERR(base)) {
452		ret = PTR_ERR(base);
453		goto out;
454	}
455
456	drvdata->base = base;
457	desc.access = CSDEV_ACCESS_IOMEM(base);
458
459	spin_lock_init(&drvdata->spinlock);
460
461	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
462	drvdata->config_type = BMVAL(devid, 6, 7);
463	drvdata->memwidth = tmc_get_memwidth(devid);
464	/* This device is not associated with a session */
465	drvdata->pid = -1;
466
467	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
468		drvdata->size = tmc_etr_get_default_buffer_size(dev);
469		drvdata->max_burst_size = tmc_etr_get_max_burst_size(dev);
470	} else {
471		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
472	}
473
474	desc.dev = dev;
475	desc.groups = coresight_tmc_groups;
476
477	switch (drvdata->config_type) {
478	case TMC_CONFIG_TYPE_ETB:
479		desc.type = CORESIGHT_DEV_TYPE_SINK;
480		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
481		desc.ops = &tmc_etb_cs_ops;
482		dev_list = &etb_devs;
483		break;
484	case TMC_CONFIG_TYPE_ETR:
485		desc.type = CORESIGHT_DEV_TYPE_SINK;
486		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
487		desc.ops = &tmc_etr_cs_ops;
488		ret = tmc_etr_setup_caps(dev, devid,
489					 coresight_get_uci_data(id));
490		if (ret)
491			goto out;
492		idr_init(&drvdata->idr);
493		mutex_init(&drvdata->idr_mutex);
494		dev_list = &etr_devs;
495		break;
496	case TMC_CONFIG_TYPE_ETF:
497		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
498		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
499		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
500		desc.ops = &tmc_etf_cs_ops;
501		dev_list = &etf_devs;
502		break;
503	default:
504		pr_err("%s: Unsupported TMC config\n", desc.name);
505		ret = -EINVAL;
506		goto out;
507	}
508
509	desc.name = coresight_alloc_device_name(dev_list, dev);
510	if (!desc.name) {
511		ret = -ENOMEM;
512		goto out;
513	}
514
515	pdata = coresight_get_platform_data(dev);
516	if (IS_ERR(pdata)) {
517		ret = PTR_ERR(pdata);
518		goto out;
519	}
520	adev->dev.platform_data = pdata;
521	desc.pdata = pdata;
522
523	drvdata->csdev = coresight_register(&desc);
524	if (IS_ERR(drvdata->csdev)) {
525		ret = PTR_ERR(drvdata->csdev);
526		goto out;
527	}
528
529	drvdata->miscdev.name = desc.name;
530	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
531	drvdata->miscdev.fops = &tmc_fops;
532	ret = misc_register(&drvdata->miscdev);
533	if (ret)
534		coresight_unregister(drvdata->csdev);
535	else
536		pm_runtime_put(&adev->dev);
537out:
538	return ret;
539}
540
541static void tmc_shutdown(struct amba_device *adev)
542{
543	unsigned long flags;
544	struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
545
546	spin_lock_irqsave(&drvdata->spinlock, flags);
547
548	if (drvdata->mode == CS_MODE_DISABLED)
549		goto out;
550
551	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
552		tmc_etr_disable_hw(drvdata);
553
554	/*
555	 * We do not care about coresight unregister here unlike remove
556	 * callback which is required for making coresight modular since
557	 * the system is going down after this.
558	 */
559out:
560	spin_unlock_irqrestore(&drvdata->spinlock, flags);
561}
562
563static void tmc_remove(struct amba_device *adev)
564{
565	struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);
566
567	/*
568	 * Since misc_open() holds a refcount on the f_ops, which is
569	 * etb fops in this case, device is there until last file
570	 * handler to this device is closed.
571	 */
572	misc_deregister(&drvdata->miscdev);
573	coresight_unregister(drvdata->csdev);
574}
575
576static const struct amba_id tmc_ids[] = {
577	CS_AMBA_ID(0x000bb961),
578	/* Coresight SoC 600 TMC-ETR/ETS */
579	CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
580	/* Coresight SoC 600 TMC-ETB */
581	CS_AMBA_ID(0x000bb9e9),
582	/* Coresight SoC 600 TMC-ETF */
583	CS_AMBA_ID(0x000bb9ea),
584	{ 0, 0},
585};
586
587MODULE_DEVICE_TABLE(amba, tmc_ids);
588
589static struct amba_driver tmc_driver = {
590	.drv = {
591		.name   = "coresight-tmc",
592		.owner  = THIS_MODULE,
593		.suppress_bind_attrs = true,
594	},
595	.probe		= tmc_probe,
596	.shutdown	= tmc_shutdown,
597	.remove		= tmc_remove,
598	.id_table	= tmc_ids,
599};
600
601module_amba_driver(tmc_driver);
602
603MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
604MODULE_DESCRIPTION("Arm CoreSight Trace Memory Controller driver");
605MODULE_LICENSE("GPL v2");
v6.8: drivers/hwtracing/coresight/coresight-tmc-core.c
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
  3 *
  4 * Description: CoreSight Trace Memory Controller driver
  5 */
  6
  7#include <linux/kernel.h>
  8#include <linux/init.h>
  9#include <linux/types.h>
 10#include <linux/device.h>
 11#include <linux/idr.h>
 12#include <linux/io.h>
 13#include <linux/iommu.h>
 14#include <linux/err.h>
 15#include <linux/fs.h>
 16#include <linux/miscdevice.h>
 17#include <linux/mutex.h>
 18#include <linux/property.h>
 19#include <linux/uaccess.h>
 20#include <linux/slab.h>
 21#include <linux/dma-mapping.h>
 22#include <linux/spinlock.h>
 23#include <linux/pm_runtime.h>
 24#include <linux/of.h>
 25#include <linux/coresight.h>
 26#include <linux/amba/bus.h>
 27
 28#include "coresight-priv.h"
 29#include "coresight-tmc.h"
 30
 31DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
 32DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
 33DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
 34
 35int tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
 36{
 37	struct coresight_device *csdev = drvdata->csdev;
 38	struct csdev_access *csa = &csdev->access;
 39
 40	/* Ensure formatter, unformatter and hardware fifo are empty */
 41	if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
 42		dev_err(&csdev->dev,
 43			"timeout while waiting for TMC to be Ready\n");
 44		return -EBUSY;
 45	}
 46	return 0;
 47}
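
Compared with the v6.2 listing above, this version reports the timeout (-EBUSY) to its caller instead of only logging it. A minimal sketch of how a caller might propagate that, using a hypothetical function name (the real callers are in the ETB/ETF and ETR code, not in this file):

/*
 * Illustrative only: enabling a sink can now fail if the TMC never
 * reports ready, instead of silently carrying on.
 */
static int example_enable_path(struct tmc_drvdata *drvdata)
{
	int rc = tmc_wait_for_tmcready(drvdata);

	if (rc)		/* -EBUSY: formatter/FIFO never drained */
		return rc;
	tmc_enable_hw(drvdata);
	return 0;
}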
 48
 49void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
 50{
 51	struct coresight_device *csdev = drvdata->csdev;
 52	struct csdev_access *csa = &csdev->access;
 53	u32 ffcr;
 54
 55	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
 56	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
 57	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
 58	ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
 59	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
 60	/* Ensure flush completes */
 61	if (coresight_timeout(csa, TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
 62		dev_err(&csdev->dev,
 63		"timeout while waiting for completion of Manual Flush\n");
 64	}
 65
 66	tmc_wait_for_tmcready(drvdata);
 67}
 68
 69void tmc_enable_hw(struct tmc_drvdata *drvdata)
 70{
 71	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
 72}
 73
 74void tmc_disable_hw(struct tmc_drvdata *drvdata)
 75{
 76	writel_relaxed(0x0, drvdata->base + TMC_CTL);
 77}
 78
 79u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
 80{
 81	u32 mask = 0;
 82
 83	/*
 84	 * When moving RRP or an offset address forward, the new values must
 85	 * be byte-address aligned to the width of the trace memory databus
 86	 * _and_ to a frame boundary (16 byte), whichever is the biggest. For
 87	 * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
 88	 * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
 89	 * be 0s.
 90	 */
 91	switch (drvdata->memwidth) {
 92	case TMC_MEM_INTF_WIDTH_32BITS:
 93	case TMC_MEM_INTF_WIDTH_64BITS:
 94	case TMC_MEM_INTF_WIDTH_128BITS:
 95		mask = GENMASK(31, 4);
 96		break;
 97	case TMC_MEM_INTF_WIDTH_256BITS:
 98		mask = GENMASK(31, 5);
 99		break;
100	}
101
102	return mask;
103}
104
105static int tmc_read_prepare(struct tmc_drvdata *drvdata)
106{
107	int ret = 0;
108
109	switch (drvdata->config_type) {
110	case TMC_CONFIG_TYPE_ETB:
111	case TMC_CONFIG_TYPE_ETF:
112		ret = tmc_read_prepare_etb(drvdata);
113		break;
114	case TMC_CONFIG_TYPE_ETR:
115		ret = tmc_read_prepare_etr(drvdata);
116		break;
117	default:
118		ret = -EINVAL;
119	}
120
121	if (!ret)
122		dev_dbg(&drvdata->csdev->dev, "TMC read start\n");
123
124	return ret;
125}
126
127static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
128{
129	int ret = 0;
130
131	switch (drvdata->config_type) {
132	case TMC_CONFIG_TYPE_ETB:
133	case TMC_CONFIG_TYPE_ETF:
134		ret = tmc_read_unprepare_etb(drvdata);
135		break;
136	case TMC_CONFIG_TYPE_ETR:
137		ret = tmc_read_unprepare_etr(drvdata);
138		break;
139	default:
140		ret = -EINVAL;
141	}
142
143	if (!ret)
144		dev_dbg(&drvdata->csdev->dev, "TMC read end\n");
145
146	return ret;
147}
148
149static int tmc_open(struct inode *inode, struct file *file)
150{
151	int ret;
152	struct tmc_drvdata *drvdata = container_of(file->private_data,
153						   struct tmc_drvdata, miscdev);
154
155	ret = tmc_read_prepare(drvdata);
156	if (ret)
157		return ret;
158
159	nonseekable_open(inode, file);
160
161	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
162	return 0;
163}
164
165static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
166					  loff_t pos, size_t len, char **bufpp)
167{
168	switch (drvdata->config_type) {
169	case TMC_CONFIG_TYPE_ETB:
170	case TMC_CONFIG_TYPE_ETF:
171		return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
172	case TMC_CONFIG_TYPE_ETR:
173		return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
174	}
175
176	return -EINVAL;
177}
178
179static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
180			loff_t *ppos)
181{
182	char *bufp;
183	ssize_t actual;
184	struct tmc_drvdata *drvdata = container_of(file->private_data,
185						   struct tmc_drvdata, miscdev);
186	actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
187	if (actual <= 0)
188		return 0;
189
190	if (copy_to_user(data, bufp, actual)) {
191		dev_dbg(&drvdata->csdev->dev,
192			"%s: copy_to_user failed\n", __func__);
193		return -EFAULT;
194	}
195
196	*ppos += actual;
197	dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);
198
199	return actual;
200}
201
202static int tmc_release(struct inode *inode, struct file *file)
203{
204	int ret;
205	struct tmc_drvdata *drvdata = container_of(file->private_data,
206						   struct tmc_drvdata, miscdev);
207
208	ret = tmc_read_unprepare(drvdata);
209	if (ret)
210		return ret;
211
212	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
213	return 0;
214}
215
216static const struct file_operations tmc_fops = {
217	.owner		= THIS_MODULE,
218	.open		= tmc_open,
219	.read		= tmc_read,
220	.release	= tmc_release,
221	.llseek		= no_llseek,
222};
223
224static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
225{
226	enum tmc_mem_intf_width memwidth;
227
228	/*
229	 * Excerpt from the TRM:
230	 *
231	 * DEVID::MEMWIDTH[10:8]
232	 * 0x2 Memory interface databus is 32 bits wide.
233	 * 0x3 Memory interface databus is 64 bits wide.
234	 * 0x4 Memory interface databus is 128 bits wide.
235	 * 0x5 Memory interface databus is 256 bits wide.
236	 */
237	switch (BMVAL(devid, 8, 10)) {
238	case 0x2:
239		memwidth = TMC_MEM_INTF_WIDTH_32BITS;
240		break;
241	case 0x3:
242		memwidth = TMC_MEM_INTF_WIDTH_64BITS;
243		break;
244	case 0x4:
245		memwidth = TMC_MEM_INTF_WIDTH_128BITS;
246		break;
247	case 0x5:
248		memwidth = TMC_MEM_INTF_WIDTH_256BITS;
249		break;
250	default:
251		memwidth = 0;
252	}
253
254	return memwidth;
255}
256
257static struct attribute *coresight_tmc_mgmt_attrs[] = {
258	coresight_simple_reg32(rsz, TMC_RSZ),
259	coresight_simple_reg32(sts, TMC_STS),
260	coresight_simple_reg64(rrp, TMC_RRP, TMC_RRPHI),
261	coresight_simple_reg64(rwp, TMC_RWP, TMC_RWPHI),
262	coresight_simple_reg32(trg, TMC_TRG),
263	coresight_simple_reg32(ctl, TMC_CTL),
264	coresight_simple_reg32(ffsr, TMC_FFSR),
265	coresight_simple_reg32(ffcr, TMC_FFCR),
266	coresight_simple_reg32(mode, TMC_MODE),
267	coresight_simple_reg32(pscr, TMC_PSCR),
268	coresight_simple_reg32(devid, CORESIGHT_DEVID),
269	coresight_simple_reg64(dba, TMC_DBALO, TMC_DBAHI),
270	coresight_simple_reg32(axictl, TMC_AXICTL),
271	coresight_simple_reg32(authstatus, TMC_AUTHSTATUS),
272	NULL,
273};
274
275static ssize_t trigger_cntr_show(struct device *dev,
276				 struct device_attribute *attr, char *buf)
277{
278	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
279	unsigned long val = drvdata->trigger_cntr;
280
281	return sprintf(buf, "%#lx\n", val);
282}
283
284static ssize_t trigger_cntr_store(struct device *dev,
285			     struct device_attribute *attr,
286			     const char *buf, size_t size)
287{
288	int ret;
289	unsigned long val;
290	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
291
292	ret = kstrtoul(buf, 16, &val);
293	if (ret)
294		return ret;
295
296	drvdata->trigger_cntr = val;
297	return size;
298}
299static DEVICE_ATTR_RW(trigger_cntr);
300
301static ssize_t buffer_size_show(struct device *dev,
302				struct device_attribute *attr, char *buf)
303{
304	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
305
306	return sprintf(buf, "%#x\n", drvdata->size);
307}
308
309static ssize_t buffer_size_store(struct device *dev,
310				 struct device_attribute *attr,
311				 const char *buf, size_t size)
312{
313	int ret;
314	unsigned long val;
315	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
316
317	/* Only permitted for TMC-ETRs */
318	if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
319		return -EPERM;
320
321	ret = kstrtoul(buf, 0, &val);
322	if (ret)
323		return ret;
324	/* The buffer size should be page aligned */
325	if (val & (PAGE_SIZE - 1))
326		return -EINVAL;
327	drvdata->size = val;
328	return size;
329}
330
331static DEVICE_ATTR_RW(buffer_size);
332
333static struct attribute *coresight_tmc_attrs[] = {
334	&dev_attr_trigger_cntr.attr,
335	&dev_attr_buffer_size.attr,
336	NULL,
337};
338
339static const struct attribute_group coresight_tmc_group = {
340	.attrs = coresight_tmc_attrs,
341};
342
343static const struct attribute_group coresight_tmc_mgmt_group = {
344	.attrs = coresight_tmc_mgmt_attrs,
345	.name = "mgmt",
346};
347
348static const struct attribute_group *coresight_etf_groups[] = {
349	&coresight_tmc_group,
350	&coresight_tmc_mgmt_group,
351	NULL,
352};
353
354static const struct attribute_group *coresight_etr_groups[] = {
355	&coresight_etr_group,
356	&coresight_tmc_group,
357	&coresight_tmc_mgmt_group,
358	NULL,
359};
360
361static inline bool tmc_etr_can_use_sg(struct device *dev)
362{
363	return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
364}
365
366static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
367{
368	u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);
369
370	return (auth & TMC_AUTH_NSID_MASK) == 0x3;
371}
372
373/* Detect and initialise the capabilities of a TMC ETR */
374static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
375{
376	int rc;
377	u32 dma_mask = 0;
378	struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
379
380	if (!tmc_etr_has_non_secure_access(drvdata))
381		return -EACCES;
382
383	/* Set the unadvertised capabilities */
384	tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);
385
386	if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
387		tmc_etr_set_cap(drvdata, TMC_ETR_SG);
388
389	/* Check if the AXI address width is available */
390	if (devid & TMC_DEVID_AXIAW_VALID)
391		dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
392				TMC_DEVID_AXIAW_MASK);
393
394	/*
395	 * Unless specified in the device configuration, ETR uses a 40-bit
396	 * AXI master in place of the embedded SRAM of ETB/ETF.
397	 */
398	switch (dma_mask) {
399	case 32:
400	case 40:
401	case 44:
402	case 48:
403	case 52:
404		dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
405		break;
406	default:
407		dma_mask = 40;
408	}
409
410	rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
411	if (rc)
412		dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
413	return rc;
414}
415
416static u32 tmc_etr_get_default_buffer_size(struct device *dev)
417{
418	u32 size;
419
420	if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
421		size = SZ_1M;
422	return size;
423}
424
425static u32 tmc_etr_get_max_burst_size(struct device *dev)
426{
427	u32 burst_size;
428
429	if (fwnode_property_read_u32(dev->fwnode, "arm,max-burst-size",
430				     &burst_size))
431		return TMC_AXICTL_WR_BURST_16;
432
433	/* Only permissible values are 0 to 15 */
434	if (burst_size > 0xF)
435		burst_size = TMC_AXICTL_WR_BURST_16;
436
437	return burst_size;
438}
439
440static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
441{
442	int ret = 0;
443	u32 devid;
444	void __iomem *base;
445	struct device *dev = &adev->dev;
446	struct coresight_platform_data *pdata = NULL;
447	struct tmc_drvdata *drvdata;
448	struct resource *res = &adev->res;
449	struct coresight_desc desc = { 0 };
450	struct coresight_dev_list *dev_list = NULL;
451
452	ret = -ENOMEM;
453	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
454	if (!drvdata)
455		goto out;
456
457	dev_set_drvdata(dev, drvdata);
458
459	/* Validity for the resource is already checked by the AMBA core */
460	base = devm_ioremap_resource(dev, res);
461	if (IS_ERR(base)) {
462		ret = PTR_ERR(base);
463		goto out;
464	}
465
466	drvdata->base = base;
467	desc.access = CSDEV_ACCESS_IOMEM(base);
468
469	spin_lock_init(&drvdata->spinlock);
470
471	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
472	drvdata->config_type = BMVAL(devid, 6, 7);
473	drvdata->memwidth = tmc_get_memwidth(devid);
474	/* This device is not associated with a session */
475	drvdata->pid = -1;
476	drvdata->etr_mode = ETR_MODE_AUTO;
477
478	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
479		drvdata->size = tmc_etr_get_default_buffer_size(dev);
480		drvdata->max_burst_size = tmc_etr_get_max_burst_size(dev);
481	} else {
482		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
483	}
484
485	desc.dev = dev;
486
487	switch (drvdata->config_type) {
488	case TMC_CONFIG_TYPE_ETB:
489		desc.groups = coresight_etf_groups;
490		desc.type = CORESIGHT_DEV_TYPE_SINK;
491		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
492		desc.ops = &tmc_etb_cs_ops;
493		dev_list = &etb_devs;
494		break;
495	case TMC_CONFIG_TYPE_ETR:
496		desc.groups = coresight_etr_groups;
497		desc.type = CORESIGHT_DEV_TYPE_SINK;
498		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
499		desc.ops = &tmc_etr_cs_ops;
500		ret = tmc_etr_setup_caps(dev, devid,
501					 coresight_get_uci_data(id));
502		if (ret)
503			goto out;
504		idr_init(&drvdata->idr);
505		mutex_init(&drvdata->idr_mutex);
506		dev_list = &etr_devs;
507		break;
508	case TMC_CONFIG_TYPE_ETF:
509		desc.groups = coresight_etf_groups;
510		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
511		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
512		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
513		desc.ops = &tmc_etf_cs_ops;
514		dev_list = &etf_devs;
515		break;
516	default:
517		pr_err("%s: Unsupported TMC config\n", desc.name);
518		ret = -EINVAL;
519		goto out;
520	}
521
522	desc.name = coresight_alloc_device_name(dev_list, dev);
523	if (!desc.name) {
524		ret = -ENOMEM;
525		goto out;
526	}
527
528	pdata = coresight_get_platform_data(dev);
529	if (IS_ERR(pdata)) {
530		ret = PTR_ERR(pdata);
531		goto out;
532	}
533	adev->dev.platform_data = pdata;
534	desc.pdata = pdata;
535
536	drvdata->csdev = coresight_register(&desc);
537	if (IS_ERR(drvdata->csdev)) {
538		ret = PTR_ERR(drvdata->csdev);
539		goto out;
540	}
541
542	drvdata->miscdev.name = desc.name;
543	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
544	drvdata->miscdev.fops = &tmc_fops;
545	ret = misc_register(&drvdata->miscdev);
546	if (ret)
547		coresight_unregister(drvdata->csdev);
548	else
549		pm_runtime_put(&adev->dev);
550out:
551	return ret;
552}
553
554static void tmc_shutdown(struct amba_device *adev)
555{
556	unsigned long flags;
557	struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
558
559	spin_lock_irqsave(&drvdata->spinlock, flags);
560
561	if (drvdata->mode == CS_MODE_DISABLED)
562		goto out;
563
564	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
565		tmc_etr_disable_hw(drvdata);
566
567	/*
568	 * We do not care about coresight unregister here unlike remove
569	 * callback which is required for making coresight modular since
570	 * the system is going down after this.
571	 */
572out:
573	spin_unlock_irqrestore(&drvdata->spinlock, flags);
574}
575
576static void tmc_remove(struct amba_device *adev)
577{
578	struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);
579
580	/*
581	 * Since misc_open() holds a refcount on the f_ops, which is
582	 * etb fops in this case, device is there until last file
583	 * handler to this device is closed.
584	 */
585	misc_deregister(&drvdata->miscdev);
586	coresight_unregister(drvdata->csdev);
587}
588
589static const struct amba_id tmc_ids[] = {
590	CS_AMBA_ID(0x000bb961),
591	/* Coresight SoC 600 TMC-ETR/ETS */
592	CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
593	/* Coresight SoC 600 TMC-ETB */
594	CS_AMBA_ID(0x000bb9e9),
595	/* Coresight SoC 600 TMC-ETF */
596	CS_AMBA_ID(0x000bb9ea),
597	{ 0, 0},
598};
599
600MODULE_DEVICE_TABLE(amba, tmc_ids);
601
602static struct amba_driver tmc_driver = {
603	.drv = {
604		.name   = "coresight-tmc",
605		.owner  = THIS_MODULE,
606		.suppress_bind_attrs = true,
607	},
608	.probe		= tmc_probe,
609	.shutdown	= tmc_shutdown,
610	.remove		= tmc_remove,
611	.id_table	= tmc_ids,
612};
613
614module_amba_driver(tmc_driver);
615
616MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
617MODULE_DESCRIPTION("Arm CoreSight Trace Memory Controller driver");
618MODULE_LICENSE("GPL v2");