v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for MTK architected m4u v1 implementations
 *
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Honghui Zhang <honghui.zhang@mediatek.com>
 *
 * Based on driver/iommu/mtk_iommu.c
 */
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <linux/init.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>
#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR			0x000

#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define F_MMU_FAULT_VA_MSK			0xfffff000
#define MTK_PROTECT_PA_ALIGN			128

#define REG_MMU_CTRL_REG			0x210
#define F_MMU_CTRL_COHERENT_EN			BIT(8)
#define REG_MMU_IVRP_PADDR			0x214
#define REG_MMU_INT_CONTROL			0x220
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TABLE_WALK_FAULT			BIT(4)
#define F_INT_TLB_MISS_FAULT			BIT(5)
#define F_INT_PFH_DMA_FIFO_OVERFLOW		BIT(6)
#define F_INT_MISS_DMA_FIFO_OVERFLOW		BIT(7)

#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_FAULT_ST			0x224
#define REG_MMU_FAULT_VA			0x228
#define REG_MMU_INVLD_PA			0x22C
#define REG_MMU_INT_ID				0x388
#define REG_MMU_INVALIDATE			0x5c0
#define REG_MMU_INVLD_START_A			0x5c4
#define REG_MMU_INVLD_END_A			0x5c8

#define REG_MMU_INV_SEL				0x5d8
#define REG_MMU_STANDARD_AXI_MODE		0x5e8

#define REG_MMU_DCM				0x5f0
#define F_MMU_DCM_ON				BIT(1)
#define REG_MMU_CPE_DONE			0x60c
#define F_DESC_VALID				0x2
#define F_DESC_NONSEC				BIT(3)
#define MT2701_M4U_TF_LARB(TF)			(6 - (((TF) >> 13) & 0x7))
#define MT2701_M4U_TF_PORT(TF)			(((TF) >> 8) & 0xF)
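
/*
 * Illustrative decode of the two macros above (editor's sketch, not part
 * of the driver). For a hypothetical REG_MMU_INT_ID value of 0xa300:
 *
 *   (0xa300 >> 13) & 0x7 = 0x5, so MT2701_M4U_TF_LARB(0xa300) = 6 - 5 = 1
 *   (0xa300 >> 8)  & 0xf = 0x3, so MT2701_M4U_TF_PORT(0xa300) = 3
 *
 * i.e. the fault would be attributed to larb 1, port 3.
 */
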
/* MTK generation one IOMMU HW only supports 4K size mappings */
#define MT2701_IOMMU_PAGE_SHIFT			12
#define MT2701_IOMMU_PAGE_SIZE			(1UL << MT2701_IOMMU_PAGE_SHIFT)

/*
 * The MTK m4u supports a 4GB iova address space and only 4K page
 * mappings, so the pagetable size is exactly 4M.
 */
#define M2701_IOMMU_PGT_SIZE			SZ_4M

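/*
 * Worked arithmetic behind SZ_4M (editor's sketch, not part of the driver):
 * a flat table with one u32 descriptor per 4K page of a 4GB space needs
 *
 *   (1ULL << 32) >> MT2701_IOMMU_PAGE_SHIFT = 0x100000 (1M) entries
 *   0x100000 entries * sizeof(u32)          = 4M bytes
 *
 * which is exactly M2701_IOMMU_PGT_SIZE.
 */
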
struct mtk_iommu_domain {
	spinlock_t			pgtlock; /* lock for page table */
	struct iommu_domain		domain;
	u32				*pgt_va;
	dma_addr_t			pgt_pa;
	struct mtk_iommu_data		*data;
};

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

static const int mt2701_m4u_in_larb[] = {
	LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
	LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
};

static inline int mt2701_m4u_to_larb(int id)
{
	int i;

	for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
		if ((id) >= mt2701_m4u_in_larb[i])
			return i;

	return 0;
}

static inline int mt2701_m4u_to_port(int id)
{
	int larb = mt2701_m4u_to_larb(id);

	return id - mt2701_m4u_in_larb[larb];
}
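
/*
 * Editor's sketch of the id -> (larb, port) split above, kept symbolic
 * since the LARB*_PORT_OFFSET values live in mt2701-larb-port.h: any id
 * with mt2701_m4u_in_larb[1] <= id < mt2701_m4u_in_larb[2] decodes to
 *
 *   mt2701_m4u_to_larb(id) = 1
 *   mt2701_m4u_to_port(id) = id - mt2701_m4u_in_larb[1]
 *
 * so the per-larb offsets partition the flat global port id space.
 */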

static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the TLB flush-all has completed */
}

static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
				unsigned long iova, size_t size)
{
	int ret;
	u32 tmp;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
		data->base + REG_MMU_INV_SEL);
	writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_START_A);
	writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);

	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
				tmp, tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_tlb_flush_all(data);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;

	/* Read error information from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);

	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = MT2701_M4U_TF_LARB(regval);
	fault_port = MT2701_M4U_TF_PORT(regval);

	/*
	 * The MTK v1 IOMMU HW cannot determine whether a fault is a read
	 * or a write fault, so report it as a read fault.
	 */
	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			IOMMU_FAULT_READ))
		dev_err_ratelimited(data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n",
			int_state, fault_iova, fault_pa,
			fault_larb, fault_port);

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu    *larb_mmu;
	unsigned int                 larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
		portid = mt2701_m4u_to_port(fwspec->ids[i]);
		larb_mmu = &data->larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
{
	struct mtk_iommu_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
					 &dom->pgt_pa, GFP_KERNEL);
	if (!dom->pgt_va)
		return -ENOMEM;

	writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);

	dom->data = data;

	return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	return &dom->domain;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = dom->data;

	dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
			dom->pgt_va, dom->pgt_pa);
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct dma_iommu_mapping *mtk_mapping;
	int ret;

	/* Only allow the domain that was created internally. */
	mtk_mapping = data->mapping;
	if (mtk_mapping->domain != domain)
		return 0;

	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);

	mtk_iommu_config(data, dev, false);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
	unsigned long flags;
	unsigned int i;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	u32 pabase = (u32)paddr;
	int map_size = 0;

	spin_lock_irqsave(&dom->pgtlock, flags);
	for (i = 0; i < page_num; i++) {
		if (pgt_base_iova[i]) {
			memset(pgt_base_iova, 0, i * sizeof(u32));
			break;
		}
		pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
		pabase += MT2701_IOMMU_PAGE_SIZE;
		map_size += MT2701_IOMMU_PAGE_SIZE;
	}

	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_tlb_flush_range(dom->data, iova, size);

	return map_size == size ? 0 : -EEXIST;
}
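
/*
 * Editor's sketch (not part of the driver) of the flat pagetable layout
 * used above: one u32 descriptor per 4K page, indexed by iova >> 12.
 * Mapping a hypothetical iova 0x40000000 to pa 0x80000000 writes
 *
 *   dom->pgt_va[0x40000000 >> 12] = 0x80000000 | F_DESC_VALID | F_DESC_NONSEC
 *                                 = 0x8000000a
 *
 * and mtk_iommu_iova_to_phys() below simply reads the same slot back and
 * masks off the low descriptor bits.
 */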

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size,
			      struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;

	spin_lock_irqsave(&dom->pgtlock, flags);
	memset(pgt_base_iova, 0, page_num * sizeof(u32));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_tlb_flush_range(dom->data, iova, size);

	return size;
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
	pa = pa & (~(MT2701_IOMMU_PAGE_SIZE - 1));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}

static const struct iommu_ops mtk_iommu_ops;

/*
 * MTK generation one IOMMU HW supports only one iommu domain; all the
 * clients share the same iova address space.
 */
static int mtk_iommu_create_mapping(struct device *dev,
				    struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;
	struct platform_device *m4updev;
	struct dma_iommu_mapping *mtk_mapping;
	int ret;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!fwspec) {
		ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
		if (ret)
			return ret;
		fwspec = dev_iommu_fwspec_get(dev);
	} else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_ops) {
		return -EINVAL;
	}

	if (!dev_iommu_priv_get(dev)) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
	}

	ret = iommu_fwspec_add_ids(dev, args->args, 1);
	if (ret)
		return ret;

	data = dev_iommu_priv_get(dev);
	mtk_mapping = data->mapping;
	if (!mtk_mapping) {
		/* The MTK iommu supports a 4GB iova address space. */
		mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
						0, 1ULL << 32);
		if (IS_ERR(mtk_mapping))
			return PTR_ERR(mtk_mapping);

		data->mapping = mtk_mapping;
	}

	return 0;
}
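
/*
 * Illustrative device-tree usage (editor's sketch; the node names and
 * the port id are hypothetical, while the compatible string and the
 * single-cell form follow from the args_count check above):
 *
 *   iommu: m4u@10205000 {
 *           compatible = "mediatek,mt2701-m4u";
 *           #iommu-cells = <1>;
 *   };
 *
 *   client@0 {
 *           iommus = <&iommu 42>;    // one cell: the global m4u port id
 *   };
 */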

static int mtk_iommu_def_domain_type(struct device *dev)
{
	return IOMMU_DOMAIN_UNMANAGED;
}

static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct of_phandle_args iommu_spec;
	struct of_phandle_iterator it;
	struct mtk_iommu_data *data;
	int err;

	of_for_each_phandle(&it, err, dev->of_node, "iommus",
			"#iommu-cells", -1) {
		int count = of_phandle_iterator_args(&it, iommu_spec.args,
					MAX_PHANDLE_ARGS);
		iommu_spec.np = of_node_get(it.node);
		iommu_spec.args_count = count;

		mtk_iommu_create_mapping(dev, &iommu_spec);

		/* dev->iommu_fwspec might have changed */
		fwspec = dev_iommu_fwspec_get(dev);

		of_node_put(iommu_spec.np);
	}

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return ERR_PTR(-ENODEV); /* Not an iommu client device */

	data = dev_iommu_priv_get(dev);

	return &data->iommu;
}

static void mtk_iommu_probe_finalize(struct device *dev)
{
	struct dma_iommu_mapping *mtk_mapping;
	struct mtk_iommu_data *data;
	int err;

	data        = dev_iommu_priv_get(dev);
	mtk_mapping = data->mapping;

	err = arm_iommu_attach_device(dev, mtk_mapping);
	if (err)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void mtk_iommu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return;

	iommu_fwspec_free(dev);
}

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TABLE_WALK_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_PFH_DMA_FIFO_OVERFLOW |
		F_INT_MISS_DMA_FIFO_OVERFLOW;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	/* Protect memory: HW will write here on a translation fault */
	writel_relaxed(data->protect_base,
			data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.probe_device	= mtk_iommu_probe_device,
	.probe_finalize = mtk_iommu_probe_finalize,
	.release_device	= mtk_iommu_release_device,
	.def_domain_type = mtk_iommu_def_domain_type,
	.device_group	= generic_device_group,
	.pgsize_bitmap	= ~0UL << MT2701_IOMMU_PAGE_SHIFT,
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt2701-m4u", },
	{}
};

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data		*data;
	struct device			*dev = &pdev->dev;
	struct resource			*res;
	struct component_match		*match = NULL;
	struct of_phandle_args		larb_spec;
	struct of_phandle_iterator	it;
	void				*protect;
	int				larb_nr, ret, err;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->dev = dev;

	/* Protect memory. HW will access it on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
			GFP_KERNEL | GFP_DMA);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = 0;
	of_for_each_phandle(&it, err, dev->of_node,
			"mediatek,larbs", NULL, 0) {
		struct platform_device *plarbdev;
		int count = of_phandle_iterator_args(&it, larb_spec.args,
					MAX_PHANDLE_ARGS);

		if (count)
			continue;

		larb_spec.np = of_node_get(it.node);
		if (!of_device_is_available(larb_spec.np))
			continue;

		plarbdev = of_find_device_by_node(larb_spec.np);
		if (!plarbdev) {
			plarbdev = of_platform_device_create(
						larb_spec.np, NULL,
						platform_bus_type.dev_root);
			if (!plarbdev) {
				of_node_put(larb_spec.np);
				return -EPROBE_DEFER;
			}
		}

		data->larb_imu[larb_nr].dev = &plarbdev->dev;
		component_match_add_release(dev, &match, release_of,
					    compare_of, larb_spec.np);
		larb_nr++;
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL);
	return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL);
	writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
		.name = "mtk-iommu-v1",
		.of_match_table = mtk_iommu_of_ids,
		.pm = &mtk_iommu_pm_ops,
	}
};

static int __init m4u_init(void)
{
	return platform_driver_register(&mtk_iommu_driver);
}
subsys_initcall(m4u_init);
v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for MTK architected m4u v1 implementations
 *
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Honghui Zhang <honghui.zhang@mediatek.com>
 *
 * Based on driver/iommu/mtk_iommu.c
 */
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <dt-bindings/memory/mtk-memory-port.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>

#define REG_MMU_PT_BASE_ADDR			0x000

#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define F_MMU_FAULT_VA_MSK			0xfffff000
#define MTK_PROTECT_PA_ALIGN			128

#define REG_MMU_CTRL_REG			0x210
#define F_MMU_CTRL_COHERENT_EN			BIT(8)
#define REG_MMU_IVRP_PADDR			0x214
#define REG_MMU_INT_CONTROL			0x220
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TABLE_WALK_FAULT			BIT(4)
#define F_INT_TLB_MISS_FAULT			BIT(5)
#define F_INT_PFH_DMA_FIFO_OVERFLOW		BIT(6)
#define F_INT_MISS_DMA_FIFO_OVERFLOW		BIT(7)

#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_FAULT_ST			0x224
#define REG_MMU_FAULT_VA			0x228
#define REG_MMU_INVLD_PA			0x22C
#define REG_MMU_INT_ID				0x388
#define REG_MMU_INVALIDATE			0x5c0
#define REG_MMU_INVLD_START_A			0x5c4
#define REG_MMU_INVLD_END_A			0x5c8

#define REG_MMU_INV_SEL				0x5d8
#define REG_MMU_STANDARD_AXI_MODE		0x5e8

#define REG_MMU_DCM				0x5f0
#define F_MMU_DCM_ON				BIT(1)
#define REG_MMU_CPE_DONE			0x60c
#define F_DESC_VALID				0x2
#define F_DESC_NONSEC				BIT(3)
#define MT2701_M4U_TF_LARB(TF)			(6 - (((TF) >> 13) & 0x7))
#define MT2701_M4U_TF_PORT(TF)			(((TF) >> 8) & 0xF)
/* MTK generation one IOMMU HW only supports 4K size mappings */
#define MT2701_IOMMU_PAGE_SHIFT			12
#define MT2701_IOMMU_PAGE_SIZE			(1UL << MT2701_IOMMU_PAGE_SHIFT)
#define MT2701_LARB_NR_MAX			3

/*
 * The MTK m4u supports a 4GB iova address space and only 4K page
 * mappings, so the pagetable size is exactly 4M.
 */
#define M2701_IOMMU_PGT_SIZE			SZ_4M

struct mtk_iommu_v1_suspend_reg {
	u32			standard_axi_mode;
	u32			dcm_dis;
	u32			ctrl_reg;
	u32			int_control0;
};

struct mtk_iommu_v1_data {
	void __iomem			*base;
	int				irq;
	struct device			*dev;
	struct clk			*bclk;
	phys_addr_t			protect_base; /* protect memory base */
	struct mtk_iommu_v1_domain	*m4u_dom;

	struct iommu_device		iommu;
	struct dma_iommu_mapping	*mapping;
	struct mtk_smi_larb_iommu	larb_imu[MTK_LARB_NR_MAX];

	struct mtk_iommu_v1_suspend_reg	reg;
};

struct mtk_iommu_v1_domain {
	spinlock_t			pgtlock; /* lock for page table */
	struct iommu_domain		domain;
	u32				*pgt_va;
	dma_addr_t			pgt_pa;
	struct mtk_iommu_v1_data	*data;
};

static int mtk_iommu_v1_bind(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);

	return component_bind_all(dev, &data->larb_imu);
}

static void mtk_iommu_v1_unbind(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);

	component_unbind_all(dev, &data->larb_imu);
}

static struct mtk_iommu_v1_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_v1_domain, domain);
}

static const int mt2701_m4u_in_larb[] = {
	LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
	LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
};

static inline int mt2701_m4u_to_larb(int id)
{
	int i;

	for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
		if ((id) >= mt2701_m4u_in_larb[i])
			return i;

	return 0;
}

static inline int mt2701_m4u_to_port(int id)
{
	int larb = mt2701_m4u_to_larb(id);

	return id - mt2701_m4u_in_larb[larb];
}

static void mtk_iommu_v1_tlb_flush_all(struct mtk_iommu_v1_data *data)
{
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the TLB flush-all has completed */
}

static void mtk_iommu_v1_tlb_flush_range(struct mtk_iommu_v1_data *data,
					 unsigned long iova, size_t size)
{
	int ret;
	u32 tmp;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
		data->base + REG_MMU_INV_SEL);
	writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_START_A);
	writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);

	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
				tmp, tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_v1_tlb_flush_all(data);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

static irqreturn_t mtk_iommu_v1_isr(int irq, void *dev_id)
{
	struct mtk_iommu_v1_data *data = dev_id;
	struct mtk_iommu_v1_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;

	/* Read error information from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);

	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = MT2701_M4U_TF_LARB(regval);
	fault_port = MT2701_M4U_TF_PORT(regval);

	/*
	 * The MTK v1 IOMMU HW cannot determine whether a fault is a read
	 * or a write fault, so report it as a read fault.
	 */
	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			IOMMU_FAULT_READ))
		dev_err_ratelimited(data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n",
			int_state, fault_iova, fault_pa,
			fault_larb, fault_port);

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	mtk_iommu_v1_tlb_flush_all(data);

	return IRQ_HANDLED;
}

static void mtk_iommu_v1_config(struct mtk_iommu_v1_data *data,
				struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu    *larb_mmu;
	unsigned int                 larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
		portid = mt2701_m4u_to_port(fwspec->ids[i]);
		larb_mmu = &data->larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
{
	struct mtk_iommu_v1_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
					 &dom->pgt_pa, GFP_KERNEL);
	if (!dom->pgt_va)
		return -ENOMEM;

	writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);

	dom->data = data;

	return 0;
}

static struct iommu_domain *mtk_iommu_v1_domain_alloc_paging(struct device *dev)
{
	struct mtk_iommu_v1_domain *dom;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	return &dom->domain;
}

static void mtk_iommu_v1_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_v1_data *data = dom->data;

	dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
			dom->pgt_va, dom->pgt_pa);
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	struct dma_iommu_mapping *mtk_mapping;
	int ret;

	/* Only allow the domain that was created internally. */
	mtk_mapping = data->mapping;
	if (mtk_mapping->domain != domain)
		return 0;

	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_v1_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	}

	mtk_iommu_v1_config(data, dev, true);
	return 0;
}

static int mtk_iommu_v1_identity_attach(struct iommu_domain *identity_domain,
					struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);

	mtk_iommu_v1_config(data, dev, false);
	return 0;
}

static struct iommu_domain_ops mtk_iommu_v1_identity_ops = {
	.attach_dev = mtk_iommu_v1_identity_attach,
};

static struct iommu_domain mtk_iommu_v1_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &mtk_iommu_v1_identity_ops,
};
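
/*
 * Editor's note (illustrative, not core code): this version has no
 * .detach_dev; the v5.9 mtk_iommu_detach_device() path is now modeled
 * as attaching the singleton identity domain above, roughly
 *
 *   attach(identity) -> mtk_iommu_v1_identity_attach(...)
 *                    -> mtk_iommu_v1_config(data, dev, false);
 *
 * i.e. "detach" simply disables the device's ports in the SMI larb.
 */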

static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
			    int prot, gfp_t gfp, size_t *mapped)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	unsigned int i;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	u32 pabase = (u32)paddr;

	spin_lock_irqsave(&dom->pgtlock, flags);
	for (i = 0; i < pgcount; i++) {
		if (pgt_base_iova[i])
			break;
		pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
		pabase += MT2701_IOMMU_PAGE_SIZE;
	}

	spin_unlock_irqrestore(&dom->pgtlock, flags);

	*mapped = i * MT2701_IOMMU_PAGE_SIZE;
	mtk_iommu_v1_tlb_flush_range(dom->data, iova, *mapped);

	return i == pgcount ? 0 : -EEXIST;
}
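
/*
 * Editor's note (illustrative): unlike the v5.9 mtk_iommu_map(), which
 * wiped its partially written entries itself, this map_pages variant
 * reports progress through *mapped. If, say, entry 3 of pgcount = 8 is
 * already in use, it stops with *mapped = 3 * MT2701_IOMMU_PAGE_SIZE and
 * returns -EEXIST, leaving the caller to unwind the pages it reported
 * as mapped (an assumption about the iommu core's error handling).
 */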

static size_t mtk_iommu_v1_unmap(struct iommu_domain *domain, unsigned long iova,
				 size_t pgsize, size_t pgcount,
				 struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	size_t size = pgcount * MT2701_IOMMU_PAGE_SIZE;

	spin_lock_irqsave(&dom->pgtlock, flags);
	memset(pgt_base_iova, 0, pgcount * sizeof(u32));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_v1_tlb_flush_range(dom->data, iova, size);

	return size;
}

static phys_addr_t mtk_iommu_v1_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
	pa = pa & (~(MT2701_IOMMU_PAGE_SIZE - 1));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}

static const struct iommu_ops mtk_iommu_v1_ops;

/*
 * MTK generation one IOMMU HW supports only one iommu domain; all the
 * clients share the same iova address space.
 */
static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_args *args)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_v1_data *data;
	struct platform_device *m4updev;
	struct dma_iommu_mapping *mtk_mapping;
	int ret;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!fwspec) {
		ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_v1_ops);
		if (ret)
			return ret;
		fwspec = dev_iommu_fwspec_get(dev);
	} else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_v1_ops) {
		return -EINVAL;
	}

	if (!dev_iommu_priv_get(dev)) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
	}

	ret = iommu_fwspec_add_ids(dev, args->args, 1);
	if (ret)
		return ret;

	data = dev_iommu_priv_get(dev);
	mtk_mapping = data->mapping;
	if (!mtk_mapping) {
		/* The MTK iommu supports a 4GB iova address space. */
		mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
						0, 1ULL << 32);
		if (IS_ERR(mtk_mapping))
			return PTR_ERR(mtk_mapping);

		data->mapping = mtk_mapping;
	}

	return 0;
}

static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct of_phandle_args iommu_spec;
	struct mtk_iommu_v1_data *data;
	int err, idx = 0, larbid, larbidx;
	struct device_link *link;
	struct device *larbdev;

	/*
	 * In the deferred case, free the existing fwspec.
	 * Always initialize the fwspec internally.
	 */
	if (fwspec) {
		iommu_fwspec_free(dev);
		fwspec = dev_iommu_fwspec_get(dev);
	}

	while (!of_parse_phandle_with_args(dev->of_node, "iommus",
					   "#iommu-cells",
					   idx, &iommu_spec)) {

		err = mtk_iommu_v1_create_mapping(dev, &iommu_spec);
		of_node_put(iommu_spec.np);
		if (err)
			return ERR_PTR(err);

		/* dev->iommu_fwspec might have changed */
		fwspec = dev_iommu_fwspec_get(dev);
		idx++;
	}

	data = dev_iommu_priv_get(dev);

	/* Link the consumer device with the smi-larb device (supplier) */
	larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
	if (larbid >= MT2701_LARB_NR_MAX)
		return ERR_PTR(-EINVAL);

	for (idx = 1; idx < fwspec->num_ids; idx++) {
		larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]);
		if (larbid != larbidx) {
			dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
				larbid, larbidx);
			return ERR_PTR(-EINVAL);
		}
	}

	larbdev = data->larb_imu[larbid].dev;
	if (!larbdev)
		return ERR_PTR(-EINVAL);

	link = device_link_add(dev, larbdev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
	if (!link)
		dev_err(dev, "Unable to link %s\n", dev_name(larbdev));

	return &data->iommu;
}

static void mtk_iommu_v1_probe_finalize(struct device *dev)
{
	struct dma_iommu_mapping *mtk_mapping;
	struct mtk_iommu_v1_data *data;
	int err;

	data        = dev_iommu_priv_get(dev);
	mtk_mapping = data->mapping;

	err = arm_iommu_attach_device(dev, mtk_mapping);
	if (err)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void mtk_iommu_v1_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_v1_data *data;
	struct device *larbdev;
	unsigned int larbid;

	data = dev_iommu_priv_get(dev);
	larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
	larbdev = data->larb_imu[larbid].dev;
	device_link_remove(dev, larbdev);
}

static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TABLE_WALK_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_PFH_DMA_FIFO_OVERFLOW |
		F_INT_MISS_DMA_FIFO_OVERFLOW;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	/* Protect memory: HW will write here on a translation fault */
	writel_relaxed(data->protect_base,
			data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_v1_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct iommu_ops mtk_iommu_v1_ops = {
	.identity_domain = &mtk_iommu_v1_identity_domain,
	.domain_alloc_paging = mtk_iommu_v1_domain_alloc_paging,
	.probe_device	= mtk_iommu_v1_probe_device,
	.probe_finalize = mtk_iommu_v1_probe_finalize,
	.release_device	= mtk_iommu_v1_release_device,
	.device_group	= generic_device_group,
	.pgsize_bitmap	= MT2701_IOMMU_PAGE_SIZE,
	.owner          = THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= mtk_iommu_v1_attach_device,
		.map_pages	= mtk_iommu_v1_map,
		.unmap_pages	= mtk_iommu_v1_unmap,
		.iova_to_phys	= mtk_iommu_v1_iova_to_phys,
		.free		= mtk_iommu_v1_domain_free,
	}
};
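
/*
 * Editor's note (illustrative): compared with the v5.9 ops table, the
 * per-domain callbacks now live in default_domain_ops, map/unmap take
 * (pgsize, pgcount) pairs, and pgsize_bitmap is exactly
 * MT2701_IOMMU_PAGE_SIZE rather than ~0UL << MT2701_IOMMU_PAGE_SHIFT,
 * i.e. the core batches requests as counts of single 4K pages instead
 * of being offered larger power-of-two sizes.
 */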

static const struct of_device_id mtk_iommu_v1_of_ids[] = {
	{ .compatible = "mediatek,mt2701-m4u", },
	{}
};

static const struct component_master_ops mtk_iommu_v1_com_ops = {
	.bind		= mtk_iommu_v1_bind,
	.unbind		= mtk_iommu_v1_unbind,
};

static int mtk_iommu_v1_probe(struct platform_device *pdev)
{
	struct device			*dev = &pdev->dev;
	struct mtk_iommu_v1_data	*data;
	struct resource			*res;
	struct component_match		*match = NULL;
	void				*protect;
	int				larb_nr, ret, i;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->dev = dev;

	/* Protect memory. HW will access it on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
			GFP_KERNEL | GFP_DMA);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
			continue;
		}

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev) {
			of_node_put(larbnode);
			return -ENODEV;
		}
		if (!plarbdev->dev.driver) {
			of_node_put(larbnode);
			return -EPROBE_DEFER;
		}
		data->larb_imu[i].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, component_release_of,
					    component_compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_v1_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(&pdev->dev));
	if (ret)
		goto out_clk_unprepare;

	ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
	if (ret)
		goto out_sysfs_remove;

	ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match);
	if (ret)
		goto out_dev_unreg;
	return ret;

out_dev_unreg:
	iommu_device_unregister(&data->iommu);
out_sysfs_remove:
	iommu_device_sysfs_remove(&data->iommu);
out_clk_unprepare:
	clk_disable_unprepare(data->bclk);
	return ret;
}

static void mtk_iommu_v1_remove(struct platform_device *pdev)
{
	struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
}

static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL);
	return 0;
}

static int __maybe_unused mtk_iommu_v1_resume(struct device *dev)
{
	struct mtk_iommu_v1_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_v1_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL);
	writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_v1_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_v1_suspend, mtk_iommu_v1_resume)
};

static struct platform_driver mtk_iommu_v1_driver = {
	.probe	= mtk_iommu_v1_probe,
	.remove_new = mtk_iommu_v1_remove,
	.driver	= {
		.name = "mtk-iommu-v1",
		.of_match_table = mtk_iommu_v1_of_ids,
		.pm = &mtk_iommu_v1_pm_ops,
	}
};
module_platform_driver(mtk_iommu_v1_driver);

MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations");
MODULE_LICENSE("GPL v2");