/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Honghui Zhang <honghui.zhang@mediatek.com>
 *
 * Based on drivers/iommu/mtk_iommu.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/bootmem.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <asm/dma-iommu.h>
#include <linux/module.h>
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <soc/mediatek/smi.h>
#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR			0x000

#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define F_MMU_FAULT_VA_MSK			0xfffff000
#define MTK_PROTECT_PA_ALIGN			128

#define REG_MMU_CTRL_REG			0x210
#define F_MMU_CTRL_COHERENT_EN			BIT(8)
#define REG_MMU_IVRP_PADDR			0x214
#define REG_MMU_INT_CONTROL			0x220
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TABLE_WALK_FAULT			BIT(4)
#define F_INT_TLB_MISS_FAULT			BIT(5)
#define F_INT_PFH_DMA_FIFO_OVERFLOW		BIT(6)
#define F_INT_MISS_DMA_FIFO_OVERFLOW		BIT(7)

#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_FAULT_ST			0x224
#define REG_MMU_FAULT_VA			0x228
#define REG_MMU_INVLD_PA			0x22C
#define REG_MMU_INT_ID				0x388
#define REG_MMU_INVALIDATE			0x5c0
#define REG_MMU_INVLD_START_A			0x5c4
#define REG_MMU_INVLD_END_A			0x5c8

#define REG_MMU_INV_SEL				0x5d8
#define REG_MMU_STANDARD_AXI_MODE		0x5e8

#define REG_MMU_DCM				0x5f0
#define F_MMU_DCM_ON				BIT(1)
#define REG_MMU_CPE_DONE			0x60c
#define F_DESC_VALID				0x2
#define F_DESC_NONSEC				BIT(3)
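/*
 * Decode which larb and port a translation fault came from, based on
 * the raw fault ID read back from REG_MMU_INT_ID: on mt2701 the larb
 * index is stored inverted (6 - x) in bits [15:13] and the port index
 * in bits [11:8].
 */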
#define MT2701_M4U_TF_LARB(TF)			(6 - (((TF) >> 13) & 0x7))
#define MT2701_M4U_TF_PORT(TF)			(((TF) >> 8) & 0xF)
/* MTK generation one iommu HW only supports 4K size mappings */
#define MT2701_IOMMU_PAGE_SHIFT			12
#define MT2701_IOMMU_PAGE_SIZE			(1UL << MT2701_IOMMU_PAGE_SHIFT)

/*
 * The MTK m4u supports a 4GB iova address space and only 4K page
 * mappings, so the pagetable size is exactly 4M:
 * (4GB / 4KB) entries * 4 bytes per entry = 1M * 4B = 4MB.
 */
#define M2701_IOMMU_PGT_SIZE			SZ_4M

struct mtk_iommu_domain {
	spinlock_t			pgtlock; /* lock for page table */
	struct iommu_domain		domain;
	u32				*pgt_va;
	dma_addr_t			pgt_pa;
	struct mtk_iommu_data		*data;
};

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

static const int mt2701_m4u_in_larb[] = {
	LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
	LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
};

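/*
 * Translate a global m4u port id into its larb index and the port
 * index within that larb: each larb owns the contiguous id range that
 * starts at its entry in mt2701_m4u_in_larb[].
 */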
static inline int mt2701_m4u_to_larb(int id)
{
	int i;

	for (i = ARRAY_SIZE(mt2701_m4u_in_larb) - 1; i >= 0; i--)
		if (id >= mt2701_m4u_in_larb[i])
			return i;

	return 0;
}

static inline int mt2701_m4u_to_port(int id)
{
	int larb = mt2701_m4u_to_larb(id);

	return id - mt2701_m4u_in_larb[larb];
}

static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the tlb flush is done before returning */
}

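/*
 * Invalidate the TLB entries for [iova, iova + size): program the
 * start/end registers, kick off a range invalidate and poll
 * REG_MMU_CPE_DONE for completion. If the hardware never signals
 * completion, fall back to a full TLB flush.
 */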
static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data,
				unsigned long iova, size_t size)
{
	int ret;
	u32 tmp;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
		data->base + REG_MMU_INV_SEL);
	writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_START_A);
	writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
		data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);

	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
				tmp, tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_tlb_flush_all(data);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
}

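/*
 * Translation fault interrupt: decode the faulting iova/pa and the
 * larb/port the access came from, report the fault upstream via
 * report_iommu_fault(), then clear the interrupt and flush the whole
 * TLB.
 */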
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;

	/* Read error information from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);

	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = MT2701_M4U_TF_LARB(regval);
	fault_port = MT2701_M4U_TF_PORT(regval);

	/*
	 * The MTK v1 iommu HW cannot determine whether a fault was caused
	 * by a read or a write access, so report every fault as a read
	 * fault.
	 */
	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			IOMMU_FAULT_READ))
		dev_err_ratelimited(data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d\n",
			int_state, fault_iova, fault_pa,
			fault_larb, fault_port);

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

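/*
 * Enable or disable iommu translation for every larb port that this
 * client device owns, by updating the per-larb MMU enable bitmask
 * that is shared with the mtk-smi larb driver.
 */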
static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
		portid = mt2701_m4u_to_port(fwspec->ids[i]);
		larb_mmu = &data->smi_imu.larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

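/*
 * Allocate the single 4M flat pagetable for the m4u domain and point
 * the hardware at it. This runs once, when the first client device is
 * attached.
 */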
static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
{
	struct mtk_iommu_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->pgt_va = dma_zalloc_coherent(data->dev,
				M2701_IOMMU_PGT_SIZE,
				&dom->pgt_pa, GFP_KERNEL);
	if (!dom->pgt_va)
		return -ENOMEM;

	writel(dom->pgt_pa, data->base + REG_MMU_PT_BASE_ADDR);

	dom->data = data;

	return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	return &dom->domain;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = dom->data;

	dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
			dom->pgt_va, dom->pgt_pa);
	kfree(dom);
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
	int ret;

	if (!data)
		return -ENODEV;

	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;

	if (!data)
		return;

	mtk_iommu_config(data, dev, false);
}

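/*
 * Map 'size' bytes (a multiple of the 4K page size) by filling in one
 * 32-bit descriptor per page: the physical address plus the VALID and
 * NONSEC bits. If any descriptor in the range is already in use, roll
 * back whatever was written and fail with -EEXIST.
 */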
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
	unsigned long flags;
	unsigned int i;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	u32 pabase = (u32)paddr;
	int map_size = 0;

	spin_lock_irqsave(&dom->pgtlock, flags);
	for (i = 0; i < page_num; i++) {
		if (pgt_base_iova[i]) {
			memset(pgt_base_iova, 0, i * sizeof(u32));
			break;
		}
		pgt_base_iova[i] = pabase | F_DESC_VALID | F_DESC_NONSEC;
		pabase += MT2701_IOMMU_PAGE_SIZE;
		map_size += MT2701_IOMMU_PAGE_SIZE;
	}

	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_tlb_flush_range(dom->data, iova, size);

	return map_size == size ? 0 : -EEXIST;
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT);
	unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;

	spin_lock_irqsave(&dom->pgtlock, flags);
	memset(pgt_base_iova, 0, page_num * sizeof(u32));
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	mtk_iommu_tlb_flush_range(dom->data, iova, size);

	return size;
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
	pa &= ~(MT2701_IOMMU_PAGE_SIZE - 1);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}

static struct iommu_ops mtk_iommu_ops;

/*
 * MTK generation one iommu HW supports only one iommu domain; all the
 * clients share the same iova address space.
 */
static int mtk_iommu_create_mapping(struct device *dev,
				    struct of_phandle_args *args)
{
	struct mtk_iommu_data *data;
	struct platform_device *m4updev;
	struct dma_iommu_mapping *mtk_mapping;
	struct device *m4udev;
	int ret;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!dev->iommu_fwspec) {
		ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops);
		if (ret)
			return ret;
	} else if (dev->iommu_fwspec->ops != &mtk_iommu_ops) {
		return -EINVAL;
	}

	if (!dev->iommu_fwspec->iommu_priv) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
	}

	ret = iommu_fwspec_add_ids(dev, args->args, 1);
	if (ret)
		return ret;

	data = dev->iommu_fwspec->iommu_priv;
	m4udev = data->dev;
	mtk_mapping = m4udev->archdata.iommu;
	if (!mtk_mapping) {
		/* MTK iommu supports a 4GB iova address space. */
		mtk_mapping = arm_iommu_create_mapping(&platform_bus_type,
						0, 1ULL << 32);
		if (IS_ERR(mtk_mapping))
			return PTR_ERR(mtk_mapping);

		m4udev->archdata.iommu = mtk_mapping;
	}

	ret = arm_iommu_attach_device(dev, mtk_mapping);
	if (ret)
		goto err_release_mapping;

	return 0;

err_release_mapping:
	arm_iommu_release_mapping(mtk_mapping);
	m4udev->archdata.iommu = NULL;
	return ret;
}

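/*
 * Walk the client's "iommus" phandles by hand, hooking each entry up
 * through mtk_iommu_create_mapping(), then join the device to the
 * common m4u iommu group.
 */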
static int mtk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct of_phandle_args iommu_spec;
	struct of_phandle_iterator it;
	int err;

	of_for_each_phandle(&it, err, dev->of_node, "iommus",
			"#iommu-cells", 0) {
		int count = of_phandle_iterator_args(&it, iommu_spec.args,
					MAX_PHANDLE_ARGS);
		iommu_spec.np = of_node_get(it.node);
		iommu_spec.args_count = count;

		mtk_iommu_create_mapping(dev, &iommu_spec);
		of_node_put(iommu_spec.np);
	}

	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
		return -ENODEV; /* Not an iommu client device */

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void mtk_iommu_remove_device(struct device *dev)
{
	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
		return;

	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;

	if (!data)
		return ERR_PTR(-ENODEV);

	/* All the client devices are in the same m4u iommu-group */
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	} else {
		iommu_group_ref_get(data->m4u_group);
	}
	return data->m4u_group;
}

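/*
 * One-time hardware bring-up: enable the bus clock, program the
 * control and interrupt-enable registers, set the protect-buffer
 * address written on translation faults, turn on DCM and request the
 * fault interrupt.
 */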
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_CTRL_COHERENT_EN | F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TABLE_WALK_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_PFH_DMA_FIFO_OVERFLOW |
		F_INT_MISS_DMA_FIFO_OVERFLOW;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL);

	/*
	 * Protect memory: the HW will write here when a translation
	 * fault occurs.
	 */
	writel_relaxed(data->protect_base,
			data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed to request IRQ %d\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.add_device	= mtk_iommu_add_device,
	.remove_device	= mtk_iommu_remove_device,
	.device_group	= mtk_iommu_device_group,
	.pgsize_bitmap	= ~0UL << MT2701_IOMMU_PAGE_SHIFT,
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt2701-m4u", },
	{}
};

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

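/*
 * Probe: carve out the fault protect buffer, map the m4u registers,
 * look up the bus clock and interrupt, then walk the "mediatek,larbs"
 * phandles to build the component match list before initializing the
 * hardware and registering this driver on the platform bus.
 */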
static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data		*data;
	struct device			*dev = &pdev->dev;
	struct resource			*res;
	struct component_match		*match = NULL;
	struct of_phandle_args		larb_spec;
	struct of_phandle_iterator	it;
	void				*protect;
	int				larb_nr, ret, err;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->dev = dev;

	/* Protect memory: the HW will access here on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
			GFP_KERNEL | GFP_DMA);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = 0;
	of_for_each_phandle(&it, err, dev->of_node,
			"mediatek,larbs", NULL, 0) {
		struct platform_device *plarbdev;
		int count = of_phandle_iterator_args(&it, larb_spec.args,
					MAX_PHANDLE_ARGS);

		if (count)
			continue;

		larb_spec.np = of_node_get(it.node);
		if (!of_device_is_available(larb_spec.np))
			continue;

		plarbdev = of_find_device_by_node(larb_spec.np);
		if (!plarbdev) {
			plarbdev = of_platform_device_create(
						larb_spec.np, NULL,
						platform_bus_type.dev_root);
			if (!plarbdev) {
				of_node_put(larb_spec.np);
				return -EPROBE_DEFER;
			}
		}

		data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev;
		component_match_add_release(dev, &match, release_of,
					    compare_of, larb_spec.np);
		larb_nr++;
	}

	data->smi_imu.larb_nr = larb_nr;

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

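/*
 * The m4u register state is lost across system sleep: save the
 * relevant registers on suspend and restore them, together with the
 * pagetable base and protect-buffer address, on resume.
 */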
static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL);
	return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR);
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL);
	writel_relaxed(data->protect_base, base + REG_MMU_IVRP_PADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
		.name = "mtk-iommu",
		.of_match_table = mtk_iommu_of_ids,
		.pm = &mtk_iommu_pm_ops,
	}
};

static int __init m4u_init(void)
{
	return platform_driver_register(&mtk_iommu_driver);
}

static void __exit m4u_exit(void)
{
	platform_driver_unregister(&mtk_iommu_driver);
}

subsys_initcall(m4u_init);
module_exit(m4u_exit);

MODULE_DESCRIPTION("IOMMU API for MTK architected m4u v1 implementations");
MODULE_AUTHOR("Honghui Zhang <honghui.zhang@mediatek.com>");
MODULE_LICENSE("GPL v2");