Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2015-2016 MediaTek Inc.
4 * Author: Yong Wu <yong.wu@mediatek.com>
5 */
6#include <linux/memblock.h>
7#include <linux/bug.h>
8#include <linux/clk.h>
9#include <linux/component.h>
10#include <linux/device.h>
11#include <linux/dma-iommu.h>
12#include <linux/err.h>
13#include <linux/interrupt.h>
14#include <linux/io.h>
15#include <linux/iommu.h>
16#include <linux/iopoll.h>
17#include <linux/list.h>
18#include <linux/of_address.h>
19#include <linux/of_iommu.h>
20#include <linux/of_irq.h>
21#include <linux/of_platform.h>
22#include <linux/platform_device.h>
23#include <linux/slab.h>
24#include <linux/spinlock.h>
25#include <asm/barrier.h>
26#include <soc/mediatek/smi.h>
27
28#include "mtk_iommu.h"
29
30#define REG_MMU_PT_BASE_ADDR 0x000
31#define MMU_PT_ADDR_MASK GENMASK(31, 7)
32
33#define REG_MMU_INVALIDATE 0x020
34#define F_ALL_INVLD 0x2
35#define F_MMU_INV_RANGE 0x1
36
37#define REG_MMU_INVLD_START_A 0x024
38#define REG_MMU_INVLD_END_A 0x028
39
40#define REG_MMU_INV_SEL_GEN2 0x02c
41#define REG_MMU_INV_SEL_GEN1 0x038
42#define F_INVLD_EN0 BIT(0)
43#define F_INVLD_EN1 BIT(1)
44
45#define REG_MMU_MISC_CTRL 0x048
46#define F_MMU_IN_ORDER_WR_EN_MASK (BIT(1) | BIT(17))
47#define F_MMU_STANDARD_AXI_MODE_MASK (BIT(3) | BIT(19))
48
49#define REG_MMU_DCM_DIS 0x050
50#define REG_MMU_WR_LEN_CTRL 0x054
51#define F_MMU_WR_THROT_DIS_MASK (BIT(5) | BIT(21))
52
53#define REG_MMU_CTRL_REG 0x110
54#define F_MMU_TF_PROT_TO_PROGRAM_ADDR (2 << 4)
55#define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4)
56#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173 (2 << 5)
57
58#define REG_MMU_IVRP_PADDR 0x114
59
60#define REG_MMU_VLD_PA_RNG 0x118
61#define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA))
62
63#define REG_MMU_INT_CONTROL0 0x120
64#define F_L2_MULIT_HIT_EN BIT(0)
65#define F_TABLE_WALK_FAULT_INT_EN BIT(1)
66#define F_PREETCH_FIFO_OVERFLOW_INT_EN BIT(2)
67#define F_MISS_FIFO_OVERFLOW_INT_EN BIT(3)
68#define F_PREFETCH_FIFO_ERR_INT_EN BIT(5)
69#define F_MISS_FIFO_ERR_INT_EN BIT(6)
70#define F_INT_CLR_BIT BIT(12)
71
72#define REG_MMU_INT_MAIN_CONTROL 0x124
73 /* mmu0 | mmu1 */
74#define F_INT_TRANSLATION_FAULT (BIT(0) | BIT(7))
75#define F_INT_MAIN_MULTI_HIT_FAULT (BIT(1) | BIT(8))
76#define F_INT_INVALID_PA_FAULT (BIT(2) | BIT(9))
77#define F_INT_ENTRY_REPLACEMENT_FAULT (BIT(3) | BIT(10))
78#define F_INT_TLB_MISS_FAULT (BIT(4) | BIT(11))
79#define F_INT_MISS_TRANSACTION_FIFO_FAULT (BIT(5) | BIT(12))
80#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT (BIT(6) | BIT(13))
81
82#define REG_MMU_CPE_DONE 0x12C
83
84#define REG_MMU_FAULT_ST1 0x134
85#define F_REG_MMU0_FAULT_MASK GENMASK(6, 0)
86#define F_REG_MMU1_FAULT_MASK GENMASK(13, 7)
87
88#define REG_MMU0_FAULT_VA 0x13c
89#define F_MMU_FAULT_VA_WRITE_BIT BIT(1)
90#define F_MMU_FAULT_VA_LAYER_BIT BIT(0)
91
92#define REG_MMU0_INVLD_PA 0x140
93#define REG_MMU1_FAULT_VA 0x144
94#define REG_MMU1_INVLD_PA 0x148
95#define REG_MMU0_INT_ID 0x150
96#define REG_MMU1_INT_ID 0x154
97#define F_MMU_INT_ID_COMM_ID(a) (((a) >> 9) & 0x7)
98#define F_MMU_INT_ID_SUB_COMM_ID(a) (((a) >> 7) & 0x3)
99#define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
100#define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)
101
102#define MTK_PROTECT_PA_ALIGN 256
103
104/*
105 * Get the local arbiter ID and the portid within the larb arbiter
106 * from mtk_m4u_id which is defined by MTK_M4U_ID.
107 */
108#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0xf)
109#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
110
111#define HAS_4GB_MODE BIT(0)
112/* HW will use the EMI clock if there isn't the "bclk". */
113#define HAS_BCLK BIT(1)
114#define HAS_VLD_PA_RNG BIT(2)
115#define RESET_AXI BIT(3)
116#define OUT_ORDER_WR_EN BIT(4)
117#define HAS_SUB_COMM BIT(5)
118#define WR_THROT_EN BIT(6)
119
120#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
121 ((((pdata)->flags) & (_x)) == (_x))
122
123struct mtk_iommu_domain {
124 struct io_pgtable_cfg cfg;
125 struct io_pgtable_ops *iop;
126
127 struct iommu_domain domain;
128};
129
130static const struct iommu_ops mtk_iommu_ops;
131
132/*
133 * In M4U 4GB mode, the physical address is remapped as below:
134 *
135 * CPU Physical address:
136 * ====================
137 *
138 * 0 1G 2G 3G 4G 5G
139 * |---A---|---B---|---C---|---D---|---E---|
140 * +--I/O--+------------Memory-------------+
141 *
142 * IOMMU output physical address:
143 * =============================
144 *
145 * 4G 5G 6G 7G 8G
146 * |---E---|---B---|---C---|---D---|
147 * +------------Memory-------------+
148 *
149 * The Region 'A'(I/O) can NOT be mapped by M4U; For Region 'B'/'C'/'D', the
150 * bit32 of the CPU physical address always is needed to set, and for Region
151 * 'E', the CPU physical address keep as is.
152 * Additionally, The iommu consumers always use the CPU phyiscal address.
153 */
154#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x140000000UL
155
156static LIST_HEAD(m4ulist); /* List all the M4U HWs */
157
158#define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list)
159
160/*
161 * There may be 1 or 2 M4U HWs, But we always expect they are in the same domain
162 * for the performance.
163 *
164 * Here always return the mtk_iommu_data of the first probed M4U where the
165 * iommu domain information is recorded.
166 */
167static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
168{
169 struct mtk_iommu_data *data;
170
171 for_each_m4u(data)
172 return data;
173
174 return NULL;
175}
176
177static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
178{
179 return container_of(dom, struct mtk_iommu_domain, domain);
180}
181
182static void mtk_iommu_tlb_flush_all(void *cookie)
183{
184 struct mtk_iommu_data *data = cookie;
185
186 for_each_m4u(data) {
187 writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
188 data->base + data->plat_data->inv_sel_reg);
189 writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
190 wmb(); /* Make sure the tlb flush all done */
191 }
192}
193
194static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
195 size_t granule, void *cookie)
196{
197 struct mtk_iommu_data *data = cookie;
198 unsigned long flags;
199 int ret;
200 u32 tmp;
201
202 for_each_m4u(data) {
203 spin_lock_irqsave(&data->tlb_lock, flags);
204 writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
205 data->base + data->plat_data->inv_sel_reg);
206
207 writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
208 writel_relaxed(iova + size - 1,
209 data->base + REG_MMU_INVLD_END_A);
210 writel_relaxed(F_MMU_INV_RANGE,
211 data->base + REG_MMU_INVALIDATE);
212
213 /* tlb sync */
214 ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
215 tmp, tmp != 0, 10, 1000);
216 if (ret) {
217 dev_warn(data->dev,
218 "Partial TLB flush timed out, falling back to full flush\n");
219 mtk_iommu_tlb_flush_all(cookie);
220 }
221 /* Clear the CPE status */
222 writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
223 spin_unlock_irqrestore(&data->tlb_lock, flags);
224 }
225}
226
227static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
228 unsigned long iova, size_t granule,
229 void *cookie)
230{
231 struct mtk_iommu_data *data = cookie;
232 struct iommu_domain *domain = &data->m4u_dom->domain;
233
234 iommu_iotlb_gather_add_page(domain, gather, iova, granule);
235}
236
237static const struct iommu_flush_ops mtk_iommu_flush_ops = {
238 .tlb_flush_all = mtk_iommu_tlb_flush_all,
239 .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
240 .tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
241 .tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
242};
243
244static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
245{
246 struct mtk_iommu_data *data = dev_id;
247 struct mtk_iommu_domain *dom = data->m4u_dom;
248 u32 int_state, regval, fault_iova, fault_pa;
249 unsigned int fault_larb, fault_port, sub_comm = 0;
250 bool layer, write;
251
252 /* Read error info from registers */
253 int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
254 if (int_state & F_REG_MMU0_FAULT_MASK) {
255 regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
256 fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
257 fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
258 } else {
259 regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
260 fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
261 fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
262 }
263 layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
264 write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
265 fault_port = F_MMU_INT_ID_PORT_ID(regval);
266 if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
267 fault_larb = F_MMU_INT_ID_COMM_ID(regval);
268 sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
269 } else {
270 fault_larb = F_MMU_INT_ID_LARB_ID(regval);
271 }
272 fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
273
274 if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
275 write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
276 dev_err_ratelimited(
277 data->dev,
278 "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
279 int_state, fault_iova, fault_pa, fault_larb, fault_port,
280 layer, write ? "write" : "read");
281 }
282
283 /* Interrupt clear */
284 regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
285 regval |= F_INT_CLR_BIT;
286 writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
287
288 mtk_iommu_tlb_flush_all(data);
289
290 return IRQ_HANDLED;
291}
292
293static void mtk_iommu_config(struct mtk_iommu_data *data,
294 struct device *dev, bool enable)
295{
296 struct mtk_smi_larb_iommu *larb_mmu;
297 unsigned int larbid, portid;
298 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
299 int i;
300
301 for (i = 0; i < fwspec->num_ids; ++i) {
302 larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
303 portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
304 larb_mmu = &data->larb_imu[larbid];
305
306 dev_dbg(dev, "%s iommu port: %d\n",
307 enable ? "enable" : "disable", portid);
308
309 if (enable)
310 larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
311 else
312 larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
313 }
314}
315
316static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
317{
318 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
319
320 dom->cfg = (struct io_pgtable_cfg) {
321 .quirks = IO_PGTABLE_QUIRK_ARM_NS |
322 IO_PGTABLE_QUIRK_NO_PERMS |
323 IO_PGTABLE_QUIRK_TLBI_ON_MAP |
324 IO_PGTABLE_QUIRK_ARM_MTK_EXT,
325 .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
326 .ias = 32,
327 .oas = 34,
328 .tlb = &mtk_iommu_flush_ops,
329 .iommu_dev = data->dev,
330 };
331
332 dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
333 if (!dom->iop) {
334 dev_err(data->dev, "Failed to alloc io pgtable\n");
335 return -EINVAL;
336 }
337
338 /* Update our support page sizes bitmap */
339 dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
340 return 0;
341}
342
343static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
344{
345 struct mtk_iommu_domain *dom;
346
347 if (type != IOMMU_DOMAIN_DMA)
348 return NULL;
349
350 dom = kzalloc(sizeof(*dom), GFP_KERNEL);
351 if (!dom)
352 return NULL;
353
354 if (iommu_get_dma_cookie(&dom->domain))
355 goto free_dom;
356
357 if (mtk_iommu_domain_finalise(dom))
358 goto put_dma_cookie;
359
360 dom->domain.geometry.aperture_start = 0;
361 dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
362 dom->domain.geometry.force_aperture = true;
363
364 return &dom->domain;
365
366put_dma_cookie:
367 iommu_put_dma_cookie(&dom->domain);
368free_dom:
369 kfree(dom);
370 return NULL;
371}
372
373static void mtk_iommu_domain_free(struct iommu_domain *domain)
374{
375 struct mtk_iommu_domain *dom = to_mtk_domain(domain);
376
377 free_io_pgtable_ops(dom->iop);
378 iommu_put_dma_cookie(domain);
379 kfree(to_mtk_domain(domain));
380}
381
382static int mtk_iommu_attach_device(struct iommu_domain *domain,
383 struct device *dev)
384{
385 struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
386 struct mtk_iommu_domain *dom = to_mtk_domain(domain);
387
388 if (!data)
389 return -ENODEV;
390
391 /* Update the pgtable base address register of the M4U HW */
392 if (!data->m4u_dom) {
393 data->m4u_dom = dom;
394 writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
395 data->base + REG_MMU_PT_BASE_ADDR);
396 }
397
398 mtk_iommu_config(data, dev, true);
399 return 0;
400}
401
402static void mtk_iommu_detach_device(struct iommu_domain *domain,
403 struct device *dev)
404{
405 struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
406
407 if (!data)
408 return;
409
410 mtk_iommu_config(data, dev, false);
411}
412
413static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
414 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
415{
416 struct mtk_iommu_domain *dom = to_mtk_domain(domain);
417 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
418
419 /* The "4GB mode" M4U physically can not use the lower remap of Dram. */
420 if (data->enable_4GB)
421 paddr |= BIT_ULL(32);
422
423 /* Synchronize with the tlb_lock */
424 return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
425}
426
427static size_t mtk_iommu_unmap(struct iommu_domain *domain,
428 unsigned long iova, size_t size,
429 struct iommu_iotlb_gather *gather)
430{
431 struct mtk_iommu_domain *dom = to_mtk_domain(domain);
432
433 return dom->iop->unmap(dom->iop, iova, size, gather);
434}
435
436static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
437{
438 mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
439}
440
441static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
442 struct iommu_iotlb_gather *gather)
443{
444 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
445 size_t length = gather->end - gather->start;
446
447 if (gather->start == ULONG_MAX)
448 return;
449
450 mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
451 data);
452}
453
454static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
455 dma_addr_t iova)
456{
457 struct mtk_iommu_domain *dom = to_mtk_domain(domain);
458 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
459 phys_addr_t pa;
460
461 pa = dom->iop->iova_to_phys(dom->iop, iova);
462 if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
463 pa &= ~BIT_ULL(32);
464
465 return pa;
466}
467
468static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
469{
470 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
471 struct mtk_iommu_data *data;
472
473 if (!fwspec || fwspec->ops != &mtk_iommu_ops)
474 return ERR_PTR(-ENODEV); /* Not a iommu client device */
475
476 data = dev_iommu_priv_get(dev);
477
478 return &data->iommu;
479}
480
481static void mtk_iommu_release_device(struct device *dev)
482{
483 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
484
485 if (!fwspec || fwspec->ops != &mtk_iommu_ops)
486 return;
487
488 iommu_fwspec_free(dev);
489}
490
491static struct iommu_group *mtk_iommu_device_group(struct device *dev)
492{
493 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
494
495 if (!data)
496 return ERR_PTR(-ENODEV);
497
498 /* All the client devices are in the same m4u iommu-group */
499 if (!data->m4u_group) {
500 data->m4u_group = iommu_group_alloc();
501 if (IS_ERR(data->m4u_group))
502 dev_err(dev, "Failed to allocate M4U IOMMU group\n");
503 } else {
504 iommu_group_ref_get(data->m4u_group);
505 }
506 return data->m4u_group;
507}
508
509static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
510{
511 struct platform_device *m4updev;
512
513 if (args->args_count != 1) {
514 dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
515 args->args_count);
516 return -EINVAL;
517 }
518
519 if (!dev_iommu_priv_get(dev)) {
520 /* Get the m4u device */
521 m4updev = of_find_device_by_node(args->np);
522 if (WARN_ON(!m4updev))
523 return -EINVAL;
524
525 dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
526 }
527
528 return iommu_fwspec_add_ids(dev, args->args, 1);
529}
530
531static const struct iommu_ops mtk_iommu_ops = {
532 .domain_alloc = mtk_iommu_domain_alloc,
533 .domain_free = mtk_iommu_domain_free,
534 .attach_dev = mtk_iommu_attach_device,
535 .detach_dev = mtk_iommu_detach_device,
536 .map = mtk_iommu_map,
537 .unmap = mtk_iommu_unmap,
538 .flush_iotlb_all = mtk_iommu_flush_iotlb_all,
539 .iotlb_sync = mtk_iommu_iotlb_sync,
540 .iova_to_phys = mtk_iommu_iova_to_phys,
541 .probe_device = mtk_iommu_probe_device,
542 .release_device = mtk_iommu_release_device,
543 .device_group = mtk_iommu_device_group,
544 .of_xlate = mtk_iommu_of_xlate,
545 .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
546};
547
548static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
549{
550 u32 regval;
551 int ret;
552
553 ret = clk_prepare_enable(data->bclk);
554 if (ret) {
555 dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
556 return ret;
557 }
558
559 if (data->plat_data->m4u_plat == M4U_MT8173) {
560 regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
561 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
562 } else {
563 regval = readl_relaxed(data->base + REG_MMU_CTRL_REG);
564 regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
565 }
566 writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
567
568 regval = F_L2_MULIT_HIT_EN |
569 F_TABLE_WALK_FAULT_INT_EN |
570 F_PREETCH_FIFO_OVERFLOW_INT_EN |
571 F_MISS_FIFO_OVERFLOW_INT_EN |
572 F_PREFETCH_FIFO_ERR_INT_EN |
573 F_MISS_FIFO_ERR_INT_EN;
574 writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
575
576 regval = F_INT_TRANSLATION_FAULT |
577 F_INT_MAIN_MULTI_HIT_FAULT |
578 F_INT_INVALID_PA_FAULT |
579 F_INT_ENTRY_REPLACEMENT_FAULT |
580 F_INT_TLB_MISS_FAULT |
581 F_INT_MISS_TRANSACTION_FIFO_FAULT |
582 F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
583 writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
584
585 if (data->plat_data->m4u_plat == M4U_MT8173)
586 regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
587 else
588 regval = lower_32_bits(data->protect_base) |
589 upper_32_bits(data->protect_base);
590 writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
591
592 if (data->enable_4GB &&
593 MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
594 /*
595 * If 4GB mode is enabled, the validate PA range is from
596 * 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30].
597 */
598 regval = F_MMU_VLD_PA_RNG(7, 4);
599 writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
600 }
601 writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
602 if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
603 /* write command throttling mode */
604 regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
605 regval &= ~F_MMU_WR_THROT_DIS_MASK;
606 writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
607 }
608
609 if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
610 /* The register is called STANDARD_AXI_MODE in this case */
611 regval = 0;
612 } else {
613 regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
614 regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
615 if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
616 regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
617 }
618 writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);
619
620 if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
621 dev_name(data->dev), (void *)data)) {
622 writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
623 clk_disable_unprepare(data->bclk);
624 dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
625 return -ENODEV;
626 }
627
628 return 0;
629}
630
631static const struct component_master_ops mtk_iommu_com_ops = {
632 .bind = mtk_iommu_bind,
633 .unbind = mtk_iommu_unbind,
634};
635
636static int mtk_iommu_probe(struct platform_device *pdev)
637{
638 struct mtk_iommu_data *data;
639 struct device *dev = &pdev->dev;
640 struct resource *res;
641 resource_size_t ioaddr;
642 struct component_match *match = NULL;
643 void *protect;
644 int i, larb_nr, ret;
645
646 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
647 if (!data)
648 return -ENOMEM;
649 data->dev = dev;
650 data->plat_data = of_device_get_match_data(dev);
651
652 /* Protect memory. HW will access here while translation fault.*/
653 protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
654 if (!protect)
655 return -ENOMEM;
656 data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
657
658 /* Whether the current dram is over 4GB */
659 data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
660 if (!MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
661 data->enable_4GB = false;
662
663 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
664 data->base = devm_ioremap_resource(dev, res);
665 if (IS_ERR(data->base))
666 return PTR_ERR(data->base);
667 ioaddr = res->start;
668
669 data->irq = platform_get_irq(pdev, 0);
670 if (data->irq < 0)
671 return data->irq;
672
673 if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
674 data->bclk = devm_clk_get(dev, "bclk");
675 if (IS_ERR(data->bclk))
676 return PTR_ERR(data->bclk);
677 }
678
679 larb_nr = of_count_phandle_with_args(dev->of_node,
680 "mediatek,larbs", NULL);
681 if (larb_nr < 0)
682 return larb_nr;
683
684 for (i = 0; i < larb_nr; i++) {
685 struct device_node *larbnode;
686 struct platform_device *plarbdev;
687 u32 id;
688
689 larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
690 if (!larbnode)
691 return -EINVAL;
692
693 if (!of_device_is_available(larbnode)) {
694 of_node_put(larbnode);
695 continue;
696 }
697
698 ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
699 if (ret)/* The id is consecutive if there is no this property */
700 id = i;
701
702 plarbdev = of_find_device_by_node(larbnode);
703 if (!plarbdev) {
704 of_node_put(larbnode);
705 return -EPROBE_DEFER;
706 }
707 data->larb_imu[id].dev = &plarbdev->dev;
708
709 component_match_add_release(dev, &match, release_of,
710 compare_of, larbnode);
711 }
712
713 platform_set_drvdata(pdev, data);
714
715 ret = mtk_iommu_hw_init(data);
716 if (ret)
717 return ret;
718
719 ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
720 "mtk-iommu.%pa", &ioaddr);
721 if (ret)
722 return ret;
723
724 iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
725 iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
726
727 ret = iommu_device_register(&data->iommu);
728 if (ret)
729 return ret;
730
731 spin_lock_init(&data->tlb_lock);
732 list_add_tail(&data->list, &m4ulist);
733
734 if (!iommu_present(&platform_bus_type))
735 bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
736
737 return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
738}
739
740static int mtk_iommu_remove(struct platform_device *pdev)
741{
742 struct mtk_iommu_data *data = platform_get_drvdata(pdev);
743
744 iommu_device_sysfs_remove(&data->iommu);
745 iommu_device_unregister(&data->iommu);
746
747 if (iommu_present(&platform_bus_type))
748 bus_set_iommu(&platform_bus_type, NULL);
749
750 clk_disable_unprepare(data->bclk);
751 devm_free_irq(&pdev->dev, data->irq, data);
752 component_master_del(&pdev->dev, &mtk_iommu_com_ops);
753 return 0;
754}
755
756static int __maybe_unused mtk_iommu_suspend(struct device *dev)
757{
758 struct mtk_iommu_data *data = dev_get_drvdata(dev);
759 struct mtk_iommu_suspend_reg *reg = &data->reg;
760 void __iomem *base = data->base;
761
762 reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
763 reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
764 reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
765 reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
766 reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
767 reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
768 reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
769 reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
770 clk_disable_unprepare(data->bclk);
771 return 0;
772}
773
774static int __maybe_unused mtk_iommu_resume(struct device *dev)
775{
776 struct mtk_iommu_data *data = dev_get_drvdata(dev);
777 struct mtk_iommu_suspend_reg *reg = &data->reg;
778 struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
779 void __iomem *base = data->base;
780 int ret;
781
782 ret = clk_prepare_enable(data->bclk);
783 if (ret) {
784 dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
785 return ret;
786 }
787 writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
788 writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
789 writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
790 writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
791 writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
792 writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
793 writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
794 writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
795 if (m4u_dom)
796 writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
797 base + REG_MMU_PT_BASE_ADDR);
798 return 0;
799}
800
801static const struct dev_pm_ops mtk_iommu_pm_ops = {
802 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
803};
804
805static const struct mtk_iommu_plat_data mt2712_data = {
806 .m4u_plat = M4U_MT2712,
807 .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
808 .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
809 .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
810};
811
812static const struct mtk_iommu_plat_data mt6779_data = {
813 .m4u_plat = M4U_MT6779,
814 .flags = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN,
815 .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
816 .larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
817};
818
819static const struct mtk_iommu_plat_data mt8173_data = {
820 .m4u_plat = M4U_MT8173,
821 .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI,
822 .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
823 .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
824};
825
826static const struct mtk_iommu_plat_data mt8183_data = {
827 .m4u_plat = M4U_MT8183,
828 .flags = RESET_AXI,
829 .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
830 .larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
831};
832
833static const struct of_device_id mtk_iommu_of_ids[] = {
834 { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
835 { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
836 { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
837 { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
838 {}
839};
840
841static struct platform_driver mtk_iommu_driver = {
842 .probe = mtk_iommu_probe,
843 .remove = mtk_iommu_remove,
844 .driver = {
845 .name = "mtk-iommu",
846 .of_match_table = of_match_ptr(mtk_iommu_of_ids),
847 .pm = &mtk_iommu_pm_ops,
848 }
849};
850
851static int __init mtk_iommu_init(void)
852{
853 int ret;
854
855 ret = platform_driver_register(&mtk_iommu_driver);
856 if (ret != 0)
857 pr_err("Failed to register MTK IOMMU driver\n");
858
859 return ret;
860}
861
862subsys_initcall(mtk_iommu_init)
1/*
2 * Copyright (c) 2015-2016 MediaTek Inc.
3 * Author: Yong Wu <yong.wu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#include <linux/bootmem.h>
15#include <linux/bug.h>
16#include <linux/clk.h>
17#include <linux/component.h>
18#include <linux/device.h>
19#include <linux/dma-iommu.h>
20#include <linux/err.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23#include <linux/iommu.h>
24#include <linux/iopoll.h>
25#include <linux/list.h>
26#include <linux/of_address.h>
27#include <linux/of_iommu.h>
28#include <linux/of_irq.h>
29#include <linux/of_platform.h>
30#include <linux/platform_device.h>
31#include <linux/slab.h>
32#include <linux/spinlock.h>
33#include <asm/barrier.h>
34#include <soc/mediatek/smi.h>
35
36#include "mtk_iommu.h"
37
38#define REG_MMU_PT_BASE_ADDR 0x000
39
40#define REG_MMU_INVALIDATE 0x020
41#define F_ALL_INVLD 0x2
42#define F_MMU_INV_RANGE 0x1
43
44#define REG_MMU_INVLD_START_A 0x024
45#define REG_MMU_INVLD_END_A 0x028
46
47#define REG_MMU_INV_SEL 0x038
48#define F_INVLD_EN0 BIT(0)
49#define F_INVLD_EN1 BIT(1)
50
51#define REG_MMU_STANDARD_AXI_MODE 0x048
52#define REG_MMU_DCM_DIS 0x050
53
54#define REG_MMU_CTRL_REG 0x110
55#define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4)
56#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \
57 ((data)->m4u_plat == M4U_MT2712 ? 4 : 5)
58/* It's named by F_MMU_TF_PROT_SEL in mt2712. */
59#define F_MMU_TF_PROTECT_SEL(prot, data) \
60 (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))
61
62#define REG_MMU_IVRP_PADDR 0x114
63
64#define REG_MMU_VLD_PA_RNG 0x118
65#define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA))
66
67#define REG_MMU_INT_CONTROL0 0x120
68#define F_L2_MULIT_HIT_EN BIT(0)
69#define F_TABLE_WALK_FAULT_INT_EN BIT(1)
70#define F_PREETCH_FIFO_OVERFLOW_INT_EN BIT(2)
71#define F_MISS_FIFO_OVERFLOW_INT_EN BIT(3)
72#define F_PREFETCH_FIFO_ERR_INT_EN BIT(5)
73#define F_MISS_FIFO_ERR_INT_EN BIT(6)
74#define F_INT_CLR_BIT BIT(12)
75
76#define REG_MMU_INT_MAIN_CONTROL 0x124
77#define F_INT_TRANSLATION_FAULT BIT(0)
78#define F_INT_MAIN_MULTI_HIT_FAULT BIT(1)
79#define F_INT_INVALID_PA_FAULT BIT(2)
80#define F_INT_ENTRY_REPLACEMENT_FAULT BIT(3)
81#define F_INT_TLB_MISS_FAULT BIT(4)
82#define F_INT_MISS_TRANSACTION_FIFO_FAULT BIT(5)
83#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT BIT(6)
84
85#define REG_MMU_CPE_DONE 0x12C
86
87#define REG_MMU_FAULT_ST1 0x134
88
89#define REG_MMU_FAULT_VA 0x13c
90#define F_MMU_FAULT_VA_WRITE_BIT BIT(1)
91#define F_MMU_FAULT_VA_LAYER_BIT BIT(0)
92
93#define REG_MMU_INVLD_PA 0x140
94#define REG_MMU_INT_ID 0x150
95#define F_MMU0_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
96#define F_MMU0_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)
97
98#define MTK_PROTECT_PA_ALIGN 128
99
100/*
101 * Get the local arbiter ID and the portid within the larb arbiter
102 * from mtk_m4u_id which is defined by MTK_M4U_ID.
103 */
104#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0xf)
105#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
106
107struct mtk_iommu_domain {
108 spinlock_t pgtlock; /* lock for page table */
109
110 struct io_pgtable_cfg cfg;
111 struct io_pgtable_ops *iop;
112
113 struct iommu_domain domain;
114};
115
116static struct iommu_ops mtk_iommu_ops;
117
118static LIST_HEAD(m4ulist); /* List all the M4U HWs */
119
120#define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list)
121
122/*
123 * There may be 1 or 2 M4U HWs, But we always expect they are in the same domain
124 * for the performance.
125 *
126 * Here always return the mtk_iommu_data of the first probed M4U where the
127 * iommu domain information is recorded.
128 */
129static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
130{
131 struct mtk_iommu_data *data;
132
133 for_each_m4u(data)
134 return data;
135
136 return NULL;
137}
138
139static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
140{
141 return container_of(dom, struct mtk_iommu_domain, domain);
142}
143
144static void mtk_iommu_tlb_flush_all(void *cookie)
145{
146 struct mtk_iommu_data *data = cookie;
147
148 for_each_m4u(data) {
149 writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
150 data->base + REG_MMU_INV_SEL);
151 writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
152 wmb(); /* Make sure the tlb flush all done */
153 }
154}
155
156static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
157 size_t granule, bool leaf,
158 void *cookie)
159{
160 struct mtk_iommu_data *data = cookie;
161
162 for_each_m4u(data) {
163 writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
164 data->base + REG_MMU_INV_SEL);
165
166 writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
167 writel_relaxed(iova + size - 1,
168 data->base + REG_MMU_INVLD_END_A);
169 writel_relaxed(F_MMU_INV_RANGE,
170 data->base + REG_MMU_INVALIDATE);
171 data->tlb_flush_active = true;
172 }
173}
174
175static void mtk_iommu_tlb_sync(void *cookie)
176{
177 struct mtk_iommu_data *data = cookie;
178 int ret;
179 u32 tmp;
180
181 for_each_m4u(data) {
182 /* Avoid timing out if there's nothing to wait for */
183 if (!data->tlb_flush_active)
184 return;
185
186 ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
187 tmp, tmp != 0, 10, 100000);
188 if (ret) {
189 dev_warn(data->dev,
190 "Partial TLB flush timed out, falling back to full flush\n");
191 mtk_iommu_tlb_flush_all(cookie);
192 }
193 /* Clear the CPE status */
194 writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
195 data->tlb_flush_active = false;
196 }
197}
198
199static const struct iommu_gather_ops mtk_iommu_gather_ops = {
200 .tlb_flush_all = mtk_iommu_tlb_flush_all,
201 .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
202 .tlb_sync = mtk_iommu_tlb_sync,
203};
204
205static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
206{
207 struct mtk_iommu_data *data = dev_id;
208 struct mtk_iommu_domain *dom = data->m4u_dom;
209 u32 int_state, regval, fault_iova, fault_pa;
210 unsigned int fault_larb, fault_port;
211 bool layer, write;
212
213 /* Read error info from registers */
214 int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
215 fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
216 layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
217 write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
218 fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
219 regval = readl_relaxed(data->base + REG_MMU_INT_ID);
220 fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
221 fault_port = F_MMU0_INT_ID_PORT_ID(regval);
222
223 if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
224 write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
225 dev_err_ratelimited(
226 data->dev,
227 "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
228 int_state, fault_iova, fault_pa, fault_larb, fault_port,
229 layer, write ? "write" : "read");
230 }
231
232 /* Interrupt clear */
233 regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
234 regval |= F_INT_CLR_BIT;
235 writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
236
237 mtk_iommu_tlb_flush_all(data);
238
239 return IRQ_HANDLED;
240}
241
242static void mtk_iommu_config(struct mtk_iommu_data *data,
243 struct device *dev, bool enable)
244{
245 struct mtk_smi_larb_iommu *larb_mmu;
246 unsigned int larbid, portid;
247 struct iommu_fwspec *fwspec = dev->iommu_fwspec;
248 int i;
249
250 for (i = 0; i < fwspec->num_ids; ++i) {
251 larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
252 portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
253 larb_mmu = &data->smi_imu.larb_imu[larbid];
254
255 dev_dbg(dev, "%s iommu port: %d\n",
256 enable ? "enable" : "disable", portid);
257
258 if (enable)
259 larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
260 else
261 larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
262 }
263}
264
265static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
266{
267 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
268
269 spin_lock_init(&dom->pgtlock);
270
271 dom->cfg = (struct io_pgtable_cfg) {
272 .quirks = IO_PGTABLE_QUIRK_ARM_NS |
273 IO_PGTABLE_QUIRK_NO_PERMS |
274 IO_PGTABLE_QUIRK_TLBI_ON_MAP,
275 .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
276 .ias = 32,
277 .oas = 32,
278 .tlb = &mtk_iommu_gather_ops,
279 .iommu_dev = data->dev,
280 };
281
282 if (data->enable_4GB)
283 dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;
284
285 dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
286 if (!dom->iop) {
287 dev_err(data->dev, "Failed to alloc io pgtable\n");
288 return -EINVAL;
289 }
290
291 /* Update our support page sizes bitmap */
292 dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
293 return 0;
294}
295
296static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
297{
298 struct mtk_iommu_domain *dom;
299
300 if (type != IOMMU_DOMAIN_DMA)
301 return NULL;
302
303 dom = kzalloc(sizeof(*dom), GFP_KERNEL);
304 if (!dom)
305 return NULL;
306
307 if (iommu_get_dma_cookie(&dom->domain))
308 goto free_dom;
309
310 if (mtk_iommu_domain_finalise(dom))
311 goto put_dma_cookie;
312
313 dom->domain.geometry.aperture_start = 0;
314 dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
315 dom->domain.geometry.force_aperture = true;
316
317 return &dom->domain;
318
319put_dma_cookie:
320 iommu_put_dma_cookie(&dom->domain);
321free_dom:
322 kfree(dom);
323 return NULL;
324}
325
326static void mtk_iommu_domain_free(struct iommu_domain *domain)
327{
328 struct mtk_iommu_domain *dom = to_mtk_domain(domain);
329
330 free_io_pgtable_ops(dom->iop);
331 iommu_put_dma_cookie(domain);
332 kfree(to_mtk_domain(domain));
333}
334
335static int mtk_iommu_attach_device(struct iommu_domain *domain,
336 struct device *dev)
337{
338 struct mtk_iommu_domain *dom = to_mtk_domain(domain);
339 struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
340
341 if (!data)
342 return -ENODEV;
343
344 /* Update the pgtable base address register of the M4U HW */
345 if (!data->m4u_dom) {
346 data->m4u_dom = dom;
347 writel(dom->cfg.arm_v7s_cfg.ttbr[0],
348 data->base + REG_MMU_PT_BASE_ADDR);
349 }
350
351 mtk_iommu_config(data, dev, true);
352 return 0;
353}
354
355static void mtk_iommu_detach_device(struct iommu_domain *domain,
356 struct device *dev)
357{
358 struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
359
360 if (!data)
361 return;
362
363 mtk_iommu_config(data, dev, false);
364}
365
366static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
367 phys_addr_t paddr, size_t size, int prot)
368{
369 struct mtk_iommu_domain *dom = to_mtk_domain(domain);
370 unsigned long flags;
371 int ret;
372
373 spin_lock_irqsave(&dom->pgtlock, flags);
374 ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32),
375 size, prot);
376 spin_unlock_irqrestore(&dom->pgtlock, flags);
377
378 return ret;
379}
380
381static size_t mtk_iommu_unmap(struct iommu_domain *domain,
382 unsigned long iova, size_t size)
383{
384 struct mtk_iommu_domain *dom = to_mtk_domain(domain);
385 unsigned long flags;
386 size_t unmapsz;
387
388 spin_lock_irqsave(&dom->pgtlock, flags);
389 unmapsz = dom->iop->unmap(dom->iop, iova, size);
390 spin_unlock_irqrestore(&dom->pgtlock, flags);
391
392 return unmapsz;
393}
394
395static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
396{
397 mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
398}
399
400static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
401 dma_addr_t iova)
402{
403 struct mtk_iommu_domain *dom = to_mtk_domain(domain);
404 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
405 unsigned long flags;
406 phys_addr_t pa;
407
408 spin_lock_irqsave(&dom->pgtlock, flags);
409 pa = dom->iop->iova_to_phys(dom->iop, iova);
410 spin_unlock_irqrestore(&dom->pgtlock, flags);
411
412 if (data->enable_4GB)
413 pa |= BIT_ULL(32);
414
415 return pa;
416}
417
418static int mtk_iommu_add_device(struct device *dev)
419{
420 struct mtk_iommu_data *data;
421 struct iommu_group *group;
422
423 if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
424 return -ENODEV; /* Not a iommu client device */
425
426 data = dev->iommu_fwspec->iommu_priv;
427 iommu_device_link(&data->iommu, dev);
428
429 group = iommu_group_get_for_dev(dev);
430 if (IS_ERR(group))
431 return PTR_ERR(group);
432
433 iommu_group_put(group);
434 return 0;
435}
436
437static void mtk_iommu_remove_device(struct device *dev)
438{
439 struct mtk_iommu_data *data;
440
441 if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
442 return;
443
444 data = dev->iommu_fwspec->iommu_priv;
445 iommu_device_unlink(&data->iommu, dev);
446
447 iommu_group_remove_device(dev);
448 iommu_fwspec_free(dev);
449}
450
451static struct iommu_group *mtk_iommu_device_group(struct device *dev)
452{
453 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
454
455 if (!data)
456 return ERR_PTR(-ENODEV);
457
458 /* All the client devices are in the same m4u iommu-group */
459 if (!data->m4u_group) {
460 data->m4u_group = iommu_group_alloc();
461 if (IS_ERR(data->m4u_group))
462 dev_err(dev, "Failed to allocate M4U IOMMU group\n");
463 } else {
464 iommu_group_ref_get(data->m4u_group);
465 }
466 return data->m4u_group;
467}
468
469static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
470{
471 struct platform_device *m4updev;
472
473 if (args->args_count != 1) {
474 dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
475 args->args_count);
476 return -EINVAL;
477 }
478
479 if (!dev->iommu_fwspec->iommu_priv) {
480 /* Get the m4u device */
481 m4updev = of_find_device_by_node(args->np);
482 if (WARN_ON(!m4updev))
483 return -EINVAL;
484
485 dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
486 }
487
488 return iommu_fwspec_add_ids(dev, args->args, 1);
489}
490
491static struct iommu_ops mtk_iommu_ops = {
492 .domain_alloc = mtk_iommu_domain_alloc,
493 .domain_free = mtk_iommu_domain_free,
494 .attach_dev = mtk_iommu_attach_device,
495 .detach_dev = mtk_iommu_detach_device,
496 .map = mtk_iommu_map,
497 .unmap = mtk_iommu_unmap,
498 .map_sg = default_iommu_map_sg,
499 .flush_iotlb_all = mtk_iommu_iotlb_sync,
500 .iotlb_sync = mtk_iommu_iotlb_sync,
501 .iova_to_phys = mtk_iommu_iova_to_phys,
502 .add_device = mtk_iommu_add_device,
503 .remove_device = mtk_iommu_remove_device,
504 .device_group = mtk_iommu_device_group,
505 .of_xlate = mtk_iommu_of_xlate,
506 .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
507};
508
509static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
510{
511 u32 regval;
512 int ret;
513
514 ret = clk_prepare_enable(data->bclk);
515 if (ret) {
516 dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
517 return ret;
518 }
519
520 regval = F_MMU_TF_PROTECT_SEL(2, data);
521 if (data->m4u_plat == M4U_MT8173)
522 regval |= F_MMU_PREFETCH_RT_REPLACE_MOD;
523 writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
524
525 regval = F_L2_MULIT_HIT_EN |
526 F_TABLE_WALK_FAULT_INT_EN |
527 F_PREETCH_FIFO_OVERFLOW_INT_EN |
528 F_MISS_FIFO_OVERFLOW_INT_EN |
529 F_PREFETCH_FIFO_ERR_INT_EN |
530 F_MISS_FIFO_ERR_INT_EN;
531 writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
532
533 regval = F_INT_TRANSLATION_FAULT |
534 F_INT_MAIN_MULTI_HIT_FAULT |
535 F_INT_INVALID_PA_FAULT |
536 F_INT_ENTRY_REPLACEMENT_FAULT |
537 F_INT_TLB_MISS_FAULT |
538 F_INT_MISS_TRANSACTION_FIFO_FAULT |
539 F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
540 writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
541
542 if (data->m4u_plat == M4U_MT8173)
543 regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
544 else
545 regval = lower_32_bits(data->protect_base) |
546 upper_32_bits(data->protect_base);
547 writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
548
549 if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {
550 /*
551 * If 4GB mode is enabled, the validate PA range is from
552 * 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30].
553 */
554 regval = F_MMU_VLD_PA_RNG(7, 4);
555 writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
556 }
557 writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
558
559 /* It's MISC control register whose default value is ok except mt8173.*/
560 if (data->m4u_plat == M4U_MT8173)
561 writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);
562
563 if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
564 dev_name(data->dev), (void *)data)) {
565 writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
566 clk_disable_unprepare(data->bclk);
567 dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
568 return -ENODEV;
569 }
570
571 return 0;
572}
573
574static const struct component_master_ops mtk_iommu_com_ops = {
575 .bind = mtk_iommu_bind,
576 .unbind = mtk_iommu_unbind,
577};
578
579static int mtk_iommu_probe(struct platform_device *pdev)
580{
581 struct mtk_iommu_data *data;
582 struct device *dev = &pdev->dev;
583 struct resource *res;
584 resource_size_t ioaddr;
585 struct component_match *match = NULL;
586 void *protect;
587 int i, larb_nr, ret;
588
589 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
590 if (!data)
591 return -ENOMEM;
592 data->dev = dev;
593 data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev);
594
595 /* Protect memory. HW will access here while translation fault.*/
596 protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
597 if (!protect)
598 return -ENOMEM;
599 data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
600
601 /* Whether the current dram is over 4GB */
602 data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
603
604 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
605 data->base = devm_ioremap_resource(dev, res);
606 if (IS_ERR(data->base))
607 return PTR_ERR(data->base);
608 ioaddr = res->start;
609
610 data->irq = platform_get_irq(pdev, 0);
611 if (data->irq < 0)
612 return data->irq;
613
614 data->bclk = devm_clk_get(dev, "bclk");
615 if (IS_ERR(data->bclk))
616 return PTR_ERR(data->bclk);
617
618 larb_nr = of_count_phandle_with_args(dev->of_node,
619 "mediatek,larbs", NULL);
620 if (larb_nr < 0)
621 return larb_nr;
622 data->smi_imu.larb_nr = larb_nr;
623
624 for (i = 0; i < larb_nr; i++) {
625 struct device_node *larbnode;
626 struct platform_device *plarbdev;
627 u32 id;
628
629 larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
630 if (!larbnode)
631 return -EINVAL;
632
633 if (!of_device_is_available(larbnode))
634 continue;
635
636 ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
637 if (ret)/* The id is consecutive if there is no this property */
638 id = i;
639
640 plarbdev = of_find_device_by_node(larbnode);
641 if (!plarbdev)
642 return -EPROBE_DEFER;
643 data->smi_imu.larb_imu[id].dev = &plarbdev->dev;
644
645 component_match_add_release(dev, &match, release_of,
646 compare_of, larbnode);
647 }
648
649 platform_set_drvdata(pdev, data);
650
651 ret = mtk_iommu_hw_init(data);
652 if (ret)
653 return ret;
654
655 ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
656 "mtk-iommu.%pa", &ioaddr);
657 if (ret)
658 return ret;
659
660 iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
661 iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
662
663 ret = iommu_device_register(&data->iommu);
664 if (ret)
665 return ret;
666
667 list_add_tail(&data->list, &m4ulist);
668
669 if (!iommu_present(&platform_bus_type))
670 bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
671
672 return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
673}
674
675static int mtk_iommu_remove(struct platform_device *pdev)
676{
677 struct mtk_iommu_data *data = platform_get_drvdata(pdev);
678
679 iommu_device_sysfs_remove(&data->iommu);
680 iommu_device_unregister(&data->iommu);
681
682 if (iommu_present(&platform_bus_type))
683 bus_set_iommu(&platform_bus_type, NULL);
684
685 clk_disable_unprepare(data->bclk);
686 devm_free_irq(&pdev->dev, data->irq, data);
687 component_master_del(&pdev->dev, &mtk_iommu_com_ops);
688 return 0;
689}
690
691static int __maybe_unused mtk_iommu_suspend(struct device *dev)
692{
693 struct mtk_iommu_data *data = dev_get_drvdata(dev);
694 struct mtk_iommu_suspend_reg *reg = &data->reg;
695 void __iomem *base = data->base;
696
697 reg->standard_axi_mode = readl_relaxed(base +
698 REG_MMU_STANDARD_AXI_MODE);
699 reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
700 reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
701 reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
702 reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
703 reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
704 clk_disable_unprepare(data->bclk);
705 return 0;
706}
707
708static int __maybe_unused mtk_iommu_resume(struct device *dev)
709{
710 struct mtk_iommu_data *data = dev_get_drvdata(dev);
711 struct mtk_iommu_suspend_reg *reg = &data->reg;
712 void __iomem *base = data->base;
713 int ret;
714
715 ret = clk_prepare_enable(data->bclk);
716 if (ret) {
717 dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
718 return ret;
719 }
720 writel_relaxed(reg->standard_axi_mode,
721 base + REG_MMU_STANDARD_AXI_MODE);
722 writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
723 writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
724 writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
725 writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
726 writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
727 if (data->m4u_dom)
728 writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
729 base + REG_MMU_PT_BASE_ADDR);
730 return 0;
731}
732
733static const struct dev_pm_ops mtk_iommu_pm_ops = {
734 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
735};
736
737static const struct of_device_id mtk_iommu_of_ids[] = {
738 { .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712},
739 { .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173},
740 {}
741};
742
743static struct platform_driver mtk_iommu_driver = {
744 .probe = mtk_iommu_probe,
745 .remove = mtk_iommu_remove,
746 .driver = {
747 .name = "mtk-iommu",
748 .of_match_table = of_match_ptr(mtk_iommu_of_ids),
749 .pm = &mtk_iommu_pm_ops,
750 }
751};
752
753static int __init mtk_iommu_init(void)
754{
755 int ret;
756
757 ret = platform_driver_register(&mtk_iommu_driver);
758 if (ret != 0)
759 pr_err("Failed to register MTK IOMMU driver\n");
760
761 return ret;
762}
763
764subsys_initcall(mtk_iommu_init)