// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR 0x000
#define MMU_PT_ADDR_MASK GENMASK(31, 7)

#define REG_MMU_INVALIDATE 0x020
#define F_ALL_INVLD 0x2
#define F_MMU_INV_RANGE 0x1

#define REG_MMU_INVLD_START_A 0x024
#define REG_MMU_INVLD_END_A 0x028

#define REG_MMU_INV_SEL_GEN2 0x02c
#define REG_MMU_INV_SEL_GEN1 0x038
#define F_INVLD_EN0 BIT(0)
#define F_INVLD_EN1 BIT(1)

#define REG_MMU_MISC_CTRL 0x048
#define F_MMU_IN_ORDER_WR_EN_MASK (BIT(1) | BIT(17))
#define F_MMU_STANDARD_AXI_MODE_MASK (BIT(3) | BIT(19))

#define REG_MMU_DCM_DIS 0x050
#define REG_MMU_WR_LEN_CTRL 0x054
#define F_MMU_WR_THROT_DIS_MASK (BIT(5) | BIT(21))

#define REG_MMU_CTRL_REG 0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR (2 << 4)
#define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4)
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173 (2 << 5)

#define REG_MMU_IVRP_PADDR 0x114

#define REG_MMU_VLD_PA_RNG 0x118
#define F_MMU_VLD_PA_RNG(EA, SA) (((EA) << 8) | (SA))

#define REG_MMU_INT_CONTROL0 0x120
#define F_L2_MULIT_HIT_EN BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN BIT(5)
#define F_MISS_FIFO_ERR_INT_EN BIT(6)
#define F_INT_CLR_BIT BIT(12)

#define REG_MMU_INT_MAIN_CONTROL 0x124
        /* mmu0 | mmu1 */
#define F_INT_TRANSLATION_FAULT (BIT(0) | BIT(7))
#define F_INT_MAIN_MULTI_HIT_FAULT (BIT(1) | BIT(8))
#define F_INT_INVALID_PA_FAULT (BIT(2) | BIT(9))
#define F_INT_ENTRY_REPLACEMENT_FAULT (BIT(3) | BIT(10))
#define F_INT_TLB_MISS_FAULT (BIT(4) | BIT(11))
#define F_INT_MISS_TRANSACTION_FIFO_FAULT (BIT(5) | BIT(12))
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT (BIT(6) | BIT(13))

#define REG_MMU_CPE_DONE 0x12C

#define REG_MMU_FAULT_ST1 0x134
#define F_REG_MMU0_FAULT_MASK GENMASK(6, 0)
#define F_REG_MMU1_FAULT_MASK GENMASK(13, 7)

#define REG_MMU0_FAULT_VA 0x13c
#define F_MMU_FAULT_VA_WRITE_BIT BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT BIT(0)

#define REG_MMU0_INVLD_PA 0x140
#define REG_MMU1_FAULT_VA 0x144
#define REG_MMU1_INVLD_PA 0x148
#define REG_MMU0_INT_ID 0x150
#define REG_MMU1_INT_ID 0x154
#define F_MMU_INT_ID_COMM_ID(a) (((a) >> 9) & 0x7)
#define F_MMU_INT_ID_SUB_COMM_ID(a) (((a) >> 7) & 0x3)
#define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN 256

/*
 * Get the local arbiter ID and the port ID within that local arbiter
 * from an mtk_m4u_id, which is encoded by MTK_M4U_ID.
 */
#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
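
/*
 * Worked example (illustrative only, not part of the driver): with the
 * MTK_M4U_ID encoding id = (larb << 5) | port, an id of 0x45 decodes to
 * MTK_M4U_TO_LARB(0x45) = 2 and MTK_M4U_TO_PORT(0x45) = 5.
 */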

#define HAS_4GB_MODE BIT(0)
/* The HW will use the EMI clock if there is no "bclk". */
#define HAS_BCLK BIT(1)
#define HAS_VLD_PA_RNG BIT(2)
#define RESET_AXI BIT(3)
#define OUT_ORDER_WR_EN BIT(4)
#define HAS_SUB_COMM BIT(5)
#define WR_THROT_EN BIT(6)

#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
        ((((pdata)->flags) & (_x)) == (_x))
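
/*
 * Note that MTK_IOMMU_HAS_FLAG() tests that *all* bits of _x are set, so it
 * also works for multi-bit masks. For illustration: with
 * pdata->flags = HAS_4GB_MODE | HAS_BCLK,
 * MTK_IOMMU_HAS_FLAG(pdata, HAS_BCLK) is true, while
 * MTK_IOMMU_HAS_FLAG(pdata, HAS_BCLK | RESET_AXI) is false.
 */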

struct mtk_iommu_domain {
        struct io_pgtable_cfg cfg;
        struct io_pgtable_ops *iop;

        struct iommu_domain domain;
};

static const struct iommu_ops mtk_iommu_ops;

/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * ====================
 *
 * 0       1G      2G      3G      4G      5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 * =============================
 *
 * 4G      5G      6G      7G      8G
 * |---E---|---B---|---C---|---D---|
 * +------------Memory-------------+
 *
 * Region 'A' (I/O) can NOT be mapped by the M4U. For regions 'B'/'C'/'D',
 * bit 32 of the CPU physical address must always be set, while for region
 * 'E' the CPU physical address is kept as is.
 * Additionally, the IOMMU consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x140000000UL
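
/*
 * Worked example of the remapping above (illustrative only): a CPU physical
 * address of 0x8000_0000 (2G, region 'B'/'C' boundary) gets bit 32 set and
 * becomes the IOMMU output address 0x1_8000_0000 (6G), while 0x1_0000_0000
 * (4G, region 'E') is output unchanged. This is why, in the reverse
 * direction, mtk_iommu_iova_to_phys() below only clears bit 32 for output
 * addresses at or above MTK_IOMMU_4GB_MODE_REMAP_BASE (5G).
 */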

static LIST_HEAD(m4ulist);      /* List all the M4U HWs */

#define for_each_m4u(data)      list_for_each_entry(data, &m4ulist, list)

/*
 * There may be one or two M4U HWs, but we always expect them to be in the
 * same domain for performance reasons.
 *
 * This always returns the mtk_iommu_data of the first probed M4U, where the
 * iommu domain information is recorded.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
        struct mtk_iommu_data *data;

        for_each_m4u(data)
                return data;

        return NULL;
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct mtk_iommu_domain, domain);
}

static void mtk_iommu_tlb_flush_all(void *cookie)
{
        struct mtk_iommu_data *data = cookie;

        for_each_m4u(data) {
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                               data->base + data->plat_data->inv_sel_reg);
                writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
                wmb(); /* Make sure the tlb flush all done */
        }
}

static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
                                           size_t granule, void *cookie)
{
        struct mtk_iommu_data *data = cookie;
        unsigned long flags;
        int ret;
        u32 tmp;

        for_each_m4u(data) {
                spin_lock_irqsave(&data->tlb_lock, flags);
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                               data->base + data->plat_data->inv_sel_reg);

                writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
                writel_relaxed(iova + size - 1,
                               data->base + REG_MMU_INVLD_END_A);
                writel_relaxed(F_MMU_INV_RANGE,
                               data->base + REG_MMU_INVALIDATE);

                /* tlb sync */
                ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
                                                tmp, tmp != 0, 10, 1000);
                if (ret) {
                        dev_warn(data->dev,
                                 "Partial TLB flush timed out, falling back to full flush\n");
                        mtk_iommu_tlb_flush_all(cookie);
                }
                /* Clear the CPE status */
                writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
                spin_unlock_irqrestore(&data->tlb_lock, flags);
        }
}

static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
                                            unsigned long iova, size_t granule,
                                            void *cookie)
{
        struct mtk_iommu_data *data = cookie;
        struct iommu_domain *domain = &data->m4u_dom->domain;

        iommu_iotlb_gather_add_page(domain, gather, iova, granule);
}

static const struct iommu_flush_ops mtk_iommu_flush_ops = {
        .tlb_flush_all = mtk_iommu_tlb_flush_all,
        .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
        .tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
        .tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
};

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
        struct mtk_iommu_data *data = dev_id;
        struct mtk_iommu_domain *dom = data->m4u_dom;
        u32 int_state, regval, fault_iova, fault_pa;
        unsigned int fault_larb, fault_port, sub_comm = 0;
        bool layer, write;

        /* Read error info from registers */
        int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
        if (int_state & F_REG_MMU0_FAULT_MASK) {
                regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
                fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
                fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
        } else {
                regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
                fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
                fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
        }
        layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
        write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
        fault_port = F_MMU_INT_ID_PORT_ID(regval);
        if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
                fault_larb = F_MMU_INT_ID_COMM_ID(regval);
                sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
        } else {
                fault_larb = F_MMU_INT_ID_LARB_ID(regval);
        }
        fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];

        if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
                               write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
                dev_err_ratelimited(
                        data->dev,
                        "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
                        int_state, fault_iova, fault_pa, fault_larb, fault_port,
                        layer, write ? "write" : "read");
        }

        /* Interrupt clear */
        regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
        regval |= F_INT_CLR_BIT;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        mtk_iommu_tlb_flush_all(data);

        return IRQ_HANDLED;
}

static void mtk_iommu_config(struct mtk_iommu_data *data,
                             struct device *dev, bool enable)
{
        struct mtk_smi_larb_iommu *larb_mmu;
        unsigned int larbid, portid;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        int i;

        for (i = 0; i < fwspec->num_ids; ++i) {
                larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
                portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
                larb_mmu = &data->larb_imu[larbid];

                dev_dbg(dev, "%s iommu port: %d\n",
                        enable ? "enable" : "disable", portid);

                if (enable)
                        larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
                else
                        larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
        }
}

static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

        dom->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_ARM_NS |
                        IO_PGTABLE_QUIRK_NO_PERMS |
                        IO_PGTABLE_QUIRK_TLBI_ON_MAP |
                        IO_PGTABLE_QUIRK_ARM_MTK_EXT,
                .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 34,
                .tlb = &mtk_iommu_flush_ops,
                .iommu_dev = data->dev,
        };

        dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
        if (!dom->iop) {
                dev_err(data->dev, "Failed to alloc io pgtable\n");
                return -EINVAL;
        }

        /* Update our supported page sizes bitmap */
        dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
        return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
        struct mtk_iommu_domain *dom;

        if (type != IOMMU_DOMAIN_DMA)
                return NULL;

        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;

        if (iommu_get_dma_cookie(&dom->domain))
                goto free_dom;

        if (mtk_iommu_domain_finalise(dom))
                goto put_dma_cookie;

        dom->domain.geometry.aperture_start = 0;
        dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
        dom->domain.geometry.force_aperture = true;

        return &dom->domain;

put_dma_cookie:
        iommu_put_dma_cookie(&dom->domain);
free_dom:
        kfree(dom);
        return NULL;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);

        free_io_pgtable_ops(dom->iop);
        iommu_put_dma_cookie(domain);
        kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);

        if (!data)
                return -ENODEV;

        /* Update the pgtable base address register of the M4U HW */
        if (!data->m4u_dom) {
                data->m4u_dom = dom;
                writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
                       data->base + REG_MMU_PT_BASE_ADDR);
        }

        mtk_iommu_config(data, dev, true);
        return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
                                    struct device *dev)
{
        struct mtk_iommu_data *data = dev_iommu_priv_get(dev);

        if (!data)
                return;

        mtk_iommu_config(data, dev, false);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

        /* The "4GB mode" M4U physically cannot use the lower remap of DRAM. */
        if (data->enable_4GB)
                paddr |= BIT_ULL(32);

        /* Synchronize with the tlb_lock */
        return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
                              unsigned long iova, size_t size,
                              struct iommu_iotlb_gather *gather)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);

        return dom->iop->unmap(dom->iop, iova, size, gather);
}

static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
        mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
                                 struct iommu_iotlb_gather *gather)
{
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
        size_t length = gather->end - gather->start;

        if (gather->start == ULONG_MAX)
                return;

        mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
                                       data);
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t iova)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
        phys_addr_t pa;

        pa = dom->iop->iova_to_phys(dom->iop, iova);
        if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
                pa &= ~BIT_ULL(32);

        return pa;
}

static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct mtk_iommu_data *data;

        if (!fwspec || fwspec->ops != &mtk_iommu_ops)
                return ERR_PTR(-ENODEV); /* Not an iommu client device */

        data = dev_iommu_priv_get(dev);

        return &data->iommu;
}

static void mtk_iommu_release_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        if (!fwspec || fwspec->ops != &mtk_iommu_ops)
                return;

        iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

        if (!data)
                return ERR_PTR(-ENODEV);

        /* All the client devices are in the same m4u iommu-group */
        if (!data->m4u_group) {
                data->m4u_group = iommu_group_alloc();
                if (IS_ERR(data->m4u_group))
                        dev_err(dev, "Failed to allocate M4U IOMMU group\n");
        } else {
                iommu_group_ref_get(data->m4u_group);
        }
        return data->m4u_group;
}

static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        struct platform_device *m4updev;

        if (args->args_count != 1) {
                dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
                        args->args_count);
                return -EINVAL;
        }

        if (!dev_iommu_priv_get(dev)) {
                /* Get the m4u device */
                m4updev = of_find_device_by_node(args->np);
                if (WARN_ON(!m4updev))
                        return -EINVAL;

                dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
        }

        return iommu_fwspec_add_ids(dev, args->args, 1);
}
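
/*
 * As an illustrative sketch (not part of the driver): with #iommu-cells = <1>,
 * a client device node in the device tree references the M4U with a
 * single-cell specifier, e.g.
 *
 *   iommus = <&iommu M4U_PORT_DISP_OVL0>;
 *
 * where the port macro (from a dt-bindings larb-port header such as
 * mt8173-larb-port.h) encodes the larb/port pair in the MTK_M4U_ID format
 * decoded by MTK_M4U_TO_LARB()/MTK_M4U_TO_PORT() above.
 */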

static const struct iommu_ops mtk_iommu_ops = {
        .domain_alloc = mtk_iommu_domain_alloc,
        .domain_free = mtk_iommu_domain_free,
        .attach_dev = mtk_iommu_attach_device,
        .detach_dev = mtk_iommu_detach_device,
        .map = mtk_iommu_map,
        .unmap = mtk_iommu_unmap,
        .flush_iotlb_all = mtk_iommu_flush_iotlb_all,
        .iotlb_sync = mtk_iommu_iotlb_sync,
        .iova_to_phys = mtk_iommu_iova_to_phys,
        .probe_device = mtk_iommu_probe_device,
        .release_device = mtk_iommu_release_device,
        .device_group = mtk_iommu_device_group,
        .of_xlate = mtk_iommu_of_xlate,
        .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
        u32 regval;
        int ret;

        ret = clk_prepare_enable(data->bclk);
        if (ret) {
                dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
                return ret;
        }

        if (data->plat_data->m4u_plat == M4U_MT8173) {
                regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
                         F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
        } else {
                regval = readl_relaxed(data->base + REG_MMU_CTRL_REG);
                regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
        }
        writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

        regval = F_L2_MULIT_HIT_EN |
                F_TABLE_WALK_FAULT_INT_EN |
                F_PREETCH_FIFO_OVERFLOW_INT_EN |
                F_MISS_FIFO_OVERFLOW_INT_EN |
                F_PREFETCH_FIFO_ERR_INT_EN |
                F_MISS_FIFO_ERR_INT_EN;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        regval = F_INT_TRANSLATION_FAULT |
                F_INT_MAIN_MULTI_HIT_FAULT |
                F_INT_INVALID_PA_FAULT |
                F_INT_ENTRY_REPLACEMENT_FAULT |
                F_INT_TLB_MISS_FAULT |
                F_INT_MISS_TRANSACTION_FIFO_FAULT |
                F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
        writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

        if (data->plat_data->m4u_plat == M4U_MT8173)
                regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
        else
                regval = lower_32_bits(data->protect_base) |
                         upper_32_bits(data->protect_base);
        writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

        if (data->enable_4GB &&
            MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
                /*
                 * If 4GB mode is enabled, the valid PA range is from
                 * 0x1_0000_0000 to 0x1_ffff_ffff; record bits [32:30] here.
                 */
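                /*
                 * For illustration: F_MMU_VLD_PA_RNG(7, 4) = (7 << 8) | 4 =
                 * 0x704, i.e. start bits [32:30] = 4 (0x1_0000_0000) and end
                 * bits [32:30] = 7 (covering up to 0x1_ffff_ffff).
                 */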
                regval = F_MMU_VLD_PA_RNG(7, 4);
                writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
        }
        writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
        if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
                /* write command throttling mode */
                regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
                regval &= ~F_MMU_WR_THROT_DIS_MASK;
                writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
        }

        if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
                /* The register is called STANDARD_AXI_MODE in this case */
                regval = 0;
        } else {
                regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
                regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
                if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
                        regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
        }
        writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);

        if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
                             dev_name(data->dev), (void *)data)) {
                writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
                clk_disable_unprepare(data->bclk);
                dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
                return -ENODEV;
        }

        return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
        .bind = mtk_iommu_bind,
        .unbind = mtk_iommu_unbind,
};

static int mtk_iommu_probe(struct platform_device *pdev)
{
        struct mtk_iommu_data *data;
        struct device *dev = &pdev->dev;
        struct resource *res;
        resource_size_t ioaddr;
        struct component_match *match = NULL;
        void *protect;
        int i, larb_nr, ret;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->dev = dev;
        data->plat_data = of_device_get_match_data(dev);

        /* Protect memory. The HW will access here upon a translation fault. */
        protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
        if (!protect)
                return -ENOMEM;
        data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
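        /*
         * Allocating MTK_PROTECT_PA_ALIGN * 2 bytes guarantees that a fully
         * aligned MTK_PROTECT_PA_ALIGN-sized window exists inside the buffer
         * regardless of where devm_kzalloc() places it: ALIGN() rounds
         * protect_base up by at most MTK_PROTECT_PA_ALIGN - 1 bytes.
         */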

        /* Whether the current dram is over 4GB */
        data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
        if (!MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
                data->enable_4GB = false;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->base))
                return PTR_ERR(data->base);
        ioaddr = res->start;

        data->irq = platform_get_irq(pdev, 0);
        if (data->irq < 0)
                return data->irq;

        if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
                data->bclk = devm_clk_get(dev, "bclk");
                if (IS_ERR(data->bclk))
                        return PTR_ERR(data->bclk);
        }

        larb_nr = of_count_phandle_with_args(dev->of_node,
                                             "mediatek,larbs", NULL);
        if (larb_nr < 0)
                return larb_nr;

        for (i = 0; i < larb_nr; i++) {
                struct device_node *larbnode;
                struct platform_device *plarbdev;
                u32 id;

                larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
                if (!larbnode)
                        return -EINVAL;

                if (!of_device_is_available(larbnode)) {
                        of_node_put(larbnode);
                        continue;
                }

                ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
                if (ret) /* The ids are consecutive if this property is absent */
                        id = i;

                plarbdev = of_find_device_by_node(larbnode);
                if (!plarbdev) {
                        of_node_put(larbnode);
                        return -EPROBE_DEFER;
                }
                data->larb_imu[id].dev = &plarbdev->dev;

                component_match_add_release(dev, &match, release_of,
                                            compare_of, larbnode);
        }

        platform_set_drvdata(pdev, data);

        ret = mtk_iommu_hw_init(data);
        if (ret)
                return ret;

        ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
                                     "mtk-iommu.%pa", &ioaddr);
        if (ret)
                return ret;

        iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
        iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&data->iommu);
        if (ret)
                return ret;

        spin_lock_init(&data->tlb_lock);
        list_add_tail(&data->list, &m4ulist);

        if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

        return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
        struct mtk_iommu_data *data = platform_get_drvdata(pdev);

        iommu_device_sysfs_remove(&data->iommu);
        iommu_device_unregister(&data->iommu);

        if (iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, NULL);

        clk_disable_unprepare(data->bclk);
        devm_free_irq(&pdev->dev, data->irq, data);
        component_master_del(&pdev->dev, &mtk_iommu_com_ops);
        return 0;
}

static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;

        reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL);
        reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
        reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
        reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
        reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
        reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
        reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
        reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
        clk_disable_unprepare(data->bclk);
        return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
        void __iomem *base = data->base;
        int ret;

        ret = clk_prepare_enable(data->bclk);
        if (ret) {
                dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
                return ret;
        }
        writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
        writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
        writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
        writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
        writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
        writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
        writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
        writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
        if (m4u_dom)
                writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
                       base + REG_MMU_PT_BASE_ADDR);
        return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct mtk_iommu_plat_data mt2712_data = {
        .m4u_plat = M4U_MT2712,
        .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
        .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
        .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
};

static const struct mtk_iommu_plat_data mt6779_data = {
        .m4u_plat = M4U_MT6779,
        .flags = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN,
        .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
        .larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}},
};

static const struct mtk_iommu_plat_data mt8173_data = {
        .m4u_plat = M4U_MT8173,
        .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI,
        .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
        .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
};

static const struct mtk_iommu_plat_data mt8183_data = {
        .m4u_plat = M4U_MT8183,
        .flags = RESET_AXI,
        .inv_sel_reg = REG_MMU_INV_SEL_GEN1,
        .larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
};

static const struct of_device_id mtk_iommu_of_ids[] = {
        { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
        { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
        { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
        { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
        {}
};

static struct platform_driver mtk_iommu_driver = {
        .probe = mtk_iommu_probe,
        .remove = mtk_iommu_remove,
        .driver = {
                .name = "mtk-iommu",
                .of_match_table = of_match_ptr(mtk_iommu_of_ids),
                .pm = &mtk_iommu_pm_ops,
        }
};

static int __init mtk_iommu_init(void)
{
        int ret;

        ret = platform_driver_register(&mtk_iommu_driver);
        if (ret != 0)
                pr_err("Failed to register MTK IOMMU driver\n");

        return ret;
}

subsys_initcall(mtk_iommu_init)