// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of_iommu.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

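/*
 * Read an ARM coprocessor register into 'reg' with an MRC instruction.
 * All arguments are stringified straight into the inline assembly, so
 * callers pass raw coprocessor/opcode/register tokens (e.g. p15, c0).
 */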
#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"	mrc	" #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	*iop;
	struct device		*dev;
	spinlock_t		pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}

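/*
 * The IOMMU clocks are prepared once at probe time; only the atomic
 * clk_enable()/clk_disable() half of the clk API is used here, so the
 * clocks can be toggled while holding spinlocks. The secondary
 * iommu->clk is only toggled when set.
 */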
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}

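/*
 * Drive the global configuration registers and every context bank back
 * to a known-clean state: the global TLB is invalidated and each
 * context is left disabled with zeroed translation-table registers.
 */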
static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}

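/* Invalidate the whole TLB of every context bank attached to the domain. */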
static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}

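/*
 * Invalidate the flushed range one granule at a time. TLBIVA takes the
 * page-aligned VA in its upper bits and the context's ASID in the low
 * bits, so the IOVA is masked with TLBIVA_VA and tagged with the ASID
 * read back from CONTEXTIDR before each write.
 */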
static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_leaf(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, true, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_flush_leaf = __flush_iotlb_leaf,
	.tlb_add_page = __flush_iotlb_page,
};

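/*
 * Claim a free context bank from the bitmap without holding a lock:
 * find_next_zero_bit() proposes a slot and the atomic test_and_set_bit()
 * either claims it or detects that a racing caller got there first, in
 * which case the search is retried.
 */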
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

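/*
 * Route each of the master's MIDs (bus master IDs) to its context bank:
 * every MID is pointed at context bank 'master->num', given VMID 0 and
 * marked non-secure, and the bank's TLB entries are tagged with an ASID
 * equal to the bank number.
 */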
static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set VMID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}

static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
	SET_TTBR1(base, ctx, 0);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
	struct msm_priv *priv;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;

fail_nomem:
	kfree(priv);
	return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

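/*
 * Allocate the ARMv7 short-descriptor io-pgtable for the domain and
 * adopt whatever page-size bitmap the allocator actually granted, since
 * ARM_V7S may restrict the sizes advertised in MSM_IOMMU_PGSIZES.
 */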
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_TLBI_ON_MAP,
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

371
372/* Must be called under msm_iommu_lock */
373static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
374{
375 struct msm_iommu_dev *iommu, *ret = NULL;
376 struct msm_iommu_ctx_dev *master;
377
378 list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
379 master = list_first_entry(&iommu->ctx_list,
380 struct msm_iommu_ctx_dev,
381 list);
382 if (master->of_node == dev->of_node) {
383 ret = iommu;
384 break;
385 }
386 }
387
388 return ret;
389}
390
391static struct iommu_device *msm_iommu_probe_device(struct device *dev)
392{
393 struct msm_iommu_dev *iommu;
394 unsigned long flags;
395
396 spin_lock_irqsave(&msm_iommu_lock, flags);
397 iommu = find_iommu_for_dev(dev);
398 spin_unlock_irqrestore(&msm_iommu_lock, flags);
399
400 if (!iommu)
401 return ERR_PTR(-ENODEV);
402
403 return &iommu->iommu;
404}
405
406static void msm_iommu_release_device(struct device *dev)
407{
408}
409
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached\n");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret;

	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t len, struct iommu_iotlb_gather *gather)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;

	spin_lock_irqsave(&priv->pgtlock, flags);
	len = priv->iop->unmap(priv->iop, iova, len, gather);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return len;
}

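/*
 * Resolve an IOVA by asking the hardware: the VA is written to the V2P
 * probe register of the first attached context bank and the physical
 * address is read back from PAR, honouring the larger offset mask when
 * the hit was a supersection.
 */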
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static bool msm_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

static void insert_iommu_master(struct device *dev,
				struct msm_iommu_dev **iommu,
				struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		if (!master)
			return;
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
				 spec->args[0]);
			return;
		}

	master->mids[master->num_mids++] = spec->args[0];
}

static int qcom_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
		if (iommu->dev->of_node == spec->np)
			break;

	if (!iommu || iommu->dev->of_node != spec->np) {
		ret = -ENODEV;
		goto fail;
	}

	insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

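/*
 * Threaded handler for the context fault interrupt: scan every context
 * bank, dump the fault state of any bank with a non-zero FSR, then
 * write the FSR back to clear the fault.
 */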
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %p\n", iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.capable = msm_iommu_capable,
	.domain_alloc = msm_iommu_domain_alloc,
	.domain_free = msm_iommu_domain_free,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	/*
	 * Nothing is needed here, the barrier that guarantees
	 * completion of the TLB sync operation is implicitly taken
	 * care of when the IOMMU client does a writel before
	 * kick-starting the other master.
	 */
	.iotlb_sync = NULL,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.probe_device = msm_iommu_probe_device,
	.release_device = msm_iommu_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
};

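/*
 * At probe time the hardware is sanity-checked with a quick V2P round
 * trip: context 0 is briefly enabled and asked to translate address
 * zero; a PAR value of zero means the block did not respond, and the
 * probe is aborted.
 */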
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);
	platform_set_drvdata(pdev, iommu);	/* needed by msm_iommu_remove() */

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk)) {
		dev_err(iommu->dev, "could not get smmu_pclk\n");
		return PTR_ERR(iommu->pclk);
	}

	ret = clk_prepare(iommu->pclk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare smmu_pclk\n");
		return ret;
	}

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		dev_err(iommu->dev, "could not get iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return PTR_ERR(iommu->clk);
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		dev_err(iommu->dev, "could not prepare iommu_clk\n");
		clk_unprepare(iommu->pclk);
		return ret;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		dev_err(iommu->dev, "could not get iommu base\n");
		ret = PTR_ERR(iommu->base);
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&iommu->iommu);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	bus_set_iommu(&platform_bus_type, &msm_iommu_ops);

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

static int msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return 0;
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name	= "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe	= msm_iommu_probe,
	.remove	= msm_iommu_remove,
};

static int __init msm_iommu_driver_init(void)
{
	int ret;

	ret = platform_driver_register(&msm_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU driver\n");

	return ret;
}
subsys_initcall(msm_iommu_driver_init);