/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_hw-8xxx.h>
#include <mach/iommu.h>

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

#define RCP15_PRRR(reg)		MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)		MRC(reg, p15, 0, c10, c2, 1)

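/*
 * TEX-remap class lookup table, filled in at init by
 * setup_iommu_tex_classes(): it maps each MSM_IOMMU_ATTR_* cacheability
 * attribute to the index of a matching remap class found in this CPU's
 * PRRR/NMRR registers, read via the MRC macro above.
 */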
static int msm_iommu_tex_class[4];

DEFINE_SPINLOCK(msm_iommu_lock);

struct msm_priv {
	unsigned long *pgtable;
	struct list_head list_attached;
};

static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_enable(drvdata->clk);
		if (ret)
			clk_disable(drvdata->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable(drvdata->clk);
	clk_disable(drvdata->pclk);
}

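/*
 * With CONFIG_IOMMU_PGTABLES_L2 disabled, the IOMMU walks its page
 * tables from memory rather than the L2 slave port, so the first-level
 * table (and any second-level tables it points to) must be cleaned out
 * of the CPU dcache before the TLBs are invalidated; that is what the
 * #ifndef block below is for.
 */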
static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
#ifndef CONFIG_IOMMU_PGTABLES_L2
	unsigned long *fl_table = priv->pgtable;
	int i;

	if (!list_empty(&priv->list_attached)) {
		dmac_flush_range(fl_table, fl_table + SZ_16K);

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
				void *sl_table = __va(fl_table[i] &
						      FL_BASE_MASK);
				dmac_flush_range(sl_table, sl_table + SZ_4K);
			}
	}
#endif

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
}

static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
	unsigned int prrr, nmrr;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> 14));

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_TTBR0_SH(base, ctx, 1);
	SET_TTBR1_SH(base, ctx, 1);

	SET_TTBR0_NOS(base, ctx, 1);
	SET_TTBR1_NOS(base, ctx, 1);

	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR0_IRGNL(base, ctx, 1);

	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR1_IRGNL(base, ctx, 1);

	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

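/*
 * The first-level table follows the ARMv7-A short-descriptor format:
 * 4096 32-bit entries covering 1 MB of VA each, hence the 16 KB
 * allocation below. The allocation is also naturally 16 KB aligned,
 * which the hardware requires (TTBR0 is programmed as pgtable >> 14 in
 * __program_context()).
 */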
static int msm_iommu_domain_init(struct iommu_domain *domain)
{
	struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
							  get_order(SZ_16K));

	if (!priv->pgtable)
		goto fail_nomem;

	memset(priv->pgtable, 0, SZ_16K);
	domain->priv = priv;
	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	int i;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num,
			  __pa(priv->pgtable));

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ret = __flush_iotlb(domain);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __flush_iotlb(domain);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

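/*
 * Mapping sizes match the short-descriptor format: 4K small pages and
 * 64K large pages in a second-level table, 1M sections and 16M
 * supersections in the first-level table. 64K and 16M descriptors must
 * be replicated across 16 consecutive PTE slots, which is why those
 * cases below write 16 identical entries.
 */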
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, int order, int prot)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	unsigned int pgprot;
	size_t len = 0x1000UL << order;
	int ret = 0, tex, sh;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
	tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];

	if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
		ret = -EINVAL;
		goto fail;
	}

	priv = domain->priv;
	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	if (len == SZ_16M || len == SZ_1M) {
		pgprot = sh ? FL_SHARED : 0;
		pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? FL_TEX0 : 0;
	} else {
		pgprot = sh ? SL_SHARED : 0;
		pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
		pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
		pgprot |= tex & 0x04 ? SL_TEX0 : 0;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		int i = 0;
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
				      FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
				      FL_SHARED | FL_NG | pgprot;
	}

	if (len == SZ_1M)
		*fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
			  FL_TYPE_SECT | FL_SHARED | pgprot;

	/* Need a 2nd level table */
	if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
		unsigned long *sl;
		sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
							get_order(SZ_4K));

		if (!sl) {
			pr_debug("Could not allocate second level table\n");
			ret = -ENOMEM;
			goto fail;
		}

		memset(sl, 0, SZ_4K);
		*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K)
		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
			  SL_SHARED | SL_TYPE_SMALL | pgprot;

	if (len == SZ_64K) {
		int i;

		for (i = 0; i < 16; i++)
			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
				      SL_NG | SL_AP1 | SL_SHARED |
				      SL_TYPE_LARGE | pgprot;
	}

	ret = __flush_iotlb(domain);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			   int order)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	size_t len = 0x1000UL << order;
	int i, ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv) {
		ret = -ENODEV;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		ret = -ENODEV;
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M)
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

	if (len == SZ_1M)
		*fl_pte = 0;

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;
	}

	if (len == SZ_4K)
		*sl_pte = 0;

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;
		}
	}

	ret = __flush_iotlb(domain);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

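/*
 * Resolve an IOVA using the hardware's VA-to-PA (V2P) translation
 * engine of the first attached context: write the VA to V2PPR and read
 * the result back from PAR, special-casing supersections, where PAR
 * only holds the upper 8 address bits.
 */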
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	unsigned long flags;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;
	if (list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_V2PPR(base, ctx, va & V2Pxx_VA);

	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(base, ctx))
		ret = 0;

	__disable_clocks(iommu_drvdata);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_drvdata *drvdata = dev_id;
	void __iomem *base;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!drvdata) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	base = drvdata->base;

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int) base);

	ret = __enable_clocks(drvdata);
	if (ret)
		goto fail;

	for (i = 0; i < drvdata->ncb; i++) {
		fsr = GET_FSR(base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, i);
			SET_FSR(base, i, 0x4000000F);
		}
	}
	__disable_clocks(drvdata);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap
};
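/*
 * Rough usage sketch (illustrative only, not part of this driver): a
 * client of the IOMMU API of this era would do something like
 *
 *	struct iommu_domain *dom = iommu_domain_alloc();
 *	iommu_attach_device(dom, dev);
 *	iommu_map(dom, iova, phys, 0, prot);	(order 0 == one 4K page)
 *
 * which lands in msm_iommu_domain_init(), msm_iommu_attach_dev() and
 * msm_iommu_map() above via the msm_iommu_ops table.
 */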
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
	int i = 0;
	unsigned int prrr = 0;
	unsigned int nmrr = 0;
	int c_icp, c_ocp, c_mt, c_nos;

	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);

	for (i = 0; i < NUM_TEX_CLASS; i++) {
		c_nos = PRRR_NOS(prrr, i);
		c_mt = PRRR_MT(prrr, i);
		c_icp = NMRR_ICP(nmrr, i);
		c_ocp = NMRR_OCP(nmrr, i);

		if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
			return i;
	}

	return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
	msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
			get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
			get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
			get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

	msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
			get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

static int __init msm_iommu_init(void)
{
	setup_iommu_tex_classes();
	register_iommu(&msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * Author: Stepan Moskovchenko <stepanm@codeaurora.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
#include <linux/err.h>

#include <asm/cacheflush.h>
#include <linux/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

#define MRC(reg, processor, op1, crn, crm, op2)				\
__asm__ __volatile__ (							\
"   mrc   " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n"	\
: "=r" (reg))

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static DEFINE_SPINLOCK(msm_iommu_lock);
static LIST_HEAD(qcom_iommu_devices);
static struct iommu_ops msm_iommu_ops;

struct msm_priv {
	struct list_head list_attached;
	struct iommu_domain domain;
	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;
	struct device *dev;
	spinlock_t pgtlock; /* pagetable lock */
};

static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
	return container_of(dom, struct msm_priv, domain);
}

static int __enable_clocks(struct msm_iommu_dev *iommu)
{
	int ret;

	ret = clk_enable(iommu->pclk);
	if (ret)
		goto fail;

	if (iommu->clk) {
		ret = clk_enable(iommu->clk);
		if (ret)
			clk_disable(iommu->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
	if (iommu->clk)
		clk_disable(iommu->clk);
	clk_disable(iommu->pclk);
}

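/*
 * Drive the whole IOMMU to a known state at probe time: clear the
 * global configuration registers, invalidate all TLBs and zero out
 * every context bank.
 */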
static void msm_iommu_reset(void __iomem *base, int ncb)
{
	int ctx;

	SET_RPUE(base, 0);
	SET_RPUEIE(base, 0);
	SET_ESRRESTORE(base, 0);
	SET_TBE(base, 0);
	SET_CR(base, 0);
	SET_SPDMBE(base, 0);
	SET_TESTBUSCR(base, 0);
	SET_TLBRSW(base, 0);
	SET_GLOBAL_TLBIALL(base, 0);
	SET_RPU_ACR(base, 0);
	SET_TLBLKCRWE(base, 1);

	for (ctx = 0; ctx < ncb; ctx++) {
		SET_BPRCOSH(base, ctx, 0);
		SET_BPRCISH(base, ctx, 0);
		SET_BPRCNSH(base, ctx, 0);
		SET_BPSHCFG(base, ctx, 0);
		SET_BPMTCFG(base, ctx, 0);
		SET_ACTLR(base, ctx, 0);
		SET_SCTLR(base, ctx, 0);
		SET_FSRRESTORE(base, ctx, 0);
		SET_TTBR0(base, ctx, 0);
		SET_TTBR1(base, ctx, 0);
		SET_TTBCR(base, ctx, 0);
		SET_BFBCR(base, ctx, 0);
		SET_PAR(base, ctx, 0);
		SET_FAR(base, ctx, 0);
		SET_CTX_TLBIALL(base, ctx, 0);
		SET_TLBFLPTER(base, ctx, 0);
		SET_TLBSLPTER(base, ctx, 0);
		SET_TLBLKCR(base, ctx, 0);
		SET_CONTEXTIDR(base, ctx, 0);
	}
}

static void __flush_iotlb(void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list)
			SET_CTX_TLBIALL(iommu->base, master->num, 0);

		__disable_clocks(iommu);
	}
fail:
	return;
}

static void __flush_iotlb_range(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	struct msm_priv *priv = cookie;
	struct msm_iommu_dev *iommu = NULL;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;
	int temp_size;

	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			temp_size = size;
			do {
				iova &= TLBIVA_VA;
				iova |= GET_CONTEXTIDR_ASID(iommu->base,
							    master->num);
				SET_TLBIVA(iommu->base, master->num, iova);
				iova += granule;
			} while (temp_size -= granule);
		}

		__disable_clocks(iommu);
	}

fail:
	return;
}

static void __flush_iotlb_walk(unsigned long iova, size_t size,
			       size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, size, granule, false, cookie);
}

static void __flush_iotlb_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	__flush_iotlb_range(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops msm_iommu_flush_ops = {
	.tlb_flush_all = __flush_iotlb,
	.tlb_flush_walk = __flush_iotlb_walk,
	.tlb_add_page = __flush_iotlb_page,
};

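/*
 * Context banks are handed out from a bitmap: find the first clear bit
 * and claim it atomically, retrying if another caller raced us.
 */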
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void config_mids(struct msm_iommu_dev *iommu,
			struct msm_iommu_ctx_dev *master)
{
	int mid, ctx, i;

	for (i = 0; i < master->num_mids; i++) {
		mid = master->mids[i];
		ctx = master->num;

		SET_M2VCBR_N(iommu->base, mid, 0);
		SET_CBACR_N(iommu->base, ctx, 0);

		/* Set VMID = 0 */
		SET_VMID(iommu->base, mid, 0);

		/* Set the context number for that MID to this context */
		SET_CBNDX(iommu->base, mid, ctx);

		/* Set MID associated with this context bank to 0 */
		SET_CBVMID(iommu->base, ctx, 0);

		/* Set the ASID for TLB tagging for this context */
		SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

		/* Set security bit override to be Non-secure */
		SET_NSCFG(iommu->base, mid, 3);
	}
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
}

static void __program_context(void __iomem *base, int ctx,
			      struct msm_priv *priv)
{
	__reset_context(base, ctx);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);
	SET_AFE(base, ctx, 1);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
	SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
	SET_TTBR1(base, ctx, 0);

	/* Set prrr and nmrr */
	SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
	SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static struct iommu_domain *msm_iommu_domain_alloc_paging(struct device *dev)
{
	struct msm_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);

	priv->domain.geometry.aperture_start = 0;
	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	priv->domain.geometry.force_aperture = true;

	return &priv->domain;

fail_nomem:
	kfree(priv);
	return NULL;
}

static void msm_iommu_domain_free(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = to_msm_priv(domain);
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

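/*
 * Hand the actual page-table management to the generic io-pgtable
 * code: ARM_V7S gives the same short-descriptor format the hardware
 * walker expects, and fills cfg.arm_v7s_cfg with the TTBR, TCR and
 * PRRR/NMRR values that __program_context() writes into the context
 * bank registers.
 */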
static int msm_iommu_domain_config(struct msm_priv *priv)
{
	spin_lock_init(&priv->pgtlock);

	priv->cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &msm_iommu_flush_ops,
		.iommu_dev = priv->dev,
	};

	priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
	if (!priv->iop) {
		dev_err(priv->dev, "Failed to allocate pgtable\n");
		return -EINVAL;
	}

	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;

	return 0;
}

/* Must be called under msm_iommu_lock */
static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu, *ret = NULL;
	struct msm_iommu_ctx_dev *master;

	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = iommu;
			break;
		}
	}

	return ret;
}

static struct iommu_device *msm_iommu_probe_device(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	iommu = find_iommu_for_dev(dev);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	if (!iommu)
		return ERR_PTR(-ENODEV);

	return &iommu->iommu;
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_ctx_dev *master;

	priv->dev = dev;
	msm_iommu_domain_config(priv);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
		master = list_first_entry(&iommu->ctx_list,
					  struct msm_iommu_ctx_dev,
					  list);
		if (master->of_node == dev->of_node) {
			ret = __enable_clocks(iommu);
			if (ret)
				goto fail;

			list_for_each_entry(master, &iommu->ctx_list, list) {
				if (master->num) {
					dev_err(dev, "domain already attached");
					ret = -EEXIST;
					goto fail;
				}
				master->num =
					msm_iommu_alloc_ctx(iommu->context_map,
							    0, iommu->ncb);
				if (IS_ERR_VALUE(master->num)) {
					ret = -ENODEV;
					goto fail;
				}
				config_mids(iommu, master);
				__program_context(iommu->base, master->num,
						  priv);
			}
			__disable_clocks(iommu);
			list_add(&iommu->dom_node, &priv->list_attached);
		}
	}

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

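/*
 * There is no separate detach path with this API: moving a device to
 * the identity domain tears the old paging domain down, freeing its
 * io-pgtable, releasing each context bank it had claimed and resetting
 * the hardware context.
 */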
static int msm_iommu_identity_attach(struct iommu_domain *identity_domain,
				     struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct msm_priv *priv;
	unsigned long flags;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	int ret = 0;

	if (domain == identity_domain || !domain)
		return 0;

	priv = to_msm_priv(domain);
	free_io_pgtable_ops(priv->iop);

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iommu, &priv->list_attached, dom_node) {
		ret = __enable_clocks(iommu);
		if (ret)
			goto fail;

		list_for_each_entry(master, &iommu->ctx_list, list) {
			msm_iommu_free_ctx(iommu->context_map, master->num);
			__reset_context(iommu->base, master->num);
		}
		__disable_clocks(iommu);
	}
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static struct iommu_domain_ops msm_iommu_identity_ops = {
	.attach_dev = msm_iommu_identity_attach,
};

static struct iommu_domain msm_iommu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &msm_iommu_identity_ops,
};

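/*
 * map/unmap only touch the software page tables, under pgtlock; TLB
 * maintenance happens via the iotlb_sync_map callback after mapping
 * and via the io-pgtable flush_ops hooks during unmap, so no IOMMU
 * registers are written here directly.
 */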
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t pa, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot,
				   GFP_ATOMIC, mapped);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static int msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
			      size_t size)
{
	struct msm_priv *priv = to_msm_priv(domain);

	__flush_iotlb_range(iova, size, SZ_4K, false, priv);
	return 0;
}

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *gather)
{
	struct msm_priv *priv = to_msm_priv(domain);
	unsigned long flags;
	size_t ret;

	spin_lock_irqsave(&priv->pgtlock, flags);
	ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather);
	spin_unlock_irqrestore(&priv->pgtlock, flags);

	return ret;
}

static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t va)
{
	struct msm_priv *priv;
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;
	unsigned int par;
	unsigned long flags;
	phys_addr_t ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = to_msm_priv(domain);
	iommu = list_first_entry(&priv->list_attached,
				 struct msm_iommu_dev, dom_node);

	if (list_empty(&iommu->ctx_list))
		goto fail;

	master = list_first_entry(&iommu->ctx_list,
				  struct msm_iommu_ctx_dev, list);
	if (!master)
		goto fail;

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(iommu->base, master->num, 0);
	SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

	par = GET_PAR(iommu->base, master->num);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(iommu->base, master->num))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

	if (GET_FAULT(iommu->base, master->num))
		ret = 0;

	__disable_clocks(iommu);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);
	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
}

static int insert_iommu_master(struct device *dev,
			       struct msm_iommu_dev **iommu,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
	int sid;

	if (list_empty(&(*iommu)->ctx_list)) {
		master = kzalloc(sizeof(*master), GFP_ATOMIC);
		if (!master) {
			dev_err(dev, "Failed to allocate iommu_master\n");
			return -ENOMEM;
		}
		master->of_node = dev->of_node;
		list_add(&master->list, &(*iommu)->ctx_list);
		dev_iommu_priv_set(dev, master);
	}

	for (sid = 0; sid < master->num_mids; sid++)
		if (master->mids[sid] == spec->args[0]) {
			dev_warn(dev, "Stream ID 0x%x repeated; ignoring\n",
				 sid);
			return 0;
		}

	master->mids[master->num_mids++] = spec->args[0];
	return 0;
}

static int qcom_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *spec)
{
	struct msm_iommu_dev *iommu = NULL, *iter;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
		if (iter->dev->of_node == spec->np) {
			iommu = iter;
			break;
		}
	}

	if (!iommu) {
		ret = -ENODEV;
		goto fail;
	}

	ret = insert_iommu_master(dev, &iommu, spec);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);

	return ret;
}

irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_dev *iommu = dev_id;
	unsigned int fsr;
	int i, ret;

	spin_lock(&msm_iommu_lock);

	if (!iommu) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %08x\n", (unsigned int)iommu->base);

	ret = __enable_clocks(iommu);
	if (ret)
		goto fail;

	for (i = 0; i < iommu->ncb; i++) {
		fsr = GET_FSR(iommu->base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(iommu->base, i);
			SET_FSR(iommu->base, i, 0x4000000F);
		}
	}
	__disable_clocks(iommu);
fail:
	spin_unlock(&msm_iommu_lock);
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
	.identity_domain = &msm_iommu_identity_domain,
	.domain_alloc_paging = msm_iommu_domain_alloc_paging,
	.probe_device = msm_iommu_probe_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
	.of_xlate = qcom_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = msm_iommu_attach_dev,
		.map_pages = msm_iommu_map,
		.unmap_pages = msm_iommu_unmap,
		/*
		 * Nothing is needed here, the barrier to guarantee
		 * completion of the tlb sync operation is implicitly
		 * taken care when the iommu client does a writel before
		 * kick starting the other master.
		 */
		.iotlb_sync = NULL,
		.iotlb_sync_map = msm_iommu_sync_map,
		.iova_to_phys = msm_iommu_iova_to_phys,
		.free = msm_iommu_domain_free,
	}
};
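/*
 * Probe sequence: take the smmu_pclk/iommu_clk clocks, map the MMIO
 * region, reset the hardware, sanity-check that a V2P translation on
 * context 0 produces a non-zero PAR, hook up the fault interrupt, and
 * finally register the device with the IOMMU core.
 */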
static int msm_iommu_probe(struct platform_device *pdev)
{
	struct resource *r;
	resource_size_t ioaddr;
	struct msm_iommu_dev *iommu;
	int ret, par, val;

	iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENODEV;

	iommu->dev = &pdev->dev;
	INIT_LIST_HEAD(&iommu->ctx_list);

	iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
	if (IS_ERR(iommu->pclk))
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
				     "could not get smmu_pclk\n");

	ret = clk_prepare(iommu->pclk);
	if (ret)
		return dev_err_probe(iommu->dev, ret,
				     "could not prepare smmu_pclk\n");

	iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
	if (IS_ERR(iommu->clk)) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
				     "could not get iommu_clk\n");
	}

	ret = clk_prepare(iommu->clk);
	if (ret) {
		clk_unprepare(iommu->pclk);
		return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iommu->base = devm_ioremap_resource(iommu->dev, r);
	if (IS_ERR(iommu->base)) {
		ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
		goto fail;
	}
	ioaddr = r->start;

	iommu->irq = platform_get_irq(pdev, 0);
	if (iommu->irq < 0) {
		ret = -ENODEV;
		goto fail;
	}

	ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
	if (ret) {
		dev_err(iommu->dev, "could not get ncb\n");
		goto fail;
	}
	iommu->ncb = val;

	msm_iommu_reset(iommu->base, iommu->ncb);
	SET_M(iommu->base, 0, 1);
	SET_PAR(iommu->base, 0, 0);
	SET_V2PCFG(iommu->base, 0, 1);
	SET_V2PPR(iommu->base, 0, 0);
	par = GET_PAR(iommu->base, 0);
	SET_V2PCFG(iommu->base, 0, 0);
	SET_M(iommu->base, 0, 0);

	if (!par) {
		pr_err("Invalid PAR value detected\n");
		ret = -ENODEV;
		goto fail;
	}

	ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
					msm_iommu_fault_handler,
					IRQF_ONESHOT | IRQF_SHARED,
					"msm_iommu_secure_irpt_handler",
					iommu);
	if (ret) {
		pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
		goto fail;
	}

	list_add(&iommu->dev_node, &qcom_iommu_devices);

	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (ret) {
		pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
		goto fail;
	}

	ret = iommu_device_register(&iommu->iommu, &msm_iommu_ops, &pdev->dev);
	if (ret) {
		pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
		goto fail;
	}

	pr_info("device mapped at %p, irq %d with %d ctx banks\n",
		iommu->base, iommu->irq, iommu->ncb);

	return ret;
fail:
	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
	return ret;
}

static const struct of_device_id msm_iommu_dt_match[] = {
	{ .compatible = "qcom,apq8064-iommu" },
	{}
};

static void msm_iommu_remove(struct platform_device *pdev)
{
	struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

	clk_unprepare(iommu->clk);
	clk_unprepare(iommu->pclk);
}

static struct platform_driver msm_iommu_driver = {
	.driver = {
		.name = "msm_iommu",
		.of_match_table = msm_iommu_dt_match,
	},
	.probe = msm_iommu_probe,
	.remove_new = msm_iommu_remove,
};
builtin_platform_driver(msm_iommu_driver);