// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

static const struct iommu_ops omap_iommu_ops;

#define to_iommu(dev)	((struct omap_iommu *)dev_get_drvdata(dev))

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom: generic iommu domain handle
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev: client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for the OMAP3 ISP driver.
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			p[i] = iommu_read_reg(obj, i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev: client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for the OMAP3 ISP driver.
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			iommu_write_reg(obj, p[i], i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
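
/*
 * A client is expected to pair the two calls above around a context-loss
 * event. An illustrative sketch, modelled loosely on the OMAP3 ISP usage:
 *
 *	omap_iommu_save_ctx(isp->dev);
 *	...power domain may hit off-mode, losing the MMU registers...
 *	omap_iommu_restore_ctx(isp->dev);
 */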

static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
{
	u32 val, mask;

	if (!obj->syscfg)
		return;

	mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
	val = enable ? mask : 0;
	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
}
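
/*
 * Each DRA7 DSP subsystem exposes two MMUs behind a single syscon
 * register; obj->id (0 or 1, validated at probe time) selects which
 * enable bit the mask above toggles.
 */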

static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}

static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;

	if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	dra7_cfg_dspsys_mmu(obj, true);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}

static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	dra7_cfg_dspsys_mmu(obj, false);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}

static int iommu_enable(struct omap_iommu *obj)
{
	int ret;

	ret = pm_runtime_get_sync(obj->dev);
	if (ret < 0)
		pm_runtime_put_noidle(obj->dev);

	return ret < 0 ? ret : 0;
}

static void iommu_disable(struct omap_iommu *obj)
{
	pm_runtime_put_sync(obj->dev);
}

/*
 * TLB operations
 */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
		   (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}
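
/*
 * get_iopte_attr() packs the MMU_RAM-style attribute fields (mixed,
 * endianness, element size) into descriptor form: small/large (L2) pages
 * take the attributes in the low bits, while section/supersection (L1)
 * descriptors expect the same field shifted up by 6 bits, hence the
 * conditional shift above.
 */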

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 status, fault_addr;

	status = iommu_read_reg(obj, MMU_IRQSTATUS);
	status &= MMU_IRQ_MASK;
	if (!status) {
		*da = 0;
		return 0;
	}

	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
	*da = fault_addr;

	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return status;
}

void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
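
/*
 * PREFETCH_IOTLB is not defined anywhere in-tree, so load_iotlb_entry()
 * normally compiles to the empty stub and prefetch_iotlb_entry() is a
 * no-op: translations are resolved on demand by the hardware
 * table-walker rather than preloaded into the TLB.
 */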

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj: target iommu
 * @da: iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj: target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

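	/*
	 * Drop the lock base to 0 first: MMU_GFLUSH only clears
	 * unprotected entries, so unprotecting everything lets the
	 * global flush wipe the whole TLB.
	 */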
450 l.vict = 0;
451 iotlb_lock_set(obj, &l);
452
453 iommu_write_reg(obj, 1, MMU_GFLUSH);
454
455 pm_runtime_put_sync(obj->dev);
456}
457
458/*
459 * H/W pagetable operations
460 */
461static void flush_iopte_range(struct device *dev, dma_addr_t dma,
462 unsigned long offset, int num_entries)
463{
464 size_t size = num_entries * sizeof(u32);
465
466 dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
467}
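
/*
 * The pagetables are written by the CPU but read by the IOMMU's hardware
 * table-walker, so every CPU-side update must be pushed out with a DMA
 * sync before the walker can be expected to observe it.
 */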

static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
{
	dma_addr_t pt_dma;

	/* Note: freed ioptes must be clean, ready for re-use */
	if (iopte) {
		if (dma_valid) {
			pt_dma = virt_to_phys(iopte);
			dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		}

		kmem_cache_free(iopte_cachep, iopte);
	}
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
			dma_addr_t *pt_dma, u32 da)
{
	u32 *iopte;
	unsigned long offset = iopgd_index(da) * sizeof(da);

	/* an L2 table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(obj->dev, *pt_dma)) {
			dev_err(obj->dev, "DMA map error for L2 table\n");
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * we rely on dma address and the physical address to be
		 * the same for mapping the L2 table
		 */
		if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
			dev_err(obj->dev, "DMA translation error for L2 table\n");
			dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;

		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(obj, iopte, false);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);
	*pt_dma = iopgd_page_paddr(iopgd);
	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(obj->dev, pt_dma, offset, 1);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(obj->dev, pt_dma, offset, 16);
	return 0;
}

static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 * @ppgd: iommu pgd entry pointer to be returned
 * @ppte: iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
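
/*
 * Illustrative walk (hypothetical addresses): for a 4KB small-page
 * mapping of da 0x40001000, *ppgd points at the L1 descriptor covering
 * the 1MB region at 0x40000000 and *ppte at the L2 entry for the 4KB
 * page itself; for section/supersection mappings *ppte stays NULL.
 */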

static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;
	dma_addr_t pt_dma;
	unsigned long pd_offset = iopgd_index(da) * sizeof(da);
	unsigned long pt_offset = iopte_index(da) * sizeof(da);

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		pt_dma = iopgd_page_paddr(iopgd);
		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(obj, iopte, true);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	unsigned long offset;
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);
		offset = iopgd_index(da) * sizeof(da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(obj, iopte_offset(iopgd, 0), true);

		*iopgd = 0;
		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_write_reg(obj, 0, MMU_IRQENABLE);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @obj: target omap iommu device
 * @iopgd: page table
 **/
static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
	int err;

	spin_lock(&obj->iommu_lock);

	obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(obj->dev, obj->pd_dma)) {
		dev_err(obj->dev, "DMA map error for L1 table\n");
		err = -ENOMEM;
		goto out_err;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto out_err;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);

	return 0;

out_err:
	spin_unlock(&obj->iommu_lock);

	return err;
}

/**
 * omap_iommu_detach - release iommu device
 * @obj: target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
			 DMA_TO_DEVICE);
	obj->pd_dma = 0;
	obj->iopgd = NULL;
	iommu_disable(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock lock;
	struct cr_regs cr;
	struct cr_regs *tmp;
	int i;

	/* check if there are any locked tlbs to save */
	iotlb_lock_get(obj, &lock);
	obj->num_cr_ctx = lock.base;
	if (!obj->num_cr_ctx)
		return;

	tmp = obj->cr_ctx;
	for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
		*tmp++ = cr;
}

static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock l;
	struct cr_regs *tmp;
	int i;

	/* no locked tlbs to restore */
	if (!obj->num_cr_ctx)
		return;

	l.base = 0;
	tmp = obj->cr_ctx;
	for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_load_cr(obj, tmp);
	}
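	/*
	 * Raise the lock base above the restored entries so they are
	 * protected (preserved) again, exactly as before the suspend.
	 */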
	l.base = obj->num_cr_ctx;
	l.vict = i;
	iotlb_lock_set(obj, &l);
}

/**
 * omap_iommu_domain_deactivate - deactivate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to suspend
 * the IOMMUs they control at runtime, after they have been idled
 * and have suspended all activity. System Suspend will leverage
 * the PM driver late callbacks.
 **/
int omap_iommu_domain_deactivate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	iommu = omap_domain->iommus;
	iommu += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
		oiommu = iommu->iommu_dev;
		pm_runtime_put_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);

/**
 * omap_iommu_domain_activate - activate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to resume the
 * IOMMUs they control at runtime, before they can resume operations.
 * System Resume will leverage the PM driver late callbacks.
 **/
int omap_iommu_domain_activate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		pm_runtime_get_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);

/**
 * omap_iommu_runtime_suspend - disable an iommu device
 * @dev: iommu device
 *
 * This function performs all that is necessary to disable an
 * IOMMU device, either during final detachment from a client
 * device, or during system/runtime suspend of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. This function also saves the context of any
 * locked TLBs if suspending.
 **/
static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret;

	/* save the TLBs only during suspend, and not for power down */
	if (obj->domain && obj->iopgd)
		omap_iommu_save_tlb_entries(obj);

	omap2_iommu_disable(obj);

	if (pdata && pdata->device_idle)
		pdata->device_idle(pdev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
				 ret);
		}
	}

	return 0;
}

/**
 * omap_iommu_runtime_resume - enable an iommu device
 * @dev: iommu device
 *
 * This function performs all that is necessary to enable an
 * IOMMU device, either during initial attachment to a client
 * device, or during system/runtime resume of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. The function also restores any locked TLBs if
 * resuming after a suspend.
 **/
static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret = 0;

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
				 ret);
		}
	}

	if (pdata && pdata->deassert_reset) {
		ret = pdata->deassert_reset(pdev, pdata->reset_name);
		if (ret) {
			dev_err(dev, "deassert_reset failed: %d\n", ret);
			return ret;
		}
	}

	if (pdata && pdata->device_enable)
		pdata->device_enable(pdev);

	/* restore the TLBs only during resume, and not for power up */
	if (obj->domain)
		omap_iommu_restore_tlb_entries(obj);

	ret = omap2_iommu_enable(obj);

	return ret;
}

/**
 * omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation
 * @dev: iommu device
 *
 * This function performs the necessary checks to determine if the IOMMU
 * device needs suspending or not. The function checks if the runtime_pm
 * status of the device is suspended, and returns 1 in that case. This
 * causes the PM core to skip invoking any of the Sleep PM callbacks
 * (suspend, suspend_late, resume, resume_early, etc.).
 */
static int omap_iommu_prepare(struct device *dev)
{
	if (pm_runtime_status_suspended(dev))
		return 1;
	return 0;
}

static bool omap_iommu_can_register(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return true;

	/*
	 * restrict IOMMU core registration only for processor-port MDMA MMUs
	 * on DRA7 DSPs
	 */
	if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
	    (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
		return true;

	return false;
}

static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
					      struct omap_iommu *obj)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return 0;

	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
		return -EINVAL;
	}

	obj->syscfg =
		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
	if (IS_ERR(obj->syscfg)) {
		/* can fail with -EPROBE_DEFER */
		ret = PTR_ERR(obj->syscfg);
		return ret;
	}

	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
				       &obj->id)) {
		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
		return -EINVAL;
	}

	if (obj->id != 0 && obj->id != 1) {
		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct device_node *of = pdev->dev.of_node;

	if (!of) {
		pr_err("%s: only DT-based devices are supported\n", __func__);
		return -ENODEV;
	}

	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/*
	 * self-manage the ordering dependencies between omap_device_enable/idle
	 * and omap_device_assert/deassert_hardreset API
	 */
	if (pdev->dev.pm_domain) {
		dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
		pdev->dev.pm_domain = NULL;
	}

	obj->name = dev_name(&pdev->dev);
	obj->nr_tlb_entries = 32;
	err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
	if (err && err != -EINVAL)
		return err;
	if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
		return -EINVAL;
	if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
		obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->cr_ctx = devm_kzalloc(&pdev->dev,
				   sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
				   GFP_KERNEL);
	if (!obj->cr_ctx)
		return -ENOMEM;

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
	if (err)
		return err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	if (omap_iommu_can_register(pdev)) {
		obj->group = iommu_group_alloc();
		if (IS_ERR(obj->group))
			return PTR_ERR(obj->group);

		err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
					     obj->name);
		if (err)
			goto out_group;

		iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
		iommu_device_set_fwnode(&obj->iommu, &of->fwnode);

		err = iommu_device_register(&obj->iommu);
		if (err)
			goto out_sysfs;
	}

	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);

	/* Re-probe bus to probe device attached to this IOMMU */
	bus_iommu_probe(&platform_bus_type);

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&obj->iommu);
out_group:
	iommu_group_put(obj->group);
	return err;
}

static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	if (obj->group) {
		iommu_group_put(obj->group);
		obj->group = NULL;

		iommu_device_sysfs_remove(&obj->iommu);
		iommu_device_unregister(&obj->iommu);
	}

	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}

static const struct dev_pm_ops omap_iommu_pm_ops = {
	.prepare = omap_iommu_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
			   omap_iommu_runtime_resume, NULL)
};

static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu" },
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.pm	= &omap_iommu_pm_ops,
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};

static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	memset(e, 0, sizeof(*e));

	e->da		= da;
	e->pa		= pa;
	e->valid	= MMU_CAM_V;
	e->pgsz		= pgsz;
	e->endian	= MMU_RAM_ENDIAN_LITTLE;
	e->elsz		= MMU_RAM_ELSZ_8;
	e->mixed	= 0;

	return iopgsz_to_bytes(e->pgsz);
}
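
/*
 * Every entry produced above is little-endian with an 8-bit element size
 * and mixed-attribute off; only the page size (and hence the descriptor
 * level) varies per mapping request.
 */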

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret = -EINVAL;
	int i;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		ret = omap_iopgtable_store_entry(oiommu, &e);
		if (ret) {
			dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
				ret);
			break;
		}
	}

	if (ret) {
		while (i--) {
			iommu--;
			oiommu = iommu->iommu_dev;
			iopgtable_clear_entry(oiommu, da);
		}
	}

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	bool error = false;
	size_t bytes = 0;
	int i;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		bytes = iopgtable_clear_entry(oiommu, da);
		if (!bytes)
			error = true;
	}

	/*
	 * simplify return - we are only checking if any of the iommus
	 * reported an error, but not if all of them are unmapping the
	 * same number of entries. This should not occur due to the
	 * mirror programming.
	 */
	return error ? 0 : bytes;
}

static int omap_iommu_count(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	int count = 0;

	while (arch_data->iommu_dev) {
		count++;
		arch_data++;
	}

	return count;
}

/* caller should call cleanup if this function fails */
static int omap_iommu_attach_init(struct device *dev,
				  struct omap_iommu_domain *odomain)
{
	struct omap_iommu_device *iommu;
	int i;

	odomain->num_iommus = omap_iommu_count(dev);
	if (!odomain->num_iommus)
		return -EINVAL;

	odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
				  GFP_ATOMIC);
	if (!odomain->iommus)
		return -ENOMEM;

	iommu = odomain->iommus;
	for (i = 0; i < odomain->num_iommus; i++, iommu++) {
		iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
		if (!iommu->pgtable)
			return -ENOMEM;

		/*
		 * should never fail: kzalloc() of a power-of-two size is
		 * naturally aligned to that size, which satisfies the
		 * hardware's 16KB pagetable alignment requirement; keep
		 * the check around to keep the hardware happy
		 */
		if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
					IOPGD_TABLE_SIZE)))
			return -EINVAL;
	}

	return 0;
}

static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
{
	int i;
	struct omap_iommu_device *iommu = odomain->iommus;

	for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
		kfree(iommu->pgtable);

	kfree(odomain->iommus);
	odomain->num_iommus = 0;
	odomain->iommus = NULL;
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int ret = 0;
	int i;

	if (!arch_data || !arch_data->iommu_dev) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -EINVAL;
	}

	spin_lock(&omap_domain->lock);

	/* only a single client device can be attached to a domain */
	if (omap_domain->dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	ret = omap_iommu_attach_init(dev, omap_domain);
	if (ret) {
		dev_err(dev, "failed to allocate required iommu data %d\n",
			ret);
		goto init_fail;
	}

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
		/* configure and enable the omap iommu */
		oiommu = arch_data->iommu_dev;
		ret = omap_iommu_attach(oiommu, iommu->pgtable);
		if (ret) {
			dev_err(dev, "can't get omap iommu: %d\n", ret);
			goto attach_fail;
		}

		oiommu->domain = domain;
		iommu->iommu_dev = oiommu;
	}

	omap_domain->dev = dev;

	goto out;

attach_fail:
	while (i--) {
		iommu--;
		arch_data--;
		oiommu = iommu->iommu_dev;
		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}
init_fail:
	omap_iommu_detach_fini(omap_domain);
out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev) {
		dev_err(dev, "domain has no attached device\n");
		return;
	}

	/* only a single device is supported per domain for now */
	if (omap_domain->dev != dev) {
		dev_err(dev, "invalid attached device\n");
		return;
	}

	/*
	 * cleanup in the reverse order of attachment - this addresses
	 * any h/w dependencies between multiple instances, if any
	 */
	iommu += (omap_domain->num_iommus - 1);
	arch_data += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
		oiommu = iommu->iommu_dev;
		iopgtable_clear_entry_all(oiommu);

		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}

	omap_iommu_detach_fini(omap_domain);

	omap_domain->dev = NULL;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}

static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
	struct omap_iommu_domain *omap_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain)
		return NULL;

	spin_lock_init(&omap_domain->lock);

	omap_domain->domain.geometry.aperture_start = 0;
	omap_domain->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	omap_domain->domain.geometry.force_aperture = true;

	return &omap_domain->domain;
}

static void omap_iommu_domain_free(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * Check whether an iommu device is still attached; currently,
	 * only one device can be attached at a time.
	 */
	if (omap_domain->dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu = iommu->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	/*
	 * all the iommus within the domain will have identical programming,
	 * so perform the lookup using just the first iommu
	 */
	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}
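
/*
 * Illustrative example (hypothetical addresses, assuming the usual
 * base-plus-offset behaviour of omap_iommu_translate()): a da inside a
 * 1MB section mapped at pa 0x9c000000 translates to
 * 0x9c000000 | (da & ~IOSECTION_MASK); the other cases differ only in
 * the mask width used for the in-page offset.
 */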

static struct iommu_device *omap_iommu_probe_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data, *tmp;
	struct platform_device *pdev;
	struct omap_iommu *oiommu;
	struct device_node *np;
	int num_iommus, i;

	/*
	 * Allocate the per-device iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return ERR_PTR(-ENODEV);

	/*
	 * retrieve the count of IOMMU nodes using phandle size as element size
	 * since #iommu-cells = 0 for OMAP
	 */
	num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
						     sizeof(phandle));
	if (num_iommus < 0)
		return ERR_PTR(-ENODEV);

	arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data)
		return ERR_PTR(-ENOMEM);

	for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
		np = of_parse_phandle(dev->of_node, "iommus", i);
		if (!np) {
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}

		pdev = of_find_device_by_node(np);
		if (!pdev) {
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-ENODEV);
		}

		oiommu = platform_get_drvdata(pdev);
		if (!oiommu) {
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}

		tmp->iommu_dev = oiommu;
		tmp->dev = &pdev->dev;

		of_node_put(np);
	}

	dev_iommu_priv_set(dev, arch_data);

	/*
	 * use the first IOMMU alone for the sysfs device linking.
	 * TODO: Evaluate if a single iommu_group needs to be
	 * maintained for both IOMMUs
	 */
	oiommu = arch_data->iommu_dev;

	return &oiommu->iommu;
}

static void omap_iommu_release_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);

	if (!dev->of_node || !arch_data)
		return;

	dev_iommu_priv_set(dev, NULL);
	kfree(arch_data);
}

static struct iommu_group *omap_iommu_device_group(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct iommu_group *group = ERR_PTR(-EINVAL);

	if (!arch_data)
		return ERR_PTR(-ENODEV);

	if (arch_data->iommu_dev)
		group = iommu_group_ref_get(arch_data->iommu_dev->group);

	return group;
}

static const struct iommu_ops omap_iommu_ops = {
	.domain_alloc	= omap_iommu_domain_alloc,
	.domain_free	= omap_iommu_domain_free,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.probe_device	= omap_iommu_probe_device,
	.release_device	= omap_iommu_release_device,
	.device_group	= omap_iommu_device_group,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const slab_flags_t flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      NULL);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	omap_iommu_debugfs_init();

	ret = platform_driver_register(&omap_iommu_driver);
	if (ret) {
		pr_err("%s: failed to register driver\n", __func__);
		goto fail_driver;
	}

	ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
	if (ret)
		goto fail_bus;

	return 0;

fail_bus:
	platform_driver_unregister(&omap_iommu_driver);
fail_driver:
	kmem_cache_destroy(iopte_cachep);
	return ret;
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * omap iommu: tlb and pagetable primitives
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
7 *
8 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
9 * Paul Mundt and Toshihiro Kobayashi
10 */
11
12#include <linux/dma-mapping.h>
13#include <linux/err.h>
14#include <linux/slab.h>
15#include <linux/interrupt.h>
16#include <linux/ioport.h>
17#include <linux/platform_device.h>
18#include <linux/iommu.h>
19#include <linux/omap-iommu.h>
20#include <linux/mutex.h>
21#include <linux/spinlock.h>
22#include <linux/io.h>
23#include <linux/pm_runtime.h>
24#include <linux/of.h>
25#include <linux/of_irq.h>
26#include <linux/of_platform.h>
27#include <linux/regmap.h>
28#include <linux/mfd/syscon.h>
29
30#include <linux/platform_data/iommu-omap.h>
31
32#include "omap-iopgtable.h"
33#include "omap-iommu.h"
34
35static const struct iommu_ops omap_iommu_ops;
36
37#define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev))
38
39/* bitmap of the page sizes currently supported */
40#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
41
42#define MMU_LOCK_BASE_SHIFT 10
43#define MMU_LOCK_BASE_MASK (0x1f << MMU_LOCK_BASE_SHIFT)
44#define MMU_LOCK_BASE(x) \
45 ((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)
46
47#define MMU_LOCK_VICT_SHIFT 4
48#define MMU_LOCK_VICT_MASK (0x1f << MMU_LOCK_VICT_SHIFT)
49#define MMU_LOCK_VICT(x) \
50 ((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
51
52static struct platform_driver omap_iommu_driver;
53static struct kmem_cache *iopte_cachep;
54
55/**
56 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
57 * @dom: generic iommu domain handle
58 **/
59static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
60{
61 return container_of(dom, struct omap_iommu_domain, domain);
62}
63
64/**
65 * omap_iommu_save_ctx - Save registers for pm off-mode support
66 * @dev: client device
67 *
68 * This should be treated as an deprecated API. It is preserved only
69 * to maintain existing functionality for OMAP3 ISP driver.
70 **/
71void omap_iommu_save_ctx(struct device *dev)
72{
73 struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
74 struct omap_iommu *obj;
75 u32 *p;
76 int i;
77
78 if (!arch_data)
79 return;
80
81 while (arch_data->iommu_dev) {
82 obj = arch_data->iommu_dev;
83 p = obj->ctx;
84 for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
85 p[i] = iommu_read_reg(obj, i * sizeof(u32));
86 dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
87 p[i]);
88 }
89 arch_data++;
90 }
91}
92EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
93
94/**
95 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
96 * @dev: client device
97 *
98 * This should be treated as an deprecated API. It is preserved only
99 * to maintain existing functionality for OMAP3 ISP driver.
100 **/
101void omap_iommu_restore_ctx(struct device *dev)
102{
103 struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
104 struct omap_iommu *obj;
105 u32 *p;
106 int i;
107
108 if (!arch_data)
109 return;
110
111 while (arch_data->iommu_dev) {
112 obj = arch_data->iommu_dev;
113 p = obj->ctx;
114 for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
115 iommu_write_reg(obj, p[i], i * sizeof(u32));
116 dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
117 p[i]);
118 }
119 arch_data++;
120 }
121}
122EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
123
124static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
125{
126 u32 val, mask;
127
128 if (!obj->syscfg)
129 return;
130
131 mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
132 val = enable ? mask : 0;
133 regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
134}
135
136static void __iommu_set_twl(struct omap_iommu *obj, bool on)
137{
138 u32 l = iommu_read_reg(obj, MMU_CNTL);
139
140 if (on)
141 iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
142 else
143 iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
144
145 l &= ~MMU_CNTL_MASK;
146 if (on)
147 l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
148 else
149 l |= (MMU_CNTL_MMU_EN);
150
151 iommu_write_reg(obj, l, MMU_CNTL);
152}
153
154static int omap2_iommu_enable(struct omap_iommu *obj)
155{
156 u32 l, pa;
157
158 if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
159 return -EINVAL;
160
161 pa = virt_to_phys(obj->iopgd);
162 if (!IS_ALIGNED(pa, SZ_16K))
163 return -EINVAL;
164
165 l = iommu_read_reg(obj, MMU_REVISION);
166 dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
167 (l >> 4) & 0xf, l & 0xf);
168
169 iommu_write_reg(obj, pa, MMU_TTB);
170
171 dra7_cfg_dspsys_mmu(obj, true);
172
173 if (obj->has_bus_err_back)
174 iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
175
176 __iommu_set_twl(obj, true);
177
178 return 0;
179}
180
181static void omap2_iommu_disable(struct omap_iommu *obj)
182{
183 u32 l = iommu_read_reg(obj, MMU_CNTL);
184
185 l &= ~MMU_CNTL_MASK;
186 iommu_write_reg(obj, l, MMU_CNTL);
187 dra7_cfg_dspsys_mmu(obj, false);
188
189 dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
190}
191
192static int iommu_enable(struct omap_iommu *obj)
193{
194 int ret;
195
196 ret = pm_runtime_get_sync(obj->dev);
197 if (ret < 0)
198 pm_runtime_put_noidle(obj->dev);
199
200 return ret < 0 ? ret : 0;
201}
202
203static void iommu_disable(struct omap_iommu *obj)
204{
205 pm_runtime_put_sync(obj->dev);
206}
207
208/*
209 * TLB operations
210 */
211static u32 iotlb_cr_to_virt(struct cr_regs *cr)
212{
213 u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
214 u32 mask = get_cam_va_mask(cr->cam & page_size);
215
216 return cr->cam & mask;
217}
218
219static u32 get_iopte_attr(struct iotlb_entry *e)
220{
221 u32 attr;
222
223 attr = e->mixed << 5;
224 attr |= e->endian;
225 attr |= e->elsz >> 3;
226 attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
227 (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
228 return attr;
229}
230
231static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
232{
233 u32 status, fault_addr;
234
235 status = iommu_read_reg(obj, MMU_IRQSTATUS);
236 status &= MMU_IRQ_MASK;
237 if (!status) {
238 *da = 0;
239 return 0;
240 }
241
242 fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
243 *da = fault_addr;
244
245 iommu_write_reg(obj, status, MMU_IRQSTATUS);
246
247 return status;
248}
249
250void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
251{
252 u32 val;
253
254 val = iommu_read_reg(obj, MMU_LOCK);
255
256 l->base = MMU_LOCK_BASE(val);
257 l->vict = MMU_LOCK_VICT(val);
258}
259
260void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
261{
262 u32 val;
263
264 val = (l->base << MMU_LOCK_BASE_SHIFT);
265 val |= (l->vict << MMU_LOCK_VICT_SHIFT);
266
267 iommu_write_reg(obj, val, MMU_LOCK);
268}
269
270static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
271{
272 cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
273 cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
274}
275
276static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
277{
278 iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
279 iommu_write_reg(obj, cr->ram, MMU_RAM);
280
281 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
282 iommu_write_reg(obj, 1, MMU_LD_TLB);
283}
284
285/* only used in iotlb iteration for-loop */
286struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
287{
288 struct cr_regs cr;
289 struct iotlb_lock l;
290
291 iotlb_lock_get(obj, &l);
292 l.vict = n;
293 iotlb_lock_set(obj, &l);
294 iotlb_read_cr(obj, &cr);
295
296 return cr;
297}

#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
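
/*
 * When PREFETCH_IOTLB is enabled, a caller can pin a translation in the
 * TLB by marking the entry preserved before loading it: preserved entries
 * sit below the lock base and are never chosen as eviction victims. An
 * illustrative sketch only ("obj", the addresses, and the MMU_CAM_P
 * preserved-bit macro are assumed here):
 *
 *	struct iotlb_entry e;
 *
 *	iotlb_init_entry(&e, 0x40000000, 0x80000000, MMU_CAM_PGSZ_1M);
 *	e.prsvd = MMU_CAM_P;		// keep below the lock base
 *	prefetch_iotlb_entry(obj, &e);
 */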

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj: target iommu
 * @da: iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj: target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}

/*
 * H/W pagetable operations
 */
static void flush_iopte_range(struct device *dev, dma_addr_t dma,
			      unsigned long offset, int num_entries)
{
	size_t size = num_entries * sizeof(u32);

	dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
}

static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
{
	dma_addr_t pt_dma;

	/* Note: freed iopte's must be clean ready for re-use */
	if (iopte) {
		if (dma_valid) {
			pt_dma = virt_to_phys(iopte);
			dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		}

		kmem_cache_free(iopte_cachep, iopte);
	}
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
			dma_addr_t *pt_dma, u32 da)
{
	u32 *iopte;
	unsigned long offset = iopgd_index(da) * sizeof(da);

	/* an L2 table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(obj->dev, *pt_dma)) {
			dev_err(obj->dev, "DMA map error for L2 table\n");
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * we rely on the DMA address and the physical address being
		 * the same for mapping the L2 table
		 */
		if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
			dev_err(obj->dev, "DMA translation error for L2 table\n");
			dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;

		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(obj, iopte, false);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);
	*pt_dma = iopgd_page_paddr(iopgd);
	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
	return 0;
}
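
/*
 * The 16-fold replication above follows the ARM short-descriptor page
 * table format: a 16 MiB supersection occupies 16 consecutive L1 entries
 * (and, analogously, a 64 KiB large page occupies 16 consecutive L2
 * entries, see iopte_alloc_large() below), each carrying the same
 * physical base. A sketch, with assumed supersection-aligned addresses:
 *
 *	// da = 0x41000000, pa = 0x90000000, both 16 MiB aligned
 *	iopgd_offset(obj, da)[0..15] = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER
 */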

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(obj->dev, pt_dma, offset, 1);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(obj->dev, pt_dma, offset, 16);
	return 0;
}

static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
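
/*
 * A minimal sketch of storing one mapping through this path, mirroring
 * what omap_iommu_map() does further below ("obj" and the addresses are
 * assumed; iotlb_init_entry() is defined near the end of this file):
 *
 *	struct iotlb_entry e;
 *
 *	iotlb_init_entry(&e, 0x40000000, 0x80000000, MMU_CAM_PGSZ_4K);
 *	if (omap_iopgtable_store_entry(obj, &e))
 *		// handle the error; the page tables were left unchanged
 */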

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 * @ppgd: iommu pgd entry pointer to be returned
 * @ppte: iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
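
/*
 * Usage sketch: *ppte is non-NULL only when the L1 entry points at an L2
 * table; a section or supersection mapping is described by the L1 entry
 * alone. This is how omap_iommu_iova_to_phys() below distinguishes the
 * cases:
 *
 *	u32 *pgd, *pte;
 *
 *	iopgtable_lookup_entry(obj, da, &pgd, &pte);
 *	if (pte)
 *		// 4K/64K page: translate via *pte
 *	else
 *		// 1M/16M mapping (or unmapped): inspect *pgd
 */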

static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;
	dma_addr_t pt_dma;
	unsigned long pd_offset = iopgd_index(da) * sizeof(da);
	unsigned long pt_offset = iopte_index(da) * sizeof(da);

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		pt_dma = iopgd_page_paddr(iopgd);
		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);

		/*
		 * walk the table to check whether it still holds any valid
		 * entries; if not, the table itself can be freed
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(obj, iopte, true);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	unsigned long offset;
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);
		offset = iopgd_index(da) * sizeof(da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(obj, iopte_offset(iopgd, 0), true);

		*iopgd = 0;
		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_write_reg(obj, 0, MMU_IRQENABLE);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @obj: target omap iommu device
 * @iopgd: page table
 **/
static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
	int err;

	spin_lock(&obj->iommu_lock);

	obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(obj->dev, obj->pd_dma)) {
		dev_err(obj->dev, "DMA map error for L1 table\n");
		err = -ENOMEM;
		goto out_err;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto out_err;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);

	return 0;

out_err:
	spin_unlock(&obj->iommu_lock);

	return err;
}

/**
 * omap_iommu_detach - release iommu device
 * @obj: target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
			 DMA_TO_DEVICE);
	obj->pd_dma = 0;
	obj->iopgd = NULL;
	iommu_disable(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock lock;
	struct cr_regs cr;
	struct cr_regs *tmp;
	int i;

	/* check if there are any locked tlbs to save */
	iotlb_lock_get(obj, &lock);
	obj->num_cr_ctx = lock.base;
	if (!obj->num_cr_ctx)
		return;

	tmp = obj->cr_ctx;
	for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
		*tmp++ = cr;
}

static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock l;
	struct cr_regs *tmp;
	int i;

	/* no locked tlbs to restore */
	if (!obj->num_cr_ctx)
		return;

	l.base = 0;
	tmp = obj->cr_ctx;
	for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_load_cr(obj, tmp);
	}
	l.base = obj->num_cr_ctx;
	l.vict = i;
	iotlb_lock_set(obj, &l);
}

/**
 * omap_iommu_domain_deactivate - deactivate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to suspend
 * the IOMMUs they control at runtime, after they have idled and
 * suspended all activity. System suspend will leverage the PM
 * driver late callbacks.
 **/
int omap_iommu_domain_deactivate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	iommu = omap_domain->iommus;
	iommu += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
		oiommu = iommu->iommu_dev;
		pm_runtime_put_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);

/**
 * omap_iommu_domain_activate - activate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to resume the
 * IOMMUs they control at runtime, before they can resume operations.
 * System resume will leverage the PM driver late callbacks.
 **/
int omap_iommu_domain_activate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		pm_runtime_get_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);
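
/*
 * A minimal usage sketch for the activate/deactivate pair from a client
 * driver (illustrative only; "client_dev" is an assumed device already
 * attached to an OMAP IOMMU domain):
 *
 *	struct iommu_domain *domain = iommu_get_domain_for_dev(client_dev);
 *
 *	omap_iommu_domain_deactivate(domain);	// after idling the client
 *	...
 *	omap_iommu_domain_activate(domain);	// before restarting it
 */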

/**
 * omap_iommu_runtime_suspend - disable an iommu device
 * @dev: iommu device
 *
 * This function performs all that is necessary to disable an
 * IOMMU device, either during final detachment from a client
 * device, or during system/runtime suspend of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. This function also saves the context of any
 * locked TLBs if suspending.
 **/
static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret;

	/* save the TLBs only during suspend, and not for power down */
	if (obj->domain && obj->iopgd)
		omap_iommu_save_tlb_entries(obj);

	omap2_iommu_disable(obj);

	if (pdata && pdata->device_idle)
		pdata->device_idle(pdev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
				 ret);
		}
	}

	return 0;
}

/**
 * omap_iommu_runtime_resume - enable an iommu device
 * @dev: iommu device
 *
 * This function performs all that is necessary to enable an
 * IOMMU device, either during initial attachment to a client
 * device, or during system/runtime resume of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. The function also restores any locked TLBs if
 * resuming after a suspend.
 **/
static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret = 0;

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
				 ret);
		}
	}

	if (pdata && pdata->deassert_reset) {
		ret = pdata->deassert_reset(pdev, pdata->reset_name);
		if (ret) {
			dev_err(dev, "deassert_reset failed: %d\n", ret);
			return ret;
		}
	}

	if (pdata && pdata->device_enable)
		pdata->device_enable(pdev);

	/* restore the TLBs only during resume, and not for power up */
	if (obj->domain)
		omap_iommu_restore_tlb_entries(obj);

	ret = omap2_iommu_enable(obj);

	return ret;
}

/**
 * omap_iommu_prepare - prepare() dev_pm_ops implementation
 * @dev: iommu device
 *
 * This function performs the necessary checks to determine if the IOMMU
 * device needs suspending or not. The function checks if the runtime_pm
 * status of the device is suspended, and returns 1 in that case. This
 * causes the PM core to skip invoking any of the system sleep PM callbacks
 * (suspend, suspend_late, resume, resume_early, etc.).
 */
static int omap_iommu_prepare(struct device *dev)
{
	if (pm_runtime_status_suspended(dev))
		return 1;
	return 0;
}

static bool omap_iommu_can_register(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return true;

	/*
	 * restrict IOMMU core registration only for processor-port MDMA MMUs
	 * on DRA7 DSPs
	 */
	if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
	    (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
		return true;

	return false;
}

static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
					      struct omap_iommu *obj)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return 0;

	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
		return -EINVAL;
	}

	obj->syscfg =
		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
	if (IS_ERR(obj->syscfg)) {
		/* can fail with -EPROBE_DEFER */
		ret = PTR_ERR(obj->syscfg);
		return ret;
	}

	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
				       &obj->id)) {
		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
		return -EINVAL;
	}

	if (obj->id != 0 && obj->id != 1) {
		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
		return -EINVAL;
	}

	return 0;
}
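
/*
 * The property parsed above carries a syscon phandle at index 0 and the
 * MMU instance id within the DSP subsystem (0 or 1) at index 1. A sketch
 * of the corresponding DT fragment (node names assumed for illustration):
 *
 *	mmu0_dsp1: mmu@40d01000 {
 *		compatible = "ti,dra7-dsp-iommu";
 *		...
 *		ti,syscon-mmuconfig = <&dsp1_system 0x0>;
 *	};
 */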

/*
 * OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct device_node *of = pdev->dev.of_node;

	if (!of) {
		pr_err("%s: only DT-based devices are supported\n", __func__);
		return -ENODEV;
	}

	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/*
	 * self-manage the ordering dependencies between omap_device_enable/idle
	 * and omap_device_assert/deassert_hardreset API
	 */
	if (pdev->dev.pm_domain) {
		dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
		pdev->dev.pm_domain = NULL;
	}

	obj->name = dev_name(&pdev->dev);
	obj->nr_tlb_entries = 32;
	err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
	if (err && err != -EINVAL)
		return err;
	if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
		return -EINVAL;
	if (of_property_read_bool(of, "ti,iommu-bus-err-back"))
		obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->cr_ctx = devm_kzalloc(&pdev->dev,
				   sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
				   GFP_KERNEL);
	if (!obj->cr_ctx)
		return -ENOMEM;

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
	if (err)
		return err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	if (omap_iommu_can_register(pdev)) {
		err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
					     obj->name);
		if (err)
			return err;

		err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev);
		if (err)
			goto out_sysfs;
		obj->has_iommu_driver = true;
	}

	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);

	/* Re-probe bus to probe devices attached to this IOMMU */
	bus_iommu_probe(&platform_bus_type);

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&obj->iommu);
	return err;
}

static void omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	if (obj->has_iommu_driver) {
		iommu_device_sysfs_remove(&obj->iommu);
		iommu_device_unregister(&obj->iommu);
	}

	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
}

static const struct dev_pm_ops omap_iommu_pm_ops = {
	.prepare = omap_iommu_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
			   omap_iommu_runtime_resume, NULL)
};

static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu" },
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};

static struct platform_driver omap_iommu_driver = {
	.probe = omap_iommu_probe,
	.remove_new = omap_iommu_remove,
	.driver = {
		.name = "omap-iommu",
		.pm = &omap_iommu_pm_ops,
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};

static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	memset(e, 0, sizeof(*e));

	e->da = da;
	e->pa = pa;
	e->valid = MMU_CAM_V;
	e->pgsz = pgsz;
	e->endian = MMU_RAM_ENDIAN_LITTLE;
	e->elsz = MMU_RAM_ELSZ_8;
	e->mixed = 0;

	return iopgsz_to_bytes(e->pgsz);
}

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, size_t count,
			  int prot, gfp_t gfp, size_t *mapped)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret = -EINVAL;
	int i;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		ret = omap_iopgtable_store_entry(oiommu, &e);
		if (ret) {
			dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
				ret);
			break;
		}
	}

	if (ret) {
		while (i--) {
			iommu--;
			oiommu = iommu->iommu_dev;
			iopgtable_clear_entry(oiommu, da);
		}
	} else {
		*mapped = bytes;
	}

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size, size_t count,
			       struct iommu_iotlb_gather *gather)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	bool error = false;
	size_t bytes = 0;
	int i;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		bytes = iopgtable_clear_entry(oiommu, da);
		if (!bytes)
			error = true;
	}

	/*
	 * simplified return - we only check whether any of the iommus
	 * reported an error, not whether they all unmapped the same
	 * number of entries. A mismatch should not occur, given the
	 * mirrored programming of all iommus in the domain.
	 */
	return error ? 0 : bytes;
}

static int omap_iommu_count(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	int count = 0;

	while (arch_data->iommu_dev) {
		count++;
		arch_data++;
	}

	return count;
}

/* caller should call cleanup if this function fails */
static int omap_iommu_attach_init(struct device *dev,
				  struct omap_iommu_domain *odomain)
{
	struct omap_iommu_device *iommu;
	int i;

	odomain->num_iommus = omap_iommu_count(dev);
	if (!odomain->num_iommus)
		return -ENODEV;

	odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
				  GFP_ATOMIC);
	if (!odomain->iommus)
		return -ENOMEM;

	iommu = odomain->iommus;
	for (i = 0; i < odomain->num_iommus; i++, iommu++) {
		iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
		if (!iommu->pgtable)
			return -ENOMEM;

		/*
		 * should never fail, but please keep this around to ensure
		 * we keep the hardware happy
		 */
		if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
					IOPGD_TABLE_SIZE)))
			return -EINVAL;
	}

	return 0;
}

static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
{
	int i;
	struct omap_iommu_device *iommu = odomain->iommus;

	for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
		kfree(iommu->pgtable);

	kfree(odomain->iommus);
	odomain->num_iommus = 0;
	odomain->iommus = NULL;
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int ret = 0;
	int i;

	if (!arch_data || !arch_data->iommu_dev) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -ENODEV;
	}

	spin_lock(&omap_domain->lock);

	/* only a single client device can be attached to a domain */
	if (omap_domain->dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EINVAL;
		goto out;
	}

	ret = omap_iommu_attach_init(dev, omap_domain);
	if (ret) {
		dev_err(dev, "failed to allocate required iommu data %d\n",
			ret);
		goto init_fail;
	}

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
		/* configure and enable the omap iommu */
		oiommu = arch_data->iommu_dev;
		ret = omap_iommu_attach(oiommu, iommu->pgtable);
		if (ret) {
			dev_err(dev, "can't get omap iommu: %d\n", ret);
			goto attach_fail;
		}

		oiommu->domain = domain;
		iommu->iommu_dev = oiommu;
	}

	omap_domain->dev = dev;

	goto out;

attach_fail:
	while (i--) {
		iommu--;
		arch_data--;
		oiommu = iommu->iommu_dev;
		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}
init_fail:
	omap_iommu_detach_fini(omap_domain);
out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev) {
		dev_err(dev, "domain has no attached device\n");
		return;
	}

	/* only a single device is supported per domain for now */
	if (omap_domain->dev != dev) {
		dev_err(dev, "invalid attached device\n");
		return;
	}

	/*
	 * cleanup in the reverse order of attachment - this addresses
	 * any h/w dependencies between multiple instances, if any
	 */
	iommu += (omap_domain->num_iommus - 1);
	arch_data += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
		oiommu = iommu->iommu_dev;
		iopgtable_clear_entry_all(oiommu);

		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}

	omap_iommu_detach_fini(omap_domain);

	omap_domain->dev = NULL;
}

static int omap_iommu_identity_attach(struct iommu_domain *identity_domain,
				      struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct omap_iommu_domain *omap_domain;

	if (domain == identity_domain || !domain)
		return 0;

	omap_domain = to_omap_domain(domain);
	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
	return 0;
}

static struct iommu_domain_ops omap_iommu_identity_ops = {
	.attach_dev = omap_iommu_identity_attach,
};

static struct iommu_domain omap_iommu_identity_domain = {
	.type = IOMMU_DOMAIN_IDENTITY,
	.ops = &omap_iommu_identity_ops,
};

static struct iommu_domain *omap_iommu_domain_alloc_paging(struct device *dev)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain)
		return NULL;

	spin_lock_init(&omap_domain->lock);

	omap_domain->domain.geometry.aperture_start = 0;
	omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
	omap_domain->domain.geometry.force_aperture = true;

	return &omap_domain->domain;
}

static void omap_iommu_domain_free(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * detach the device if one is still attached (currently, only a
	 * single device can be attached to a domain)
	 */
	if (omap_domain->dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu = iommu->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	/*
	 * all the iommus within the domain will have identical programming,
	 * so perform the lookup using just the first iommu
	 */
	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}

static struct iommu_device *omap_iommu_probe_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data, *tmp;
	struct platform_device *pdev;
	struct omap_iommu *oiommu;
	struct device_node *np;
	int num_iommus, i;

	/*
	 * Allocate the per-device iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return ERR_PTR(-ENODEV);

	/*
	 * retrieve the count of IOMMU nodes using phandle size as element size
	 * since #iommu-cells = 0 for OMAP
	 */
	num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
						     sizeof(phandle));
	if (num_iommus < 0)
		return ERR_PTR(-ENODEV);

	arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data)
		return ERR_PTR(-ENOMEM);

	for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) {
		np = of_parse_phandle(dev->of_node, "iommus", i);
		if (!np) {
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}

		pdev = of_find_device_by_node(np);
		if (!pdev) {
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-ENODEV);
		}

		oiommu = platform_get_drvdata(pdev);
		if (!oiommu) {
			of_node_put(np);
			kfree(arch_data);
			return ERR_PTR(-EINVAL);
		}

		tmp->iommu_dev = oiommu;
		tmp->dev = &pdev->dev;

		of_node_put(np);
	}

	dev_iommu_priv_set(dev, arch_data);

	/*
	 * use the first IOMMU alone for the sysfs device linking.
	 * TODO: Evaluate if a single iommu_group needs to be
	 * maintained for both IOMMUs
	 */
	oiommu = arch_data->iommu_dev;

	return &oiommu->iommu;
}
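
/*
 * A sketch of the client-side DT fragment parsed above (node names are
 * assumed for illustration). The MMU nodes declare #iommu-cells = <0>,
 * which is why the "iommus" property is a bare list of phandles:
 *
 *	mmu0: mmu@40d01000 {
 *		...
 *		#iommu-cells = <0>;
 *	};
 *
 *	dsp {
 *		...
 *		iommus = <&mmu0>, <&mmu1>;
 *	};
 */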

static void omap_iommu_release_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);

	if (!dev->of_node || !arch_data)
		return;

	kfree(arch_data);
}

static const struct iommu_ops omap_iommu_ops = {
	.identity_domain = &omap_iommu_identity_domain,
	.domain_alloc_paging = omap_iommu_domain_alloc_paging,
	.probe_device = omap_iommu_probe_device,
	.release_device = omap_iommu_release_device,
	.device_group = generic_single_device_group,
	.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = omap_iommu_attach_dev,
		.map_pages = omap_iommu_map,
		.unmap_pages = omap_iommu_unmap,
		.iova_to_phys = omap_iommu_iova_to_phys,
		.free = omap_iommu_domain_free,
	}
};

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const slab_flags_t flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      NULL);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	omap_iommu_debugfs_init();

	ret = platform_driver_register(&omap_iommu_driver);
	if (ret) {
		pr_err("%s: failed to register driver\n", __func__);
		goto fail_driver;
	}

	return 0;

fail_driver:
	kmem_cache_destroy(iopte_cachep);
	return ret;
}
subsys_initcall(omap_iommu_init);
/* must be ready before omap3isp is probed */