// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014-2020 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif
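
/*
 * When the 32-bit ARM DMA/IOMMU glue is unavailable, the stubs above let the
 * common code build without #ifdefs: mapping creation and attachment fail
 * gracefully, and the release/detach helpers become no-ops.
 */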

#define IPMMU_CTX_MAX		8U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		48U
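/*
 * The limits above cover every feature set defined below: the default
 * (R-Car Gen2) IPMMU exposes 32 uTLBs and is driven with a single context,
 * while R-Car Gen3 exposes 48 uTLBs and 8 contexts.
 */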

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
	bool cache_snoop;
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET		0x800

/* MMU "context" registers */
#define IMCTR				0x0000		/* R-Car Gen2/3 */
#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */

#define IMBUSCR				0x000c		/* R-Car Gen2 only */
#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */

#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */

#define IMSTR				0x0020		/* R-Car Gen2/3 */
#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */

#define IMMAIR0				0x0028		/* R-Car Gen2/3 */

#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
#define IMEUAR				0x0034		/* R-Car Gen3 only */

/* uTLB registers */
#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */
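
/*
 * Worked examples of the banked layout above: IMUCTR(3) = 0x0300 + 3 * 16 =
 * 0x0330, while uTLBs 32 and above live in the Gen3-only second bank, e.g.
 * IMUCTR(32) = 0x0600 and IMUASID(33) = 0x0608 + 16 = 0x0618.
 */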

/* -----------------------------------------------------------------------------
 * Root device handling
 */

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
				  unsigned int context_id, unsigned int reg)
{
	return mmu->features->ctx_offset_base +
	       context_id * mmu->features->ctx_offset_stride + reg;
}
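
/*
 * For example, with the ctx_offset_base of 0 and ctx_offset_stride of 0x40
 * used by both feature sets below, the IMTTBCR register (0x0008) of context 2
 * is accessed at 0 + 2 * 0x40 + 0x0008 = 0x0088.
 */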

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
			  unsigned int context_id, unsigned int reg)
{
	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
			    unsigned int context_id, unsigned int reg, u32 data)
{
	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
	return mmu->features->utlb_offset_base + reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
				unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
					    "TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_imuasid_write(mmu, utlb, 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
			   IMUCTR_FLUSH | IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_imuctr_write(mmu, utlb, 0);
	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_flush(unsigned long iova, size_t size,
			    size_t granule, void *cookie)
{
	ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_flush_walk = ipmmu_tlb_flush,
	.tlb_flush_leaf = ipmmu_tlb_flush,
};
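
/*
 * Note that the hardware provides no range-based invalidation: both the walk
 * and leaf flush callbacks above fall back to a full-context flush through
 * IMCTR_FLUSH.
 */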

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else
		ret = -EBUSY;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors and allocate the whole 32-bit VA space to
	 * TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
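	/*
	 * ias = 32 and oas = 40 select a 32-bit input (IOVA) space and a
	 * 40-bit output (physical) space, matching the 40-bit DMA mask set
	 * at probe time.
	 */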
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			kfree(io_domain);
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size, struct iommu_iotlb_gather *gather)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size, gather);
}

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
			     struct iommu_iotlb_gather *gather)
{
	ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));

	return 0;
}

static const struct soc_device_attribute soc_rcar_gen3[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a774e1", },
	{ .soc_id = "r8a7795", },
	{ .soc_id = "r8a77961", },
	{ .soc_id = "r8a7796", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77970", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a774e1", },
	{ .soc_id = "r8a7795", .revision = "ES3.*" },
	{ .soc_id = "r8a77961", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const char * const rcar_gen3_slave_whitelist[] = {
};

static bool ipmmu_slave_whitelist(struct device *dev)
{
	unsigned int i;

	/*
	 * For R-Car Gen3 SoCs, use the whitelist to opt in slave devices.
	 * For other SoCs, always return true.
	 */
	if (!soc_device_match(soc_rcar_gen3))
		return true;

	/* Check whether this R-Car Gen3 SoC can use the IPMMU correctly or not */
	if (!soc_device_match(soc_rcar_gen3_whitelist))
		return false;

	/* Check whether this slave device can work with the IPMMU */
	for (i = 0; i < ARRAY_SIZE(rcar_gen3_slave_whitelist); i++) {
		if (!strcmp(dev_name(dev), rcar_gen3_slave_whitelist[i]))
			return true;
	}

	/* Otherwise, do not allow use of IPMMU */
	return false;
}

static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	if (!ipmmu_slave_whitelist(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() will be called multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}
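
/*
 * of_xlate() consumes one cell per "iommus" specifier; that cell is the uTLB
 * index later passed to ipmmu_utlb_enable(). A hypothetical consumer node
 * (the uTLB number 16 is illustrative only) would look like:
 *
 *	&sdhi0 {
 *		iommus = <&ipmmu_vc0 16>;
 *	};
 */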

static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	int ret;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static struct iommu_device *ipmmu_probe_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	/*
	 * Only let through devices that have been verified in xlate().
	 */
	if (!mmu)
		return ERR_PTR(-ENODEV);

	return &mmu->iommu;
}

static void ipmmu_probe_finalize(struct device *dev)
{
	int ret = 0;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		ret = ipmmu_init_arm_mapping(dev);

	if (ret)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void ipmmu_release_device(struct device *dev)
{
	arm_iommu_detach_device(dev);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_flush_iotlb_all,
	.iotlb_sync = ipmmu_iotlb_sync,
	.iova_to_phys = ipmmu_iova_to_phys,
	.probe_device = ipmmu_probe_device,
	.release_device = ipmmu_release_device,
	.probe_finalize = ipmmu_probe_finalize,
	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
			? generic_device_group : ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};
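
/*
 * The device_group choice above mirrors the DMA integration: with the 32-bit
 * ARM mapping glue every client gets its own group (generic_device_group),
 * otherwise ipmmu_find_group() places all clients of one IPMMU instance in a
 * single shared group.
 */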

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774e1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77961",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		/* Terminator */
	},
};
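
/*
 * A sketch of a device tree node matched by this table, assuming the standard
 * renesas,ipmmu-vmsa bindings (the unit address and the renesas,ipmmu-main
 * phandle are illustrative only):
 *
 *	ipmmu_vc0: iommu@fe6b0000 {
 *		compatible = "renesas,ipmmu-r8a7795";
 *		reg = <0 0xfe6b0000 0 0x1000>;
 *		renesas,ipmmu-main = <&ipmmu_mm 8>;
 *		#iommu-cells = <1>;
 *	};
 */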

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	/*
	 * Determine whether this IPMMU instance is a root device by checking
	 * whether it lacks the has_cache_leaf_nodes feature or the
	 * renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Wait until the root device has been registered for sure.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

static const struct dev_pm_ops ipmmu_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	struct device_node *np;
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	np = of_find_matching_node(NULL, ipmmu_of_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}
subsys_initcall(ipmmu_init);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * IOMMU API for Renesas VMSA-compatible IPMMU
4 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
5 *
6 * Copyright (C) 2014-2020 Renesas Electronics Corporation
7 */
8
9#include <linux/bitmap.h>
10#include <linux/delay.h>
11#include <linux/dma-mapping.h>
12#include <linux/err.h>
13#include <linux/export.h>
14#include <linux/init.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/iopoll.h>
18#include <linux/io-pgtable.h>
19#include <linux/iommu.h>
20#include <linux/of.h>
21#include <linux/of_platform.h>
22#include <linux/pci.h>
23#include <linux/platform_device.h>
24#include <linux/sizes.h>
25#include <linux/slab.h>
26#include <linux/sys_soc.h>
27
28#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
29#include <asm/dma-iommu.h>
30#else
31#define arm_iommu_create_mapping(...) NULL
32#define arm_iommu_attach_device(...) -ENODEV
33#define arm_iommu_release_mapping(...) do {} while (0)
34#endif
35
36#define IPMMU_CTX_MAX 16U
37#define IPMMU_CTX_INVALID -1
38
39#define IPMMU_UTLB_MAX 64U
40
41struct ipmmu_features {
42 bool use_ns_alias_offset;
43 bool has_cache_leaf_nodes;
44 unsigned int number_of_contexts;
45 unsigned int num_utlbs;
46 bool setup_imbuscr;
47 bool twobit_imttbcr_sl0;
48 bool reserved_context;
49 bool cache_snoop;
50 unsigned int ctx_offset_base;
51 unsigned int ctx_offset_stride;
52 unsigned int utlb_offset_base;
53};
54
55struct ipmmu_vmsa_device {
56 struct device *dev;
57 void __iomem *base;
58 struct iommu_device iommu;
59 struct ipmmu_vmsa_device *root;
60 const struct ipmmu_features *features;
61 unsigned int num_ctx;
62 spinlock_t lock; /* Protects ctx and domains[] */
63 DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
64 struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
65 s8 utlb_ctx[IPMMU_UTLB_MAX];
66
67 struct dma_iommu_mapping *mapping;
68};
69
70struct ipmmu_vmsa_domain {
71 struct ipmmu_vmsa_device *mmu;
72 struct iommu_domain io_domain;
73
74 struct io_pgtable_cfg cfg;
75 struct io_pgtable_ops *iop;
76
77 unsigned int context_id;
78 struct mutex mutex; /* Protects mappings */
79};
80
81static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
82{
83 return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
84}
85
86static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
87{
88 return dev_iommu_priv_get(dev);
89}
90
91#define TLB_LOOP_TIMEOUT 100 /* 100us */
92
93/* -----------------------------------------------------------------------------
94 * Registers Definition
95 */
96
97#define IM_NS_ALIAS_OFFSET 0x800
98
99/* MMU "context" registers */
100#define IMCTR 0x0000 /* R-Car Gen2/3 */
101#define IMCTR_INTEN (1 << 2) /* R-Car Gen2/3 */
102#define IMCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */
103#define IMCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */
104
105#define IMTTBCR 0x0008 /* R-Car Gen2/3 */
106#define IMTTBCR_EAE (1 << 31) /* R-Car Gen2/3 */
107#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */
108#define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */
109#define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */
110#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */
111#define IMTTBCR_SL0_LVL_1 (1 << 4) /* R-Car Gen2 only */
112
113#define IMBUSCR 0x000c /* R-Car Gen2 only */
114#define IMBUSCR_DVM (1 << 2) /* R-Car Gen2 only */
115#define IMBUSCR_BUSSEL_MASK (3 << 0) /* R-Car Gen2 only */
116
117#define IMTTLBR0 0x0010 /* R-Car Gen2/3 */
118#define IMTTUBR0 0x0014 /* R-Car Gen2/3 */
119
120#define IMSTR 0x0020 /* R-Car Gen2/3 */
121#define IMSTR_MHIT (1 << 4) /* R-Car Gen2/3 */
122#define IMSTR_ABORT (1 << 2) /* R-Car Gen2/3 */
123#define IMSTR_PF (1 << 1) /* R-Car Gen2/3 */
124#define IMSTR_TF (1 << 0) /* R-Car Gen2/3 */
125
126#define IMMAIR0 0x0028 /* R-Car Gen2/3 */
127
128#define IMELAR 0x0030 /* R-Car Gen2/3, IMEAR on R-Car Gen2 */
129#define IMEUAR 0x0034 /* R-Car Gen3 only */
130
131/* uTLB registers */
132#define IMUCTR(n) ((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
133#define IMUCTR0(n) (0x0300 + ((n) * 16)) /* R-Car Gen2/3 */
134#define IMUCTR32(n) (0x0600 + (((n) - 32) * 16)) /* R-Car Gen3 only */
135#define IMUCTR_TTSEL_MMU(n) ((n) << 4) /* R-Car Gen2/3 */
136#define IMUCTR_FLUSH (1 << 1) /* R-Car Gen2/3 */
137#define IMUCTR_MMUEN (1 << 0) /* R-Car Gen2/3 */
138
139#define IMUASID(n) ((n) < 32 ? IMUASID0(n) : IMUASID32(n))
140#define IMUASID0(n) (0x0308 + ((n) * 16)) /* R-Car Gen2/3 */
141#define IMUASID32(n) (0x0608 + (((n) - 32) * 16)) /* R-Car Gen3 only */
142
143/* -----------------------------------------------------------------------------
144 * Root device handling
145 */
146
147static struct platform_driver ipmmu_driver;
148
149static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
150{
151 return mmu->root == mmu;
152}
153
154static int __ipmmu_check_device(struct device *dev, void *data)
155{
156 struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
157 struct ipmmu_vmsa_device **rootp = data;
158
159 if (ipmmu_is_root(mmu))
160 *rootp = mmu;
161
162 return 0;
163}
164
165static struct ipmmu_vmsa_device *ipmmu_find_root(void)
166{
167 struct ipmmu_vmsa_device *root = NULL;
168
169 return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
170 __ipmmu_check_device) == 0 ? root : NULL;
171}
172
173/* -----------------------------------------------------------------------------
174 * Read/Write Access
175 */
176
177static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
178{
179 return ioread32(mmu->base + offset);
180}
181
182static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
183 u32 data)
184{
185 iowrite32(data, mmu->base + offset);
186}
187
188static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
189 unsigned int context_id, unsigned int reg)
190{
191 unsigned int base = mmu->features->ctx_offset_base;
192
193 if (context_id > 7)
194 base += 0x800 - 8 * 0x40;
195
196 return base + context_id * mmu->features->ctx_offset_stride + reg;
197}
198
199static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
200 unsigned int context_id, unsigned int reg)
201{
202 return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
203}
204
205static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
206 unsigned int context_id, unsigned int reg, u32 data)
207{
208 ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
209}
210
211static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
212 unsigned int reg)
213{
214 return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
215}
216
217static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
218 unsigned int reg, u32 data)
219{
220 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
221}
222
223static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
224 unsigned int reg, u32 data)
225{
226 if (domain->mmu != domain->mmu->root)
227 ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);
228
229 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
230}
231
232static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
233{
234 return mmu->features->utlb_offset_base + reg;
235}
236
237static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
238 unsigned int utlb, u32 data)
239{
240 ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
241}
242
243static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
244 unsigned int utlb, u32 data)
245{
246 ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
247}
248
249/* -----------------------------------------------------------------------------
250 * TLB and microTLB Management
251 */
252
253/* Wait for any pending TLB invalidations to complete */
254static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
255{
256 u32 val;
257
258 if (read_poll_timeout_atomic(ipmmu_ctx_read_root, val,
259 !(val & IMCTR_FLUSH), 1, TLB_LOOP_TIMEOUT,
260 false, domain, IMCTR))
261 dev_err_ratelimited(domain->mmu->dev,
262 "TLB sync timed out -- MMU may be deadlocked\n");
263}
264
265static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
266{
267 u32 reg;
268
269 reg = ipmmu_ctx_read_root(domain, IMCTR);
270 reg |= IMCTR_FLUSH;
271 ipmmu_ctx_write_all(domain, IMCTR, reg);
272
273 ipmmu_tlb_sync(domain);
274}
275
276/*
277 * Enable MMU translation for the microTLB.
278 */
279static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
280 unsigned int utlb)
281{
282 struct ipmmu_vmsa_device *mmu = domain->mmu;
283
284 /*
285 * TODO: Reference-count the microTLB as several bus masters can be
286 * connected to the same microTLB.
287 */
288
289 /* TODO: What should we set the ASID to ? */
290 ipmmu_imuasid_write(mmu, utlb, 0);
291 /* TODO: Do we need to flush the microTLB ? */
292 ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
293 IMUCTR_FLUSH | IMUCTR_MMUEN);
294 mmu->utlb_ctx[utlb] = domain->context_id;
295}
296
297/*
298 * Disable MMU translation for the microTLB.
299 */
300static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
301 unsigned int utlb)
302{
303 struct ipmmu_vmsa_device *mmu = domain->mmu;
304
305 ipmmu_imuctr_write(mmu, utlb, 0);
306 mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
307}
308
309static void ipmmu_tlb_flush_all(void *cookie)
310{
311 struct ipmmu_vmsa_domain *domain = cookie;
312
313 ipmmu_tlb_invalidate(domain);
314}
315
316static void ipmmu_tlb_flush(unsigned long iova, size_t size,
317 size_t granule, void *cookie)
318{
319 ipmmu_tlb_flush_all(cookie);
320}
321
322static const struct iommu_flush_ops ipmmu_flush_ops = {
323 .tlb_flush_all = ipmmu_tlb_flush_all,
324 .tlb_flush_walk = ipmmu_tlb_flush,
325};
326
327/* -----------------------------------------------------------------------------
328 * Domain/Context Management
329 */
330
331static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
332 struct ipmmu_vmsa_domain *domain)
333{
334 unsigned long flags;
335 int ret;
336
337 spin_lock_irqsave(&mmu->lock, flags);
338
339 ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
340 if (ret != mmu->num_ctx) {
341 mmu->domains[ret] = domain;
342 set_bit(ret, mmu->ctx);
343 } else
344 ret = -EBUSY;
345
346 spin_unlock_irqrestore(&mmu->lock, flags);
347
348 return ret;
349}
350
351static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
352 unsigned int context_id)
353{
354 unsigned long flags;
355
356 spin_lock_irqsave(&mmu->lock, flags);
357
358 clear_bit(context_id, mmu->ctx);
359 mmu->domains[context_id] = NULL;
360
361 spin_unlock_irqrestore(&mmu->lock, flags);
362}
363
364static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
365{
366 u64 ttbr;
367 u32 tmp;
368
369 /* TTBR0 */
370 ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
371 ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
372 ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
373
374 /*
375 * TTBCR
376 * We use long descriptors and allocate the whole 32-bit VA space to
377 * TTBR0.
378 */
379 if (domain->mmu->features->twobit_imttbcr_sl0)
380 tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
381 else
382 tmp = IMTTBCR_SL0_LVL_1;
383
384 if (domain->mmu->features->cache_snoop)
385 tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
386 IMTTBCR_IRGN0_WB_WA;
387
388 ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);
389
390 /* MAIR0 */
391 ipmmu_ctx_write_root(domain, IMMAIR0,
392 domain->cfg.arm_lpae_s1_cfg.mair);
393
394 /* IMBUSCR */
395 if (domain->mmu->features->setup_imbuscr)
396 ipmmu_ctx_write_root(domain, IMBUSCR,
397 ipmmu_ctx_read_root(domain, IMBUSCR) &
398 ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));
399
400 /*
401 * IMSTR
402 * Clear all interrupt flags.
403 */
404 ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));
405
406 /*
407 * IMCTR
408 * Enable the MMU and interrupt generation. The long-descriptor
409 * translation table format doesn't use TEX remapping. Don't enable AF
410 * software management as we have no use for it. Flush the TLB as
411 * required when modifying the context registers.
412 */
413 ipmmu_ctx_write_all(domain, IMCTR,
414 IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
415}
416
417static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
418{
419 int ret;
420
421 /*
422 * Allocate the page table operations.
423 *
424 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
425 * access, Long-descriptor format" that the NStable bit being set in a
426 * table descriptor will result in the NStable and NS bits of all child
427 * entries being ignored and considered as being set. The IPMMU seems
428 * not to comply with this, as it generates a secure access page fault
429 * if any of the NStable and NS bits isn't set when running in
430 * non-secure mode.
431 */
432 domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
433 domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
434 domain->cfg.ias = 32;
435 domain->cfg.oas = 40;
436 domain->cfg.tlb = &ipmmu_flush_ops;
437 domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
438 domain->io_domain.geometry.force_aperture = true;
439 /*
440 * TODO: Add support for coherent walk through CCI with DVM and remove
441 * cache handling. For now, delegate it to the io-pgtable code.
442 */
443 domain->cfg.coherent_walk = false;
444 domain->cfg.iommu_dev = domain->mmu->root->dev;
445
446 /*
447 * Find an unused context.
448 */
449 ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
450 if (ret < 0)
451 return ret;
452
453 domain->context_id = ret;
454
455 domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
456 domain);
457 if (!domain->iop) {
458 ipmmu_domain_free_context(domain->mmu->root,
459 domain->context_id);
460 return -EINVAL;
461 }
462
463 ipmmu_domain_setup_context(domain);
464 return 0;
465}
466
467static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
468{
469 if (!domain->mmu)
470 return;
471
472 /*
473 * Disable the context. Flush the TLB as required when modifying the
474 * context registers.
475 *
476 * TODO: Is TLB flush really needed ?
477 */
478 ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
479 ipmmu_tlb_sync(domain);
480 ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
481}
482
483/* -----------------------------------------------------------------------------
484 * Fault Handling
485 */
486
487static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
488{
489 const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
490 struct ipmmu_vmsa_device *mmu = domain->mmu;
491 unsigned long iova;
492 u32 status;
493
494 status = ipmmu_ctx_read_root(domain, IMSTR);
495 if (!(status & err_mask))
496 return IRQ_NONE;
497
498 iova = ipmmu_ctx_read_root(domain, IMELAR);
499 if (IS_ENABLED(CONFIG_64BIT))
500 iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
501
502 /*
503 * Clear the error status flags. Unlike traditional interrupt flag
504 * registers that must be cleared by writing 1, this status register
505 * seems to require 0. The error address register must be read before,
506 * otherwise its value will be 0.
507 */
508 ipmmu_ctx_write_root(domain, IMSTR, 0);
509
510 /* Log fatal errors. */
511 if (status & IMSTR_MHIT)
512 dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
513 iova);
514 if (status & IMSTR_ABORT)
515 dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
516 iova);
517
518 if (!(status & (IMSTR_PF | IMSTR_TF)))
519 return IRQ_NONE;
520
521 /*
522 * Try to handle page faults and translation faults.
523 *
524 * TODO: We need to look up the faulty device based on the I/O VA. Use
525 * the IOMMU device for now.
526 */
527 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
528 return IRQ_HANDLED;
529
530 dev_err_ratelimited(mmu->dev,
531 "Unhandled fault: status 0x%08x iova 0x%lx\n",
532 status, iova);
533
534 return IRQ_HANDLED;
535}
536
537static irqreturn_t ipmmu_irq(int irq, void *dev)
538{
539 struct ipmmu_vmsa_device *mmu = dev;
540 irqreturn_t status = IRQ_NONE;
541 unsigned int i;
542 unsigned long flags;
543
544 spin_lock_irqsave(&mmu->lock, flags);
545
546 /*
547 * Check interrupts for all active contexts.
548 */
549 for (i = 0; i < mmu->num_ctx; i++) {
550 if (!mmu->domains[i])
551 continue;
552 if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
553 status = IRQ_HANDLED;
554 }
555
556 spin_unlock_irqrestore(&mmu->lock, flags);
557
558 return status;
559}
560
561/* -----------------------------------------------------------------------------
562 * IOMMU Operations
563 */
564
565static struct iommu_domain *ipmmu_domain_alloc_paging(struct device *dev)
566{
567 struct ipmmu_vmsa_domain *domain;
568
569 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
570 if (!domain)
571 return NULL;
572
573 mutex_init(&domain->mutex);
574
575 return &domain->io_domain;
576}
577
578static void ipmmu_domain_free(struct iommu_domain *io_domain)
579{
580 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
581
582 /*
583 * Free the domain resources. We assume that all devices have already
584 * been detached.
585 */
586 ipmmu_domain_destroy_context(domain);
587 free_io_pgtable_ops(domain->iop);
588 kfree(domain);
589}
590
591static int ipmmu_attach_device(struct iommu_domain *io_domain,
592 struct device *dev)
593{
594 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
595 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
596 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
597 unsigned int i;
598 int ret = 0;
599
600 if (!mmu) {
601 dev_err(dev, "Cannot attach to IPMMU\n");
602 return -ENXIO;
603 }
604
605 mutex_lock(&domain->mutex);
606
607 if (!domain->mmu) {
608 /* The domain hasn't been used yet, initialize it. */
609 domain->mmu = mmu;
610 ret = ipmmu_domain_init_context(domain);
611 if (ret < 0) {
612 dev_err(dev, "Unable to initialize IPMMU context\n");
613 domain->mmu = NULL;
614 } else {
615 dev_info(dev, "Using IPMMU context %u\n",
616 domain->context_id);
617 }
618 } else if (domain->mmu != mmu) {
619 /*
620 * Something is wrong, we can't attach two devices using
621 * different IOMMUs to the same domain.
622 */
623 ret = -EINVAL;
624 } else
625 dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
626
627 mutex_unlock(&domain->mutex);
628
629 if (ret < 0)
630 return ret;
631
632 for (i = 0; i < fwspec->num_ids; ++i)
633 ipmmu_utlb_enable(domain, fwspec->ids[i]);
634
635 return 0;
636}
637
638static int ipmmu_iommu_identity_attach(struct iommu_domain *identity_domain,
639 struct device *dev)
640{
641 struct iommu_domain *io_domain = iommu_get_domain_for_dev(dev);
642 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
643 struct ipmmu_vmsa_domain *domain;
644 unsigned int i;
645
646 if (io_domain == identity_domain || !io_domain)
647 return 0;
648
649 domain = to_vmsa_domain(io_domain);
650 for (i = 0; i < fwspec->num_ids; ++i)
651 ipmmu_utlb_disable(domain, fwspec->ids[i]);
652
653 /*
654 * TODO: Optimize by disabling the context when no device is attached.
655 */
656 return 0;
657}
658
659static struct iommu_domain_ops ipmmu_iommu_identity_ops = {
660 .attach_dev = ipmmu_iommu_identity_attach,
661};
662
663static struct iommu_domain ipmmu_iommu_identity_domain = {
664 .type = IOMMU_DOMAIN_IDENTITY,
665 .ops = &ipmmu_iommu_identity_ops,
666};
667
668static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
669 phys_addr_t paddr, size_t pgsize, size_t pgcount,
670 int prot, gfp_t gfp, size_t *mapped)
671{
672 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
673
674 return domain->iop->map_pages(domain->iop, iova, paddr, pgsize, pgcount,
675 prot, gfp, mapped);
676}
677
678static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
679 size_t pgsize, size_t pgcount,
680 struct iommu_iotlb_gather *gather)
681{
682 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
683
684 return domain->iop->unmap_pages(domain->iop, iova, pgsize, pgcount, gather);
685}
686
687static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
688{
689 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
690
691 if (domain->mmu)
692 ipmmu_tlb_flush_all(domain);
693}
694
695static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
696 struct iommu_iotlb_gather *gather)
697{
698 ipmmu_flush_iotlb_all(io_domain);
699}
700
701static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
702 dma_addr_t iova)
703{
704 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
705
706 /* TODO: Is locking needed ? */
707
708 return domain->iop->iova_to_phys(domain->iop, iova);
709}
710
711static int ipmmu_init_platform_device(struct device *dev,
712 const struct of_phandle_args *args)
713{
714 struct platform_device *ipmmu_pdev;
715
716 ipmmu_pdev = of_find_device_by_node(args->np);
717 if (!ipmmu_pdev)
718 return -ENODEV;
719
720 dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));
721
722 return 0;
723}
724
725static const struct soc_device_attribute soc_needs_opt_in[] = {
726 { .family = "R-Car Gen3", },
727 { .family = "R-Car Gen4", },
728 { .family = "RZ/G2", },
729 { /* sentinel */ }
730};
731
732static const struct soc_device_attribute soc_denylist[] = {
733 { .soc_id = "r8a774a1", },
734 { .soc_id = "r8a7795", .revision = "ES2.*" },
735 { .soc_id = "r8a7796", },
736 { /* sentinel */ }
737};
738
739static const char * const devices_allowlist[] = {
740 "ee100000.mmc",
741 "ee120000.mmc",
742 "ee140000.mmc",
743 "ee160000.mmc"
744};
745
746static bool ipmmu_device_is_allowed(struct device *dev)
747{
748 unsigned int i;
749
750 /*
751 * R-Car Gen3/4 and RZ/G2 use the allow list to opt-in devices.
752 * For Other SoCs, this returns true anyway.
753 */
754 if (!soc_device_match(soc_needs_opt_in))
755 return true;
756
757 /* Check whether this SoC can use the IPMMU correctly or not */
758 if (soc_device_match(soc_denylist))
759 return false;
760
761 /* Check whether this device is a PCI device */
762 if (dev_is_pci(dev))
763 return true;
764
765 /* Check whether this device can work with the IPMMU */
766 for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
767 if (!strcmp(dev_name(dev), devices_allowlist[i]))
768 return true;
769 }
770
771 /* Otherwise, do not allow use of IPMMU */
772 return false;
773}
774
775static int ipmmu_of_xlate(struct device *dev,
776 const struct of_phandle_args *spec)
777{
778 if (!ipmmu_device_is_allowed(dev))
779 return -ENODEV;
780
781 iommu_fwspec_add_ids(dev, spec->args, 1);
782
783 /* Initialize once - xlate() will call multiple times */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}

static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	int ret;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static struct iommu_device *ipmmu_probe_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	/* Only let through devices that have been verified in xlate(). */
	if (!mmu)
		return ERR_PTR(-ENODEV);

	return &mmu->iommu;
}

static void ipmmu_probe_finalize(struct device *dev)
{
	int ret = 0;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		ret = ipmmu_init_arm_mapping(dev);

	if (ret)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void ipmmu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		unsigned int utlb = fwspec->ids[i];

		ipmmu_imuctr_write(mmu, utlb, 0);
		mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
	}

	arm_iommu_release_mapping(mmu->mapping);
}

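/*
 * The page size bitmap below advertises the VMSA long-descriptor sizes
 * handled by io-pgtable: 4KiB pages plus 2MiB and 1GiB block mappings.
 */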
static const struct iommu_ops ipmmu_ops = {
	.identity_domain = &ipmmu_iommu_identity_domain,
	.domain_alloc_paging = ipmmu_domain_alloc_paging,
	.probe_device = ipmmu_probe_device,
	.release_device = ipmmu_release_device,
	.probe_finalize = ipmmu_probe_finalize,
	/*
	 * FIXME: The device grouping is a fixed property of the hardware's
	 * ability to isolate and control DMA, it should not depend on kconfig.
	 */
	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
			? generic_device_group : generic_single_device_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = ipmmu_attach_device,
		.map_pages = ipmmu_map,
		.unmap_pages = ipmmu_unmap,
		.flush_iotlb_all = ipmmu_flush_iotlb_all,
		.iotlb_sync = ipmmu_iotlb_sync,
		.iova_to_phys = ipmmu_iova_to_phys,
		.free = ipmmu_domain_free,
	}
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen4 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 16,
	.num_utlbs = 64,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0x10000,
	.ctx_offset_stride = 0x1040,
	.utlb_offset_base = 0x3000,
};
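
/*
 * Context registers are addressed as
 *	ctx_offset_base + context_id * ctx_offset_stride + reg,
 * so with the R-Car Gen4 parameters above, IMCTR of context 1 sits at
 * 0x10000 + 1 * 0x1040 + 0x0 = 0x11040 from the base of the mapping.
 */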

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774e1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77961",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77980",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a779a0",
		.data = &ipmmu_features_rcar_gen4,
	}, {
		.compatible = "renesas,rcar-gen4-ipmmu-vmsa",
		.data = &ipmmu_features_rcar_gen4,
	}, {
		/* Terminator */
	},
};

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
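	/*
	 * utlb_ctx[] is an s8 array and IPMMU_CTX_INVALID is -1, so the
	 * byte-wise memset() marks every micro-TLB as unassigned.
	 */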
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

	/* Map I/O memory and request IRQ. */
	mmu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_property_present(pdev->dev.of_node, "renesas,ipmmu-main"))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/* Defer probing until the root device has been registered. */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU with the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
		if (ret)
			return ret;
	}

	/*
	 * The ARM mapping can't be created here, as the device hasn't been
	 * probed by the IOMMU core yet; it is set up from the
	 * probe_finalize() callback instead.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static void ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);
}

static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset root MMU and restore contexts */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

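/*
 * No suspend handler is needed: everything required to rebuild the hardware
 * state after a power loss (domains[] and utlb_ctx[]) is kept in memory, so
 * only a noirq resume hook is registered.
 */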
static const struct dev_pm_ops ipmmu_pm = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = ipmmu_of_ids,
		.pm = pm_sleep_ptr(&ipmmu_pm),
	},
	.probe = ipmmu_probe,
	.remove_new = ipmmu_remove,
};
builtin_platform_driver(ipmmu_driver);