// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))
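/*
 * For example, with a 4K granule (8-byte PTEs, so bits_per_level = 9 as
 * computed in arm_lpae_alloc_pgtable() below), this yields shifts of
 * 39, 30, 21 and 12 for levels 0-3 respectively.
 */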

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
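/*
 * For instance, a 4K granule gives 4K, 2M and 1G mappings at levels 3, 2
 * and 1; a 16K granule gives 16K and 32M; a 64K granule gives 64K and
 * 512M, matching arm_lpae_restrict_pgsizes() below.
 */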

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
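/*
 * Note the asymmetry with paddr_to_iopte(): only the 64K granule supports
 * output addresses above 48 bits, so for smaller granules PTE bits 15:12
 * are simply part of the address and nothing needs unpacking.
 */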

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg,
				    void *cookie)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));

	if (cfg->alloc) {
		pages = cfg->alloc(cookie, size, gfp);
	} else {
		struct page *p;

		p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
		pages = p ? page_address(p) : NULL;
	}

	if (!pages)
		return NULL;

	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);

out_free:
	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		free_pages((unsigned long)pages, order);

	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg,
				  void *cookie)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);

	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
{
	*ptep = 0;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, 1, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

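/*
 * Atomically install a next-level table at *ptep, expecting to replace
 * @curr. Returns the PTE that was actually present: @curr if we won the
 * race, or whoever beat us to it otherwise, in which case the caller is
 * responsible for freeing the unused table.
 */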
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

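/*
 * Recursively map @pgcount leaf entries: descend until @size matches the
 * block size at the current level, allocating and installing intermediate
 * tables on demand along the way.
 */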
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}

static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

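/*
 * Split a block mapping: build a next-level table covering the same region
 * minus the entries being unmapped, then try to atomically replace the
 * block PTE with it. Losing the install race to a concurrent split of the
 * same block is tolerated; any other change to the PTE is invalid.
 */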
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep, size_t pgcount)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
	int i, unmap_idx_start = -1, num_entries = 0, max_entries;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz) {
		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
		max_entries = ptes_per_table - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
	}

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx_start >= 0) {
		for (i = 0; i < num_entries; i++)
			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

		return num_entries * size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}

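/*
 * Walk down until the block size at the current level matches @size, then
 * clear up to @pgcount entries there, recursively freeing any next-level
 * tables that the cleared entries pointed to.
 */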
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);

		while (i < num_entries) {
			pte = READ_ONCE(*ptep);
			if (WARN_ON(!pte))
				break;

			__arm_lpae_clear_pte(ptep, &iop->cfg);

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			} else if (!iommu_iotlb_gather_queued(gather)) {
				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
			}

			ptep++;
			i++;
		}

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep, pgcount);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

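	/*
	 * Worked example, assuming ias = 48 with a 4K granule: pg_shift = 12,
	 * bits_per_level = 9, va_bits = 36, hence a 4-level walk starting at
	 * level 0 with a 9-bit (512-entry) pgd.
	 */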
	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map_pages	= arm_lpae_map_pages,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
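	/*
	 * For example, ias = 40 with a 4K granule would start the walk at
	 * level 0 with only a 2-entry pgd; concatenating two level-1 tables
	 * instead (pgd_bits 1 -> 10) starts it at level 1.
	 */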
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
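	/*
	 * ~sl & 3 is equivalent to 3 - sl here: SL0 encodes the start level
	 * as an inverted count, and the 4K granule uses an encoding offset
	 * by one, hence the sl++ above.
	 */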
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size, mapped;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_READ | IOMMU_WRITE |
					   IOMMU_NOEXEC | IOMMU_CACHE,
					   GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map_pages(ops, iova, iova + size, size, 1,
					    IOMMU_READ | IOMMU_NOEXEC,
					    GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
				   IOMMU_READ, GFP_KERNEL, &mapped))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_WRITE, GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct device dev;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
		.iommu_dev = &dev,
	};

	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
	set_dev_node(&dev, NUMA_NO_NODE);

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif
1/*
2 * CPU-agnostic ARM page table allocator.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Copyright (C) 2014 ARM Limited
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
22
23#include <linux/iommu.h>
24#include <linux/kernel.h>
25#include <linux/sizes.h>
26#include <linux/slab.h>
27#include <linux/types.h>
28#include <linux/dma-mapping.h>
29
30#include <asm/barrier.h>
31
32#include "io-pgtable.h"
33
34#define ARM_LPAE_MAX_ADDR_BITS 48
35#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
36#define ARM_LPAE_MAX_LEVELS 4
37
38/* Struct accessors */
39#define io_pgtable_to_data(x) \
40 container_of((x), struct arm_lpae_io_pgtable, iop)
41
42#define io_pgtable_ops_to_data(x) \
43 io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
44
45/*
46 * For consistency with the architecture, we always consider
47 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
48 */
49#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
50
51/*
52 * Calculate the right shift amount to get to the portion describing level l
53 * in a virtual address mapped by the pagetable in d.
54 */
55#define ARM_LPAE_LVL_SHIFT(l,d) \
56 ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
57 * (d)->bits_per_level) + (d)->pg_shift)
58
59#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift)
60
61#define ARM_LPAE_PAGES_PER_PGD(d) \
62 DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
63
64/*
65 * Calculate the index at level l used to map virtual address a using the
66 * pagetable in d.
67 */
68#define ARM_LPAE_PGD_IDX(l,d) \
69 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
70
71#define ARM_LPAE_LVL_IDX(a,l,d) \
72 (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
73 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
74
75/* Calculate the block/page mapping size at level l for pagetable in d. */
76#define ARM_LPAE_BLOCK_SIZE(l,d) \
77 (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
78 ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
79
80/* Page table bits */
81#define ARM_LPAE_PTE_TYPE_SHIFT 0
82#define ARM_LPAE_PTE_TYPE_MASK 0x3
83
84#define ARM_LPAE_PTE_TYPE_BLOCK 1
85#define ARM_LPAE_PTE_TYPE_TABLE 3
86#define ARM_LPAE_PTE_TYPE_PAGE 3
87
88#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
89#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
90#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
91#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
92#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
93#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8)
94#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
95#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
96
97#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
98/* Ignore the contiguous bit for block splitting */
99#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
100#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
101 ARM_LPAE_PTE_ATTR_HI_MASK)
102
103/* Stage-1 PTE */
104#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
105#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
106#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
107#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
108
109/* Stage-2 PTE */
110#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
111#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
112#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
113#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
114#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
115#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
116
117/* Register bits */
118#define ARM_32_LPAE_TCR_EAE (1 << 31)
119#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
120
121#define ARM_LPAE_TCR_EPD1 (1 << 23)
122
123#define ARM_LPAE_TCR_TG0_4K (0 << 14)
124#define ARM_LPAE_TCR_TG0_64K (1 << 14)
125#define ARM_LPAE_TCR_TG0_16K (2 << 14)
126
127#define ARM_LPAE_TCR_SH0_SHIFT 12
128#define ARM_LPAE_TCR_SH0_MASK 0x3
129#define ARM_LPAE_TCR_SH_NS 0
130#define ARM_LPAE_TCR_SH_OS 2
131#define ARM_LPAE_TCR_SH_IS 3
132
133#define ARM_LPAE_TCR_ORGN0_SHIFT 10
134#define ARM_LPAE_TCR_IRGN0_SHIFT 8
135#define ARM_LPAE_TCR_RGN_MASK 0x3
136#define ARM_LPAE_TCR_RGN_NC 0
137#define ARM_LPAE_TCR_RGN_WBWA 1
138#define ARM_LPAE_TCR_RGN_WT 2
139#define ARM_LPAE_TCR_RGN_WB 3
140
141#define ARM_LPAE_TCR_SL0_SHIFT 6
142#define ARM_LPAE_TCR_SL0_MASK 0x3
143
144#define ARM_LPAE_TCR_T0SZ_SHIFT 0
145#define ARM_LPAE_TCR_SZ_MASK 0xf
146
147#define ARM_LPAE_TCR_PS_SHIFT 16
148#define ARM_LPAE_TCR_PS_MASK 0x7
149
150#define ARM_LPAE_TCR_IPS_SHIFT 32
151#define ARM_LPAE_TCR_IPS_MASK 0x7
152
153#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
154#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
155#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
156#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
157#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
158#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
159
160#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
161#define ARM_LPAE_MAIR_ATTR_MASK 0xff
162#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
163#define ARM_LPAE_MAIR_ATTR_NC 0x44
164#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
165#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
166#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
167#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
168
169/* IOPTE accessors */
170#define iopte_deref(pte,d) \
171 (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \
172 & ~(ARM_LPAE_GRANULE(d) - 1ULL)))
173
174#define iopte_type(pte,l) \
175 (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
176
177#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
178
179#define iopte_leaf(pte,l) \
180 (l == (ARM_LPAE_MAX_LEVELS - 1) ? \
181 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
182 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
183
184#define iopte_to_pfn(pte,d) \
185 (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)
186
187#define pfn_to_iopte(pfn,d) \
188 (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
189
190struct arm_lpae_io_pgtable {
191 struct io_pgtable iop;
192
193 int levels;
194 size_t pgd_size;
195 unsigned long pg_shift;
196 unsigned long bits_per_level;
197
198 void *pgd;
199};
200
201typedef u64 arm_lpae_iopte;
202
203static bool selftest_running = false;
204
205static dma_addr_t __arm_lpae_dma_addr(void *pages)
206{
207 return (dma_addr_t)virt_to_phys(pages);
208}
209
210static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
211 struct io_pgtable_cfg *cfg)
212{
213 struct device *dev = cfg->iommu_dev;
214 dma_addr_t dma;
215 void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
216
217 if (!pages)
218 return NULL;
219
220 if (!selftest_running) {
221 dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
222 if (dma_mapping_error(dev, dma))
223 goto out_free;
224 /*
225 * We depend on the IOMMU being able to work with any physical
226 * address directly, so if the DMA layer suggests otherwise by
227 * translating or truncating them, that bodes very badly...
228 */
229 if (dma != virt_to_phys(pages))
230 goto out_unmap;
231 }
232
233 return pages;
234
235out_unmap:
236 dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
237 dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
238out_free:
239 free_pages_exact(pages, size);
240 return NULL;
241}
242
243static void __arm_lpae_free_pages(void *pages, size_t size,
244 struct io_pgtable_cfg *cfg)
245{
246 if (!selftest_running)
247 dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
248 size, DMA_TO_DEVICE);
249 free_pages_exact(pages, size);
250}
251
252static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
253 struct io_pgtable_cfg *cfg)
254{
255 *ptep = pte;
256
257 if (!selftest_running)
258 dma_sync_single_for_device(cfg->iommu_dev,
259 __arm_lpae_dma_addr(ptep),
260 sizeof(pte), DMA_TO_DEVICE);
261}
262
263static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
264 unsigned long iova, size_t size, int lvl,
265 arm_lpae_iopte *ptep);
266
267static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
268 unsigned long iova, phys_addr_t paddr,
269 arm_lpae_iopte prot, int lvl,
270 arm_lpae_iopte *ptep)
271{
272 arm_lpae_iopte pte = prot;
273 struct io_pgtable_cfg *cfg = &data->iop.cfg;
274
275 if (iopte_leaf(*ptep, lvl)) {
276 /* We require an unmap first */
277 WARN_ON(!selftest_running);
278 return -EEXIST;
279 } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
280 /*
281 * We need to unmap and free the old table before
282 * overwriting it with a block entry.
283 */
284 arm_lpae_iopte *tblp;
285 size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
286
287 tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
288 if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
289 return -EINVAL;
290 }
291
292 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
293 pte |= ARM_LPAE_PTE_NS;
294
295 if (lvl == ARM_LPAE_MAX_LEVELS - 1)
296 pte |= ARM_LPAE_PTE_TYPE_PAGE;
297 else
298 pte |= ARM_LPAE_PTE_TYPE_BLOCK;
299
300 pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
301 pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
302
303 __arm_lpae_set_pte(ptep, pte, cfg);
304 return 0;
305}
306
307static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
308 phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
309 int lvl, arm_lpae_iopte *ptep)
310{
311 arm_lpae_iopte *cptep, pte;
312 size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
313 struct io_pgtable_cfg *cfg = &data->iop.cfg;
314
315 /* Find our entry at the current level */
316 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
317
318 /* If we can install a leaf entry at this level, then do so */
319 if (size == block_size && (size & cfg->pgsize_bitmap))
320 return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
321
322 /* We can't allocate tables at the final level */
323 if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
324 return -EINVAL;
325
326 /* Grab a pointer to the next level */
327 pte = *ptep;
328 if (!pte) {
329 cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
330 GFP_ATOMIC, cfg);
331 if (!cptep)
332 return -ENOMEM;
333
334 pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
335 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
336 pte |= ARM_LPAE_PTE_NSTABLE;
337 __arm_lpae_set_pte(ptep, pte, cfg);
338 } else {
339 cptep = iopte_deref(pte, data);
340 }
341
342 /* Rinse, repeat */
343 return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
344}
345
346static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
347 int prot)
348{
349 arm_lpae_iopte pte;
350
351 if (data->iop.fmt == ARM_64_LPAE_S1 ||
352 data->iop.fmt == ARM_32_LPAE_S1) {
353 pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
354
355 if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
356 pte |= ARM_LPAE_PTE_AP_RDONLY;
357
358 if (prot & IOMMU_CACHE)
359 pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
360 << ARM_LPAE_PTE_ATTRINDX_SHIFT);
361 } else {
362 pte = ARM_LPAE_PTE_HAP_FAULT;
363 if (prot & IOMMU_READ)
364 pte |= ARM_LPAE_PTE_HAP_READ;
365 if (prot & IOMMU_WRITE)
366 pte |= ARM_LPAE_PTE_HAP_WRITE;
367 if (prot & IOMMU_CACHE)
368 pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
369 else
370 pte |= ARM_LPAE_PTE_MEMATTR_NC;
371 }
372
373 if (prot & IOMMU_NOEXEC)
374 pte |= ARM_LPAE_PTE_XN;
375
376 return pte;
377}
378
379static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
380 phys_addr_t paddr, size_t size, int iommu_prot)
381{
382 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
383 arm_lpae_iopte *ptep = data->pgd;
384 int ret, lvl = ARM_LPAE_START_LVL(data);
385 arm_lpae_iopte prot;
386
387 /* If no access, then nothing to do */
388 if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
389 return 0;
390
391 prot = arm_lpae_prot_to_pte(data, iommu_prot);
392 ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
393 /*
394 * Synchronise all PTE updates for the new mapping before there's
395 * a chance for anything to kick off a table walk for the new iova.
396 */
397 wmb();
398
399 return ret;
400}
401
402static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
403 arm_lpae_iopte *ptep)
404{
405 arm_lpae_iopte *start, *end;
406 unsigned long table_size;
407
408 if (lvl == ARM_LPAE_START_LVL(data))
409 table_size = data->pgd_size;
410 else
411 table_size = ARM_LPAE_GRANULE(data);
412
413 start = ptep;
414
415 /* Only leaf entries at the last level */
416 if (lvl == ARM_LPAE_MAX_LEVELS - 1)
417 end = ptep;
418 else
419 end = (void *)ptep + table_size;
420
421 while (ptep != end) {
422 arm_lpae_iopte pte = *ptep++;
423
424 if (!pte || iopte_leaf(pte, lvl))
425 continue;
426
427 __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
428 }
429
430 __arm_lpae_free_pages(start, table_size, &data->iop.cfg);
431}
432
433static void arm_lpae_free_pgtable(struct io_pgtable *iop)
434{
435 struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
436
437 __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
438 kfree(data);
439}
440
441static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
442 unsigned long iova, size_t size,
443 arm_lpae_iopte prot, int lvl,
444 arm_lpae_iopte *ptep, size_t blk_size)
445{
446 unsigned long blk_start, blk_end;
447 phys_addr_t blk_paddr;
448 arm_lpae_iopte table = 0;
449
450 blk_start = iova & ~(blk_size - 1);
451 blk_end = blk_start + blk_size;
452 blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
453
454 for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
455 arm_lpae_iopte *tablep;
456
457 /* Unmap! */
458 if (blk_start == iova)
459 continue;
460
461 /* __arm_lpae_map expects a pointer to the start of the table */
462 tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
463 if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
464 tablep) < 0) {
465 if (table) {
466 /* Free the table we allocated */
467 tablep = iopte_deref(table, data);
468 __arm_lpae_free_pgtable(data, lvl + 1, tablep);
469 }
470 return 0; /* Bytes unmapped */
471 }
472 }
473
474 __arm_lpae_set_pte(ptep, table, &data->iop.cfg);
475 iova &= ~(blk_size - 1);
476 io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
477 return size;
478}
479
480static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
481 unsigned long iova, size_t size, int lvl,
482 arm_lpae_iopte *ptep)
483{
484 arm_lpae_iopte pte;
485 struct io_pgtable *iop = &data->iop;
486 size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
487
488 /* Something went horribly wrong and we ran out of page table */
489 if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
490 return 0;
491
492 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
493 pte = *ptep;
494 if (WARN_ON(!pte))
495 return 0;
496
497 /* If the size matches this level, we're in the right place */
498 if (size == blk_size) {
499 __arm_lpae_set_pte(ptep, 0, &iop->cfg);
500
501 if (!iopte_leaf(pte, lvl)) {
502 /* Also flush any partial walks */
503 io_pgtable_tlb_add_flush(iop, iova, size,
504 ARM_LPAE_GRANULE(data), false);
505 io_pgtable_tlb_sync(iop);
506 ptep = iopte_deref(pte, data);
507 __arm_lpae_free_pgtable(data, lvl + 1, ptep);
508 } else {
509 io_pgtable_tlb_add_flush(iop, iova, size, size, true);
510 }
511
512 return size;
513 } else if (iopte_leaf(pte, lvl)) {
514 /*
515 * Insert a table at the next level to map the old region,
516 * minus the part we want to unmap
517 */
518 return arm_lpae_split_blk_unmap(data, iova, size,
519 iopte_prot(pte), lvl, ptep,
520 blk_size);
521 }
522
523 /* Keep on walkin' */
524 ptep = iopte_deref(pte, data);
525 return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
526}
527
528static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
529 size_t size)
530{
531 size_t unmapped;
532 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
533 arm_lpae_iopte *ptep = data->pgd;
534 int lvl = ARM_LPAE_START_LVL(data);
535
536 unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
537 if (unmapped)
538 io_pgtable_tlb_sync(&data->iop);
539
540 return unmapped;
541}
542
543static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
544 unsigned long iova)
545{
546 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
547 arm_lpae_iopte pte, *ptep = data->pgd;
548 int lvl = ARM_LPAE_START_LVL(data);
549
550 do {
551 /* Valid IOPTE pointer? */
552 if (!ptep)
553 return 0;
554
555 /* Grab the IOPTE we're interested in */
556 pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));
557
558 /* Valid entry? */
559 if (!pte)
560 return 0;
561
562 /* Leaf entry? */
563 if (iopte_leaf(pte,lvl))
564 goto found_translation;
565
566 /* Take it to the next level */
567 ptep = iopte_deref(pte, data);
568 } while (++lvl < ARM_LPAE_MAX_LEVELS);
569
570 /* Ran out of page tables to walk */
571 return 0;
572
573found_translation:
574 iova &= (ARM_LPAE_GRANULE(data) - 1);
575 return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
576}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer the largest
	 * size smaller than it, and finally fall back to the smallest
	 * size larger than it. While we're at it, restrict the block
	 * sizes to match the chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
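
/*
 * Example: with a 4K CPU PAGE_SIZE and a caller-supplied pgsize_bitmap of
 * (SZ_4K | SZ_64K | SZ_2M | SZ_512M), the 4K granule is chosen and the
 * bitmap is trimmed to SZ_4K | SZ_2M, dropping the sizes that belong to
 * the 64K translation regime.
 */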

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
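
/*
 * Worked example of the geometry above: a 4K granule with a 48-bit IAS
 * gives pg_shift = 12 and bits_per_level = 12 - ilog2(8) = 9, so
 * va_bits = 36 and levels = DIV_ROUND_UP(36, 9) = 4. The top level then
 * resolves pgd_bits = 36 - 27 = 9 bits, i.e. a single 4K pgd page.
 */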

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
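
/*
 * Example TCR for a 4K granule with 48-bit IAS and OAS: TG0 = 4K,
 * IPS = 48-bit, SH0/IRGN0/ORGN0 = inner-shareable write-back
 * write-allocate, T0SZ = 64 - 48 = 16, with EPD1 set so only the
 * TTBR0 region is ever walked.
 */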

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	      (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}
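
/*
 * Worked example of the concatenation above: a 4K granule with a 40-bit
 * IPA initially needs 4 levels (va_bits = 28, bits_per_level = 9) and a
 * 16-byte pgd of two entries. Concatenation replaces that with two
 * level-1 tables glued into one 8K pgd (pgd_pages = 2, well under
 * ARM_LPAE_S2_MAX_CONCAT_PAGES), cutting the stage-2 walk to 3 levels.
 */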

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}
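
/*
 * The 32-bit variants above reuse the 64-bit setup wholesale: AArch32
 * LPAE only supports the 4K granule (hence the pgsize_bitmap clamp),
 * its TTBCR/VTCR registers are 32 bits wide (hence the truncation), and
 * stage 1 must additionally set EAE to select the long-descriptor format.
 */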

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}
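
/*
 * The mapping tests above rely on two conventions: each supported page
 * size gets its own 1G-aligned slot (iova advances by SZ_1G per size, so
 * even a 1G block never collides with its neighbours), and every mapping
 * is identity (paddr == iova), which is what lets iova_to_phys() be
 * verified with a simple "+ 42" offset comparison.
 */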

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif