// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))
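
/*
 * Worked example: with a 4K granule (8-byte PTEs, so bits_per_level = 9
 * and ilog2(sizeof(arm_lpae_iopte)) = 3), levels 0..3 shift by 39, 30,
 * 21 and 12 bits respectively, matching the AArch64 4K translation
 * regime.
 */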

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
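
/*
 * e.g. a 4K granule gives 4K pages at level 3, 2M blocks at level 2 and
 * 1G blocks at level 1; a 64K granule gives 64K pages at level 3 and
 * 512M blocks at level 2.
 */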

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_TCR_TG0_4K		0
#define ARM_LPAE_TCR_TG0_64K		1
#define ARM_LPAE_TCR_TG0_16K		2

#define ARM_LPAE_TCR_TG1_16K		1
#define ARM_LPAE_TCR_TG1_4K		2
#define ARM_LPAE_TCR_TG1_64K		3

#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
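
/*
 * In other words, for the 64K granule with 52-bit output addresses,
 * PA[51:48] is carried in PTE bits [15:12] (which are RES0 for a 64K
 * page), hence the rotation by 48 - 12 = 36 bits in each direction.
 */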

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
			WARN_ON(1);
			return -EINVAL;
		}
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size)
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	if (prot & IOMMU_CACHE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}
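
/*
 * Example: a stage-1 request for IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE
 * yields nG | AP_UNPRIV | (ATTRINDX = CACHE) | SH_IS | AF: a non-global,
 * writable, unprivileged-accessible entry pointing at write-back
 * cacheable, inner-shareable memory, with the access flag preset.
 */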

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
		return size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl, iop->fmt)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_flush_walk(iop, iova, size,
						  ARM_LPAE_GRANULE(data));
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_page(iop, gather, iova, size);
		}

		return size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
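
/*
 * e.g. a pgsize_bitmap of SZ_4K | SZ_64K | SZ_2M on a CPU with 4K pages
 * selects the 4K granule and leaves SZ_4K | SZ_2M in the bitmap.
 */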

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
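
/*
 * Worked example: ias = 48 with a 4K granule gives pg_shift = 12,
 * bits_per_level = 9, va_bits = 36, levels = 4, start_level = 0 and
 * pgd_bits = 9, i.e. a single 4K page for the pgd.
 */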

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NON_STRICT |
			    IO_PGTABLE_QUIRK_ARM_TTBR1))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		tcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}
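
	/*
	 * e.g. ias = 40 with a 4K granule would need a 4-level walk from
	 * a 2-entry pgd; concatenating two level-1 tables instead gives
	 * a 3-level walk from a 1024-entry (8K) pgd.
	 */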

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_flush_leaf	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE, GFP_KERNEL))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif
1/*
2 * CPU-agnostic ARM page table allocator.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 * Copyright (C) 2014 ARM Limited
17 *
18 * Author: Will Deacon <will.deacon@arm.com>
19 */
20
21#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
22
23#include <linux/iommu.h>
24#include <linux/kernel.h>
25#include <linux/sizes.h>
26#include <linux/slab.h>
27#include <linux/types.h>
28#include <linux/dma-mapping.h>
29
30#include <asm/barrier.h>
31
32#include "io-pgtable.h"
33
34#define ARM_LPAE_MAX_ADDR_BITS 48
35#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
36#define ARM_LPAE_MAX_LEVELS 4
37
38/* Struct accessors */
39#define io_pgtable_to_data(x) \
40 container_of((x), struct arm_lpae_io_pgtable, iop)
41
42#define io_pgtable_ops_to_data(x) \
43 io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
44
45/*
46 * For consistency with the architecture, we always consider
47 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
48 */
49#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
50
51/*
52 * Calculate the right shift amount to get to the portion describing level l
53 * in a virtual address mapped by the pagetable in d.
54 */
55#define ARM_LPAE_LVL_SHIFT(l,d) \
56 ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
57 * (d)->bits_per_level) + (d)->pg_shift)
58
59#define ARM_LPAE_GRANULE(d) (1UL << (d)->pg_shift)
60
61#define ARM_LPAE_PAGES_PER_PGD(d) \
62 DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))
63
64/*
65 * Calculate the index at level l used to map virtual address a using the
66 * pagetable in d.
67 */
68#define ARM_LPAE_PGD_IDX(l,d) \
69 ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
70
71#define ARM_LPAE_LVL_IDX(a,l,d) \
72 (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
73 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
74
75/* Calculate the block/page mapping size at level l for pagetable in d. */
76#define ARM_LPAE_BLOCK_SIZE(l,d) \
77 (1 << (ilog2(sizeof(arm_lpae_iopte)) + \
78 ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
79
80/* Page table bits */
81#define ARM_LPAE_PTE_TYPE_SHIFT 0
82#define ARM_LPAE_PTE_TYPE_MASK 0x3
83
84#define ARM_LPAE_PTE_TYPE_BLOCK 1
85#define ARM_LPAE_PTE_TYPE_TABLE 3
86#define ARM_LPAE_PTE_TYPE_PAGE 3
87
88#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
89#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
90#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
91#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
92#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
93#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8)
94#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
95#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
96
97#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
98/* Ignore the contiguous bit for block splitting */
99#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
100#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
101 ARM_LPAE_PTE_ATTR_HI_MASK)
102
103/* Stage-1 PTE */
104#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
105#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
106#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
107#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
108
109/* Stage-2 PTE */
110#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
111#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
112#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
113#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
114#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
115#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
116
117/* Register bits */
118#define ARM_32_LPAE_TCR_EAE (1 << 31)
119#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
120
121#define ARM_LPAE_TCR_EPD1 (1 << 23)
122
123#define ARM_LPAE_TCR_TG0_4K (0 << 14)
124#define ARM_LPAE_TCR_TG0_64K (1 << 14)
125#define ARM_LPAE_TCR_TG0_16K (2 << 14)
126
127#define ARM_LPAE_TCR_SH0_SHIFT 12
128#define ARM_LPAE_TCR_SH0_MASK 0x3
129#define ARM_LPAE_TCR_SH_NS 0
130#define ARM_LPAE_TCR_SH_OS 2
131#define ARM_LPAE_TCR_SH_IS 3
132
133#define ARM_LPAE_TCR_ORGN0_SHIFT 10
134#define ARM_LPAE_TCR_IRGN0_SHIFT 8
135#define ARM_LPAE_TCR_RGN_MASK 0x3
136#define ARM_LPAE_TCR_RGN_NC 0
137#define ARM_LPAE_TCR_RGN_WBWA 1
138#define ARM_LPAE_TCR_RGN_WT 2
139#define ARM_LPAE_TCR_RGN_WB 3
140
141#define ARM_LPAE_TCR_SL0_SHIFT 6
142#define ARM_LPAE_TCR_SL0_MASK 0x3
143
144#define ARM_LPAE_TCR_T0SZ_SHIFT 0
145#define ARM_LPAE_TCR_SZ_MASK 0xf
146
147#define ARM_LPAE_TCR_PS_SHIFT 16
148#define ARM_LPAE_TCR_PS_MASK 0x7
149
150#define ARM_LPAE_TCR_IPS_SHIFT 32
151#define ARM_LPAE_TCR_IPS_MASK 0x7
152
153#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
154#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
155#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
156#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
157#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
158#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
159
160#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
161#define ARM_LPAE_MAIR_ATTR_MASK 0xff
162#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
163#define ARM_LPAE_MAIR_ATTR_NC 0x44
164#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
165#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
166#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
167#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
168
169/* IOPTE accessors */
170#define iopte_deref(pte,d) \
171 (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \
172 & ~(ARM_LPAE_GRANULE(d) - 1ULL)))
173
174#define iopte_type(pte,l) \
175 (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
176
177#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
178
179#define iopte_leaf(pte,l) \
180 (l == (ARM_LPAE_MAX_LEVELS - 1) ? \
181 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
182 (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
183
184#define iopte_to_pfn(pte,d) \
185 (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)
186
187#define pfn_to_iopte(pfn,d) \
188 (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
189
190struct arm_lpae_io_pgtable {
191 struct io_pgtable iop;
192
193 int levels;
194 size_t pgd_size;
195 unsigned long pg_shift;
196 unsigned long bits_per_level;
197
198 void *pgd;
199};
200
201typedef u64 arm_lpae_iopte;
202
203static bool selftest_running = false;
204
205static dma_addr_t __arm_lpae_dma_addr(void *pages)
206{
207 return (dma_addr_t)virt_to_phys(pages);
208}
209
210static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
211 struct io_pgtable_cfg *cfg)
212{
213 struct device *dev = cfg->iommu_dev;
214 dma_addr_t dma;
215 void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
216
217 if (!pages)
218 return NULL;
219
220 if (!selftest_running) {
221 dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
222 if (dma_mapping_error(dev, dma))
223 goto out_free;
224 /*
225 * We depend on the IOMMU being able to work with any physical
226 * address directly, so if the DMA layer suggests otherwise by
227 * translating or truncating them, that bodes very badly...
228 */
229 if (dma != virt_to_phys(pages))
230 goto out_unmap;
231 }
232
233 return pages;
234
235out_unmap:
236 dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
237 dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
238out_free:
239 free_pages_exact(pages, size);
240 return NULL;
241}
242
243static void __arm_lpae_free_pages(void *pages, size_t size,
244 struct io_pgtable_cfg *cfg)
245{
246 if (!selftest_running)
247 dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
248 size, DMA_TO_DEVICE);
249 free_pages_exact(pages, size);
250}
251
252static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
253 struct io_pgtable_cfg *cfg)
254{
255 *ptep = pte;
256
257 if (!selftest_running)
258 dma_sync_single_for_device(cfg->iommu_dev,
259 __arm_lpae_dma_addr(ptep),
260 sizeof(pte), DMA_TO_DEVICE);
261}
262
263static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
264 unsigned long iova, size_t size, int lvl,
265 arm_lpae_iopte *ptep);
266
267static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
268 unsigned long iova, phys_addr_t paddr,
269 arm_lpae_iopte prot, int lvl,
270 arm_lpae_iopte *ptep)
271{
272 arm_lpae_iopte pte = prot;
273 struct io_pgtable_cfg *cfg = &data->iop.cfg;
274
275 if (iopte_leaf(*ptep, lvl)) {
276 /* We require an unmap first */
277 WARN_ON(!selftest_running);
278 return -EEXIST;
279 } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
280 /*
281 * We need to unmap and free the old table before
282 * overwriting it with a block entry.
283 */
284 arm_lpae_iopte *tblp;
285 size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
286
287 tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
288 if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
289 return -EINVAL;
290 }
291
292 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
293 pte |= ARM_LPAE_PTE_NS;
294
295 if (lvl == ARM_LPAE_MAX_LEVELS - 1)
296 pte |= ARM_LPAE_PTE_TYPE_PAGE;
297 else
298 pte |= ARM_LPAE_PTE_TYPE_BLOCK;
299
300 pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
301 pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
302
303 __arm_lpae_set_pte(ptep, pte, cfg);
304 return 0;
305}
306
307static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
308 phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
309 int lvl, arm_lpae_iopte *ptep)
310{
311 arm_lpae_iopte *cptep, pte;
312 size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
313 struct io_pgtable_cfg *cfg = &data->iop.cfg;
314
315 /* Find our entry at the current level */
316 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
317
318 /* If we can install a leaf entry at this level, then do so */
319 if (size == block_size && (size & cfg->pgsize_bitmap))
320 return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
321
322 /* We can't allocate tables at the final level */
323 if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
324 return -EINVAL;
325
326 /* Grab a pointer to the next level */
327 pte = *ptep;
328 if (!pte) {
329 cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
330 GFP_ATOMIC, cfg);
331 if (!cptep)
332 return -ENOMEM;
333
334 pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
335 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
336 pte |= ARM_LPAE_PTE_NSTABLE;
337 __arm_lpae_set_pte(ptep, pte, cfg);
338 } else {
339 cptep = iopte_deref(pte, data);
340 }
341
342 /* Rinse, repeat */
343 return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
344}
345
346static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
347 int prot)
348{
349 arm_lpae_iopte pte;
350
351 if (data->iop.fmt == ARM_64_LPAE_S1 ||
352 data->iop.fmt == ARM_32_LPAE_S1) {
353 pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
354
355 if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
356 pte |= ARM_LPAE_PTE_AP_RDONLY;
357
358 if (prot & IOMMU_CACHE)
359 pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
360 << ARM_LPAE_PTE_ATTRINDX_SHIFT);
361 } else {
362 pte = ARM_LPAE_PTE_HAP_FAULT;
363 if (prot & IOMMU_READ)
364 pte |= ARM_LPAE_PTE_HAP_READ;
365 if (prot & IOMMU_WRITE)
366 pte |= ARM_LPAE_PTE_HAP_WRITE;
367 if (prot & IOMMU_CACHE)
368 pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
369 else
370 pte |= ARM_LPAE_PTE_MEMATTR_NC;
371 }
372
373 if (prot & IOMMU_NOEXEC)
374 pte |= ARM_LPAE_PTE_XN;
375
376 return pte;
377}
378
379static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
380 phys_addr_t paddr, size_t size, int iommu_prot)
381{
382 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
383 arm_lpae_iopte *ptep = data->pgd;
384 int ret, lvl = ARM_LPAE_START_LVL(data);
385 arm_lpae_iopte prot;
386
387 /* If no access, then nothing to do */
388 if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
389 return 0;
390
391 prot = arm_lpae_prot_to_pte(data, iommu_prot);
392 ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
393 /*
394 * Synchronise all PTE updates for the new mapping before there's
395 * a chance for anything to kick off a table walk for the new iova.
396 */
397 wmb();
398
399 return ret;
400}
401
402static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
403 arm_lpae_iopte *ptep)
404{
405 arm_lpae_iopte *start, *end;
406 unsigned long table_size;
407
408 if (lvl == ARM_LPAE_START_LVL(data))
409 table_size = data->pgd_size;
410 else
411 table_size = ARM_LPAE_GRANULE(data);
412
413 start = ptep;
414
415 /* Only leaf entries at the last level */
416 if (lvl == ARM_LPAE_MAX_LEVELS - 1)
417 end = ptep;
418 else
419 end = (void *)ptep + table_size;
420
421 while (ptep != end) {
422 arm_lpae_iopte pte = *ptep++;
423
424 if (!pte || iopte_leaf(pte, lvl))
425 continue;
426
427 __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
428 }
429
430 __arm_lpae_free_pages(start, table_size, &data->iop.cfg);
431}
432
433static void arm_lpae_free_pgtable(struct io_pgtable *iop)
434{
435 struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
436
437 __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
438 kfree(data);
439}
440
441static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
442 unsigned long iova, size_t size,
443 arm_lpae_iopte prot, int lvl,
444 arm_lpae_iopte *ptep, size_t blk_size)
445{
446 unsigned long blk_start, blk_end;
447 phys_addr_t blk_paddr;
448 arm_lpae_iopte table = 0;
449
450 blk_start = iova & ~(blk_size - 1);
451 blk_end = blk_start + blk_size;
452 blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
453
454 for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
455 arm_lpae_iopte *tablep;
456
457 /* Unmap! */
458 if (blk_start == iova)
459 continue;
460
461 /* __arm_lpae_map expects a pointer to the start of the table */
462 tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
463 if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
464 tablep) < 0) {
465 if (table) {
466 /* Free the table we allocated */
467 tablep = iopte_deref(table, data);
468 __arm_lpae_free_pgtable(data, lvl + 1, tablep);
469 }
470 return 0; /* Bytes unmapped */
471 }
472 }
473
474 __arm_lpae_set_pte(ptep, table, &data->iop.cfg);
475 iova &= ~(blk_size - 1);
476 io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
477 return size;
478}
479
480static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
481 unsigned long iova, size_t size, int lvl,
482 arm_lpae_iopte *ptep)
483{
484 arm_lpae_iopte pte;
485 struct io_pgtable *iop = &data->iop;
486 size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
487
488 /* Something went horribly wrong and we ran out of page table */
489 if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
490 return 0;
491
492 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
493 pte = *ptep;
494 if (WARN_ON(!pte))
495 return 0;
496
497 /* If the size matches this level, we're in the right place */
498 if (size == blk_size) {
499 __arm_lpae_set_pte(ptep, 0, &iop->cfg);
500
501 if (!iopte_leaf(pte, lvl)) {
502 /* Also flush any partial walks */
503 io_pgtable_tlb_add_flush(iop, iova, size,
504 ARM_LPAE_GRANULE(data), false);
505 io_pgtable_tlb_sync(iop);
506 ptep = iopte_deref(pte, data);
507 __arm_lpae_free_pgtable(data, lvl + 1, ptep);
508 } else {
509 io_pgtable_tlb_add_flush(iop, iova, size, size, true);
510 }
511
512 return size;
513 } else if (iopte_leaf(pte, lvl)) {
514 /*
515 * Insert a table at the next level to map the old region,
516 * minus the part we want to unmap
517 */
518 return arm_lpae_split_blk_unmap(data, iova, size,
519 iopte_prot(pte), lvl, ptep,
520 blk_size);
521 }
522
523 /* Keep on walkin' */
524 ptep = iopte_deref(pte, data);
525 return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
526}
527
528static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
529 size_t size)
530{
531 size_t unmapped;
532 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
533 arm_lpae_iopte *ptep = data->pgd;
534 int lvl = ARM_LPAE_START_LVL(data);
535
536 unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
537 if (unmapped)
538 io_pgtable_tlb_sync(&data->iop);
539
540 return unmapped;
541}
542
543static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
544 unsigned long iova)
545{
546 struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
547 arm_lpae_iopte pte, *ptep = data->pgd;
548 int lvl = ARM_LPAE_START_LVL(data);
549
550 do {
551 /* Valid IOPTE pointer? */
552 if (!ptep)
553 return 0;
554
555 /* Grab the IOPTE we're interested in */
556 pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));
557
558 /* Valid entry? */
559 if (!pte)
560 return 0;
561
562 /* Leaf entry? */
563 if (iopte_leaf(pte,lvl))
564 goto found_translation;
565
566 /* Take it to the next level */
567 ptep = iopte_deref(pte, data);
568 } while (++lvl < ARM_LPAE_MAX_LEVELS);
569
570 /* Ran out of page tables to walk */
571 return 0;
572
573found_translation:
574 iova &= (ARM_LPAE_GRANULE(data) - 1);
575 return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
576}
577
578static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
579{
580 unsigned long granule;
581
582 /*
583 * We need to restrict the supported page sizes to match the
584 * translation regime for a particular granule. Aim to match
585 * the CPU page size if possible, otherwise prefer smaller sizes.
586 * While we're at it, restrict the block sizes to match the
587 * chosen granule.
588 */
589 if (cfg->pgsize_bitmap & PAGE_SIZE)
590 granule = PAGE_SIZE;
591 else if (cfg->pgsize_bitmap & ~PAGE_MASK)
592 granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
593 else if (cfg->pgsize_bitmap & PAGE_MASK)
594 granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
595 else
596 granule = 0;
597
598 switch (granule) {
599 case SZ_4K:
600 cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
601 break;
602 case SZ_16K:
603 cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
604 break;
605 case SZ_64K:
606 cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
607 break;
608 default:
609 cfg->pgsize_bitmap = 0;
610 }
611}
612
613static struct arm_lpae_io_pgtable *
614arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
615{
616 unsigned long va_bits, pgd_bits;
617 struct arm_lpae_io_pgtable *data;
618
619 arm_lpae_restrict_pgsizes(cfg);
620
621 if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
622 return NULL;
623
624 if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
625 return NULL;
626
627 if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
628 return NULL;
629
630 if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
631 dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
632 return NULL;
633 }
634
635 data = kmalloc(sizeof(*data), GFP_KERNEL);
636 if (!data)
637 return NULL;
638
639 data->pg_shift = __ffs(cfg->pgsize_bitmap);
640 data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
641
642 va_bits = cfg->ias - data->pg_shift;
643 data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
644
645 /* Calculate the actual size of our pgd (without concatenation) */
646 pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
647 data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

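	/*
	 * T0SZ is the number of unaddressed top bits: 64 - IAS,
	 * e.g. 16 for a 48-bit IAS.
	 */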
	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
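	/*
	 * For example, a 4K granule with a 40-bit IAS would need four
	 * levels; instead, the tiny level-0 table is replaced by two
	 * concatenated level-1 pages (2 <= ARM_LPAE_S2_MAX_CONCAT_PAGES)
	 * and the walk starts at level 1.
	 */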
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	      (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
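	/*
	 * SL0 is an inverted encoding of the starting level, e.g. with a
	 * 4K granule a level-1 start (sl == 2 after the adjustment above)
	 * is encoded as SL0 == 1.
	 */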
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
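		/*
		 * TTBCR.EAE selects the long-descriptor format, and the
		 * register is only 32 bits wide on AArch32.
		 */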
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
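		/* VTCR is a 32-bit register on AArch32, so drop the upper bits */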
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

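/*
 * The dummy TLB ops just sanity-check the cookie and the invalidation
 * size; the selftests only exercise the page-table updates themselves.
 */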
static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({ \
		WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
		arm_lpae_dump_ops(ops); \
		selftest_running = false; \
		-EFAULT; \
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
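		/*
		 * Each supported size gets an identity (iova == paddr)
		 * mapping in its own 1GB slot: e.g. 4K at 0, 2M at 1GB
		 * and 1G at 2GB for the 4K granule.
		 */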
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif