v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * CPU-agnostic ARM page table allocator.
   4 *
   5 * Copyright (C) 2014 ARM Limited
   6 *
   7 * Author: Will Deacon <will.deacon@arm.com>
   8 */
   9
  10#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt
  11
  12#include <linux/atomic.h>
  13#include <linux/bitops.h>
  14#include <linux/io-pgtable.h>
  15#include <linux/kernel.h>
  16#include <linux/sizes.h>
  17#include <linux/slab.h>
  18#include <linux/types.h>
  19#include <linux/dma-mapping.h>
  20
  21#include <asm/barrier.h>
  22
  23#include "io-pgtable-arm.h"
  24#include "iommu-pages.h"
  25
  26#define ARM_LPAE_MAX_ADDR_BITS		52
  27#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
  28#define ARM_LPAE_MAX_LEVELS		4
  29
  30/* Struct accessors */
  31#define io_pgtable_to_data(x)						\
  32	container_of((x), struct arm_lpae_io_pgtable, iop)
  33
  34#define io_pgtable_ops_to_data(x)					\
  35	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
  36
  37/*
  38 * Calculate the right shift amount to get to the portion describing level l
  39 * in a virtual address mapped by the pagetable in d.
  40 */
  41#define ARM_LPAE_LVL_SHIFT(l,d)						\
  42	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
  43	ilog2(sizeof(arm_lpae_iopte)))
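/*
 * Worked example (illustrative, not part of the original source): with a
 * 4KiB granule, arm_lpae_iopte is 8 bytes, so bits_per_level = 12 - 3 = 9
 * and ARM_LPAE_LVL_SHIFT(3,d) = (4 - 3) * 9 + 3 = 12; level 2 shifts by
 * 21 (2MiB blocks) and level 1 by 30 (1GiB blocks).
 */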
  44
  45#define ARM_LPAE_GRANULE(d)						\
  46	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
  47#define ARM_LPAE_PGD_SIZE(d)						\
  48	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)
  49
  50#define ARM_LPAE_PTES_PER_TABLE(d)					\
  51	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))
  52
  53/*
  54 * Calculate the index at level l used to map virtual address a using the
  55 * pagetable in d.
  56 */
  57#define ARM_LPAE_PGD_IDX(l,d)						\
  58	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)
  59
  60#define ARM_LPAE_LVL_IDX(a,l,d)						\
  61	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
  62	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
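/*
 * Illustrative example: with a 4K granule and level 3 below the start
 * level, ARM_LPAE_PGD_IDX() contributes nothing, so
 * ARM_LPAE_LVL_IDX(a,3,d) reduces to ((a) >> 12) & 0x1ff, the 9-bit
 * last-level table index.
 */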
  63
  64/* Calculate the block/page mapping size at level l for pagetable in d. */
  65#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
  66
  67/* Page table bits */
  68#define ARM_LPAE_PTE_TYPE_SHIFT		0
  69#define ARM_LPAE_PTE_TYPE_MASK		0x3
  70
  71#define ARM_LPAE_PTE_TYPE_BLOCK		1
  72#define ARM_LPAE_PTE_TYPE_TABLE		3
  73#define ARM_LPAE_PTE_TYPE_PAGE		3
  74
  75#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)
  76
  77#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
  78#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
  79#define ARM_LPAE_PTE_DBM		(((arm_lpae_iopte)1) << 51)
  80#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
  81#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
  82#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
  83#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
  84#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
  85#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)
  86
  87#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
  88/* Ignore the contiguous bit for block splitting */
  89#define ARM_LPAE_PTE_ATTR_HI_MASK	(ARM_LPAE_PTE_XN | ARM_LPAE_PTE_DBM)
  90#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
  91					 ARM_LPAE_PTE_ATTR_HI_MASK)
  92/* Software bit for solving coherency races */
  93#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)
  94
  95/* Stage-1 PTE */
  96#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
  97#define ARM_LPAE_PTE_AP_RDONLY_BIT	7
  98#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)1) << \
  99					   ARM_LPAE_PTE_AP_RDONLY_BIT)
 100#define ARM_LPAE_PTE_AP_WR_CLEAN_MASK	(ARM_LPAE_PTE_AP_RDONLY | \
 101					 ARM_LPAE_PTE_DBM)
 102#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
 103#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
 104
 105/* Stage-2 PTE */
 106#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
 107#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
 108#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
 109/*
 110 * For !FWB these encode to:
 111 *  1111 = Normal outer write-back cacheable / inner write-back cacheable
 112 *         Permit S1 to override
 113 *  0101 = Normal non-cacheable / inner non-cacheable
 114 *  0001 = Device / Device-nGnRE
 115 * For S2FWB these encode to:
 116 *  0110 = Force Normal write-back
 117 *  0101 = Normal* is forced Normal-NC, Device unchanged
 118 *  0001 = Force Device-nGnRE
 119 */
 120#define ARM_LPAE_PTE_MEMATTR_FWB_WB	(((arm_lpae_iopte)0x6) << 2)
 121#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
 122#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
 123#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
 124
 125/* Register bits */
 126#define ARM_LPAE_VTCR_SL0_MASK		0x3
 127
 128#define ARM_LPAE_TCR_T0SZ_SHIFT		0
 129
 130#define ARM_LPAE_VTCR_PS_SHIFT		16
 131#define ARM_LPAE_VTCR_PS_MASK		0x7
 132
 133#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
 134#define ARM_LPAE_MAIR_ATTR_MASK		0xff
 135#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
 136#define ARM_LPAE_MAIR_ATTR_NC		0x44
 137#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
 138#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
 139#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
 140#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
 141#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
 142#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3
 143
 144#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
 145#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
 146#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)
 147
 148#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
 149#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
 150
 151/* IOPTE accessors */
 152#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
 153
 154#define iopte_type(pte)					\
 155	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
 156
 157#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)
 158
 159#define iopte_writeable_dirty(pte)				\
 160	(((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM)
 161
 162#define iopte_set_writeable_clean(ptep)				\
 163	set_bit(ARM_LPAE_PTE_AP_RDONLY_BIT, (unsigned long *)(ptep))
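/*
 * Hedged note (annotation, assuming the usual Arm HTTU behaviour): a
 * writeable-clean PTE carries both ARM_LPAE_PTE_DBM and
 * ARM_LPAE_PTE_AP_RDONLY; hardware clears AP_RDONLY on the first write,
 * so "DBM set, AP_RDONLY clear" reads as writeable-dirty above, and
 * iopte_set_writeable_clean() re-arms the entry by setting AP_RDONLY.
 */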
 164
 165struct arm_lpae_io_pgtable {
 166	struct io_pgtable	iop;
 167
 168	int			pgd_bits;
 169	int			start_level;
 170	int			bits_per_level;
 171
 172	void			*pgd;
 173};
 174
 175typedef u64 arm_lpae_iopte;
 176
 177static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
 178			      enum io_pgtable_fmt fmt)
 179{
 180	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
 181		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;
 182
 183	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
 184}
 185
 186static inline bool iopte_table(arm_lpae_iopte pte, int lvl)
 187{
 188	if (lvl == (ARM_LPAE_MAX_LEVELS - 1))
 189		return false;
 190	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE;
 191}
 192
 193static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
 194				     struct arm_lpae_io_pgtable *data)
 195{
 196	arm_lpae_iopte pte = paddr;
 197
 198	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
 199	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
 200}
 201
 202static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
 203				  struct arm_lpae_io_pgtable *data)
 204{
 205	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
 206
 207	if (ARM_LPAE_GRANULE(data) < SZ_64K)
 208		return paddr;
 209
 210	/* Rotate the packed high-order bits back to the top */
 211	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
 212}
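/*
 * Worked example (illustrative): with the 64K granule, PA[51:48] is
 * packed into PTE[15:12]. paddr_to_iopte() folds the high bits down via
 * (pte >> 36), and iopte_to_paddr() rotates them back with
 * (paddr << 36), masked to bits [51:16] by (ARM_LPAE_PTE_ADDR_MASK << 4).
 */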
 213
 214/*
 215 * Convert an index returned by ARM_LPAE_PGD_IDX(), which can point into
 216 * a concatenated PGD, into the maximum number of entries that can be
 217 * mapped in the same table page.
 218 */
 219static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
 220{
 221	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
 222
 223	return ptes_per_table - (i & (ptes_per_table - 1));
 224}
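/*
 * Illustrative example: with 512 PTEs per table and i == 510, only two
 * entries remain before the table boundary, so a multi-entry map/unmap
 * is capped at 2 and the caller loops for the remainder.
 */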
 225
 226static bool selftest_running = false;
 227
 228static dma_addr_t __arm_lpae_dma_addr(void *pages)
 229{
 230	return (dma_addr_t)virt_to_phys(pages);
 231}
 232
 233static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
 234				    struct io_pgtable_cfg *cfg,
 235				    void *cookie)
 236{
 237	struct device *dev = cfg->iommu_dev;
 238	int order = get_order(size);
 239	dma_addr_t dma;
 240	void *pages;
 241
 242	VM_BUG_ON((gfp & __GFP_HIGHMEM));
 243
 244	if (cfg->alloc)
 245		pages = cfg->alloc(cookie, size, gfp);
 246	else
 247		pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
 248
 249	if (!pages)
 250		return NULL;
 251
 252	if (!cfg->coherent_walk) {
 253		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
 254		if (dma_mapping_error(dev, dma))
 255			goto out_free;
 256		/*
 257		 * We depend on the IOMMU being able to work with any physical
 258		 * address directly, so if the DMA layer suggests otherwise by
 259		 * translating or truncating them, that bodes very badly...
 260		 */
 261		if (dma != virt_to_phys(pages))
 262			goto out_unmap;
 263	}
 264
 265	return pages;
 266
 267out_unmap:
 268	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
 269	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
 270
 271out_free:
 272	if (cfg->free)
 273		cfg->free(cookie, pages, size);
 274	else
 275		iommu_free_pages(pages, order);
 276
 277	return NULL;
 278}
 279
 280static void __arm_lpae_free_pages(void *pages, size_t size,
 281				  struct io_pgtable_cfg *cfg,
 282				  void *cookie)
 283{
 284	if (!cfg->coherent_walk)
 285		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
 286				 size, DMA_TO_DEVICE);
 287
 288	if (cfg->free)
 289		cfg->free(cookie, pages, size);
 290	else
 291		iommu_free_pages(pages, get_order(size));
 292}
 293
 294static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
 295				struct io_pgtable_cfg *cfg)
 296{
 297	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
 298				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
 299}
 300
 301static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries)
 302{
 303	for (int i = 0; i < num_entries; i++)
 304		ptep[i] = 0;
 305
 306	if (!cfg->coherent_walk && num_entries)
 307		__arm_lpae_sync_pte(ptep, num_entries, cfg);
 308}
 309
 310static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 311			       struct iommu_iotlb_gather *gather,
 312			       unsigned long iova, size_t size, size_t pgcount,
 313			       int lvl, arm_lpae_iopte *ptep);
 314
 315static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 316				phys_addr_t paddr, arm_lpae_iopte prot,
 317				int lvl, int num_entries, arm_lpae_iopte *ptep)
 318{
 319	arm_lpae_iopte pte = prot;
 320	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 321	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 322	int i;
 323
 324	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
 325		pte |= ARM_LPAE_PTE_TYPE_PAGE;
 326	else
 327		pte |= ARM_LPAE_PTE_TYPE_BLOCK;
 328
 329	for (i = 0; i < num_entries; i++)
 330		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);
 331
 332	if (!cfg->coherent_walk)
 333		__arm_lpae_sync_pte(ptep, num_entries, cfg);
 334}
 335
 336static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 337			     unsigned long iova, phys_addr_t paddr,
 338			     arm_lpae_iopte prot, int lvl, int num_entries,
 339			     arm_lpae_iopte *ptep)
 340{
 341	int i;
 342
 343	for (i = 0; i < num_entries; i++)
 344		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
 345			/* We require an unmap first */
 346			WARN_ON(!selftest_running);
 347			return -EEXIST;
 348		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
 349			/*
 350			 * We need to unmap and free the old table before
 351			 * overwriting it with a block entry.
 352			 */
 353			arm_lpae_iopte *tblp;
 354			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 355
 356			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
 357			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
 358					     lvl, tblp) != sz) {
 359				WARN_ON(1);
 360				return -EINVAL;
 361			}
 362		}
 363
 364	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
 365	return 0;
 366}
 367
 368static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
 369					     arm_lpae_iopte *ptep,
 370					     arm_lpae_iopte curr,
 371					     struct arm_lpae_io_pgtable *data)
 372{
 373	arm_lpae_iopte old, new;
 374	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 375
 376	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
 377	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
 378		new |= ARM_LPAE_PTE_NSTABLE;
 379
 380	/*
 381	 * Ensure the table itself is visible before its PTE can be.
 382	 * Whilst we could get away with cmpxchg64_release below, this
 383	 * doesn't have any ordering semantics when !CONFIG_SMP.
 384	 */
 385	dma_wmb();
 386
 387	old = cmpxchg64_relaxed(ptep, curr, new);
 388
 389	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
 390		return old;
 391
 392	/* Even if it's not ours, there's no point waiting; just kick it */
 393	__arm_lpae_sync_pte(ptep, 1, cfg);
 394	if (old == curr)
 395		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);
 396
 397	return old;
 398}
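/*
 * Illustrative scenario (annotation): two mappers race to populate the
 * same empty entry. The cmpxchg64 loser sees old != curr, frees its
 * freshly allocated table back in __arm_lpae_map(), and walks the
 * winner's table instead; ARM_LPAE_PTE_SW_SYNC records that the entry
 * was already made visible to a non-coherent walker.
 */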
 399
 400static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 401			  phys_addr_t paddr, size_t size, size_t pgcount,
 402			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
 403			  gfp_t gfp, size_t *mapped)
 404{
 405	arm_lpae_iopte *cptep, pte;
 406	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
 407	size_t tblsz = ARM_LPAE_GRANULE(data);
 408	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 409	int ret = 0, num_entries, max_entries, map_idx_start;
 410
 411	/* Find our entry at the current level */
 412	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
 413	ptep += map_idx_start;
 414
 415	/* If we can install a leaf entry at this level, then do so */
 416	if (size == block_size) {
 417		max_entries = arm_lpae_max_entries(map_idx_start, data);
 418		num_entries = min_t(int, pgcount, max_entries);
 419		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
 420		if (!ret)
 421			*mapped += num_entries * size;
 422
 423		return ret;
 424	}
 425
 426	/* We can't allocate tables at the final level */
 427	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
 428		return -EINVAL;
 429
 430	/* Grab a pointer to the next level */
 431	pte = READ_ONCE(*ptep);
 432	if (!pte) {
 433		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
 434		if (!cptep)
 435			return -ENOMEM;
 436
 437		pte = arm_lpae_install_table(cptep, ptep, 0, data);
 438		if (pte)
 439			__arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
 440	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
 441		__arm_lpae_sync_pte(ptep, 1, cfg);
 442	}
 443
 444	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
 445		cptep = iopte_deref(pte, data);
 446	} else if (pte) {
 447		/* We require an unmap first */
 448		WARN_ON(!selftest_running);
 449		return -EEXIST;
 450	}
 451
 452	/* Rinse, repeat */
 453	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
 454			      cptep, gfp, mapped);
 455}
 456
 457static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
 458					   int prot)
 459{
 460	arm_lpae_iopte pte;
 461
 462	if (data->iop.fmt == ARM_64_LPAE_S1 ||
 463	    data->iop.fmt == ARM_32_LPAE_S1) {
 464		pte = ARM_LPAE_PTE_nG;
 465		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
 466			pte |= ARM_LPAE_PTE_AP_RDONLY;
 467		else if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_HD)
 468			pte |= ARM_LPAE_PTE_DBM;
 469		if (!(prot & IOMMU_PRIV))
 470			pte |= ARM_LPAE_PTE_AP_UNPRIV;
 471	} else {
 472		pte = ARM_LPAE_PTE_HAP_FAULT;
 473		if (prot & IOMMU_READ)
 474			pte |= ARM_LPAE_PTE_HAP_READ;
 475		if (prot & IOMMU_WRITE)
 476			pte |= ARM_LPAE_PTE_HAP_WRITE;
 477	}
 478
 479	/*
 480	 * Note that this logic is structured to accommodate Mali LPAE
 481	 * having stage-1-like attributes but stage-2-like permissions.
 482	 */
 483	if (data->iop.fmt == ARM_64_LPAE_S2 ||
 484	    data->iop.fmt == ARM_32_LPAE_S2) {
 485		if (prot & IOMMU_MMIO) {
 486			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
 487		} else if (prot & IOMMU_CACHE) {
 488			if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_S2FWB)
 489				pte |= ARM_LPAE_PTE_MEMATTR_FWB_WB;
 490			else
 491				pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
 492		} else {
 493			pte |= ARM_LPAE_PTE_MEMATTR_NC;
 494		}
 495	} else {
 496		if (prot & IOMMU_MMIO)
 497			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
 498				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 499		else if (prot & IOMMU_CACHE)
 500			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
 501				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 502	}
 503
 504	/*
 505	 * Also Mali has its own notions of shareability wherein its Inner
 506	 * domain covers the cores within the GPU, and its Outer domain is
 507	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
 508	 * terms, depending on coherency).
 509	 */
 510	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
 511		pte |= ARM_LPAE_PTE_SH_IS;
 512	else
 513		pte |= ARM_LPAE_PTE_SH_OS;
 514
 515	if (prot & IOMMU_NOEXEC)
 516		pte |= ARM_LPAE_PTE_XN;
 517
 518	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
 519		pte |= ARM_LPAE_PTE_NS;
 520
 521	if (data->iop.fmt != ARM_MALI_LPAE)
 522		pte |= ARM_LPAE_PTE_AF;
 523
 524	return pte;
 525}
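/*
 * Illustrative example: for a stage-1 format, IOMMU_READ | IOMMU_WRITE |
 * IOMMU_CACHE yields nG | AP_UNPRIV |
 * (MAIR_ATTR_IDX_CACHE << ATTRINDX_SHIFT) | SH_IS | AF, plus
 * ARM_LPAE_PTE_DBM when IO_PGTABLE_QUIRK_ARM_HD is set.
 */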
 526
 527static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 528			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
 529			      int iommu_prot, gfp_t gfp, size_t *mapped)
 530{
 531	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 532	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 533	arm_lpae_iopte *ptep = data->pgd;
 534	int ret, lvl = data->start_level;
 535	arm_lpae_iopte prot;
 536	long iaext = (s64)iova >> cfg->ias;
 537
 538	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
 539		return -EINVAL;
 540
 541	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
 542		iaext = ~iaext;
 543	if (WARN_ON(iaext || paddr >> cfg->oas))
 544		return -ERANGE;
 545
 546	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
 547		return -EINVAL;
 548
 549	prot = arm_lpae_prot_to_pte(data, iommu_prot);
 550	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
 551			     ptep, gfp, mapped);
 552	/*
 553	 * Synchronise all PTE updates for the new mapping before there's
 554	 * a chance for anything to kick off a table walk for the new iova.
 555	 */
 556	wmb();
 557
 558	return ret;
 559}
 560
 561static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
 562				    arm_lpae_iopte *ptep)
 563{
 564	arm_lpae_iopte *start, *end;
 565	unsigned long table_size;
 566
 567	if (lvl == data->start_level)
 568		table_size = ARM_LPAE_PGD_SIZE(data);
 569	else
 570		table_size = ARM_LPAE_GRANULE(data);
 571
 572	start = ptep;
 573
 574	/* Only leaf entries at the last level */
 575	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
 576		end = ptep;
 577	else
 578		end = (void *)ptep + table_size;
 579
 580	while (ptep != end) {
 581		arm_lpae_iopte pte = *ptep++;
 582
 583		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
 584			continue;
 585
 586		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
 587	}
 588
 589	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
 590}
 591
 592static void arm_lpae_free_pgtable(struct io_pgtable *iop)
 593{
 594	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
 595
 596	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
 597	kfree(data);
 598}
 599
 600static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 601			       struct iommu_iotlb_gather *gather,
 602			       unsigned long iova, size_t size, size_t pgcount,
 603			       int lvl, arm_lpae_iopte *ptep)
 604{
 605	arm_lpae_iopte pte;
 606	struct io_pgtable *iop = &data->iop;
 607	int i = 0, num_entries, max_entries, unmap_idx_start;
 608
 609	/* Something went horribly wrong and we ran out of page table */
 610	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
 611		return 0;
 612
 613	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
 614	ptep += unmap_idx_start;
 615	pte = READ_ONCE(*ptep);
 616	if (WARN_ON(!pte))
 617		return 0;
 618
 619	/* If the size matches this level, we're in the right place */
 620	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
 621		max_entries = arm_lpae_max_entries(unmap_idx_start, data);
 622		num_entries = min_t(int, pgcount, max_entries);
 623
 624		/* Find and handle non-leaf entries */
 625		for (i = 0; i < num_entries; i++) {
 626			pte = READ_ONCE(ptep[i]);
 627			if (WARN_ON(!pte))
 628				break;
 629
 630			if (!iopte_leaf(pte, lvl, iop->fmt)) {
 631				__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);
 632
 633				/* Also flush any partial walks */
 634				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
 635							  ARM_LPAE_GRANULE(data));
 636				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
 637			}
 638		}
 639
 640		/* Clear the remaining entries */
 641		__arm_lpae_clear_pte(ptep, &iop->cfg, i);
 642
 643		if (gather && !iommu_iotlb_gather_queued(gather))
 644			for (int j = 0; j < i; j++)
 645				io_pgtable_tlb_add_page(iop, gather, iova + j * size, size);
 646
 647		return i * size;
 648	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
 649		WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed");
 650		return 0;
 651	}
 652
 653	/* Keep on walkin' */
 654	ptep = iopte_deref(pte, data);
 655	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
 656}
 657
 658static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
 659				   size_t pgsize, size_t pgcount,
 660				   struct iommu_iotlb_gather *gather)
 661{
 662	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 663	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 664	arm_lpae_iopte *ptep = data->pgd;
 665	long iaext = (s64)iova >> cfg->ias;
 666
 667	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
 668		return 0;
 669
 670	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
 671		iaext = ~iaext;
 672	if (WARN_ON(iaext))
 673		return 0;
 674
 675	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
 676				data->start_level, ptep);
 677}
 678
 679static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
 680					 unsigned long iova)
 681{
 682	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 683	arm_lpae_iopte pte, *ptep = data->pgd;
 684	int lvl = data->start_level;
 685
 686	do {
 687		/* Valid IOPTE pointer? */
 688		if (!ptep)
 689			return 0;
 690
 691		/* Grab the IOPTE we're interested in */
 692		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 693		pte = READ_ONCE(*ptep);
 694
 695		/* Valid entry? */
 696		if (!pte)
 697			return 0;
 698
 699		/* Leaf entry? */
 700		if (iopte_leaf(pte, lvl, data->iop.fmt))
 701			goto found_translation;
 702
 703		/* Take it to the next level */
 704		ptep = iopte_deref(pte, data);
 705	} while (++lvl < ARM_LPAE_MAX_LEVELS);
 706
 707	/* Ran out of page tables to walk */
 708	return 0;
 709
 710found_translation:
 711	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
 712	return iopte_to_paddr(pte, data) | iova;
 713}
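/*
 * Illustrative example: a hit on a level-2 block with a 4K granule keeps
 * iova & (SZ_2M - 1) as the offset and ORs it into the block's output
 * address.
 */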
 714
 715struct io_pgtable_walk_data {
 716	struct iommu_dirty_bitmap	*dirty;
 717	unsigned long			flags;
 718	u64				addr;
 719	const u64			end;
 720};
 721
 722static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
 723				       struct io_pgtable_walk_data *walk_data,
 724				       arm_lpae_iopte *ptep,
 725				       int lvl);
 726
 727static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data,
 728				  struct io_pgtable_walk_data *walk_data,
 729				  arm_lpae_iopte *ptep, int lvl)
 730{
 731	struct io_pgtable *iop = &data->iop;
 732	arm_lpae_iopte pte = READ_ONCE(*ptep);
 733
 734	if (iopte_leaf(pte, lvl, iop->fmt)) {
 735		size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
 736
 737		if (iopte_writeable_dirty(pte)) {
 738			iommu_dirty_bitmap_record(walk_data->dirty,
 739						  walk_data->addr, size);
 740			if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
 741				iopte_set_writeable_clean(ptep);
 742		}
 743		walk_data->addr += size;
 744		return 0;
 745	}
 746
 747	if (WARN_ON(!iopte_table(pte, lvl)))
 748		return -EINVAL;
 749
 750	ptep = iopte_deref(pte, data);
 751	return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1);
 752}
 753
 754static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
 755				       struct io_pgtable_walk_data *walk_data,
 756				       arm_lpae_iopte *ptep,
 757				       int lvl)
 758{
 759	u32 idx;
 760	int max_entries, ret;
 761
 762	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
 763		return -EINVAL;
 764
 765	if (lvl == data->start_level)
 766		max_entries = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
 767	else
 768		max_entries = ARM_LPAE_PTES_PER_TABLE(data);
 769
 770	for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
 771	     (idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
 772		ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl);
 773		if (ret)
 774			return ret;
 775	}
 776
 777	return 0;
 778}
 779
 780static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
 781					 unsigned long iova, size_t size,
 782					 unsigned long flags,
 783					 struct iommu_dirty_bitmap *dirty)
 784{
 785	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 786	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 787	struct io_pgtable_walk_data walk_data = {
 788		.dirty = dirty,
 789		.flags = flags,
 790		.addr = iova,
 791		.end = iova + size,
 792	};
 793	arm_lpae_iopte *ptep = data->pgd;
 794	int lvl = data->start_level;
 795
 796	if (WARN_ON(!size))
 797		return -EINVAL;
 798	if (WARN_ON((iova + size - 1) & ~(BIT(cfg->ias) - 1)))
 799		return -EINVAL;
 800	if (data->iop.fmt != ARM_64_LPAE_S1)
 801		return -EINVAL;
 802
 803	return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl);
 804}
 805
 806static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
 807{
 808	unsigned long granule, page_sizes;
 809	unsigned int max_addr_bits = 48;
 810
 811	/*
 812	 * We need to restrict the supported page sizes to match the
 813	 * translation regime for a particular granule. Aim to match
 814	 * the CPU page size if possible, otherwise prefer smaller sizes.
 815	 * While we're at it, restrict the block sizes to match the
 816	 * chosen granule.
 817	 */
 818	if (cfg->pgsize_bitmap & PAGE_SIZE)
 819		granule = PAGE_SIZE;
 820	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
 821		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
 822	else if (cfg->pgsize_bitmap & PAGE_MASK)
 823		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
 824	else
 825		granule = 0;
 826
 827	switch (granule) {
 828	case SZ_4K:
 829		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
 830		break;
 831	case SZ_16K:
 832		page_sizes = (SZ_16K | SZ_32M);
 833		break;
 834	case SZ_64K:
 835		max_addr_bits = 52;
 836		page_sizes = (SZ_64K | SZ_512M);
 837		if (cfg->oas > 48)
 838			page_sizes |= 1ULL << 42; /* 4TB */
 839		break;
 840	default:
 841		page_sizes = 0;
 842	}
 843
 844	cfg->pgsize_bitmap &= page_sizes;
 845	cfg->ias = min(cfg->ias, max_addr_bits);
 846	cfg->oas = min(cfg->oas, max_addr_bits);
 847}
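/*
 * Worked example (illustrative): with PAGE_SIZE == SZ_4K and a caller
 * bitmap of SZ_4K | SZ_64K, the 4K granule wins; masking against
 * SZ_4K | SZ_2M | SZ_1G then drops SZ_64K, leaving only SZ_4K.
 */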
 848
 849static struct arm_lpae_io_pgtable *
 850arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
 851{
 852	struct arm_lpae_io_pgtable *data;
 853	int levels, va_bits, pg_shift;
 854
 855	arm_lpae_restrict_pgsizes(cfg);
 856
 857	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
 858		return NULL;
 859
 860	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
 861		return NULL;
 862
 863	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
 864		return NULL;
 865
 866	data = kmalloc(sizeof(*data), GFP_KERNEL);
 867	if (!data)
 868		return NULL;
 869
 870	pg_shift = __ffs(cfg->pgsize_bitmap);
 871	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));
 872
 873	va_bits = cfg->ias - pg_shift;
 874	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
 875	data->start_level = ARM_LPAE_MAX_LEVELS - levels;
 876
 877	/* Calculate the actual size of our pgd (without concatenation) */
 878	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
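	/*
	 * Worked numbers (illustrative): a 4K granule with a 48-bit IAS gives
	 * pg_shift = 12, bits_per_level = 9, va_bits = 36, levels = 4,
	 * start_level = 0 and pgd_bits = 36 - 27 = 9, i.e. a single 4K PGD.
	 */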
 879
 880	data->iop.ops = (struct io_pgtable_ops) {
 881		.map_pages	= arm_lpae_map_pages,
 882		.unmap_pages	= arm_lpae_unmap_pages,
 883		.iova_to_phys	= arm_lpae_iova_to_phys,
 884		.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
 885	};
 886
 887	return data;
 888}
 889
 890static struct io_pgtable *
 891arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 892{
 893	u64 reg;
 894	struct arm_lpae_io_pgtable *data;
 895	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
 896	bool tg1;
 897
 898	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
 899			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
 900			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
 901			    IO_PGTABLE_QUIRK_ARM_HD))
 902		return NULL;
 903
 904	data = arm_lpae_alloc_pgtable(cfg);
 905	if (!data)
 906		return NULL;
 907
 908	/* TCR */
 909	if (cfg->coherent_walk) {
 910		tcr->sh = ARM_LPAE_TCR_SH_IS;
 911		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
 912		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
 913		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
 914			goto out_free_data;
 915	} else {
 916		tcr->sh = ARM_LPAE_TCR_SH_OS;
 917		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
 918		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
 919			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
 920		else
 921			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
 922	}
 923
 924	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
 925	switch (ARM_LPAE_GRANULE(data)) {
 926	case SZ_4K:
 927		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
 928		break;
 929	case SZ_16K:
 930		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
 931		break;
 932	case SZ_64K:
 933		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
 934		break;
 935	}
 936
 937	switch (cfg->oas) {
 938	case 32:
 939		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
 940		break;
 941	case 36:
 942		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
 943		break;
 944	case 40:
 945		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
 946		break;
 947	case 42:
 948		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
 949		break;
 950	case 44:
 951		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
 952		break;
 953	case 48:
 954		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
 955		break;
 956	case 52:
 957		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
 958		break;
 959	default:
 960		goto out_free_data;
 961	}
 962
 963	tcr->tsz = 64ULL - cfg->ias;
 964
 965	/* MAIRs */
 966	reg = (ARM_LPAE_MAIR_ATTR_NC
 967	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
 968	      (ARM_LPAE_MAIR_ATTR_WBRWA
 969	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
 970	      (ARM_LPAE_MAIR_ATTR_DEVICE
 971	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
 972	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
 973	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));
 974
 975	cfg->arm_lpae_s1_cfg.mair = reg;
 976
 977	/* Looking good; allocate a pgd */
 978	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
 979					   GFP_KERNEL, cfg, cookie);
 980	if (!data->pgd)
 981		goto out_free_data;
 982
 983	/* Ensure the empty pgd is visible before any actual TTBR write */
 984	wmb();
 985
 986	/* TTBR */
 987	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
 988	return &data->iop;
 989
 990out_free_data:
 991	kfree(data);
 992	return NULL;
 993}
 994
 995static struct io_pgtable *
 996arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 997{
 998	u64 sl;
 999	struct arm_lpae_io_pgtable *data;
1000	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
1001
1002	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB))
1003		return NULL;
1004
1005	data = arm_lpae_alloc_pgtable(cfg);
1006	if (!data)
1007		return NULL;
1008
1009	/*
1010	 * Concatenate PGDs at level 1 if possible in order to reduce
1011	 * the depth of the stage-2 walk.
1012	 */
1013	if (data->start_level == 0) {
1014		unsigned long pgd_pages;
1015
1016		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
1017		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
1018			data->pgd_bits += data->bits_per_level;
1019			data->start_level++;
1020		}
1021	}
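	/*
	 * Worked example (illustrative): ias = 40 with a 4K granule gives
	 * va_bits = 28, levels = 4 and pgd_bits = 1; the two level-0 entries
	 * fit in ARM_LPAE_S2_MAX_CONCAT_PAGES, so the walk is shortened to a
	 * pair of concatenated level-1 tables (pgd_bits = 10, start_level = 1).
	 */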
1022
1023	/* VTCR */
1024	if (cfg->coherent_walk) {
1025		vtcr->sh = ARM_LPAE_TCR_SH_IS;
1026		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
1027		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
1028	} else {
1029		vtcr->sh = ARM_LPAE_TCR_SH_OS;
1030		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
1031		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
1032	}
1033
1034	sl = data->start_level;
1035
1036	switch (ARM_LPAE_GRANULE(data)) {
1037	case SZ_4K:
1038		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
1039		sl++; /* SL0 format is different for 4K granule size */
1040		break;
1041	case SZ_16K:
1042		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
1043		break;
1044	case SZ_64K:
1045		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
1046		break;
1047	}
1048
1049	switch (cfg->oas) {
1050	case 32:
1051		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
1052		break;
1053	case 36:
1054		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
1055		break;
1056	case 40:
1057		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
1058		break;
1059	case 42:
1060		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
1061		break;
1062	case 44:
1063		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
1064		break;
1065	case 48:
1066		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
1067		break;
1068	case 52:
1069		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
1070		break;
1071	default:
1072		goto out_free_data;
1073	}
1074
1075	vtcr->tsz = 64ULL - cfg->ias;
1076	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
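	/*
	 * Illustrative example: a stage-2 walk starting at level 1 with a 4K
	 * granule has sl bumped to 2 above, so SL0 = ~2 & 0x3 = 1, the VTCR
	 * encoding for a 4K-granule level-1 start.
	 */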
1077
1078	/* Allocate pgd pages */
1079	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
1080					   GFP_KERNEL, cfg, cookie);
1081	if (!data->pgd)
1082		goto out_free_data;
1083
1084	/* Ensure the empty pgd is visible before any actual TTBR write */
1085	wmb();
1086
1087	/* VTTBR */
1088	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
1089	return &data->iop;
1090
1091out_free_data:
1092	kfree(data);
1093	return NULL;
1094}
1095
1096static struct io_pgtable *
1097arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
1098{
1099	if (cfg->ias > 32 || cfg->oas > 40)
1100		return NULL;
1101
1102	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1103	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
1104}
1105
1106static struct io_pgtable *
1107arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
1108{
1109	if (cfg->ias > 40 || cfg->oas > 40)
1110		return NULL;
1111
1112	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1113	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
1114}
1115
1116static struct io_pgtable *
1117arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
1118{
1119	struct arm_lpae_io_pgtable *data;
1120
1121	/* No quirks for Mali (hopefully) */
1122	if (cfg->quirks)
1123		return NULL;
1124
1125	if (cfg->ias > 48 || cfg->oas > 40)
1126		return NULL;
1127
1128	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1129
1130	data = arm_lpae_alloc_pgtable(cfg);
1131	if (!data)
1132		return NULL;
1133
1134	/* Mali seems to need a full 4-level table regardless of IAS */
1135	if (data->start_level > 0) {
1136		data->start_level = 0;
1137		data->pgd_bits = 0;
1138	}
1139	/*
1140	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
1141	 * best we can do is mimic the out-of-tree driver and hope that the
1142	 * "implementation-defined caching policy" is good enough. Similarly,
1143	 * we'll use it for the sake of a valid attribute for our 'device'
1144	 * index, although callers should never request that in practice.
1145	 */
1146	cfg->arm_mali_lpae_cfg.memattr =
1147		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
1148		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
1149		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
1150		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
1151		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
1152		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
1153
1154	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
1155					   cfg, cookie);
1156	if (!data->pgd)
1157		goto out_free_data;
1158
1159	/* Ensure the empty pgd is visible before TRANSTAB can be written */
1160	wmb();
1161
1162	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
1163					  ARM_MALI_LPAE_TTBR_READ_INNER |
1164					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
1165	if (cfg->coherent_walk)
1166		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;
1167
1168	return &data->iop;
1169
1170out_free_data:
1171	kfree(data);
1172	return NULL;
1173}
1174
1175struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
1176	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1177	.alloc	= arm_64_lpae_alloc_pgtable_s1,
1178	.free	= arm_lpae_free_pgtable,
1179};
1180
1181struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
1182	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1183	.alloc	= arm_64_lpae_alloc_pgtable_s2,
1184	.free	= arm_lpae_free_pgtable,
1185};
1186
1187struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
1188	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1189	.alloc	= arm_32_lpae_alloc_pgtable_s1,
1190	.free	= arm_lpae_free_pgtable,
1191};
1192
1193struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
1194	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1195	.alloc	= arm_32_lpae_alloc_pgtable_s2,
1196	.free	= arm_lpae_free_pgtable,
1197};
1198
1199struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
1200	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1201	.alloc	= arm_mali_lpae_alloc_pgtable,
1202	.free	= arm_lpae_free_pgtable,
1203};
1204
1205#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
1206
1207static struct io_pgtable_cfg *cfg_cookie __initdata;
1208
1209static void __init dummy_tlb_flush_all(void *cookie)
1210{
1211	WARN_ON(cookie != cfg_cookie);
1212}
1213
1214static void __init dummy_tlb_flush(unsigned long iova, size_t size,
1215				   size_t granule, void *cookie)
1216{
1217	WARN_ON(cookie != cfg_cookie);
1218	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
1219}
1220
1221static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
1222				      unsigned long iova, size_t granule,
1223				      void *cookie)
1224{
1225	dummy_tlb_flush(iova, granule, granule, cookie);
1226}
1227
1228static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
1229	.tlb_flush_all	= dummy_tlb_flush_all,
1230	.tlb_flush_walk	= dummy_tlb_flush,
1231	.tlb_add_page	= dummy_tlb_add_page,
1232};
1233
1234static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
1235{
1236	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
1237	struct io_pgtable_cfg *cfg = &data->iop.cfg;
1238
1239	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
1240		cfg->pgsize_bitmap, cfg->ias);
1241	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
1242		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
1243		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
1244}
1245
1246#define __FAIL(ops, i)	({						\
1247		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
1248		arm_lpae_dump_ops(ops);					\
1249		selftest_running = false;				\
1250		-EFAULT;						\
1251})
1252
1253static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
1254{
1255	static const enum io_pgtable_fmt fmts[] __initconst = {
1256		ARM_64_LPAE_S1,
1257		ARM_64_LPAE_S2,
1258	};
1259
1260	int i, j;
1261	unsigned long iova;
1262	size_t size, mapped;
1263	struct io_pgtable_ops *ops;
1264
1265	selftest_running = true;
1266
1267	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
1268		cfg_cookie = cfg;
1269		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
1270		if (!ops) {
1271			pr_err("selftest: failed to allocate io pgtable ops\n");
1272			return -ENOMEM;
1273		}
1274
1275		/*
1276		 * Initial sanity checks.
1277		 * Empty page tables shouldn't provide any translations.
1278		 */
1279		if (ops->iova_to_phys(ops, 42))
1280			return __FAIL(ops, i);
1281
1282		if (ops->iova_to_phys(ops, SZ_1G + 42))
1283			return __FAIL(ops, i);
1284
1285		if (ops->iova_to_phys(ops, SZ_2G + 42))
1286			return __FAIL(ops, i);
1287
1288		/*
1289		 * Distinct mappings of different granule sizes.
1290		 */
1291		iova = 0;
1292		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
1293			size = 1UL << j;
1294
1295			if (ops->map_pages(ops, iova, iova, size, 1,
1296					   IOMMU_READ | IOMMU_WRITE |
1297					   IOMMU_NOEXEC | IOMMU_CACHE,
1298					   GFP_KERNEL, &mapped))
1299				return __FAIL(ops, i);
1300
1301			/* Overlapping mappings */
1302			if (!ops->map_pages(ops, iova, iova + size, size, 1,
1303					    IOMMU_READ | IOMMU_NOEXEC,
1304					    GFP_KERNEL, &mapped))
1305				return __FAIL(ops, i);
1306
1307			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1308				return __FAIL(ops, i);
1309
1310			iova += SZ_1G;
1311		}
1312
1313		/* Full unmap */
1314		iova = 0;
1315		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
1316			size = 1UL << j;
1317
1318			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
1319				return __FAIL(ops, i);
1320
1321			if (ops->iova_to_phys(ops, iova + 42))
1322				return __FAIL(ops, i);
1323
1324			/* Remap full block */
1325			if (ops->map_pages(ops, iova, iova, size, 1,
1326					   IOMMU_WRITE, GFP_KERNEL, &mapped))
1327				return __FAIL(ops, i);
1328
1329			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1330				return __FAIL(ops, i);
1331
1332			iova += SZ_1G;
1333		}
1334
1335		/*
1336		 * Map/unmap the largest supported page at the top of the IAS; this
1337		 * can trigger corner cases in the concatenated page tables.
1338		 */
1339		mapped = 0;
1340		size = 1UL << __fls(cfg->pgsize_bitmap);
1341		iova = (1UL << cfg->ias) - size;
1342		if (ops->map_pages(ops, iova, iova, size, 1,
1343				   IOMMU_READ | IOMMU_WRITE |
1344				   IOMMU_NOEXEC | IOMMU_CACHE,
1345				   GFP_KERNEL, &mapped))
1346			return __FAIL(ops, i);
1347		if (mapped != size)
1348			return __FAIL(ops, i);
1349		if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
1350			return __FAIL(ops, i);
1351
1352		free_io_pgtable_ops(ops);
1353	}
1354
1355	selftest_running = false;
1356	return 0;
1357}
1358
1359static int __init arm_lpae_do_selftests(void)
1360{
1361	static const unsigned long pgsize[] __initconst = {
1362		SZ_4K | SZ_2M | SZ_1G,
1363		SZ_16K | SZ_32M,
1364		SZ_64K | SZ_512M,
1365	};
1366
1367	static const unsigned int ias[] __initconst = {
1368		32, 36, 40, 42, 44, 48,
1369	};
1370
1371	int i, j, pass = 0, fail = 0;
1372	struct device dev;
1373	struct io_pgtable_cfg cfg = {
1374		.tlb = &dummy_tlb_ops,
1375		.oas = 48,
1376		.coherent_walk = true,
1377		.iommu_dev = &dev,
1378	};
1379
1380	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
1381	set_dev_node(&dev, NUMA_NO_NODE);
1382
1383	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
1384		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
1385			cfg.pgsize_bitmap = pgsize[i];
1386			cfg.ias = ias[j];
1387			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
1388				pgsize[i], ias[j]);
1389			if (arm_lpae_run_tests(&cfg))
1390				fail++;
1391			else
1392				pass++;
1393		}
1394	}
1395
1396	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
1397	return fail ? -EFAULT : 0;
1398}
1399subsys_initcall(arm_lpae_do_selftests);
1400#endif
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * CPU-agnostic ARM page table allocator.
   4 *
   5 * Copyright (C) 2014 ARM Limited
   6 *
   7 * Author: Will Deacon <will.deacon@arm.com>
   8 */
   9
  10#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt
  11
  12#include <linux/atomic.h>
  13#include <linux/bitops.h>
  14#include <linux/io-pgtable.h>
  15#include <linux/kernel.h>
  16#include <linux/sizes.h>
  17#include <linux/slab.h>
  18#include <linux/types.h>
  19#include <linux/dma-mapping.h>
  20
  21#include <asm/barrier.h>
  22
 
 
 
  23#define ARM_LPAE_MAX_ADDR_BITS		52
  24#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
  25#define ARM_LPAE_MAX_LEVELS		4
  26
  27/* Struct accessors */
  28#define io_pgtable_to_data(x)						\
  29	container_of((x), struct arm_lpae_io_pgtable, iop)
  30
  31#define io_pgtable_ops_to_data(x)					\
  32	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
  33
  34/*
  35 * Calculate the right shift amount to get to the portion describing level l
  36 * in a virtual address mapped by the pagetable in d.
  37 */
  38#define ARM_LPAE_LVL_SHIFT(l,d)						\
  39	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
  40	ilog2(sizeof(arm_lpae_iopte)))
  41
  42#define ARM_LPAE_GRANULE(d)						\
  43	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
  44#define ARM_LPAE_PGD_SIZE(d)						\
  45	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)
  46
 
 
 
  47/*
  48 * Calculate the index at level l used to map virtual address a using the
  49 * pagetable in d.
  50 */
  51#define ARM_LPAE_PGD_IDX(l,d)						\
  52	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)
  53
  54#define ARM_LPAE_LVL_IDX(a,l,d)						\
  55	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
  56	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
  57
  58/* Calculate the block/page mapping size at level l for pagetable in d. */
  59#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
  60
  61/* Page table bits */
  62#define ARM_LPAE_PTE_TYPE_SHIFT		0
  63#define ARM_LPAE_PTE_TYPE_MASK		0x3
  64
  65#define ARM_LPAE_PTE_TYPE_BLOCK		1
  66#define ARM_LPAE_PTE_TYPE_TABLE		3
  67#define ARM_LPAE_PTE_TYPE_PAGE		3
  68
  69#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)
  70
  71#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
  72#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
 
  73#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
  74#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
  75#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
  76#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
  77#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
  78#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)
  79
  80#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
  81/* Ignore the contiguous bit for block splitting */
  82#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
  83#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
  84					 ARM_LPAE_PTE_ATTR_HI_MASK)
  85/* Software bit for solving coherency races */
  86#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)
  87
  88/* Stage-1 PTE */
  89#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
  90#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
 
 
 
 
  91#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
  92#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
  93
  94/* Stage-2 PTE */
  95#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
  96#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
  97#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
 
 
 
 
 
 
 
 
 
 
 
 
  98#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
  99#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
 100#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
 101
 102/* Register bits */
 103#define ARM_LPAE_TCR_TG0_4K		0
 104#define ARM_LPAE_TCR_TG0_64K		1
 105#define ARM_LPAE_TCR_TG0_16K		2
 106
 107#define ARM_LPAE_TCR_TG1_16K		1
 108#define ARM_LPAE_TCR_TG1_4K		2
 109#define ARM_LPAE_TCR_TG1_64K		3
 110
 111#define ARM_LPAE_TCR_SH_NS		0
 112#define ARM_LPAE_TCR_SH_OS		2
 113#define ARM_LPAE_TCR_SH_IS		3
 114
 115#define ARM_LPAE_TCR_RGN_NC		0
 116#define ARM_LPAE_TCR_RGN_WBWA		1
 117#define ARM_LPAE_TCR_RGN_WT		2
 118#define ARM_LPAE_TCR_RGN_WB		3
 119
 120#define ARM_LPAE_VTCR_SL0_MASK		0x3
 121
 122#define ARM_LPAE_TCR_T0SZ_SHIFT		0
 123
 124#define ARM_LPAE_VTCR_PS_SHIFT		16
 125#define ARM_LPAE_VTCR_PS_MASK		0x7
 126
 127#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
 128#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
 129#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
 130#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
 131#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
 132#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
 133#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL
 134
 135#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
 136#define ARM_LPAE_MAIR_ATTR_MASK		0xff
 137#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
 138#define ARM_LPAE_MAIR_ATTR_NC		0x44
 139#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
 140#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
 141#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
 142#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
 143#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
 144#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3
 145
 146#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
 147#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
 148#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)
 149
 150#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
 151#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
 152
 153/* IOPTE accessors */
 154#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
 155
 156#define iopte_type(pte,l)					\
 157	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
 158
 159#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)
 160
 
 
 
 
 
 
 161struct arm_lpae_io_pgtable {
 162	struct io_pgtable	iop;
 163
 164	int			pgd_bits;
 165	int			start_level;
 166	int			bits_per_level;
 167
 168	void			*pgd;
 169};
 170
 171typedef u64 arm_lpae_iopte;
 172
 173static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
 174			      enum io_pgtable_fmt fmt)
 175{
 176	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
 177		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;
 
 
 
 178
 179	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
 
 
 
 
 180}
 181
 182static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
 183				     struct arm_lpae_io_pgtable *data)
 184{
 185	arm_lpae_iopte pte = paddr;
 186
 187	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
 188	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
 189}
 190
 191static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
 192				  struct arm_lpae_io_pgtable *data)
 193{
 194	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
 195
 196	if (ARM_LPAE_GRANULE(data) < SZ_64K)
 197		return paddr;
 198
 199	/* Rotate the packed high-order bits back to the top */
 200	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
 201}
 202
 
 
 
 
 
 
 
 
 
 
 
 
 203static bool selftest_running = false;
 204
 205static dma_addr_t __arm_lpae_dma_addr(void *pages)
 206{
 207	return (dma_addr_t)virt_to_phys(pages);
 208}
 209
 210static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
 211				    struct io_pgtable_cfg *cfg)
 
 212{
 213	struct device *dev = cfg->iommu_dev;
 214	int order = get_order(size);
 215	struct page *p;
 216	dma_addr_t dma;
 217	void *pages;
 218
 219	VM_BUG_ON((gfp & __GFP_HIGHMEM));
 220	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
 221			     gfp | __GFP_ZERO, order);
 222	if (!p)
 
 
 
 
 223		return NULL;
 224
 225	pages = page_address(p);
 226	if (!cfg->coherent_walk) {
 227		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
 228		if (dma_mapping_error(dev, dma))
 229			goto out_free;
 230		/*
 231		 * We depend on the IOMMU being able to work with any physical
 232		 * address directly, so if the DMA layer suggests otherwise by
 233		 * translating or truncating them, that bodes very badly...
 234		 */
 235		if (dma != virt_to_phys(pages))
 236			goto out_unmap;
 237	}
 238
 239	return pages;
 240
 241out_unmap:
 242	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
 243	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
 
 244out_free:
 245	__free_pages(p, order);
 
 
 
 
 246	return NULL;
 247}
 248
 249static void __arm_lpae_free_pages(void *pages, size_t size,
 250				  struct io_pgtable_cfg *cfg)
 
 251{
 252	if (!cfg->coherent_walk)
 253		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
 254				 size, DMA_TO_DEVICE);
 255	free_pages((unsigned long)pages, get_order(size));
 
 
 
 
 256}
 257
 258static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
 259				struct io_pgtable_cfg *cfg)
 260{
 261	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
 262				   sizeof(*ptep), DMA_TO_DEVICE);
 263}
 264
 265static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
 266			       struct io_pgtable_cfg *cfg)
 267{
 268	*ptep = pte;
 
 269
 270	if (!cfg->coherent_walk)
 271		__arm_lpae_sync_pte(ptep, cfg);
 272}
 273
 274static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 275			       struct iommu_iotlb_gather *gather,
 276			       unsigned long iova, size_t size, int lvl,
 277			       arm_lpae_iopte *ptep);
 278
 279static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 280				phys_addr_t paddr, arm_lpae_iopte prot,
 281				int lvl, arm_lpae_iopte *ptep)
 282{
 283	arm_lpae_iopte pte = prot;
 
 
 
 284
 285	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
 286		pte |= ARM_LPAE_PTE_TYPE_PAGE;
 287	else
 288		pte |= ARM_LPAE_PTE_TYPE_BLOCK;
 289
 290	pte |= paddr_to_iopte(paddr, data);
 
 291
 292	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
 
 293}
 294
 295static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 296			     unsigned long iova, phys_addr_t paddr,
 297			     arm_lpae_iopte prot, int lvl,
 298			     arm_lpae_iopte *ptep)
 299{
 300	arm_lpae_iopte pte = *ptep;
 301
 302	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
 303		/* We require an unmap first */
 304		WARN_ON(!selftest_running);
 305		return -EEXIST;
 306	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
 307		/*
 308		 * We need to unmap and free the old table before
 309		 * overwriting it with a block entry.
 310		 */
 311		arm_lpae_iopte *tblp;
 312		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 
 313
 314		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
 315		if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
 316			WARN_ON(1);
 317			return -EINVAL;
 
 
 318		}
 319	}
 320
 321	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
 322	return 0;
 323}
 324
 325static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
 326					     arm_lpae_iopte *ptep,
 327					     arm_lpae_iopte curr,
 328					     struct io_pgtable_cfg *cfg)
 329{
 330	arm_lpae_iopte old, new;
 
 331
 332	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
 333	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
 334		new |= ARM_LPAE_PTE_NSTABLE;
 335
 336	/*
 337	 * Ensure the table itself is visible before its PTE can be.
 338	 * Whilst we could get away with cmpxchg64_release below, this
 339	 * doesn't have any ordering semantics when !CONFIG_SMP.
 340	 */
 341	dma_wmb();
 342
 343	old = cmpxchg64_relaxed(ptep, curr, new);
 344
 345	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
 346		return old;
 347
 348	/* Even if it's not ours, there's no point waiting; just kick it */
 349	__arm_lpae_sync_pte(ptep, cfg);
 350	if (old == curr)
 351		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);
 352
 353	return old;
 354}
 355
 356static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 357			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
 358			  int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
 
 359{
 360	arm_lpae_iopte *cptep, pte;
 361	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
 362	size_t tblsz = ARM_LPAE_GRANULE(data);
 363	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
 364
 365	/* Find our entry at the current level */
 366	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 
 367
 368	/* If we can install a leaf entry at this level, then do so */
 369	if (size == block_size)
 370		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
 
 
 
 
 
 
 
 371
 372	/* We can't allocate tables at the final level */
 373	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
 374		return -EINVAL;
 375
 376	/* Grab a pointer to the next level */
 377	pte = READ_ONCE(*ptep);
 378	if (!pte) {
 379		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
 380		if (!cptep)
 381			return -ENOMEM;
 382
 383		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
 384		if (pte)
 385			__arm_lpae_free_pages(cptep, tblsz, cfg);
 386	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
 387		__arm_lpae_sync_pte(ptep, cfg);
 388	}
 389
 390	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
 391		cptep = iopte_deref(pte, data);
 392	} else if (pte) {
 393		/* We require an unmap first */
 394		WARN_ON(!selftest_running);
 395		return -EEXIST;
 396	}
 397
 398	/* Rinse, repeat */
 399	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
 400}
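/*
 * Walk sketch (assuming a 4K granule, 48-bit IAS, start level 0): each
 * recursion step consumes bits_per_level = 9 bits of IOVA:
 *
 *	lvl 0: iova[47:39] -> 512G per entry
 *	lvl 1: iova[38:30] -> 1G per entry (block mappable)
 *	lvl 2: iova[29:21] -> 2M per entry (block mappable)
 *	lvl 3: iova[20:12] -> 4K per entry (page)
 *
 * A 2M mapping therefore terminates at level 2, where size matches
 * ARM_LPAE_BLOCK_SIZE(2, data).
 */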
 401
 402static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
 403					   int prot)
 404{
 405	arm_lpae_iopte pte;
 406
 407	if (data->iop.fmt == ARM_64_LPAE_S1 ||
 408	    data->iop.fmt == ARM_32_LPAE_S1) {
 409		pte = ARM_LPAE_PTE_nG;
 410		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
 411			pte |= ARM_LPAE_PTE_AP_RDONLY;
 412		if (!(prot & IOMMU_PRIV))
 413			pte |= ARM_LPAE_PTE_AP_UNPRIV;
 414	} else {
 415		pte = ARM_LPAE_PTE_HAP_FAULT;
 416		if (prot & IOMMU_READ)
 417			pte |= ARM_LPAE_PTE_HAP_READ;
 418		if (prot & IOMMU_WRITE)
 419			pte |= ARM_LPAE_PTE_HAP_WRITE;
 420	}
 421
 422	/*
 423	 * Note that this logic is structured to accommodate Mali LPAE
 424	 * having stage-1-like attributes but stage-2-like permissions.
 425	 */
 426	if (data->iop.fmt == ARM_64_LPAE_S2 ||
 427	    data->iop.fmt == ARM_32_LPAE_S2) {
 428		if (prot & IOMMU_MMIO)
 429			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
 430		else if (prot & IOMMU_CACHE)
 431			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
 432		else
 433			pte |= ARM_LPAE_PTE_MEMATTR_NC;
 434	} else {
 435		if (prot & IOMMU_MMIO)
 436			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
 437				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 438		else if (prot & IOMMU_CACHE)
 439			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
 440				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 441	}
 442
 443	if (prot & IOMMU_CACHE)
 444		pte |= ARM_LPAE_PTE_SH_IS;
 445	else
 446		pte |= ARM_LPAE_PTE_SH_OS;
 447
 448	if (prot & IOMMU_NOEXEC)
 449		pte |= ARM_LPAE_PTE_XN;
 450
 451	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
 452		pte |= ARM_LPAE_PTE_NS;
 453
 454	if (data->iop.fmt != ARM_MALI_LPAE)
 455		pte |= ARM_LPAE_PTE_AF;
 456
 457	return pte;
 458}
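/*
 * Encoding example (illustrative): with a stage-1 format and
 * IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, the PTE prot comes out as
 *
 *	ARM_LPAE_PTE_nG | ARM_LPAE_PTE_AP_UNPRIV
 *	| (ARM_LPAE_MAIR_ATTR_IDX_CACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT)
 *	| ARM_LPAE_PTE_SH_IS | ARM_LPAE_PTE_AF;
 *
 * Dropping IOMMU_WRITE adds ARM_LPAE_PTE_AP_RDONLY; IOMMU_NOEXEC adds
 * ARM_LPAE_PTE_XN.
 */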
 459
 460static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 461			phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
 462{
 463	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 464	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 465	arm_lpae_iopte *ptep = data->pgd;
 466	int ret, lvl = data->start_level;
 467	arm_lpae_iopte prot;
 468	long iaext = (s64)iova >> cfg->ias;
 469
 470	/* If no access, then nothing to do */
 471	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
 472		return 0;
 473
 474	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
 475		return -EINVAL;
 476
 477	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
 478		iaext = ~iaext;
 479	if (WARN_ON(iaext || paddr >> cfg->oas))
 480		return -ERANGE;
 481
 482	prot = arm_lpae_prot_to_pte(data, iommu_prot);
 483	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
 484	/*
 485	 * Synchronise all PTE updates for the new mapping before there's
 486	 * a chance for anything to kick off a table walk for the new iova.
 487	 */
 488	wmb();
 489
 490	return ret;
 491}
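/*
 * The iaext check is a sign-extension test (sketch): for a TTBR0-style
 * table every IOVA bit at or above ias must be zero, so
 * (s64)iova >> cfg->ias must be 0. Under IO_PGTABLE_QUIRK_ARM_TTBR1
 * valid IOVAs are ones-extended instead, (s64)iova >> cfg->ias == -1,
 * which the "iaext = ~iaext" flip folds back onto the same zero test.
 */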
 492
 493static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
 494				    arm_lpae_iopte *ptep)
 495{
 496	arm_lpae_iopte *start, *end;
 497	unsigned long table_size;
 498
 499	if (lvl == data->start_level)
 500		table_size = ARM_LPAE_PGD_SIZE(data);
 501	else
 502		table_size = ARM_LPAE_GRANULE(data);
 503
 504	start = ptep;
 505
 506	/* Only leaf entries at the last level */
 507	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
 508		end = ptep;
 509	else
 510		end = (void *)ptep + table_size;
 511
 512	while (ptep != end) {
 513		arm_lpae_iopte pte = *ptep++;
 514
 515		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
 516			continue;
 517
 518		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
 519	}
 520
 521	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
 522}
 523
 524static void arm_lpae_free_pgtable(struct io_pgtable *iop)
 525{
 526	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
 527
 528	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
 529	kfree(data);
 530}
 531
 532static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 533				       struct iommu_iotlb_gather *gather,
 534				       unsigned long iova, size_t size,
 535				       arm_lpae_iopte blk_pte, int lvl,
 536				       arm_lpae_iopte *ptep)
 537{
 538	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 539	arm_lpae_iopte pte, *tablep;
 540	phys_addr_t blk_paddr;
 541	size_t tablesz = ARM_LPAE_GRANULE(data);
 542	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 543	int i, unmap_idx = -1;
 544
 545	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
 546		return 0;
 547
 548	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
 549	if (!tablep)
 550		return 0; /* Bytes unmapped */
 551
 552	if (size == split_sz)
 553		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);
 554
 555	blk_paddr = iopte_to_paddr(blk_pte, data);
 556	pte = iopte_prot(blk_pte);
 557
 558	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
 559		/* Unmap! */
 560		if (i == unmap_idx)
 561			continue;
 562
 563		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
 564	}
 565
 566	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
 567	if (pte != blk_pte) {
 568		__arm_lpae_free_pages(tablep, tablesz, cfg);
 569		/*
 570		 * We may race against someone unmapping another part of this
 571		 * block, but anything else is invalid. We can't misinterpret
 572		 * a page entry here since we're never at the last level.
 573		 */
 574		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
 575			return 0;
 576
 577		tablep = iopte_deref(pte, data);
 578	} else if (unmap_idx >= 0) {
 579		io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
 580		return size;
 581	}
 582
 583	return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
 584}
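/*
 * Split sketch (4K granule): unmapping 4K out of a live 2M block entry
 * allocates one level-3 table, fills its 512 slots with 4K leaves that
 * inherit the block's attributes (skipping unmap_idx, the slot being
 * removed), then swaps the table in over the old block entry via
 * arm_lpae_install_table(). Losing that cmpxchg to a concurrent
 * splitter is harmless: the unmap just recurses into whichever table
 * actually got installed.
 */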
 585
 586static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 587			       struct iommu_iotlb_gather *gather,
 588			       unsigned long iova, size_t size, int lvl,
 589			       arm_lpae_iopte *ptep)
 590{
 591	arm_lpae_iopte pte;
 592	struct io_pgtable *iop = &data->iop;
 593
 594	/* Something went horribly wrong and we ran out of page table */
 595	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
 596		return 0;
 597
 598	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 599	pte = READ_ONCE(*ptep);
 600	if (WARN_ON(!pte))
 601		return 0;
 602
 603	/* If the size matches this level, we're in the right place */
 604	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
 605		__arm_lpae_set_pte(ptep, 0, &iop->cfg);
 606
 607		if (!iopte_leaf(pte, lvl, iop->fmt)) {
 608			/* Also flush any partial walks */
 609			io_pgtable_tlb_flush_walk(iop, iova, size,
 610						  ARM_LPAE_GRANULE(data));
 611			ptep = iopte_deref(pte, data);
 612			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
 613		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
 614			/*
 615			 * Order the PTE update against queueing the IOVA, to
 616			 * guarantee that a flush callback from a different CPU
 617			 * has observed it before the TLBIALL can be issued.
 618			 */
 619			smp_wmb();
 620		} else {
 621			io_pgtable_tlb_add_page(iop, gather, iova, size);
 622		}
 623
 624		return size;
 625	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
 626		/*
 627		 * Insert a table at the next level to map the old region,
 628		 * minus the part we want to unmap
 629		 */
 630		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
 631						lvl + 1, ptep);
 632	}
 633
 634	/* Keep on walkin' */
 635	ptep = iopte_deref(pte, data);
 636	return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
 637}
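/*
 * TLB handling in the size-matched case above, summarised (sketch):
 * removing a table entry flushes the walk cache for the whole region
 * before freeing the sub-tree; a leaf under QUIRK_NON_STRICT is only
 * made unreachable, ordered by smp_wmb() against the deferred TLBIALL;
 * a leaf in strict mode is queued on the gather for a ranged
 * invalidation by the caller.
 */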
 638
 639static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 640			     size_t size, struct iommu_iotlb_gather *gather)
 641{
 642	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 643	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 644	arm_lpae_iopte *ptep = data->pgd;
 645	long iaext = (s64)iova >> cfg->ias;
 646
 647	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
 648		return 0;
 649
 650	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
 651		iaext = ~iaext;
 652	if (WARN_ON(iaext))
 653		return 0;
 654
 655	return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
 656}
 657
 658static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
 659					 unsigned long iova)
 660{
 661	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 662	arm_lpae_iopte pte, *ptep = data->pgd;
 663	int lvl = data->start_level;
 664
 665	do {
 666		/* Valid IOPTE pointer? */
 667		if (!ptep)
 668			return 0;
 669
 670		/* Grab the IOPTE we're interested in */
 671		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 672		pte = READ_ONCE(*ptep);
 673
 674		/* Valid entry? */
 675		if (!pte)
 676			return 0;
 677
 678		/* Leaf entry? */
 679		if (iopte_leaf(pte, lvl, data->iop.fmt))
 680			goto found_translation;
 681
 682		/* Take it to the next level */
 683		ptep = iopte_deref(pte, data);
 684	} while (++lvl < ARM_LPAE_MAX_LEVELS);
 685
 686	/* Ran out of page tables to walk */
 687	return 0;
 688
 689found_translation:
 690	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
 691	return iopte_to_paddr(pte, data) | iova;
 692}
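/*
 * Lookup example (4K granule): if the walk finds a 2M block at level 2
 * for iova 0x12345678, then
 *
 *	offset = iova & (ARM_LPAE_BLOCK_SIZE(2, data) - 1) = 0x145678
 *	phys   = iopte_to_paddr(pte, data) | offset
 *
 * i.e. the low 21 bits of the IOVA carry through into the block.
 */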
 693
 694static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
 695{
 696	unsigned long granule, page_sizes;
 697	unsigned int max_addr_bits = 48;
 698
 699	/*
 700	 * We need to restrict the supported page sizes to match the
 701	 * translation regime for a particular granule. Aim to match
 702	 * the CPU page size if possible, otherwise prefer smaller sizes.
 703	 * While we're at it, restrict the block sizes to match the
 704	 * chosen granule.
 705	 */
 706	if (cfg->pgsize_bitmap & PAGE_SIZE)
 707		granule = PAGE_SIZE;
 708	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
 709		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
 710	else if (cfg->pgsize_bitmap & PAGE_MASK)
 711		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
 712	else
 713		granule = 0;
 714
 715	switch (granule) {
 716	case SZ_4K:
 717		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
 718		break;
 719	case SZ_16K:
 720		page_sizes = (SZ_16K | SZ_32M);
 721		break;
 722	case SZ_64K:
 723		max_addr_bits = 52;
 724		page_sizes = (SZ_64K | SZ_512M);
 725		if (cfg->oas > 48)
 726			page_sizes |= 1ULL << 42; /* 4TB */
 727		break;
 728	default:
 729		page_sizes = 0;
 730	}
 731
 732	cfg->pgsize_bitmap &= page_sizes;
 733	cfg->ias = min(cfg->ias, max_addr_bits);
 734	cfg->oas = min(cfg->oas, max_addr_bits);
 735}
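/*
 * Example (hypothetical configuration): with PAGE_SIZE == SZ_4K and a
 * caller offering pgsize_bitmap == SZ_4K | SZ_2M | SZ_1G, the granule
 * matches PAGE_SIZE, page_sizes stays SZ_4K | SZ_2M | SZ_1G, and
 * ias/oas are clamped to 48 bits. On a 64K-page host the same bitmap
 * would instead pick the largest sub-page granule, 4K, via __fls().
 */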
 736
 737static struct arm_lpae_io_pgtable *
 738arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
 739{
 740	struct arm_lpae_io_pgtable *data;
 741	int levels, va_bits, pg_shift;
 742
 743	arm_lpae_restrict_pgsizes(cfg);
 744
 745	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
 746		return NULL;
 747
 748	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
 749		return NULL;
 750
 751	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
 752		return NULL;
 753
 754	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
 755		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
 756		return NULL;
 757	}
 758
 759	data = kmalloc(sizeof(*data), GFP_KERNEL);
 760	if (!data)
 761		return NULL;
 762
 763	pg_shift = __ffs(cfg->pgsize_bitmap);
 764	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));
 765
 766	va_bits = cfg->ias - pg_shift;
 767	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
 768	data->start_level = ARM_LPAE_MAX_LEVELS - levels;
 769
 770	/* Calculate the actual size of our pgd (without concatenation) */
 771	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
 772
 773	data->iop.ops = (struct io_pgtable_ops) {
 774		.map		= arm_lpae_map,
 775		.unmap		= arm_lpae_unmap,
 776		.iova_to_phys	= arm_lpae_iova_to_phys,
 777	};
 778
 779	return data;
 780}
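/*
 * Sizing example (assumed values): ias = 48 with a 4K granule gives
 *
 *	pg_shift       = 12
 *	bits_per_level = 12 - ilog2(sizeof(arm_lpae_iopte)) = 9
 *	va_bits        = 48 - 12 = 36
 *	levels         = DIV_ROUND_UP(36, 9) = 4, start_level = 0
 *	pgd_bits       = 36 - 9 * 3 = 9, i.e. a 4K pgd of 512 entries
 *
 * A 40-bit ias with the same granule still needs 4 levels but leaves a
 * tiny pgd: pgd_bits = 28 - 27 = 1, just two level-0 entries.
 */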
 781
 782static struct io_pgtable *
 783arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 784{
 785	u64 reg;
 786	struct arm_lpae_io_pgtable *data;
 787	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
 788	bool tg1;
 789
 790	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
 791			    IO_PGTABLE_QUIRK_NON_STRICT |
 792			    IO_PGTABLE_QUIRK_ARM_TTBR1))
 793		return NULL;
 794
 795	data = arm_lpae_alloc_pgtable(cfg);
 796	if (!data)
 797		return NULL;
 798
 799	/* TCR */
 800	if (cfg->coherent_walk) {
 801		tcr->sh = ARM_LPAE_TCR_SH_IS;
 802		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
 803		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
 804	} else {
 805		tcr->sh = ARM_LPAE_TCR_SH_OS;
 806		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
 807		tcr->orgn = ARM_LPAE_TCR_RGN_NC;
 808	}
 809
 810	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
 811	switch (ARM_LPAE_GRANULE(data)) {
 812	case SZ_4K:
 813		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
 814		break;
 815	case SZ_16K:
 816		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
 817		break;
 818	case SZ_64K:
 819		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
 820		break;
 821	}
 822
 823	switch (cfg->oas) {
 824	case 32:
 825		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
 826		break;
 827	case 36:
 828		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
 829		break;
 830	case 40:
 831		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
 832		break;
 833	case 42:
 834		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
 835		break;
 836	case 44:
 837		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
 838		break;
 839	case 48:
 840		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
 841		break;
 842	case 52:
 843		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
 844		break;
 845	default:
 846		goto out_free_data;
 847	}
 848
 849	tcr->tsz = 64ULL - cfg->ias;
 850
 851	/* MAIRs */
 852	reg = (ARM_LPAE_MAIR_ATTR_NC
 853	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
 854	      (ARM_LPAE_MAIR_ATTR_WBRWA
 855	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
 856	      (ARM_LPAE_MAIR_ATTR_DEVICE
 857	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
 858	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
 859	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));
 860
 861	cfg->arm_lpae_s1_cfg.mair = reg;
 862
 863	/* Looking good; allocate a pgd */
 864	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
 865					   GFP_KERNEL, cfg);
 866	if (!data->pgd)
 867		goto out_free_data;
 868
 869	/* Ensure the empty pgd is visible before any actual TTBR write */
 870	wmb();
 871
 872	/* TTBR */
 873	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
 874	return &data->iop;
 875
 876out_free_data:
 877	kfree(data);
 878	return NULL;
 879}
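/*
 * Resulting register values (sketch, assuming ias = oas = 48, 4K
 * granule, coherent walk, no TTBR1 quirk): tcr->tsz = 16, tcr->tg =
 * ARM_LPAE_TCR_TG0_4K, tcr->ips = ARM_LPAE_TCR_PS_48_BIT, with
 * inner-shareable write-back write-allocate cacheability, while mair
 * lines up the NC, WBRWA, DEVICE and INC_OWBRWA attributes with the
 * ATTRINDX values arm_lpae_prot_to_pte() emits.
 */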
 880
 881static struct io_pgtable *
 882arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 883{
 884	u64 sl;
 885	struct arm_lpae_io_pgtable *data;
 886	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
 887
 888	/* The NS quirk doesn't apply at stage 2 */
 889	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
 890		return NULL;
 891
 892	data = arm_lpae_alloc_pgtable(cfg);
 893	if (!data)
 894		return NULL;
 895
 896	/*
 897	 * Concatenate PGDs at level 1 if possible in order to reduce
 898	 * the depth of the stage-2 walk.
 899	 */
 900	if (data->start_level == 0) {
 901		unsigned long pgd_pages;
 902
 903		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
 904		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
 905			data->pgd_bits += data->bits_per_level;
 906			data->start_level++;
 907		}
 908	}
 909
 910	/* VTCR */
 911	if (cfg->coherent_walk) {
 912		vtcr->sh = ARM_LPAE_TCR_SH_IS;
 913		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
 914		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
 915	} else {
 916		vtcr->sh = ARM_LPAE_TCR_SH_OS;
 917		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
 918		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
 919	}
 920
 921	sl = data->start_level;
 922
 923	switch (ARM_LPAE_GRANULE(data)) {
 924	case SZ_4K:
 925		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
 926		sl++; /* SL0 format is different for 4K granule size */
 927		break;
 928	case SZ_16K:
 929		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
 930		break;
 931	case SZ_64K:
 932		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
 933		break;
 934	}
 935
 936	switch (cfg->oas) {
 937	case 32:
 938		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
 939		break;
 940	case 36:
 941		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
 942		break;
 943	case 40:
 944		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
 945		break;
 946	case 42:
 947		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
 948		break;
 949	case 44:
 950		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
 951		break;
 952	case 48:
 953		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
 954		break;
 955	case 52:
 956		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
 957		break;
 958	default:
 959		goto out_free_data;
 960	}
 961
 962	vtcr->tsz = 64ULL - cfg->ias;
 963	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
 964
 965	/* Allocate pgd pages */
 966	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
 967					   GFP_KERNEL, cfg);
 968	if (!data->pgd)
 969		goto out_free_data;
 970
 971	/* Ensure the empty pgd is visible before any actual TTBR write */
 972	wmb();
 973
 974	/* VTTBR */
 975	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
 976	return &data->iop;
 977
 978out_free_data:
 979	kfree(data);
 980	return NULL;
 981}
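/*
 * Concatenation example (assumed 4K granule, ias = 40): generic sizing
 * yields start_level = 0 with pgd_bits = 1, i.e. only 2 level-0
 * entries. Since 2 <= ARM_LPAE_S2_MAX_CONCAT_PAGES, the walk is
 * shortened instead: pgd_bits becomes 1 + 9 = 10 (an 8K pgd made of
 * two concatenated level-1 tables) and the stage-2 walk starts at
 * level 1, advertised through the SL0 encoding in vtcr->sl above.
 */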
 982
 983static struct io_pgtable *
 984arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 985{
 986	if (cfg->ias > 32 || cfg->oas > 40)
 987		return NULL;
 988
 989	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
 990	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
 991}
 992
 993static struct io_pgtable *
 994arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 995{
 996	if (cfg->ias > 40 || cfg->oas > 40)
 997		return NULL;
 998
 999	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1000	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
1001}
1002
1003static struct io_pgtable *
1004arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
1005{
1006	struct arm_lpae_io_pgtable *data;
1007
1008	/* No quirks for Mali (hopefully) */
1009	if (cfg->quirks)
1010		return NULL;
1011
1012	if (cfg->ias > 48 || cfg->oas > 40)
1013		return NULL;
1014
1015	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1016
1017	data = arm_lpae_alloc_pgtable(cfg);
1018	if (!data)
1019		return NULL;
1020
1021	/* Mali seems to need a full 4-level table regardless of IAS */
1022	if (data->start_level > 0) {
1023		data->start_level = 0;
1024		data->pgd_bits = 0;
1025	}
1026	/*
1027	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
1028	 * best we can do is mimic the out-of-tree driver and hope that the
1029	 * "implementation-defined caching policy" is good enough. Similarly,
1030	 * we'll use it for the sake of a valid attribute for our 'device'
1031	 * index, although callers should never request that in practice.
1032	 */
1033	cfg->arm_mali_lpae_cfg.memattr =
1034		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
1035		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
1036		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
1037		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
1038		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
1039		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
1040
1041	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
1042					   cfg);
1043	if (!data->pgd)
1044		goto out_free_data;
1045
1046	/* Ensure the empty pgd is visible before TRANSTAB can be written */
1047	wmb();
1048
1049	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
1050					  ARM_MALI_LPAE_TTBR_READ_INNER |
1051					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
1052	return &data->iop;
1053
1054out_free_data:
1055	kfree(data);
1056	return NULL;
1057}
1058
1059struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
1060	.alloc	= arm_64_lpae_alloc_pgtable_s1,
1061	.free	= arm_lpae_free_pgtable,
1062};
1063
1064struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
1065	.alloc	= arm_64_lpae_alloc_pgtable_s2,
1066	.free	= arm_lpae_free_pgtable,
1067};
1068
1069struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
1070	.alloc	= arm_32_lpae_alloc_pgtable_s1,
1071	.free	= arm_lpae_free_pgtable,
1072};
1073
1074struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
1075	.alloc	= arm_32_lpae_alloc_pgtable_s2,
1076	.free	= arm_lpae_free_pgtable,
1077};
1078
1079struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
1080	.alloc	= arm_mali_lpae_alloc_pgtable,
1081	.free	= arm_lpae_free_pgtable,
1082};
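/*
 * Usage sketch (hypothetical caller, not part of this file): IOMMU
 * drivers reach these init_fns indirectly via the format argument of
 * alloc_io_pgtable_ops(), e.g.
 *
 *	struct io_pgtable_ops *ops =
 *		alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (ops)
 *		ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ, GFP_KERNEL);
 *
 * free_io_pgtable_ops() ultimately invokes arm_lpae_free_pgtable().
 */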
1083
1084#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
1085
1086static struct io_pgtable_cfg *cfg_cookie __initdata;
1087
1088static void __init dummy_tlb_flush_all(void *cookie)
1089{
1090	WARN_ON(cookie != cfg_cookie);
1091}
1092
1093static void __init dummy_tlb_flush(unsigned long iova, size_t size,
1094				   size_t granule, void *cookie)
1095{
1096	WARN_ON(cookie != cfg_cookie);
1097	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
1098}
1099
1100static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
1101				      unsigned long iova, size_t granule,
1102				      void *cookie)
1103{
1104	dummy_tlb_flush(iova, granule, granule, cookie);
1105}
1106
1107static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
1108	.tlb_flush_all	= dummy_tlb_flush_all,
1109	.tlb_flush_walk	= dummy_tlb_flush,
1110	.tlb_flush_leaf	= dummy_tlb_flush,
1111	.tlb_add_page	= dummy_tlb_add_page,
1112};
1113
1114static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
1115{
1116	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
1117	struct io_pgtable_cfg *cfg = &data->iop.cfg;
1118
1119	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
1120		cfg->pgsize_bitmap, cfg->ias);
1121	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
1122		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
1123		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
1124}
1125
1126#define __FAIL(ops, i)	({						\
1127		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
1128		arm_lpae_dump_ops(ops);					\
1129		selftest_running = false;				\
1130		-EFAULT;						\
1131})
1132
1133static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
1134{
1135	static const enum io_pgtable_fmt fmts[] __initconst = {
1136		ARM_64_LPAE_S1,
1137		ARM_64_LPAE_S2,
1138	};
1139
1140	int i, j;
1141	unsigned long iova;
1142	size_t size;
1143	struct io_pgtable_ops *ops;
1144
1145	selftest_running = true;
1146
1147	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
1148		cfg_cookie = cfg;
1149		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
1150		if (!ops) {
1151			pr_err("selftest: failed to allocate io pgtable ops\n");
1152			return -ENOMEM;
1153		}
1154
1155		/*
1156		 * Initial sanity checks.
1157		 * Empty page tables shouldn't provide any translations.
1158		 */
1159		if (ops->iova_to_phys(ops, 42))
1160			return __FAIL(ops, i);
1161
1162		if (ops->iova_to_phys(ops, SZ_1G + 42))
1163			return __FAIL(ops, i);
1164
1165		if (ops->iova_to_phys(ops, SZ_2G + 42))
1166			return __FAIL(ops, i);
1167
1168		/*
1169		 * Distinct mappings of different granule sizes.
1170		 */
1171		iova = 0;
1172		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
1173			size = 1UL << j;
1174
1175			if (ops->map(ops, iova, iova, size, IOMMU_READ |
1176							    IOMMU_WRITE |
1177							    IOMMU_NOEXEC |
1178							    IOMMU_CACHE, GFP_KERNEL))
1179				return __FAIL(ops, i);
1180
1181			/* Overlapping mappings */
1182			if (!ops->map(ops, iova, iova + size, size,
1183				      IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
1184				return __FAIL(ops, i);
1185
1186			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1187				return __FAIL(ops, i);
1188
1189			iova += SZ_1G;
1190		}
1191
1192		/* Partial unmap */
1193		size = 1UL << __ffs(cfg->pgsize_bitmap);
1194		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
1195			return __FAIL(ops, i);
1196
1197		/* Remap of partial unmap */
1198		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
1199			return __FAIL(ops, i);
1200
1201		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
1202			return __FAIL(ops, i);
1203
1204		/* Full unmap */
1205		iova = 0;
1206		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
1207			size = 1UL << j;
1208
1209			if (ops->unmap(ops, iova, size, NULL) != size)
1210				return __FAIL(ops, i);
1211
1212			if (ops->iova_to_phys(ops, iova + 42))
1213				return __FAIL(ops, i);
1214
1215			/* Remap full block */
1216			if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
1217				return __FAIL(ops, i);
1218
1219			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1220				return __FAIL(ops, i);
1221
1222			iova += SZ_1G;
1223		}
1224
1225		free_io_pgtable_ops(ops);
1226	}
1227
1228	selftest_running = false;
1229	return 0;
1230}
1231
1232static int __init arm_lpae_do_selftests(void)
1233{
1234	static const unsigned long pgsize[] __initconst = {
1235		SZ_4K | SZ_2M | SZ_1G,
1236		SZ_16K | SZ_32M,
1237		SZ_64K | SZ_512M,
1238	};
1239
1240	static const unsigned int ias[] __initconst = {
1241		32, 36, 40, 42, 44, 48,
1242	};
1243
1244	int i, j, pass = 0, fail = 0;
1245	struct io_pgtable_cfg cfg = {
1246		.tlb = &dummy_tlb_ops,
1247		.oas = 48,
1248		.coherent_walk = true,
1249	};
1250
1251	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
1252		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
1253			cfg.pgsize_bitmap = pgsize[i];
1254			cfg.ias = ias[j];
1255			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
1256				pgsize[i], ias[j]);
1257			if (arm_lpae_run_tests(&cfg))
1258				fail++;
1259			else
1260				pass++;
1261		}
1262	}
1263
1264	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
1265	return fail ? -EFAULT : 0;
1266}
1267subsys_initcall(arm_lpae_do_selftests);
1268#endif