/* drivers/iommu/io-pgtable-arm.c, as of Linux v6.8 */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * CPU-agnostic ARM page table allocator.
   4 *
   5 * Copyright (C) 2014 ARM Limited
   6 *
   7 * Author: Will Deacon <will.deacon@arm.com>
   8 */
   9
  10#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt
  11
  12#include <linux/atomic.h>
  13#include <linux/bitops.h>
  14#include <linux/io-pgtable.h>
  15#include <linux/kernel.h>
  16#include <linux/sizes.h>
  17#include <linux/slab.h>
  18#include <linux/types.h>
  19#include <linux/dma-mapping.h>
  20
  21#include <asm/barrier.h>
  22
  23#include "io-pgtable-arm.h"
  24
  25#define ARM_LPAE_MAX_ADDR_BITS		52
  26#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
  27#define ARM_LPAE_MAX_LEVELS		4
  28
  29/* Struct accessors */
  30#define io_pgtable_to_data(x)						\
  31	container_of((x), struct arm_lpae_io_pgtable, iop)
  32
  33#define io_pgtable_ops_to_data(x)					\
  34	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
  35
  36/*
  37 * Calculate the right shift amount to get to the portion describing level l
  38 * in a virtual address mapped by the pagetable in d.
  39 */
  40#define ARM_LPAE_LVL_SHIFT(l,d)						\
  41	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
  42	ilog2(sizeof(arm_lpae_iopte)))
  43
  44#define ARM_LPAE_GRANULE(d)						\
  45	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
  46#define ARM_LPAE_PGD_SIZE(d)						\
  47	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)
  48
  49#define ARM_LPAE_PTES_PER_TABLE(d)					\
  50	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))
  51
  52/*
  53 * Calculate the index at level l used to map virtual address a using the
  54 * pagetable in d.
  55 */
  56#define ARM_LPAE_PGD_IDX(l,d)						\
  57	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)
  58
  59#define ARM_LPAE_LVL_IDX(a,l,d)						\
  60	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
  61	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
  62
  63/* Calculate the block/page mapping size at level l for pagetable in d. */
  64#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
  65
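/*
 * Worked example for the macros above, assuming a 4KB granule: with
 * sizeof(arm_lpae_iopte) == 8, bits_per_level = 12 - 3 = 9, so
 * ARM_LPAE_LVL_SHIFT(3,d) = (4 - 3) * 9 + 3 = 12,
 * ARM_LPAE_LVL_SHIFT(2,d) = 21 and ARM_LPAE_LVL_SHIFT(1,d) = 30,
 * giving ARM_LPAE_BLOCK_SIZE() values of 4KB, 2MB and 1GB respectively,
 * while ARM_LPAE_LVL_IDX() extracts the corresponding 9-bit index field.
 */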
  66/* Page table bits */
  67#define ARM_LPAE_PTE_TYPE_SHIFT		0
  68#define ARM_LPAE_PTE_TYPE_MASK		0x3
  69
  70#define ARM_LPAE_PTE_TYPE_BLOCK		1
  71#define ARM_LPAE_PTE_TYPE_TABLE		3
  72#define ARM_LPAE_PTE_TYPE_PAGE		3
  73
  74#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)
  75
  76#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
  77#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
  78#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
  79#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
  80#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
  81#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
  82#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
  83#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)
  84
  85#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
  86/* Ignore the contiguous bit for block splitting */
  87#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
  88#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
  89					 ARM_LPAE_PTE_ATTR_HI_MASK)
  90/* Software bit for solving coherency races */
  91#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)
  92
  93/* Stage-1 PTE */
  94#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
  95#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
  96#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
  97#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
  98
  99/* Stage-2 PTE */
 100#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
 101#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
 102#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
 103#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
 104#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
 105#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
 106
 107/* Register bits */
 108#define ARM_LPAE_VTCR_SL0_MASK		0x3
 109
 110#define ARM_LPAE_TCR_T0SZ_SHIFT		0
 111
 112#define ARM_LPAE_VTCR_PS_SHIFT		16
 113#define ARM_LPAE_VTCR_PS_MASK		0x7
 114
 115#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
 116#define ARM_LPAE_MAIR_ATTR_MASK		0xff
 117#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
 118#define ARM_LPAE_MAIR_ATTR_NC		0x44
 119#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
 120#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
 121#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
 122#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
 123#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
 124#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3
 125
 126#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
 127#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
 128#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)
 129
 130#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
 131#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
 132
 133/* IOPTE accessors */
 134#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
 135
 136#define iopte_type(pte)					\
 137	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
 138
 139#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)
 140
 141struct arm_lpae_io_pgtable {
 142	struct io_pgtable	iop;
 143
 144	int			pgd_bits;
 145	int			start_level;
 146	int			bits_per_level;
 147
 148	void			*pgd;
 149};
 150
 151typedef u64 arm_lpae_iopte;
 152
 153static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
 154			      enum io_pgtable_fmt fmt)
 155{
 156	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
 157		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;
 158
 159	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
 160}
 161
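/*
 * For example, with the standard LPAE formats a last-level entry of type
 * ARM_LPAE_PTE_TYPE_PAGE (0b11) is a leaf, while at earlier levels only
 * ARM_LPAE_PTE_TYPE_BLOCK (0b01) entries are leaves; Mali LPAE instead
 * uses block-type entries at every level, including the last.
 */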
 162static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
 163				     struct arm_lpae_io_pgtable *data)
 164{
 165	arm_lpae_iopte pte = paddr;
 166
 167	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
 168	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
 169}
 170
 171static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
 172				  struct arm_lpae_io_pgtable *data)
 173{
 174	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
 175
 176	if (ARM_LPAE_GRANULE(data) < SZ_64K)
 177		return paddr;
 178
 179	/* Rotate the packed high-order bits back to the top */
 180	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
 181}
 182
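/*
 * A concrete example of the packing above, for a 64KB granule: PA bits
 * 51:48 are shifted down by 36 into PTE bits 15:12 (which are RES0 for
 * 64KB pages) by paddr_to_iopte(), and iopte_to_paddr() shifts them back
 * up and masks with GENMASK_ULL(51, 16) to reconstruct the original
 * 52-bit physical address.
 */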
 183static bool selftest_running = false;
 184
 185static dma_addr_t __arm_lpae_dma_addr(void *pages)
 186{
 187	return (dma_addr_t)virt_to_phys(pages);
 188}
 189
 190static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
 191				    struct io_pgtable_cfg *cfg,
 192				    void *cookie)
 193{
 194	struct device *dev = cfg->iommu_dev;
 195	int order = get_order(size);
 196	dma_addr_t dma;
 197	void *pages;
 198
 199	VM_BUG_ON((gfp & __GFP_HIGHMEM));
 200
 201	if (cfg->alloc) {
 202		pages = cfg->alloc(cookie, size, gfp);
 203	} else {
 204		struct page *p;
 205
 206		p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
 207		pages = p ? page_address(p) : NULL;
 208	}
 209
 210	if (!pages)
 211		return NULL;
 212
 213	if (!cfg->coherent_walk) {
 214		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
 215		if (dma_mapping_error(dev, dma))
 216			goto out_free;
 217		/*
 218		 * We depend on the IOMMU being able to work with any physical
 219		 * address directly, so if the DMA layer suggests otherwise by
 220		 * translating or truncating them, that bodes very badly...
 221		 */
 222		if (dma != virt_to_phys(pages))
 223			goto out_unmap;
 224	}
 225
 226	return pages;
 227
 228out_unmap:
 229	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
 230	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
 231
 232out_free:
 233	if (cfg->free)
 234		cfg->free(cookie, pages, size);
 235	else
 236		free_pages((unsigned long)pages, order);
 237
 238	return NULL;
 239}
 240
 241static void __arm_lpae_free_pages(void *pages, size_t size,
 242				  struct io_pgtable_cfg *cfg,
 243				  void *cookie)
 244{
 245	if (!cfg->coherent_walk)
 246		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
 247				 size, DMA_TO_DEVICE);
 248
 249	if (cfg->free)
 250		cfg->free(cookie, pages, size);
 251	else
 252		free_pages((unsigned long)pages, get_order(size));
 253}
 254
 255static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
 256				struct io_pgtable_cfg *cfg)
 257{
 258	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
 259				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
 260}
 261
 262static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
 263{
 264
 265	*ptep = 0;
 266
 267	if (!cfg->coherent_walk)
 268		__arm_lpae_sync_pte(ptep, 1, cfg);
 269}
 270
 271static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 272			       struct iommu_iotlb_gather *gather,
 273			       unsigned long iova, size_t size, size_t pgcount,
 274			       int lvl, arm_lpae_iopte *ptep);
 275
 276static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 277				phys_addr_t paddr, arm_lpae_iopte prot,
 278				int lvl, int num_entries, arm_lpae_iopte *ptep)
 279{
 280	arm_lpae_iopte pte = prot;
 281	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 282	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 283	int i;
 284
 285	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
 286		pte |= ARM_LPAE_PTE_TYPE_PAGE;
 287	else
 288		pte |= ARM_LPAE_PTE_TYPE_BLOCK;
 289
 290	for (i = 0; i < num_entries; i++)
 291		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);
 292
 293	if (!cfg->coherent_walk)
 294		__arm_lpae_sync_pte(ptep, num_entries, cfg);
 295}
 296
 297static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 298			     unsigned long iova, phys_addr_t paddr,
 299			     arm_lpae_iopte prot, int lvl, int num_entries,
 300			     arm_lpae_iopte *ptep)
 301{
 302	int i;
 303
 304	for (i = 0; i < num_entries; i++)
 305		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
 306			/* We require an unmap first */
 307			WARN_ON(!selftest_running);
 308			return -EEXIST;
 309		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
 310			/*
 311			 * We need to unmap and free the old table before
 312			 * overwriting it with a block entry.
 313			 */
 314			arm_lpae_iopte *tblp;
 315			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 316
 317			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
 318			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
 319					     lvl, tblp) != sz) {
 320				WARN_ON(1);
 321				return -EINVAL;
 322			}
 323		}
 324
 325	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
 326	return 0;
 327}
 328
 329static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
 330					     arm_lpae_iopte *ptep,
 331					     arm_lpae_iopte curr,
 332					     struct arm_lpae_io_pgtable *data)
 333{
 334	arm_lpae_iopte old, new;
 335	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 336
 337	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
 338	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
 339		new |= ARM_LPAE_PTE_NSTABLE;
 340
 341	/*
 342	 * Ensure the table itself is visible before its PTE can be.
 343	 * Whilst we could get away with cmpxchg64_release below, this
 344	 * doesn't have any ordering semantics when !CONFIG_SMP.
 345	 */
 346	dma_wmb();
 347
 348	old = cmpxchg64_relaxed(ptep, curr, new);
 349
 350	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
 351		return old;
 352
 353	/* Even if it's not ours, there's no point waiting; just kick it */
 354	__arm_lpae_sync_pte(ptep, 1, cfg);
 355	if (old == curr)
 356		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);
 357
 358	return old;
 359}
 360
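/*
 * The function above resolves races between concurrent map calls: the
 * dma_wmb() guarantees the new table's contents are observable before a
 * walker can follow the table PTE, the relaxed cmpxchg64() either installs
 * the entry or reports the competing value, and on a non-coherent walk the
 * winner republishes the PTE with ARM_LPAE_PTE_SW_SYNC set so later
 * readers know a cache clean has already been performed.
 */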
 361static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 362			  phys_addr_t paddr, size_t size, size_t pgcount,
 363			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
 364			  gfp_t gfp, size_t *mapped)
 365{
 366	arm_lpae_iopte *cptep, pte;
 367	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
 368	size_t tblsz = ARM_LPAE_GRANULE(data);
 369	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 370	int ret = 0, num_entries, max_entries, map_idx_start;
 371
 372	/* Find our entry at the current level */
 373	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
 374	ptep += map_idx_start;
 375
 376	/* If we can install a leaf entry at this level, then do so */
 377	if (size == block_size) {
 378		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
 379		num_entries = min_t(int, pgcount, max_entries);
 380		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
 381		if (!ret)
 382			*mapped += num_entries * size;
 383
 384		return ret;
 385	}
 386
 387	/* We can't allocate tables at the final level */
 388	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
 389		return -EINVAL;
 390
 391	/* Grab a pointer to the next level */
 392	pte = READ_ONCE(*ptep);
 393	if (!pte) {
 394		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
 395		if (!cptep)
 396			return -ENOMEM;
 397
 398		pte = arm_lpae_install_table(cptep, ptep, 0, data);
 399		if (pte)
 400			__arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
 401	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
 402		__arm_lpae_sync_pte(ptep, 1, cfg);
 403	}
 404
 405	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
 406		cptep = iopte_deref(pte, data);
 407	} else if (pte) {
 408		/* We require an unmap first */
 409		WARN_ON(!selftest_running);
 410		return -EEXIST;
 411	}
 412
 413	/* Rinse, repeat */
 414	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
 415			      cptep, gfp, mapped);
 416}
 417
 418static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
 419					   int prot)
 420{
 421	arm_lpae_iopte pte;
 422
 423	if (data->iop.fmt == ARM_64_LPAE_S1 ||
 424	    data->iop.fmt == ARM_32_LPAE_S1) {
 425		pte = ARM_LPAE_PTE_nG;
 426		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
 427			pte |= ARM_LPAE_PTE_AP_RDONLY;
 428		if (!(prot & IOMMU_PRIV))
 429			pte |= ARM_LPAE_PTE_AP_UNPRIV;
 430	} else {
 431		pte = ARM_LPAE_PTE_HAP_FAULT;
 432		if (prot & IOMMU_READ)
 433			pte |= ARM_LPAE_PTE_HAP_READ;
 434		if (prot & IOMMU_WRITE)
 435			pte |= ARM_LPAE_PTE_HAP_WRITE;
 436	}
 437
 438	/*
 439	 * Note that this logic is structured to accommodate Mali LPAE
 440	 * having stage-1-like attributes but stage-2-like permissions.
 441	 */
 442	if (data->iop.fmt == ARM_64_LPAE_S2 ||
 443	    data->iop.fmt == ARM_32_LPAE_S2) {
 444		if (prot & IOMMU_MMIO)
 445			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
 446		else if (prot & IOMMU_CACHE)
 447			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
 448		else
 449			pte |= ARM_LPAE_PTE_MEMATTR_NC;
 450	} else {
 451		if (prot & IOMMU_MMIO)
 452			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
 453				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 454		else if (prot & IOMMU_CACHE)
 455			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
 456				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 457	}
 458
 459	/*
 460	 * Also Mali has its own notions of shareability wherein its Inner
 461	 * domain covers the cores within the GPU, and its Outer domain is
 462	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
 463	 * terms, depending on coherency).
 464	 */
 465	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
 466		pte |= ARM_LPAE_PTE_SH_IS;
 467	else
 468		pte |= ARM_LPAE_PTE_SH_OS;
 469
 470	if (prot & IOMMU_NOEXEC)
 471		pte |= ARM_LPAE_PTE_XN;
 472
 473	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
 474		pte |= ARM_LPAE_PTE_NS;
 475
 476	if (data->iop.fmt != ARM_MALI_LPAE)
 477		pte |= ARM_LPAE_PTE_AF;
 478
 479	return pte;
 480}
 481
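/*
 * For instance, a stage-1 mapping with IOMMU_READ | IOMMU_WRITE |
 * IOMMU_CACHE yields nG | AP_UNPRIV | (MAIR_ATTR_IDX_CACHE << 2) |
 * SH_IS | AF: writeable (no AP_RDONLY), unprivileged, Normal Write-Back
 * cacheable via MAIR index 1, Inner Shareable, with the Access Flag set.
 */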
 482static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 483			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
 484			      int iommu_prot, gfp_t gfp, size_t *mapped)
 485{
 486	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 487	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 488	arm_lpae_iopte *ptep = data->pgd;
 489	int ret, lvl = data->start_level;
 490	arm_lpae_iopte prot;
 491	long iaext = (s64)iova >> cfg->ias;
 492
 493	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
 494		return -EINVAL;
 495
 496	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
 497		iaext = ~iaext;
 498	if (WARN_ON(iaext || paddr >> cfg->oas))
 499		return -ERANGE;
 500
 501	/* If no access, then nothing to do */
 502	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
 503		return 0;
 504
 505	prot = arm_lpae_prot_to_pte(data, iommu_prot);
 506	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
 507			     ptep, gfp, mapped);
 508	/*
 509	 * Synchronise all PTE updates for the new mapping before there's
 510	 * a chance for anything to kick off a table walk for the new iova.
 511	 */
 512	wmb();
 513
 514	return ret;
 515}
 516
 517static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
 518				    arm_lpae_iopte *ptep)
 519{
 520	arm_lpae_iopte *start, *end;
 521	unsigned long table_size;
 522
 523	if (lvl == data->start_level)
 524		table_size = ARM_LPAE_PGD_SIZE(data);
 525	else
 526		table_size = ARM_LPAE_GRANULE(data);
 527
 528	start = ptep;
 529
 530	/* Only leaf entries at the last level */
 531	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
 532		end = ptep;
 533	else
 534		end = (void *)ptep + table_size;
 535
 536	while (ptep != end) {
 537		arm_lpae_iopte pte = *ptep++;
 538
 539		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
 540			continue;
 541
 542		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
 543	}
 544
 545	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
 546}
 547
 548static void arm_lpae_free_pgtable(struct io_pgtable *iop)
 549{
 550	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
 551
 552	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
 553	kfree(data);
 554}
 555
 556static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 557				       struct iommu_iotlb_gather *gather,
 558				       unsigned long iova, size_t size,
 559				       arm_lpae_iopte blk_pte, int lvl,
 560				       arm_lpae_iopte *ptep, size_t pgcount)
 561{
 562	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 563	arm_lpae_iopte pte, *tablep;
 564	phys_addr_t blk_paddr;
 565	size_t tablesz = ARM_LPAE_GRANULE(data);
 566	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 567	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
 568	int i, unmap_idx_start = -1, num_entries = 0, max_entries;
 569
 570	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
 571		return 0;
 572
 573	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie);
 574	if (!tablep)
 575		return 0; /* Bytes unmapped */
 576
 577	if (size == split_sz) {
 578		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
 579		max_entries = ptes_per_table - unmap_idx_start;
 580		num_entries = min_t(int, pgcount, max_entries);
 581	}
 582
 583	blk_paddr = iopte_to_paddr(blk_pte, data);
 584	pte = iopte_prot(blk_pte);
 585
 586	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
 587		/* Unmap! */
 588		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
 589			continue;
 590
 591		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
 592	}
 593
 594	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
 595	if (pte != blk_pte) {
 596		__arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie);
 597		/*
 598		 * We may race against someone unmapping another part of this
 599		 * block, but anything else is invalid. We can't misinterpret
 600		 * a page entry here since we're never at the last level.
 601		 */
 602		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
 603			return 0;
 604
 605		tablep = iopte_deref(pte, data);
 606	} else if (unmap_idx_start >= 0) {
 607		for (i = 0; i < num_entries; i++)
 608			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);
 609
 610		return num_entries * size;
 611	}
 612
 613	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
 614}
 615
 616static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 617			       struct iommu_iotlb_gather *gather,
 618			       unsigned long iova, size_t size, size_t pgcount,
 619			       int lvl, arm_lpae_iopte *ptep)
 620{
 621	arm_lpae_iopte pte;
 622	struct io_pgtable *iop = &data->iop;
 623	int i = 0, num_entries, max_entries, unmap_idx_start;
 624
 625	/* Something went horribly wrong and we ran out of page table */
 626	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
 627		return 0;
 628
 629	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
 630	ptep += unmap_idx_start;
 631	pte = READ_ONCE(*ptep);
 632	if (WARN_ON(!pte))
 633		return 0;
 634
 635	/* If the size matches this level, we're in the right place */
 636	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
 637		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
 638		num_entries = min_t(int, pgcount, max_entries);
 639
 640		while (i < num_entries) {
 641			pte = READ_ONCE(*ptep);
 642			if (WARN_ON(!pte))
 643				break;
 644
 645			__arm_lpae_clear_pte(ptep, &iop->cfg);
 646
 647			if (!iopte_leaf(pte, lvl, iop->fmt)) {
 648				/* Also flush any partial walks */
 649				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
 650							  ARM_LPAE_GRANULE(data));
 651				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
 652			} else if (!iommu_iotlb_gather_queued(gather)) {
 653				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
 654			}
 655
 656			ptep++;
 657			i++;
 658		}
 659
 660		return i * size;
 661	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
 662		/*
 663		 * Insert a table at the next level to map the old region,
 664		 * minus the part we want to unmap
 665		 */
 666		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
 667						lvl + 1, ptep, pgcount);
 668	}
 669
 670	/* Keep on walkin' */
 671	ptep = iopte_deref(pte, data);
 672	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
 673}
 674
 675static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
 676				   size_t pgsize, size_t pgcount,
 677				   struct iommu_iotlb_gather *gather)
 678{
 679	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 680	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 681	arm_lpae_iopte *ptep = data->pgd;
 682	long iaext = (s64)iova >> cfg->ias;
 683
 684	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
 685		return 0;
 686
 687	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
 688		iaext = ~iaext;
 689	if (WARN_ON(iaext))
 690		return 0;
 691
 692	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
 693				data->start_level, ptep);
 694}
 695
 696static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
 697					 unsigned long iova)
 698{
 699	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 700	arm_lpae_iopte pte, *ptep = data->pgd;
 701	int lvl = data->start_level;
 702
 703	do {
 704		/* Valid IOPTE pointer? */
 705		if (!ptep)
 706			return 0;
 707
 708		/* Grab the IOPTE we're interested in */
 709		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 710		pte = READ_ONCE(*ptep);
 711
 712		/* Valid entry? */
 713		if (!pte)
 714			return 0;
 715
 716		/* Leaf entry? */
 717		if (iopte_leaf(pte, lvl, data->iop.fmt))
 718			goto found_translation;
 719
 720		/* Take it to the next level */
 721		ptep = iopte_deref(pte, data);
 722	} while (++lvl < ARM_LPAE_MAX_LEVELS);
 723
 724	/* Ran out of page tables to walk */
 725	return 0;
 726
 727found_translation:
 728	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
 729	return iopte_to_paddr(pte, data) | iova;
 730}
 731
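/*
 * As an example of the walk above, an IOVA covered by a 2MB level-2 block
 * (4KB granule) terminates at lvl == 2, where the returned address is the
 * block's physical base from iopte_to_paddr() OR'd with the low 21 bits
 * of the IOVA (ARM_LPAE_BLOCK_SIZE(2) - 1).
 */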
 732static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
 733{
 734	unsigned long granule, page_sizes;
 735	unsigned int max_addr_bits = 48;
 736
 737	/*
 738	 * We need to restrict the supported page sizes to match the
 739	 * translation regime for a particular granule. Aim to match
 740	 * the CPU page size if possible, otherwise prefer smaller sizes.
 741	 * While we're at it, restrict the block sizes to match the
 742	 * chosen granule.
 743	 */
 744	if (cfg->pgsize_bitmap & PAGE_SIZE)
 745		granule = PAGE_SIZE;
 746	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
 747		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
 748	else if (cfg->pgsize_bitmap & PAGE_MASK)
 749		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
 750	else
 751		granule = 0;
 752
 753	switch (granule) {
 754	case SZ_4K:
 755		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
 756		break;
 757	case SZ_16K:
 758		page_sizes = (SZ_16K | SZ_32M);
 759		break;
 760	case SZ_64K:
 761		max_addr_bits = 52;
 762		page_sizes = (SZ_64K | SZ_512M);
 763		if (cfg->oas > 48)
 764			page_sizes |= 1ULL << 42; /* 4TB */
 765		break;
 766	default:
 767		page_sizes = 0;
 768	}
 769
 770	cfg->pgsize_bitmap &= page_sizes;
 771	cfg->ias = min(cfg->ias, max_addr_bits);
 772	cfg->oas = min(cfg->oas, max_addr_bits);
 773}
 774
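/*
 * Example: on a 4KB-PAGE_SIZE host whose IOMMU reports pgsize_bitmap =
 * SZ_4K | SZ_64K | SZ_2M | SZ_1G, the function above picks the 4KB
 * granule (matching PAGE_SIZE) and trims the bitmap to SZ_4K | SZ_2M |
 * SZ_1G, dropping the 64KB size that belongs to a different granule.
 */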
 775static struct arm_lpae_io_pgtable *
 776arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
 777{
 778	struct arm_lpae_io_pgtable *data;
 779	int levels, va_bits, pg_shift;
 780
 781	arm_lpae_restrict_pgsizes(cfg);
 782
 783	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
 784		return NULL;
 785
 786	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
 787		return NULL;
 788
 789	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
 790		return NULL;
 791
 792	data = kmalloc(sizeof(*data), GFP_KERNEL);
 793	if (!data)
 794		return NULL;
 795
 796	pg_shift = __ffs(cfg->pgsize_bitmap);
 797	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));
 798
 799	va_bits = cfg->ias - pg_shift;
 800	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
 801	data->start_level = ARM_LPAE_MAX_LEVELS - levels;
 802
 803	/* Calculate the actual size of our pgd (without concatenation) */
 804	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
 805
 806	data->iop.ops = (struct io_pgtable_ops) {
 807		.map_pages	= arm_lpae_map_pages,
 808		.unmap_pages	= arm_lpae_unmap_pages,
 809		.iova_to_phys	= arm_lpae_iova_to_phys,
 810	};
 811
 812	return data;
 813}
 814
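/*
 * Geometry example for the function above: with a 4KB granule and a
 * 48-bit IAS, pg_shift = 12, bits_per_level = 9 and va_bits = 36, so
 * levels = DIV_ROUND_UP(36, 9) = 4, start_level = 0 and pgd_bits = 9,
 * i.e. a 4KB pgd holding 512 level-0 entries.
 */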
 815static struct io_pgtable *
 816arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 817{
 818	u64 reg;
 819	struct arm_lpae_io_pgtable *data;
 820	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
 821	bool tg1;
 822
 823	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
 824			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
 825			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
 826		return NULL;
 827
 828	data = arm_lpae_alloc_pgtable(cfg);
 829	if (!data)
 830		return NULL;
 831
 832	/* TCR */
 833	if (cfg->coherent_walk) {
 834		tcr->sh = ARM_LPAE_TCR_SH_IS;
 835		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
 836		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
 837		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
 838			goto out_free_data;
 839	} else {
 840		tcr->sh = ARM_LPAE_TCR_SH_OS;
 841		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
 842		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
 843			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
 844		else
 845			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
 846	}
 847
 848	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
 849	switch (ARM_LPAE_GRANULE(data)) {
 850	case SZ_4K:
 851		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
 852		break;
 853	case SZ_16K:
 854		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
 855		break;
 856	case SZ_64K:
 857		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
 858		break;
 859	}
 860
 861	switch (cfg->oas) {
 862	case 32:
 863		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
 864		break;
 865	case 36:
 866		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
 867		break;
 868	case 40:
 869		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
 870		break;
 871	case 42:
 872		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
 873		break;
 874	case 44:
 875		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
 876		break;
 877	case 48:
 878		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
 879		break;
 880	case 52:
 881		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
 882		break;
 883	default:
 884		goto out_free_data;
 885	}
 886
 887	tcr->tsz = 64ULL - cfg->ias;
 888
 889	/* MAIRs */
 890	reg = (ARM_LPAE_MAIR_ATTR_NC
 891	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
 892	      (ARM_LPAE_MAIR_ATTR_WBRWA
 893	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
 894	      (ARM_LPAE_MAIR_ATTR_DEVICE
 895	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
 896	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
 897	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));
 898
 899	cfg->arm_lpae_s1_cfg.mair = reg;
 900
 901	/* Looking good; allocate a pgd */
 902	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
 903					   GFP_KERNEL, cfg, cookie);
 904	if (!data->pgd)
 905		goto out_free_data;
 906
 907	/* Ensure the empty pgd is visible before any actual TTBR write */
 908	wmb();
 909
 910	/* TTBR */
 911	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
 912	return &data->iop;
 913
 914out_free_data:
 915	kfree(data);
 916	return NULL;
 917}
 918
 919static struct io_pgtable *
 920arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 921{
 922	u64 sl;
 923	struct arm_lpae_io_pgtable *data;
 924	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
 925
 926	/* The NS quirk doesn't apply at stage 2 */
 927	if (cfg->quirks)
 928		return NULL;
 929
 930	data = arm_lpae_alloc_pgtable(cfg);
 931	if (!data)
 932		return NULL;
 933
 934	/*
 935	 * Concatenate PGDs at level 1 if possible in order to reduce
 936	 * the depth of the stage-2 walk.
 937	 */
 938	if (data->start_level == 0) {
 939		unsigned long pgd_pages;
 940
 941		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
 942		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
 943			data->pgd_bits += data->bits_per_level;
 944			data->start_level++;
 945		}
 946	}
 947
 948	/* VTCR */
 949	if (cfg->coherent_walk) {
 950		vtcr->sh = ARM_LPAE_TCR_SH_IS;
 951		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
 952		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
 953	} else {
 954		vtcr->sh = ARM_LPAE_TCR_SH_OS;
 955		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
 956		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
 957	}
 958
 959	sl = data->start_level;
 960
 961	switch (ARM_LPAE_GRANULE(data)) {
 962	case SZ_4K:
 963		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
 964		sl++; /* SL0 format is different for 4K granule size */
 965		break;
 966	case SZ_16K:
 967		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
 968		break;
 969	case SZ_64K:
 970		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
 971		break;
 972	}
 973
 974	switch (cfg->oas) {
 975	case 32:
 976		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
 977		break;
 978	case 36:
 979		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
 980		break;
 981	case 40:
 982		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
 983		break;
 984	case 42:
 985		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
 986		break;
 987	case 44:
 988		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
 989		break;
 990	case 48:
 991		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
 992		break;
 993	case 52:
 994		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
 995		break;
 996	default:
 997		goto out_free_data;
 998	}
 999
1000	vtcr->tsz = 64ULL - cfg->ias;
1001	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
1002
1003	/* Allocate pgd pages */
1004	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
1005					   GFP_KERNEL, cfg, cookie);
1006	if (!data->pgd)
1007		goto out_free_data;
1008
1009	/* Ensure the empty pgd is visible before any actual TTBR write */
1010	wmb();
1011
1012	/* VTTBR */
1013	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
1014	return &data->iop;
1015
1016out_free_data:
1017	kfree(data);
1018	return NULL;
1019}
1020
1021static struct io_pgtable *
1022arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
1023{
1024	if (cfg->ias > 32 || cfg->oas > 40)
1025		return NULL;
1026
1027	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1028	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
1029}
1030
1031static struct io_pgtable *
1032arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
1033{
1034	if (cfg->ias > 40 || cfg->oas > 40)
1035		return NULL;
1036
1037	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1038	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
1039}
1040
1041static struct io_pgtable *
1042arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
1043{
1044	struct arm_lpae_io_pgtable *data;
1045
1046	/* No quirks for Mali (hopefully) */
1047	if (cfg->quirks)
1048		return NULL;
1049
1050	if (cfg->ias > 48 || cfg->oas > 40)
1051		return NULL;
1052
1053	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1054
1055	data = arm_lpae_alloc_pgtable(cfg);
1056	if (!data)
1057		return NULL;
1058
1059	/* Mali seems to need a full 4-level table regardless of IAS */
1060	if (data->start_level > 0) {
1061		data->start_level = 0;
1062		data->pgd_bits = 0;
1063	}
1064	/*
1065	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
1066	 * best we can do is mimic the out-of-tree driver and hope that the
1067	 * "implementation-defined caching policy" is good enough. Similarly,
1068	 * we'll use it for the sake of a valid attribute for our 'device'
1069	 * index, although callers should never request that in practice.
1070	 */
1071	cfg->arm_mali_lpae_cfg.memattr =
1072		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
1073		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
1074		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
1075		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
1076		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
1077		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
1078
1079	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
1080					   cfg, cookie);
1081	if (!data->pgd)
1082		goto out_free_data;
1083
1084	/* Ensure the empty pgd is visible before TRANSTAB can be written */
1085	wmb();
1086
1087	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
1088					  ARM_MALI_LPAE_TTBR_READ_INNER |
1089					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
1090	if (cfg->coherent_walk)
1091		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;
1092
1093	return &data->iop;
1094
1095out_free_data:
1096	kfree(data);
1097	return NULL;
1098}
1099
1100struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
1101	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1102	.alloc	= arm_64_lpae_alloc_pgtable_s1,
1103	.free	= arm_lpae_free_pgtable,
1104};
1105
1106struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
1107	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1108	.alloc	= arm_64_lpae_alloc_pgtable_s2,
1109	.free	= arm_lpae_free_pgtable,
1110};
1111
1112struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
1113	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1114	.alloc	= arm_32_lpae_alloc_pgtable_s1,
1115	.free	= arm_lpae_free_pgtable,
1116};
1117
1118struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
1119	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1120	.alloc	= arm_32_lpae_alloc_pgtable_s2,
1121	.free	= arm_lpae_free_pgtable,
1122};
1123
1124struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
1125	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1126	.alloc	= arm_mali_lpae_alloc_pgtable,
1127	.free	= arm_lpae_free_pgtable,
1128};
1129
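/*
 * A minimal usage sketch (not part of the upstream file) of how a
 * hypothetical IOMMU driver would consume the init functions above via the
 * generic io-pgtable API; the cfg values and example_tlb_ops are
 * illustrative assumptions.
 */
#if 0	/* illustrative only */
static int example_map_one(struct device *dev, void *cookie)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.coherent_walk	= true,
		.tlb		= &example_tlb_ops,	/* assumed flush ops */
		.iommu_dev	= dev,
	};
	struct io_pgtable_ops *ops;
	size_t mapped = 0;

	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
	if (!ops)
		return -ENOMEM;

	/* Map one 4KB page: IOVA 0x1000 -> PA 0x80001000, read/write */
	return ops->map_pages(ops, 0x1000, 0x80001000, SZ_4K, 1,
			      IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
}
#endif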
1130#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
1131
1132static struct io_pgtable_cfg *cfg_cookie __initdata;
1133
1134static void __init dummy_tlb_flush_all(void *cookie)
1135{
1136	WARN_ON(cookie != cfg_cookie);
1137}
1138
1139static void __init dummy_tlb_flush(unsigned long iova, size_t size,
1140				   size_t granule, void *cookie)
1141{
1142	WARN_ON(cookie != cfg_cookie);
1143	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
1144}
1145
1146static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
1147				      unsigned long iova, size_t granule,
1148				      void *cookie)
1149{
1150	dummy_tlb_flush(iova, granule, granule, cookie);
1151}
1152
1153static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
1154	.tlb_flush_all	= dummy_tlb_flush_all,
1155	.tlb_flush_walk	= dummy_tlb_flush,
1156	.tlb_add_page	= dummy_tlb_add_page,
1157};
1158
1159static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
1160{
1161	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
1162	struct io_pgtable_cfg *cfg = &data->iop.cfg;
1163
1164	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
1165		cfg->pgsize_bitmap, cfg->ias);
1166	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
1167		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
1168		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
1169}
1170
1171#define __FAIL(ops, i)	({						\
1172		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
1173		arm_lpae_dump_ops(ops);					\
1174		selftest_running = false;				\
1175		-EFAULT;						\
1176})
1177
1178static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
1179{
1180	static const enum io_pgtable_fmt fmts[] __initconst = {
1181		ARM_64_LPAE_S1,
1182		ARM_64_LPAE_S2,
1183	};
1184
1185	int i, j;
1186	unsigned long iova;
1187	size_t size, mapped;
1188	struct io_pgtable_ops *ops;
1189
1190	selftest_running = true;
1191
1192	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
1193		cfg_cookie = cfg;
1194		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
1195		if (!ops) {
1196			pr_err("selftest: failed to allocate io pgtable ops\n");
1197			return -ENOMEM;
1198		}
1199
1200		/*
1201		 * Initial sanity checks.
1202		 * Empty page tables shouldn't provide any translations.
1203		 */
1204		if (ops->iova_to_phys(ops, 42))
1205			return __FAIL(ops, i);
1206
1207		if (ops->iova_to_phys(ops, SZ_1G + 42))
1208			return __FAIL(ops, i);
1209
1210		if (ops->iova_to_phys(ops, SZ_2G + 42))
1211			return __FAIL(ops, i);
1212
1213		/*
1214		 * Distinct mappings of different granule sizes.
1215		 */
1216		iova = 0;
1217		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
1218			size = 1UL << j;
1219
1220			if (ops->map_pages(ops, iova, iova, size, 1,
1221					   IOMMU_READ | IOMMU_WRITE |
1222					   IOMMU_NOEXEC | IOMMU_CACHE,
1223					   GFP_KERNEL, &mapped))
1224				return __FAIL(ops, i);
1225
1226			/* Overlapping mappings */
1227			if (!ops->map_pages(ops, iova, iova + size, size, 1,
1228					    IOMMU_READ | IOMMU_NOEXEC,
1229					    GFP_KERNEL, &mapped))
1230				return __FAIL(ops, i);
1231
1232			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1233				return __FAIL(ops, i);
1234
1235			iova += SZ_1G;
1236		}
1237
1238		/* Partial unmap */
1239		size = 1UL << __ffs(cfg->pgsize_bitmap);
1240		if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
1241			return __FAIL(ops, i);
1242
1243		/* Remap of partial unmap */
1244		if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
1245				   IOMMU_READ, GFP_KERNEL, &mapped))
1246			return __FAIL(ops, i);
1247
1248		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
1249			return __FAIL(ops, i);
1250
1251		/* Full unmap */
1252		iova = 0;
1253		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
1254			size = 1UL << j;
1255
1256			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
1257				return __FAIL(ops, i);
1258
1259			if (ops->iova_to_phys(ops, iova + 42))
1260				return __FAIL(ops, i);
1261
1262			/* Remap full block */
1263			if (ops->map_pages(ops, iova, iova, size, 1,
1264					   IOMMU_WRITE, GFP_KERNEL, &mapped))
1265				return __FAIL(ops, i);
1266
1267			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1268				return __FAIL(ops, i);
1269
1270			iova += SZ_1G;
1271		}
1272
1273		free_io_pgtable_ops(ops);
1274	}
1275
1276	selftest_running = false;
1277	return 0;
1278}
1279
1280static int __init arm_lpae_do_selftests(void)
1281{
1282	static const unsigned long pgsize[] __initconst = {
1283		SZ_4K | SZ_2M | SZ_1G,
1284		SZ_16K | SZ_32M,
1285		SZ_64K | SZ_512M,
1286	};
1287
1288	static const unsigned int ias[] __initconst = {
1289		32, 36, 40, 42, 44, 48,
1290	};
1291
1292	int i, j, pass = 0, fail = 0;
1293	struct device dev;
1294	struct io_pgtable_cfg cfg = {
1295		.tlb = &dummy_tlb_ops,
1296		.oas = 48,
1297		.coherent_walk = true,
1298		.iommu_dev = &dev,
1299	};
1300
1301	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
1302	set_dev_node(&dev, NUMA_NO_NODE);
1303
1304	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
1305		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
1306			cfg.pgsize_bitmap = pgsize[i];
1307			cfg.ias = ias[j];
1308			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
1309				pgsize[i], ias[j]);
1310			if (arm_lpae_run_tests(&cfg))
1311				fail++;
1312			else
1313				pass++;
1314		}
1315	}
1316
1317	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
1318	return fail ? -EFAULT : 0;
1319}
1320subsys_initcall(arm_lpae_do_selftests);
1321#endif
/* drivers/iommu/io-pgtable-arm.c, as of Linux v6.13.7 */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * CPU-agnostic ARM page table allocator.
   4 *
   5 * Copyright (C) 2014 ARM Limited
   6 *
   7 * Author: Will Deacon <will.deacon@arm.com>
   8 */
   9
  10#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt
  11
  12#include <linux/atomic.h>
  13#include <linux/bitops.h>
  14#include <linux/io-pgtable.h>
  15#include <linux/kernel.h>
  16#include <linux/sizes.h>
  17#include <linux/slab.h>
  18#include <linux/types.h>
  19#include <linux/dma-mapping.h>
  20
  21#include <asm/barrier.h>
  22
  23#include "io-pgtable-arm.h"
  24#include "iommu-pages.h"
  25
  26#define ARM_LPAE_MAX_ADDR_BITS		52
  27#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
  28#define ARM_LPAE_MAX_LEVELS		4
  29
  30/* Struct accessors */
  31#define io_pgtable_to_data(x)						\
  32	container_of((x), struct arm_lpae_io_pgtable, iop)
  33
  34#define io_pgtable_ops_to_data(x)					\
  35	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
  36
  37/*
  38 * Calculate the right shift amount to get to the portion describing level l
  39 * in a virtual address mapped by the pagetable in d.
  40 */
  41#define ARM_LPAE_LVL_SHIFT(l,d)						\
  42	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
  43	ilog2(sizeof(arm_lpae_iopte)))
  44
  45#define ARM_LPAE_GRANULE(d)						\
  46	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
  47#define ARM_LPAE_PGD_SIZE(d)						\
  48	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)
  49
  50#define ARM_LPAE_PTES_PER_TABLE(d)					\
  51	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))
  52
  53/*
  54 * Calculate the index at level l used to map virtual address a using the
  55 * pagetable in d.
  56 */
  57#define ARM_LPAE_PGD_IDX(l,d)						\
  58	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)
  59
  60#define ARM_LPAE_LVL_IDX(a,l,d)						\
  61	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
  62	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
  63
  64/* Calculate the block/page mapping size at level l for pagetable in d. */
  65#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
  66
  67/* Page table bits */
  68#define ARM_LPAE_PTE_TYPE_SHIFT		0
  69#define ARM_LPAE_PTE_TYPE_MASK		0x3
  70
  71#define ARM_LPAE_PTE_TYPE_BLOCK		1
  72#define ARM_LPAE_PTE_TYPE_TABLE		3
  73#define ARM_LPAE_PTE_TYPE_PAGE		3
  74
  75#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)
  76
  77#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
  78#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
  79#define ARM_LPAE_PTE_DBM		(((arm_lpae_iopte)1) << 51)
  80#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
  81#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
  82#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
  83#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
  84#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
  85#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)
  86
  87#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
  88/* Ignore the contiguous bit for block splitting */
  89#define ARM_LPAE_PTE_ATTR_HI_MASK	(ARM_LPAE_PTE_XN | ARM_LPAE_PTE_DBM)
  90#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
  91					 ARM_LPAE_PTE_ATTR_HI_MASK)
  92/* Software bit for solving coherency races */
  93#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)
  94
  95/* Stage-1 PTE */
  96#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
  97#define ARM_LPAE_PTE_AP_RDONLY_BIT	7
  98#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)1) << \
  99					   ARM_LPAE_PTE_AP_RDONLY_BIT)
 100#define ARM_LPAE_PTE_AP_WR_CLEAN_MASK	(ARM_LPAE_PTE_AP_RDONLY | \
 101					 ARM_LPAE_PTE_DBM)
 102#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
 103#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
 104
 105/* Stage-2 PTE */
 106#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
 107#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
 108#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
  109/*
  110 * For !FWB these encode as:
  111 *  1111 = Normal Outer Write-Back Cacheable / Inner Write-Back Cacheable,
  112 *         permitting S1 to override
  113 *  0101 = Normal Non-cacheable / Inner Non-cacheable
  114 *  0001 = Device / Device-nGnRE
  115 * For S2FWB these encode as:
  116 *  0110 = Force Normal Write-Back
  117 *  0101 = Normal* is forced Normal-NC, Device unchanged
  118 *  0001 = Force Device-nGnRE
  119 */
 120#define ARM_LPAE_PTE_MEMATTR_FWB_WB	(((arm_lpae_iopte)0x6) << 2)
 121#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
 122#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
 123#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
 124
 125/* Register bits */
 126#define ARM_LPAE_VTCR_SL0_MASK		0x3
 127
 128#define ARM_LPAE_TCR_T0SZ_SHIFT		0
 129
 130#define ARM_LPAE_VTCR_PS_SHIFT		16
 131#define ARM_LPAE_VTCR_PS_MASK		0x7
 132
 133#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
 134#define ARM_LPAE_MAIR_ATTR_MASK		0xff
 135#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
 136#define ARM_LPAE_MAIR_ATTR_NC		0x44
 137#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
 138#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
 139#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
 140#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
 141#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
 142#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3
 143
 144#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
 145#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
 146#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)
 147
 148#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
 149#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
 150
 151/* IOPTE accessors */
 152#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
 153
 154#define iopte_type(pte)					\
 155	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
 156
 157#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)
 158
 159#define iopte_writeable_dirty(pte)				\
 160	(((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM)
 161
 162#define iopte_set_writeable_clean(ptep)				\
 163	set_bit(ARM_LPAE_PTE_AP_RDONLY_BIT, (unsigned long *)(ptep))
 164
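/*
 * With hardware dirty-bit management (DBM), a writeable-clean PTE has
 * DBM = 1 and AP[2] (AP_RDONLY) = 1; hardware clears AP[2] on the first
 * write, so iopte_writeable_dirty() reads DBM set with AP_RDONLY clear
 * as "dirty", and iopte_set_writeable_clean() re-arms the PTE by setting
 * AP_RDONLY again.
 */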
 165struct arm_lpae_io_pgtable {
 166	struct io_pgtable	iop;
 167
 168	int			pgd_bits;
 169	int			start_level;
 170	int			bits_per_level;
 171
 172	void			*pgd;
 173};
 174
 175typedef u64 arm_lpae_iopte;
 176
 177static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
 178			      enum io_pgtable_fmt fmt)
 179{
 180	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
 181		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;
 182
 183	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
 184}
 185
 186static inline bool iopte_table(arm_lpae_iopte pte, int lvl)
 187{
 188	if (lvl == (ARM_LPAE_MAX_LEVELS - 1))
 189		return false;
 190	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE;
 191}
 192
 193static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
 194				     struct arm_lpae_io_pgtable *data)
 195{
 196	arm_lpae_iopte pte = paddr;
 197
 198	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
 199	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
 200}
 201
 202static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
 203				  struct arm_lpae_io_pgtable *data)
 204{
 205	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;
 206
 207	if (ARM_LPAE_GRANULE(data) < SZ_64K)
 208		return paddr;
 209
 210	/* Rotate the packed high-order bits back to the top */
 211	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
 212}
 213
 214/*
 215 * Convert an index returned by ARM_LPAE_PGD_IDX(), which can point into
 216 * a concatenated PGD, into the maximum number of entries that can be
 217 * mapped in the same table page.
 218 */
 219static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
 220{
 221	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
 222
 223	return ptes_per_table - (i & (ptes_per_table - 1));
 224}
 225
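/*
 * Example for arm_lpae_max_entries(): with a 4KB granule there are 512
 * PTEs per table page, so for an index of 1000 into a two-page
 * concatenated PGD it returns 512 - (1000 & 511) = 24, keeping a
 * multi-entry map or unmap within the current table page.
 */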
 226static bool selftest_running = false;
 227
 228static dma_addr_t __arm_lpae_dma_addr(void *pages)
 229{
 230	return (dma_addr_t)virt_to_phys(pages);
 231}
 232
 233static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
 234				    struct io_pgtable_cfg *cfg,
 235				    void *cookie)
 236{
 237	struct device *dev = cfg->iommu_dev;
 238	int order = get_order(size);
 239	dma_addr_t dma;
 240	void *pages;
 241
 242	VM_BUG_ON((gfp & __GFP_HIGHMEM));
 243
 244	if (cfg->alloc)
 245		pages = cfg->alloc(cookie, size, gfp);
 246	else
 247		pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
 248
 249	if (!pages)
 250		return NULL;
 251
 252	if (!cfg->coherent_walk) {
 253		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
 254		if (dma_mapping_error(dev, dma))
 255			goto out_free;
 256		/*
 257		 * We depend on the IOMMU being able to work with any physical
 258		 * address directly, so if the DMA layer suggests otherwise by
 259		 * translating or truncating them, that bodes very badly...
 260		 */
 261		if (dma != virt_to_phys(pages))
 262			goto out_unmap;
 263	}
 264
 265	return pages;
 266
 267out_unmap:
 268	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
 269	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
 270
 271out_free:
 272	if (cfg->free)
 273		cfg->free(cookie, pages, size);
 274	else
 275		iommu_free_pages(pages, order);
 276
 277	return NULL;
 278}
 279
 280static void __arm_lpae_free_pages(void *pages, size_t size,
 281				  struct io_pgtable_cfg *cfg,
 282				  void *cookie)
 283{
 284	if (!cfg->coherent_walk)
 285		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
 286				 size, DMA_TO_DEVICE);
 287
 288	if (cfg->free)
 289		cfg->free(cookie, pages, size);
 290	else
 291		iommu_free_pages(pages, get_order(size));
 292}
 293
 294static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
 295				struct io_pgtable_cfg *cfg)
 296{
 297	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
 298				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
 299}
 300
 301static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries)
 302{
 303	for (int i = 0; i < num_entries; i++)
 304		ptep[i] = 0;
 305
 306	if (!cfg->coherent_walk && num_entries)
 307		__arm_lpae_sync_pte(ptep, num_entries, cfg);
 308}
 309
 310static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 311			       struct iommu_iotlb_gather *gather,
 312			       unsigned long iova, size_t size, size_t pgcount,
 313			       int lvl, arm_lpae_iopte *ptep);
 314
 315static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 316				phys_addr_t paddr, arm_lpae_iopte prot,
 317				int lvl, int num_entries, arm_lpae_iopte *ptep)
 318{
 319	arm_lpae_iopte pte = prot;
 320	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 321	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 322	int i;
 323
 324	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
 325		pte |= ARM_LPAE_PTE_TYPE_PAGE;
 326	else
 327		pte |= ARM_LPAE_PTE_TYPE_BLOCK;
 328
 329	for (i = 0; i < num_entries; i++)
 330		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);
 331
 332	if (!cfg->coherent_walk)
 333		__arm_lpae_sync_pte(ptep, num_entries, cfg);
 334}
 335
 336static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 337			     unsigned long iova, phys_addr_t paddr,
 338			     arm_lpae_iopte prot, int lvl, int num_entries,
 339			     arm_lpae_iopte *ptep)
 340{
 341	int i;
 342
 343	for (i = 0; i < num_entries; i++)
 344		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
 345			/* We require an unmap first */
 346			WARN_ON(!selftest_running);
 347			return -EEXIST;
 348		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
 349			/*
 350			 * We need to unmap and free the old table before
 351			 * overwriting it with a block entry.
 352			 */
 353			arm_lpae_iopte *tblp;
 354			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
 355
 356			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
 357			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
 358					     lvl, tblp) != sz) {
 359				WARN_ON(1);
 360				return -EINVAL;
 361			}
 362		}
 363
 364	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
 365	return 0;
 366}
 367
 368static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
 369					     arm_lpae_iopte *ptep,
 370					     arm_lpae_iopte curr,
 371					     struct arm_lpae_io_pgtable *data)
 372{
 373	arm_lpae_iopte old, new;
 374	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 375
 376	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
 377	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
 378		new |= ARM_LPAE_PTE_NSTABLE;
 379
 380	/*
 381	 * Ensure the table itself is visible before its PTE can be.
 382	 * Whilst we could get away with cmpxchg64_release below, this
 383	 * doesn't have any ordering semantics when !CONFIG_SMP.
 384	 */
 385	dma_wmb();
 386
 387	old = cmpxchg64_relaxed(ptep, curr, new);
 388
 389	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
 390		return old;
 391
 392	/* Even if it's not ours, there's no point waiting; just kick it */
 393	__arm_lpae_sync_pte(ptep, 1, cfg);
 394	if (old == curr)
 395		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);
 396
 397	return old;
 398}
 399
 400static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 401			  phys_addr_t paddr, size_t size, size_t pgcount,
 402			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
 403			  gfp_t gfp, size_t *mapped)
 404{
 405	arm_lpae_iopte *cptep, pte;
 406	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
 407	size_t tblsz = ARM_LPAE_GRANULE(data);
 408	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 409	int ret = 0, num_entries, max_entries, map_idx_start;
 410
 411	/* Find our entry at the current level */
 412	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
 413	ptep += map_idx_start;
 414
 415	/* If we can install a leaf entry at this level, then do so */
 416	if (size == block_size) {
 417		max_entries = arm_lpae_max_entries(map_idx_start, data);
 418		num_entries = min_t(int, pgcount, max_entries);
 419		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
 420		if (!ret)
 421			*mapped += num_entries * size;
 422
 423		return ret;
 424	}
 425
 426	/* We can't allocate tables at the final level */
 427	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
 428		return -EINVAL;
 429
 430	/* Grab a pointer to the next level */
 431	pte = READ_ONCE(*ptep);
 432	if (!pte) {
 433		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
 434		if (!cptep)
 435			return -ENOMEM;
 436
 437		pte = arm_lpae_install_table(cptep, ptep, 0, data);
 438		if (pte)
 439			__arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
 440	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
 441		__arm_lpae_sync_pte(ptep, 1, cfg);
 442	}
 443
 444	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
 445		cptep = iopte_deref(pte, data);
 446	} else if (pte) {
 447		/* We require an unmap first */
 448		WARN_ON(!selftest_running);
 449		return -EEXIST;
 450	}
 451
 452	/* Rinse, repeat */
 453	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
 454			      cptep, gfp, mapped);
 455}
 456
 457static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
 458					   int prot)
 459{
 460	arm_lpae_iopte pte;
 461
 462	if (data->iop.fmt == ARM_64_LPAE_S1 ||
 463	    data->iop.fmt == ARM_32_LPAE_S1) {
 464		pte = ARM_LPAE_PTE_nG;
 465		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
 466			pte |= ARM_LPAE_PTE_AP_RDONLY;
 467		else if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_HD)
 468			pte |= ARM_LPAE_PTE_DBM;
 469		if (!(prot & IOMMU_PRIV))
 470			pte |= ARM_LPAE_PTE_AP_UNPRIV;
 471	} else {
 472		pte = ARM_LPAE_PTE_HAP_FAULT;
 473		if (prot & IOMMU_READ)
 474			pte |= ARM_LPAE_PTE_HAP_READ;
 475		if (prot & IOMMU_WRITE)
 476			pte |= ARM_LPAE_PTE_HAP_WRITE;
 477	}
 478
 479	/*
 480	 * Note that this logic is structured to accommodate Mali LPAE
 481	 * having stage-1-like attributes but stage-2-like permissions.
 482	 */
 483	if (data->iop.fmt == ARM_64_LPAE_S2 ||
 484	    data->iop.fmt == ARM_32_LPAE_S2) {
 485		if (prot & IOMMU_MMIO) {
 486			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
 487		} else if (prot & IOMMU_CACHE) {
 488			if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_S2FWB)
 489				pte |= ARM_LPAE_PTE_MEMATTR_FWB_WB;
 490			else
 491				pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
 492		} else {
 493			pte |= ARM_LPAE_PTE_MEMATTR_NC;
 494		}
 495	} else {
 496		if (prot & IOMMU_MMIO)
 497			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
 498				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 499		else if (prot & IOMMU_CACHE)
 500			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
 501				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 502	}
 503
 504	/*
 505	 * Mali also has its own notion of shareability, wherein its Inner
 506	 * domain covers the cores within the GPU, and its Outer domain is
 507	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
 508	 * terms, depending on coherency).
 509	 */
 510	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
 511		pte |= ARM_LPAE_PTE_SH_IS;
 512	else
 513		pte |= ARM_LPAE_PTE_SH_OS;
 514
 515	if (prot & IOMMU_NOEXEC)
 516		pte |= ARM_LPAE_PTE_XN;
 517
 518	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
 519		pte |= ARM_LPAE_PTE_NS;
 520
 521	if (data->iop.fmt != ARM_MALI_LPAE)
 522		pte |= ARM_LPAE_PTE_AF;
 523
 524	return pte;
 525}
 526
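/*
 * ops->map_pages() entry point: validate the request against the
 * configured ias/oas and page sizes, convert the prot flags, then hand
 * off to the recursive mapper. A caller might look something like this
 * (hypothetical values):
 *
 *	size_t mapped = 0;
 *	ret = ops->map_pages(ops, iova, paddr, SZ_4K, 16,
 *			     IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
 */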
 527static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 528			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
 529			      int iommu_prot, gfp_t gfp, size_t *mapped)
 530{
 531	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 532	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 533	arm_lpae_iopte *ptep = data->pgd;
 534	int ret, lvl = data->start_level;
 535	arm_lpae_iopte prot;
 536	long iaext = (s64)iova >> cfg->ias;
 537
 538	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
 539		return -EINVAL;
 540
 541	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
 542		iaext = ~iaext;
 543	if (WARN_ON(iaext || paddr >> cfg->oas))
 544		return -ERANGE;
 545
 546	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
 547		return -EINVAL;
 548
 549	prot = arm_lpae_prot_to_pte(data, iommu_prot);
 550	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
 551			     ptep, gfp, mapped);
 552	/*
 553	 * Synchronise all PTE updates for the new mapping before there's
 554	 * a chance for anything to kick off a table walk for the new iova.
 555	 */
 556	wmb();
 557
 558	return ret;
 559}
 560
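/*
 * Free the table at @ptep, recursively freeing any next-level tables it
 * points to first. At the final level every valid entry is a leaf, so
 * there is nothing to recurse into.
 */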
 561static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
 562				    arm_lpae_iopte *ptep)
 563{
 564	arm_lpae_iopte *start, *end;
 565	unsigned long table_size;
 566
 567	if (lvl == data->start_level)
 568		table_size = ARM_LPAE_PGD_SIZE(data);
 569	else
 570		table_size = ARM_LPAE_GRANULE(data);
 571
 572	start = ptep;
 573
 574	/* Only leaf entries at the last level */
 575	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
 576		end = ptep;
 577	else
 578		end = (void *)ptep + table_size;
 579
 580	while (ptep != end) {
 581		arm_lpae_iopte pte = *ptep++;
 582
 583		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
 584			continue;
 585
 586		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
 587	}
 588
 589	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
 590}
 591
 592static void arm_lpae_free_pgtable(struct io_pgtable *iop)
 593{
 594	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
 595
 596	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
 597	kfree(data);
 598}
 599
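/*
 * Recursively walk down to the level whose block size matches @size,
 * clear up to @pgcount entries there, and tear down (with a TLB flush)
 * any sub-tables those entries pointed to. Returns the number of bytes
 * actually unmapped.
 */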
 600static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 601			       struct iommu_iotlb_gather *gather,
 602			       unsigned long iova, size_t size, size_t pgcount,
 603			       int lvl, arm_lpae_iopte *ptep)
 604{
 605	arm_lpae_iopte pte;
 606	struct io_pgtable *iop = &data->iop;
 607	int i = 0, num_entries, max_entries, unmap_idx_start;
 608
 609	/* Something went horribly wrong and we ran out of page table */
 610	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
 611		return 0;
 612
 613	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
 614	ptep += unmap_idx_start;
 615	pte = READ_ONCE(*ptep);
 616	if (WARN_ON(!pte))
 617		return 0;
 618
 619	/* If the size matches this level, we're in the right place */
 620	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
 621		max_entries = arm_lpae_max_entries(unmap_idx_start, data);
 622		num_entries = min_t(int, pgcount, max_entries);
 623
 624		/* Find and handle non-leaf entries */
 625		for (i = 0; i < num_entries; i++) {
 626			pte = READ_ONCE(ptep[i]);
 627			if (WARN_ON(!pte))
 628				break;
 629
 630			if (!iopte_leaf(pte, lvl, iop->fmt)) {
 631				__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);
 632
 633				/* Also flush any partial walks */
 634				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
 635							  ARM_LPAE_GRANULE(data));
 636				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
 637			}
 638		}
 639
 640		/* Clear the remaining entries */
 641		__arm_lpae_clear_pte(ptep, &iop->cfg, i);
 642
 643		if (gather && !iommu_iotlb_gather_queued(gather))
 644			for (int j = 0; j < i; j++)
 645				io_pgtable_tlb_add_page(iop, gather, iova + j * size, size);
 646
 647		return i * size;
 648	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
 649		WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed");
 650		return 0;
 651	}
 652
 653	/* Keep on walkin' */
 654	ptep = iopte_deref(pte, data);
 655	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
 656}
 657
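/*
 * ops->unmap_pages() entry point: validate the request, then let
 * __arm_lpae_unmap() do the actual walk.
 */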
 658static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
 659				   size_t pgsize, size_t pgcount,
 660				   struct iommu_iotlb_gather *gather)
 661{
 662	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 663	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 664	arm_lpae_iopte *ptep = data->pgd;
 665	long iaext = (s64)iova >> cfg->ias;
 666
 667	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
 668		return 0;
 669
 670	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
 671		iaext = ~iaext;
 672	if (WARN_ON(iaext))
 673		return 0;
 674
 675	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
 676				data->start_level, ptep);
 677}
 678
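/*
 * Software table walk: descend from the pgd until a leaf entry covers
 * @iova, returning the translated physical address, or 0 on any miss.
 */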
 679static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
 680					 unsigned long iova)
 681{
 682	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 683	arm_lpae_iopte pte, *ptep = data->pgd;
 684	int lvl = data->start_level;
 685
 686	do {
 687		/* Valid IOPTE pointer? */
 688		if (!ptep)
 689			return 0;
 690
 691		/* Grab the IOPTE we're interested in */
 692		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 693		pte = READ_ONCE(*ptep);
 694
 695		/* Valid entry? */
 696		if (!pte)
 697			return 0;
 698
 699		/* Leaf entry? */
 700		if (iopte_leaf(pte, lvl, data->iop.fmt))
 701			goto found_translation;
 702
 703		/* Take it to the next level */
 704		ptep = iopte_deref(pte, data);
 705	} while (++lvl < ARM_LPAE_MAX_LEVELS);
 706
 707	/* Ran out of page tables to walk */
 708	return 0;
 709
 710found_translation:
 711	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
 712	return iopte_to_paddr(pte, data) | iova;
 713}
 714
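/*
 * State carried through one dirty-tracking walk: the bitmap to record
 * into, the caller's flags, and the [addr, end) window still to visit.
 */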
 715struct io_pgtable_walk_data {
 716	struct iommu_dirty_bitmap	*dirty;
 717	unsigned long			flags;
 718	u64				addr;
 719	const u64			end;
 720};
 721
 722static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
 723				       struct io_pgtable_walk_data *walk_data,
 724				       arm_lpae_iopte *ptep,
 725				       int lvl);
 726
 727static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data,
 728				  struct io_pgtable_walk_data *walk_data,
 729				  arm_lpae_iopte *ptep, int lvl)
 730{
 731	struct io_pgtable *iop = &data->iop;
 732	arm_lpae_iopte pte = READ_ONCE(*ptep);
 733
 734	if (iopte_leaf(pte, lvl, iop->fmt)) {
 735		size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
 736
 737		if (iopte_writeable_dirty(pte)) {
 738			iommu_dirty_bitmap_record(walk_data->dirty,
 739						  walk_data->addr, size);
 740			if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
 741				iopte_set_writeable_clean(ptep);
 742		}
 743		walk_data->addr += size;
 744		return 0;
 745	}
 746
 747	if (WARN_ON(!iopte_table(pte, lvl)))
 748		return -EINVAL;
 749
 750	ptep = iopte_deref(pte, data);
 751	return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1);
 752}
 753
 754static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
 755				       struct io_pgtable_walk_data *walk_data,
 756				       arm_lpae_iopte *ptep,
 757				       int lvl)
 758{
 759	u32 idx;
 760	int max_entries, ret;
 761
 762	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
 763		return -EINVAL;
 764
 765	if (lvl == data->start_level)
 766		max_entries = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
 767	else
 768		max_entries = ARM_LPAE_PTES_PER_TABLE(data);
 769
 770	for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
 771	     (idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
 772		ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl);
 773		if (ret)
 774			return ret;
 775	}
 776
 777	return 0;
 778}
 779
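/*
 * ops->read_and_clear_dirty() entry point: record (and, unless
 * IOMMU_DIRTY_NO_CLEAR is set, clear) the DBM-dirty state of all leaf
 * entries covering [iova, iova + size). Stage-1 only.
 */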
 780static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
 781					 unsigned long iova, size_t size,
 782					 unsigned long flags,
 783					 struct iommu_dirty_bitmap *dirty)
 784{
 785	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 786	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 787	struct io_pgtable_walk_data walk_data = {
 788		.dirty = dirty,
 789		.flags = flags,
 790		.addr = iova,
 791		.end = iova + size,
 792	};
 793	arm_lpae_iopte *ptep = data->pgd;
 794	int lvl = data->start_level;
 795
 796	if (WARN_ON(!size))
 797		return -EINVAL;
 798	if (WARN_ON((iova + size - 1) & ~(BIT(cfg->ias) - 1)))
 799		return -EINVAL;
 800	if (data->iop.fmt != ARM_64_LPAE_S1)
 801		return -EINVAL;
 802
 803	return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl);
 804}
 805
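/*
 * Example: a caller advertising pgsize_bitmap = SZ_4K | SZ_64K | SZ_2M on
 * a 4K-page CPU selects the 4K granule below, so the bitmap is masked
 * against (SZ_4K | SZ_2M | SZ_1G) and ends up as SZ_4K | SZ_2M.
 */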
 806static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
 807{
 808	unsigned long granule, page_sizes;
 809	unsigned int max_addr_bits = 48;
 810
 811	/*
 812	 * We need to restrict the supported page sizes to match the
 813	 * translation regime for a particular granule. Aim to match
 814	 * the CPU page size if possible, otherwise prefer smaller sizes.
 815	 * While we're at it, restrict the block sizes to match the
 816	 * chosen granule.
 817	 */
 818	if (cfg->pgsize_bitmap & PAGE_SIZE)
 819		granule = PAGE_SIZE;
 820	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
 821		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
 822	else if (cfg->pgsize_bitmap & PAGE_MASK)
 823		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
 824	else
 825		granule = 0;
 826
 827	switch (granule) {
 828	case SZ_4K:
 829		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
 830		break;
 831	case SZ_16K:
 832		page_sizes = (SZ_16K | SZ_32M);
 833		break;
 834	case SZ_64K:
 835		max_addr_bits = 52;
 836		page_sizes = (SZ_64K | SZ_512M);
 837		if (cfg->oas > 48)
 838			page_sizes |= 1ULL << 42; /* 4TB */
 839		break;
 840	default:
 841		page_sizes = 0;
 842	}
 843
 844	cfg->pgsize_bitmap &= page_sizes;
 845	cfg->ias = min(cfg->ias, max_addr_bits);
 846	cfg->oas = min(cfg->oas, max_addr_bits);
 847}
 848
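/*
 * Worked example: with a 4K granule (pg_shift = 12) and ias = 40,
 * va_bits = 28 and bits_per_level = 9, so we need DIV_ROUND_UP(28, 9) = 4
 * levels starting at level 0, with a 1-bit (two-entry) pgd.
 */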
 849static struct arm_lpae_io_pgtable *
 850arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
 851{
 852	struct arm_lpae_io_pgtable *data;
 853	int levels, va_bits, pg_shift;
 854
 855	arm_lpae_restrict_pgsizes(cfg);
 856
 857	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
 858		return NULL;
 859
 860	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
 861		return NULL;
 862
 863	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
 864		return NULL;
 865
 866	data = kmalloc(sizeof(*data), GFP_KERNEL);
 867	if (!data)
 868		return NULL;
 869
 870	pg_shift = __ffs(cfg->pgsize_bitmap);
 871	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));
 872
 873	va_bits = cfg->ias - pg_shift;
 874	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
 875	data->start_level = ARM_LPAE_MAX_LEVELS - levels;
 876
 877	/* Calculate the actual size of our pgd (without concatenation) */
 878	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
 879
 880	data->iop.ops = (struct io_pgtable_ops) {
 881		.map_pages	= arm_lpae_map_pages,
 882		.unmap_pages	= arm_lpae_unmap_pages,
 883		.iova_to_phys	= arm_lpae_iova_to_phys,
 884		.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
 885	};
 886
 887	return data;
 888}
 889
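/*
 * Stage-1 allocation: reject unsupported quirks, then derive the TCR,
 * MAIR, and TTBR values which the driver will program into the hardware.
 */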
 890static struct io_pgtable *
 891arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 892{
 893	u64 reg;
 894	struct arm_lpae_io_pgtable *data;
 895	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
 896	bool tg1;
 897
 898	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
 899			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
 900			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
 901			    IO_PGTABLE_QUIRK_ARM_HD))
 902		return NULL;
 903
 904	data = arm_lpae_alloc_pgtable(cfg);
 905	if (!data)
 906		return NULL;
 907
 908	/* TCR */
 909	if (cfg->coherent_walk) {
 910		tcr->sh = ARM_LPAE_TCR_SH_IS;
 911		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
 912		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
 913		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
 914			goto out_free_data;
 915	} else {
 916		tcr->sh = ARM_LPAE_TCR_SH_OS;
 917		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
 918		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
 919			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
 920		else
 921			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
 922	}
 923
 924	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
 925	switch (ARM_LPAE_GRANULE(data)) {
 926	case SZ_4K:
 927		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
 928		break;
 929	case SZ_16K:
 930		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
 931		break;
 932	case SZ_64K:
 933		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
 934		break;
 935	}
 936
 937	switch (cfg->oas) {
 938	case 32:
 939		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
 940		break;
 941	case 36:
 942		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
 943		break;
 944	case 40:
 945		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
 946		break;
 947	case 42:
 948		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
 949		break;
 950	case 44:
 951		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
 952		break;
 953	case 48:
 954		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
 955		break;
 956	case 52:
 957		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
 958		break;
 959	default:
 960		goto out_free_data;
 961	}
 962
 963	tcr->tsz = 64ULL - cfg->ias;
 964
 965	/* MAIRs */
 966	reg = (ARM_LPAE_MAIR_ATTR_NC
 967	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
 968	      (ARM_LPAE_MAIR_ATTR_WBRWA
 969	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
 970	      (ARM_LPAE_MAIR_ATTR_DEVICE
 971	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
 972	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
 973	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));
 974
 975	cfg->arm_lpae_s1_cfg.mair = reg;
 976
 977	/* Looking good; allocate a pgd */
 978	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
 979					   GFP_KERNEL, cfg, cookie);
 980	if (!data->pgd)
 981		goto out_free_data;
 982
 983	/* Ensure the empty pgd is visible before any actual TTBR write */
 984	wmb();
 985
 986	/* TTBR */
 987	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
 988	return &data->iop;
 989
 990out_free_data:
 991	kfree(data);
 992	return NULL;
 993}
 994
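/*
 * Stage-2 allocation: as for stage-1 but producing VTCR/VTTBR, with the
 * option of concatenating up to 16 tables at the initial level so that
 * the walk can start one level further down.
 */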
 995static struct io_pgtable *
 996arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 997{
 998	u64 sl;
 999	struct arm_lpae_io_pgtable *data;
1000	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;
1001
1002	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB))
1003		return NULL;
1004
1005	data = arm_lpae_alloc_pgtable(cfg);
1006	if (!data)
1007		return NULL;
1008
1009	/*
1010	 * Concatenate PGDs at level 1 if possible in order to reduce
1011	 * the depth of the stage-2 walk.
1012	 */
1013	if (data->start_level == 0) {
1014		unsigned long pgd_pages;
1015
1016		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
1017		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
1018			data->pgd_bits += data->bits_per_level;
1019			data->start_level++;
1020		}
1021	}
1022
1023	/* VTCR */
1024	if (cfg->coherent_walk) {
1025		vtcr->sh = ARM_LPAE_TCR_SH_IS;
1026		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
1027		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
1028	} else {
1029		vtcr->sh = ARM_LPAE_TCR_SH_OS;
1030		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
1031		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
1032	}
1033
1034	sl = data->start_level;
1035
1036	switch (ARM_LPAE_GRANULE(data)) {
1037	case SZ_4K:
1038		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
1039		sl++; /* SL0 format is different for 4K granule size */
1040		break;
1041	case SZ_16K:
1042		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
1043		break;
1044	case SZ_64K:
1045		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
1046		break;
1047	}
1048
1049	switch (cfg->oas) {
1050	case 32:
1051		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
1052		break;
1053	case 36:
1054		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
1055		break;
1056	case 40:
1057		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
1058		break;
1059	case 42:
1060		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
1061		break;
1062	case 44:
1063		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
1064		break;
1065	case 48:
1066		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
1067		break;
1068	case 52:
1069		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
1070		break;
1071	default:
1072		goto out_free_data;
1073	}
1074
1075	vtcr->tsz = 64ULL - cfg->ias;
1076	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
1077
1078	/* Allocate pgd pages */
1079	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
1080					   GFP_KERNEL, cfg, cookie);
1081	if (!data->pgd)
1082		goto out_free_data;
1083
1084	/* Ensure the empty pgd is visible before any actual TTBR write */
1085	wmb();
1086
1087	/* VTTBR */
1088	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
1089	return &data->iop;
1090
1091out_free_data:
1092	kfree(data);
1093	return NULL;
1094}
1095
1096static struct io_pgtable *
1097arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
1098{
1099	if (cfg->ias > 32 || cfg->oas > 40)
1100		return NULL;
1101
1102	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1103	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
1104}
1105
1106static struct io_pgtable *
1107arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
1108{
1109	if (cfg->ias > 40 || cfg->oas > 40)
1110		return NULL;
1111
1112	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1113	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
1114}
1115
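/*
 * Mali GPUs reuse the LPAE descriptor format, but with their own
 * TRANSTAB/MEMATTR registers and some idiosyncratic attribute and
 * permission semantics (see the comments below).
 */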
1116static struct io_pgtable *
1117arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
1118{
1119	struct arm_lpae_io_pgtable *data;
1120
1121	/* No quirks for Mali (hopefully) */
1122	if (cfg->quirks)
1123		return NULL;
1124
1125	if (cfg->ias > 48 || cfg->oas > 40)
1126		return NULL;
1127
1128	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
1129
1130	data = arm_lpae_alloc_pgtable(cfg);
1131	if (!data)
1132		return NULL;
1133
1134	/* Mali seems to need a full 4-level table regardless of IAS */
1135	if (data->start_level > 0) {
1136		data->start_level = 0;
1137		data->pgd_bits = 0;
1138	}
1139	/*
1140	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
1141	 * best we can do is mimic the out-of-tree driver and hope that the
1142	 * "implementation-defined caching policy" is good enough. Similarly,
1143	 * we'll use it for the sake of a valid attribute for our 'device'
1144	 * index, although callers should never request that in practice.
1145	 */
1146	cfg->arm_mali_lpae_cfg.memattr =
1147		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
1148		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
1149		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
1150		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
1151		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
1152		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
1153
1154	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
1155					   cfg, cookie);
1156	if (!data->pgd)
1157		goto out_free_data;
1158
1159	/* Ensure the empty pgd is visible before TRANSTAB can be written */
1160	wmb();
1161
1162	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
1163					  ARM_MALI_LPAE_TTBR_READ_INNER |
1164					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
1165	if (cfg->coherent_walk)
1166		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;
1167
1168	return &data->iop;
1169
1170out_free_data:
1171	kfree(data);
1172	return NULL;
1173}
1174
1175struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
1176	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1177	.alloc	= arm_64_lpae_alloc_pgtable_s1,
1178	.free	= arm_lpae_free_pgtable,
1179};
1180
1181struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
1182	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1183	.alloc	= arm_64_lpae_alloc_pgtable_s2,
1184	.free	= arm_lpae_free_pgtable,
1185};
1186
1187struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
1188	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1189	.alloc	= arm_32_lpae_alloc_pgtable_s1,
1190	.free	= arm_lpae_free_pgtable,
1191};
1192
1193struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
1194	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1195	.alloc	= arm_32_lpae_alloc_pgtable_s2,
1196	.free	= arm_lpae_free_pgtable,
1197};
1198
1199struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
1200	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
1201	.alloc	= arm_mali_lpae_alloc_pgtable,
1202	.free	= arm_lpae_free_pgtable,
1203};
1204
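/*
 * Self-tests: exercise map/unmap/iova_to_phys for each supported format
 * and granule against dummy TLB ops, covering both success paths and
 * expected failures (overlapping maps, empty-table lookups).
 */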
1205#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
1206
1207static struct io_pgtable_cfg *cfg_cookie __initdata;
1208
1209static void __init dummy_tlb_flush_all(void *cookie)
1210{
1211	WARN_ON(cookie != cfg_cookie);
1212}
1213
1214static void __init dummy_tlb_flush(unsigned long iova, size_t size,
1215				   size_t granule, void *cookie)
1216{
1217	WARN_ON(cookie != cfg_cookie);
1218	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
1219}
1220
1221static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
1222				      unsigned long iova, size_t granule,
1223				      void *cookie)
1224{
1225	dummy_tlb_flush(iova, granule, granule, cookie);
1226}
1227
1228static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
1229	.tlb_flush_all	= dummy_tlb_flush_all,
1230	.tlb_flush_walk	= dummy_tlb_flush,
1231	.tlb_add_page	= dummy_tlb_add_page,
1232};
1233
1234static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
1235{
1236	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
1237	struct io_pgtable_cfg *cfg = &data->iop.cfg;
1238
1239	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
1240		cfg->pgsize_bitmap, cfg->ias);
1241	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
1242		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
1243		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
1244}
1245
1246#define __FAIL(ops, i)	({						\
1247		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
1248		arm_lpae_dump_ops(ops);					\
1249		selftest_running = false;				\
1250		-EFAULT;						\
1251})
1252
1253static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
1254{
1255	static const enum io_pgtable_fmt fmts[] __initconst = {
1256		ARM_64_LPAE_S1,
1257		ARM_64_LPAE_S2,
1258	};
1259
1260	int i, j;
1261	unsigned long iova;
1262	size_t size, mapped;
1263	struct io_pgtable_ops *ops;
1264
1265	selftest_running = true;
1266
1267	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
1268		cfg_cookie = cfg;
1269		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
1270		if (!ops) {
1271			pr_err("selftest: failed to allocate io pgtable ops\n");
1272			return -ENOMEM;
1273		}
1274
1275		/*
1276		 * Initial sanity checks.
1277		 * Empty page tables shouldn't provide any translations.
1278		 */
1279		if (ops->iova_to_phys(ops, 42))
1280			return __FAIL(ops, i);
1281
1282		if (ops->iova_to_phys(ops, SZ_1G + 42))
1283			return __FAIL(ops, i);
1284
1285		if (ops->iova_to_phys(ops, SZ_2G + 42))
1286			return __FAIL(ops, i);
1287
1288		/*
1289		 * Distinct mappings of different granule sizes.
1290		 */
1291		iova = 0;
1292		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
1293			size = 1UL << j;
1294
1295			if (ops->map_pages(ops, iova, iova, size, 1,
1296					   IOMMU_READ | IOMMU_WRITE |
1297					   IOMMU_NOEXEC | IOMMU_CACHE,
1298					   GFP_KERNEL, &mapped))
1299				return __FAIL(ops, i);
1300
1301			/* Overlapping mappings */
1302			if (!ops->map_pages(ops, iova, iova + size, size, 1,
1303					    IOMMU_READ | IOMMU_NOEXEC,
1304					    GFP_KERNEL, &mapped))
1305				return __FAIL(ops, i);
1306
1307			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1308				return __FAIL(ops, i);
1309
1310			iova += SZ_1G;
1311		}
1312
1313		/* Full unmap */
1314		iova = 0;
1315		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
1316			size = 1UL << j;
1317
1318			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
1319				return __FAIL(ops, i);
1320
1321			if (ops->iova_to_phys(ops, iova + 42))
1322				return __FAIL(ops, i);
1323
1324			/* Remap full block */
1325			if (ops->map_pages(ops, iova, iova, size, 1,
1326					   IOMMU_WRITE, GFP_KERNEL, &mapped))
1327				return __FAIL(ops, i);
1328
1329			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
1330				return __FAIL(ops, i);
1331
1332			iova += SZ_1G;
1333		}
1334
1335		/*
 1336		 * Map/unmap the largest supported page at the top of the IAS; this
 1337		 * can trigger corner cases in the concatenated page tables.
1338		 */
1339		mapped = 0;
1340		size = 1UL << __fls(cfg->pgsize_bitmap);
1341		iova = (1UL << cfg->ias) - size;
1342		if (ops->map_pages(ops, iova, iova, size, 1,
1343				   IOMMU_READ | IOMMU_WRITE |
1344				   IOMMU_NOEXEC | IOMMU_CACHE,
1345				   GFP_KERNEL, &mapped))
1346			return __FAIL(ops, i);
1347		if (mapped != size)
1348			return __FAIL(ops, i);
1349		if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
1350			return __FAIL(ops, i);
1351
1352		free_io_pgtable_ops(ops);
1353	}
1354
1355	selftest_running = false;
1356	return 0;
1357}
1358
1359static int __init arm_lpae_do_selftests(void)
1360{
1361	static const unsigned long pgsize[] __initconst = {
1362		SZ_4K | SZ_2M | SZ_1G,
1363		SZ_16K | SZ_32M,
1364		SZ_64K | SZ_512M,
1365	};
1366
1367	static const unsigned int ias[] __initconst = {
1368		32, 36, 40, 42, 44, 48,
1369	};
1370
1371	int i, j, pass = 0, fail = 0;
1372	struct device dev;
1373	struct io_pgtable_cfg cfg = {
1374		.tlb = &dummy_tlb_ops,
1375		.oas = 48,
1376		.coherent_walk = true,
1377		.iommu_dev = &dev,
1378	};
1379
1380	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
1381	set_dev_node(&dev, NUMA_NO_NODE);
1382
1383	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
1384		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
1385			cfg.pgsize_bitmap = pgsize[i];
1386			cfg.ias = ias[j];
1387			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
1388				pgsize[i], ias[j]);
1389			if (arm_lpae_run_tests(&cfg))
1390				fail++;
1391			else
1392				pass++;
1393		}
1394	}
1395
1396	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
1397	return fail ? -EFAULT : 0;
1398}
1399subsys_initcall(arm_lpae_do_selftests);
1400#endif