   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Dynamic DMA mapping support.
   4 *
   5 * This implementation is a fallback for platforms that do not support
   6 * I/O TLBs (aka DMA address translation hardware).
   7 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
   8 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
   9 * Copyright (C) 2000, 2003 Hewlett-Packard Co
  10 *	David Mosberger-Tang <davidm@hpl.hp.com>
  11 *
  12 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
  13 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
  14 *			unnecessary i-cache flushing.
  15 * 04/07/.. ak		Better overflow handling. Assorted fixes.
  16 * 05/09/10 linville	Add support for syncing ranges, support syncing for
  17 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
  18 * 08/12/11 beckyb	Add highmem support
  19 */
  20
  21#define pr_fmt(fmt) "software IO TLB: " fmt
  22
  23#include <linux/cache.h>
  24#include <linux/cc_platform.h>
  25#include <linux/ctype.h>
  26#include <linux/debugfs.h>
  27#include <linux/dma-direct.h>
  28#include <linux/dma-map-ops.h>
  29#include <linux/export.h>
  30#include <linux/gfp.h>
  31#include <linux/highmem.h>
  32#include <linux/io.h>
  33#include <linux/iommu-helper.h>
  34#include <linux/init.h>
  35#include <linux/memblock.h>
  36#include <linux/mm.h>
  37#include <linux/pfn.h>
  38#include <linux/scatterlist.h>
  39#include <linux/set_memory.h>
  40#include <linux/spinlock.h>
  41#include <linux/string.h>
  42#include <linux/swiotlb.h>
  43#include <linux/types.h>
  44#ifdef CONFIG_DMA_RESTRICTED_POOL
  45#include <linux/of.h>
  46#include <linux/of_fdt.h>
  47#include <linux/of_reserved_mem.h>
  48#include <linux/slab.h>
  49#endif
  50
  51#define CREATE_TRACE_POINTS
  52#include <trace/events/swiotlb.h>
  53
  54#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
  55
  56/*
  57 * Minimum IO TLB size to bother booting with.  Systems with mainly
   58 * 64-bit-capable cards will only lightly use the swiotlb.  If we can't
  59 * allocate a contiguous 1MB, we're probably in trouble anyway.
  60 */
  61#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
  62
  63#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
  64
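     /*
      * struct io_tlb_slot - per-slot bookkeeping for the bounce buffer pool
      * @orig_addr:	physical address of the original buffer bounced through
      *		this slot, or INVALID_PHYS_ADDR while the slot is free.
      * @alloc_size:	size in bytes of the remaining allocation starting at this
      *		slot (used for the bounds checks in swiotlb_bounce()).
      * @list:	free-list counter: the number of contiguous free slots starting
      *		at this one within its IO_TLB_SEGSIZE segment, or 0 if the slot
      *		is in use.
      */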
  65struct io_tlb_slot {
  66	phys_addr_t orig_addr;
  67	size_t alloc_size;
  68	unsigned int list;
  69};
  70
  71static bool swiotlb_force_bounce;
  72static bool swiotlb_force_disable;
  73
  74struct io_tlb_mem io_tlb_default_mem;
  75
  76phys_addr_t swiotlb_unencrypted_base;
  77
  78static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
  79static unsigned long default_nareas;
  80
  81/**
  82 * struct io_tlb_area - IO TLB memory area descriptor
  83 *
  84 * This is a single area with a single lock.
  85 *
   86 * @used:	The number of used IO TLB slots in this area.
   87 * @index:	The slot index to start searching in this area for the next round.
  88 * @lock:	The lock to protect the above data structures in the map and
  89 *		unmap calls.
  90 */
  91struct io_tlb_area {
  92	unsigned long used;
  93	unsigned int index;
  94	spinlock_t lock;
  95};
  96
  97/*
   98 * Round up the number of slabs to the next power of 2. The last area will
   99 * be smaller than the rest if default_nslabs is not a power of two.
  100 * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
  101 * otherwise a segment may span two or more areas. That would conflict with
  102 * the tracking of free contiguous slots: free slots are treated as
  103 * contiguous regardless of whether they cross an area boundary.
 104 *
 105 * Return true if default_nslabs is rounded up.
 106 */
 107static bool round_up_default_nslabs(void)
 108{
 109	if (!default_nareas)
 110		return false;
 111
 112	if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
 113		default_nslabs = IO_TLB_SEGSIZE * default_nareas;
 114	else if (is_power_of_2(default_nslabs))
 115		return false;
 116	default_nslabs = roundup_pow_of_two(default_nslabs);
 117	return true;
 118}
 119
 120static void swiotlb_adjust_nareas(unsigned int nareas)
 121{
  122	/* use a single area when none is specified */
 123	if (!nareas)
 124		nareas = 1;
 125	else if (!is_power_of_2(nareas))
 126		nareas = roundup_pow_of_two(nareas);
 127
 128	default_nareas = nareas;
 129
 130	pr_info("area num %d.\n", nareas);
 131	if (round_up_default_nslabs())
 132		pr_info("SWIOTLB bounce buffer size roundup to %luMB",
 133			(default_nslabs << IO_TLB_SHIFT) >> 20);
 134}
 135
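     /*
      * Parse the "swiotlb=" kernel command line parameter:
      *
      *	swiotlb=<nslabs>[,<nareas>][,force|noforce]
      *
      * e.g. "swiotlb=65536,4,force" reserves 65536 slabs split into 4 areas and
      * forces bouncing even for devices that could otherwise DMA directly.
      */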
 136static int __init
 137setup_io_tlb_npages(char *str)
 138{
 139	if (isdigit(*str)) {
 140		/* avoid tail segment of size < IO_TLB_SEGSIZE */
 141		default_nslabs =
 142			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
 143	}
 144	if (*str == ',')
 145		++str;
 146	if (isdigit(*str))
 147		swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
 148	if (*str == ',')
 149		++str;
 150	if (!strcmp(str, "force"))
 151		swiotlb_force_bounce = true;
 152	else if (!strcmp(str, "noforce"))
 153		swiotlb_force_disable = true;
 154
 155	return 0;
 156}
 157early_param("swiotlb", setup_io_tlb_npages);
 158
 159unsigned int swiotlb_max_segment(void)
 160{
 161	if (!io_tlb_default_mem.nslabs)
 162		return 0;
 163	return rounddown(io_tlb_default_mem.nslabs << IO_TLB_SHIFT, PAGE_SIZE);
 164}
 165EXPORT_SYMBOL_GPL(swiotlb_max_segment);
 166
 167unsigned long swiotlb_size_or_default(void)
 168{
 169	return default_nslabs << IO_TLB_SHIFT;
 170}
 171
 172void __init swiotlb_adjust_size(unsigned long size)
 173{
 174	/*
  175	 * If the swiotlb parameter has not been specified, give architectures
  176	 * such as those supporting memory encryption a chance to adjust/expand
  177	 * the SWIOTLB size for their use.
 178	 */
 179	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
 180		return;
 181
 182	size = ALIGN(size, IO_TLB_SIZE);
 183	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
 184	if (round_up_default_nslabs())
 185		size = default_nslabs << IO_TLB_SHIFT;
 186	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
 187}
 188
 189void swiotlb_print_info(void)
 190{
 191	struct io_tlb_mem *mem = &io_tlb_default_mem;
 192
 193	if (!mem->nslabs) {
 194		pr_warn("No low mem\n");
 195		return;
 196	}
 197
 198	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
 199	       (mem->nslabs << IO_TLB_SHIFT) >> 20);
 200}
 201
 202static inline unsigned long io_tlb_offset(unsigned long val)
 203{
 204	return val & (IO_TLB_SEGSIZE - 1);
 205}
 206
 207static inline unsigned long nr_slots(u64 val)
 208{
 209	return DIV_ROUND_UP(val, IO_TLB_SIZE);
 210}
 211
 212/*
  213 * Remap swiotlb memory in the unencrypted physical address space
 214 * when swiotlb_unencrypted_base is set. (e.g. for Hyper-V AMD SEV-SNP
 215 * Isolation VMs).
 216 */
 217#ifdef CONFIG_HAS_IOMEM
 218static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
 219{
 220	void *vaddr = NULL;
 221
 222	if (swiotlb_unencrypted_base) {
 223		phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;
 224
 225		vaddr = memremap(paddr, bytes, MEMREMAP_WB);
 226		if (!vaddr)
 227			pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
 228			       &paddr, bytes);
 229	}
 230
 231	return vaddr;
 232}
 233#else
 234static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
 235{
 236	return NULL;
 237}
 238#endif
 239
 240/*
 241 * Early SWIOTLB allocation may be too early to allow an architecture to
 242 * perform the desired operations.  This function allows the architecture to
 243 * call SWIOTLB when the operations are possible.  It needs to be called
 244 * before the SWIOTLB memory is used.
 245 */
 246void __init swiotlb_update_mem_attributes(void)
 247{
 248	struct io_tlb_mem *mem = &io_tlb_default_mem;
 249	void *vaddr;
 250	unsigned long bytes;
 251
 252	if (!mem->nslabs || mem->late_alloc)
 253		return;
 254	vaddr = phys_to_virt(mem->start);
 255	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
 256	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
 257
 258	mem->vaddr = swiotlb_mem_remap(mem, bytes);
 259	if (!mem->vaddr)
 260		mem->vaddr = vaddr;
 261}
 262
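     /*
      * Common initializer for an io_tlb_mem pool, shared by the early (memblock)
      * and late (page allocator) setup paths: record the pool extent, reset the
      * per-area locks and search indices, and mark every slot as free.
      */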
 263static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
 264		unsigned long nslabs, unsigned int flags,
 265		bool late_alloc, unsigned int nareas)
 266{
 267	void *vaddr = phys_to_virt(start);
 268	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
 269
 270	mem->nslabs = nslabs;
 271	mem->start = start;
 272	mem->end = mem->start + bytes;
 273	mem->late_alloc = late_alloc;
 274	mem->nareas = nareas;
 275	mem->area_nslabs = nslabs / mem->nareas;
 276
 277	mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
 278
 279	for (i = 0; i < mem->nareas; i++) {
 280		spin_lock_init(&mem->areas[i].lock);
 281		mem->areas[i].index = 0;
 282		mem->areas[i].used = 0;
 283	}
 284
 285	for (i = 0; i < mem->nslabs; i++) {
 286		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
 287		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
 288		mem->slots[i].alloc_size = 0;
 289	}
 290
 291	/*
 292	 * If swiotlb_unencrypted_base is set, the bounce buffer memory will
 293	 * be remapped and cleared in swiotlb_update_mem_attributes.
 294	 */
 295	if (swiotlb_unencrypted_base)
 296		return;
 297
 298	memset(vaddr, 0, bytes);
 299	mem->vaddr = vaddr;
 300	return;
 301}
 302
 303static void *swiotlb_memblock_alloc(unsigned long nslabs, unsigned int flags,
 304		int (*remap)(void *tlb, unsigned long nslabs))
 305{
 306	size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
 307	void *tlb;
 308
 309	/*
 310	 * By default allocate the bounce buffer memory from low memory, but
  311	 * allow it to be picked from anywhere for hypervisors with guest
 312	 * memory encryption.
 313	 */
 314	if (flags & SWIOTLB_ANY)
 315		tlb = memblock_alloc(bytes, PAGE_SIZE);
 316	else
 317		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
 318
 319	if (!tlb) {
 320		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
 321			__func__, bytes);
 322		return NULL;
 323	}
 324
 325	if (remap && remap(tlb, nslabs) < 0) {
 326		memblock_free(tlb, PAGE_ALIGN(bytes));
 327		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
 328		return NULL;
 329	}
 330
 331	return tlb;
 332}
 333
 334/*
 335 * Statically reserve bounce buffer space and initialize bounce buffer data
 336 * structures for the software IO TLB used to implement the DMA API.
 337 */
 338void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 339		int (*remap)(void *tlb, unsigned long nslabs))
 340{
 341	struct io_tlb_mem *mem = &io_tlb_default_mem;
 342	unsigned long nslabs;
 343	size_t alloc_size;
 344	void *tlb;
 345
 346	if (!addressing_limit && !swiotlb_force_bounce)
 347		return;
 348	if (swiotlb_force_disable)
 349		return;
 350
 351	/*
  352	 * default_nslabs may change when the number of areas is adjusted,
  353	 * so allocate the bounce buffer only after adjusting the area count.
 354	 */
 355	if (!default_nareas)
 356		swiotlb_adjust_nareas(num_possible_cpus());
 357
 358	nslabs = default_nslabs;
 359	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
 360		if (nslabs <= IO_TLB_MIN_SLABS)
 361			return;
 362		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
 363	}
 364
 365	if (default_nslabs != nslabs) {
 366		pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
 367			default_nslabs, nslabs);
 368		default_nslabs = nslabs;
 369	}
 370
 371	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
 372	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
 373	if (!mem->slots) {
 374		pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
 375			__func__, alloc_size, PAGE_SIZE);
 376		return;
 377	}
 378
 379	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
 380		default_nareas), SMP_CACHE_BYTES);
 381	if (!mem->areas) {
 382		pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
 383		return;
 384	}
 385
 386	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
 387				default_nareas);
 388
 389	if (flags & SWIOTLB_VERBOSE)
 390		swiotlb_print_info();
 391}
 392
 393void __init swiotlb_init(bool addressing_limit, unsigned int flags)
 394{
 395	swiotlb_init_remap(addressing_limit, flags, NULL);
 396}
 397
 398/*
 399 * Systems with larger DMA zones (those that don't support ISA) can
  400 * initialize the swiotlb later using the page allocator if needed.
 401 * This should be just like above, but with some error catching.
 402 */
 403int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 404		int (*remap)(void *tlb, unsigned long nslabs))
 405{
 406	struct io_tlb_mem *mem = &io_tlb_default_mem;
 407	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
 408	unsigned char *vstart = NULL;
 409	unsigned int order, area_order;
 410	bool retried = false;
 411	int rc = 0;
 412
 413	if (swiotlb_force_disable)
 414		return 0;
 415
 416retry:
 417	order = get_order(nslabs << IO_TLB_SHIFT);
 418	nslabs = SLABS_PER_PAGE << order;
 419
 420	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
 421		vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
 422						  order);
 423		if (vstart)
 424			break;
 425		order--;
 426		nslabs = SLABS_PER_PAGE << order;
 427		retried = true;
 428	}
 429
 430	if (!vstart)
 431		return -ENOMEM;
 432
 433	if (remap)
 434		rc = remap(vstart, nslabs);
 435	if (rc) {
 436		free_pages((unsigned long)vstart, order);
 437
 438		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
 439		if (nslabs < IO_TLB_MIN_SLABS)
 440			return rc;
 441		retried = true;
 442		goto retry;
 443	}
 444
 445	if (retried) {
 446		pr_warn("only able to allocate %ld MB\n",
 447			(PAGE_SIZE << order) >> 20);
 448	}
 449
 450	if (!default_nareas)
 451		swiotlb_adjust_nareas(num_possible_cpus());
 452
 453	area_order = get_order(array_size(sizeof(*mem->areas),
 454		default_nareas));
 455	mem->areas = (struct io_tlb_area *)
 456		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
 457	if (!mem->areas)
 458		goto error_area;
 459
 460	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 461		get_order(array_size(sizeof(*mem->slots), nslabs)));
 462	if (!mem->slots)
 463		goto error_slots;
 464
 465	set_memory_decrypted((unsigned long)vstart,
 466			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
 467	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
 468				default_nareas);
 469
 470	swiotlb_print_info();
 471	return 0;
 472
 473error_slots:
 474	free_pages((unsigned long)mem->areas, area_order);
 475error_area:
 476	free_pages((unsigned long)vstart, order);
 477	return -ENOMEM;
 478}
 479
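     /*
      * Tear down the default bounce buffer pool: re-encrypt the memory and return
      * it either to the page allocator or to memblock, depending on how it was
      * allocated.
      */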
 480void __init swiotlb_exit(void)
 481{
 482	struct io_tlb_mem *mem = &io_tlb_default_mem;
 483	unsigned long tbl_vaddr;
 484	size_t tbl_size, slots_size;
 485	unsigned int area_order;
 486
 487	if (swiotlb_force_bounce)
 488		return;
 489
 490	if (!mem->nslabs)
 491		return;
 492
 493	pr_info("tearing down default memory pool\n");
 494	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
 495	tbl_size = PAGE_ALIGN(mem->end - mem->start);
 496	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));
 497
 498	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
 499	if (mem->late_alloc) {
 500		area_order = get_order(array_size(sizeof(*mem->areas),
 501			mem->nareas));
 502		free_pages((unsigned long)mem->areas, area_order);
 503		free_pages(tbl_vaddr, get_order(tbl_size));
 504		free_pages((unsigned long)mem->slots, get_order(slots_size));
 505	} else {
 506		memblock_free_late(__pa(mem->areas),
 507			array_size(sizeof(*mem->areas), mem->nareas));
 508		memblock_free_late(mem->start, tbl_size);
 509		memblock_free_late(__pa(mem->slots), slots_size);
 510	}
 511
 512	memset(mem, 0, sizeof(*mem));
 513}
 514
 515/*
  516 * Return the offset into an IO TLB slot required to keep the device happy.
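      *
      * Some devices require DMA addresses to preserve the low bits of the
      * original address (dma_get_min_align_mask()); return those low bits,
      * limited to the offset within a single IO_TLB_SIZE slot.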
 517 */
 518static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
 519{
 520	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
 521}
 522
 523/*
 524 * Bounce: copy the swiotlb buffer from or back to the original dma location
 525 */
 526static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
 527			   enum dma_data_direction dir)
 528{
 529	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 530	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
 531	phys_addr_t orig_addr = mem->slots[index].orig_addr;
 532	size_t alloc_size = mem->slots[index].alloc_size;
 533	unsigned long pfn = PFN_DOWN(orig_addr);
 534	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
 535	unsigned int tlb_offset, orig_addr_offset;
 536
 537	if (orig_addr == INVALID_PHYS_ADDR)
 538		return;
 539
 540	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
 541	orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
 542	if (tlb_offset < orig_addr_offset) {
 543		dev_WARN_ONCE(dev, 1,
 544			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
 545			orig_addr_offset, tlb_offset);
 546		return;
 547	}
 548
 549	tlb_offset -= orig_addr_offset;
 550	if (tlb_offset > alloc_size) {
 551		dev_WARN_ONCE(dev, 1,
 552			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
 553			alloc_size, size, tlb_offset);
 554		return;
 555	}
 556
 557	orig_addr += tlb_offset;
 558	alloc_size -= tlb_offset;
 559
 560	if (size > alloc_size) {
 561		dev_WARN_ONCE(dev, 1,
 562			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
 563			alloc_size, size);
 564		size = alloc_size;
 565	}
 566
 567	if (PageHighMem(pfn_to_page(pfn))) {
 568		unsigned int offset = orig_addr & ~PAGE_MASK;
 569		struct page *page;
 570		unsigned int sz = 0;
 571		unsigned long flags;
 572
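     		/*
     		 * The original buffer lives in highmem and may not be mapped
     		 * in the kernel's linear mapping, so copy it one page at a
     		 * time via memcpy_from_page()/memcpy_to_page().
     		 */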
 573		while (size) {
 574			sz = min_t(size_t, PAGE_SIZE - offset, size);
 575
 576			local_irq_save(flags);
 577			page = pfn_to_page(pfn);
 578			if (dir == DMA_TO_DEVICE)
 579				memcpy_from_page(vaddr, page, offset, sz);
 580			else
 581				memcpy_to_page(page, offset, vaddr, sz);
 582			local_irq_restore(flags);
 583
 584			size -= sz;
 585			pfn++;
 586			vaddr += sz;
 587			offset = 0;
 588		}
 589	} else if (dir == DMA_TO_DEVICE) {
 590		memcpy(vaddr, phys_to_virt(orig_addr), size);
 591	} else {
 592		memcpy(phys_to_virt(orig_addr), vaddr, size);
 593	}
 594}
 595
 596static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
 597{
 598	return start + (idx << IO_TLB_SHIFT);
 599}
 600
 601/*
 602 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 603 */
 604static inline unsigned long get_max_slots(unsigned long boundary_mask)
 605{
 606	if (boundary_mask == ~0UL)
 607		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
 608	return nr_slots(boundary_mask + 1);
 609}
 610
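     /* Wrap an area-relative slot index back to the start of the area. */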
 611static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index)
 612{
 613	if (index >= mem->area_nslabs)
 614		return 0;
 615	return index;
 616}
 617
 618/*
  619 * Find a suitable number of contiguous IO TLB entries to fit this request
  620 * and allocate a buffer from that IO TLB pool.
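      * The search is confined to the single area selected by @area_index; this
      * function takes and releases that area's lock itself.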
 621 */
 622static int swiotlb_do_find_slots(struct device *dev, int area_index,
 623		phys_addr_t orig_addr, size_t alloc_size,
 624		unsigned int alloc_align_mask)
 625{
 626	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 627	struct io_tlb_area *area = mem->areas + area_index;
 628	unsigned long boundary_mask = dma_get_seg_boundary(dev);
 629	dma_addr_t tbl_dma_addr =
 630		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
 631	unsigned long max_slots = get_max_slots(boundary_mask);
 632	unsigned int iotlb_align_mask =
 633		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
 634	unsigned int nslots = nr_slots(alloc_size), stride;
 635	unsigned int index, wrap, count = 0, i;
 636	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
 637	unsigned long flags;
 638	unsigned int slot_base;
 639	unsigned int slot_index;
 640
 641	BUG_ON(!nslots);
 642	BUG_ON(area_index >= mem->nareas);
 643
 644	/*
 645	 * For mappings with an alignment requirement don't bother looping to
  646	 * unaligned slots once we find an aligned one.  For allocations of
  647	 * PAGE_SIZE or larger, only look for page-aligned allocations.
 648	 */
 649	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
 650	if (alloc_size >= PAGE_SIZE)
 651		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
 652	stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);
 653
 654	spin_lock_irqsave(&area->lock, flags);
 655	if (unlikely(nslots > mem->area_nslabs - area->used))
 656		goto not_found;
 657
 658	slot_base = area_index * mem->area_nslabs;
 659	index = wrap = wrap_area_index(mem, ALIGN(area->index, stride));
 660
 661	do {
 662		slot_index = slot_base + index;
 663
 664		if (orig_addr &&
 665		    (slot_addr(tbl_dma_addr, slot_index) &
 666		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
 667			index = wrap_area_index(mem, index + 1);
 668			continue;
 669		}
 670
 671		/*
 672		 * If we find a slot that indicates we have 'nslots' number of
 673		 * contiguous buffers, we allocate the buffers from that slot
  674		 * and mark the entries as '0', indicating they are unavailable.
 675		 */
 676		if (!iommu_is_span_boundary(slot_index, nslots,
 677					    nr_slots(tbl_dma_addr),
 678					    max_slots)) {
 679			if (mem->slots[slot_index].list >= nslots)
 680				goto found;
 681		}
 682		index = wrap_area_index(mem, index + stride);
 683	} while (index != wrap);
 684
 685not_found:
 686	spin_unlock_irqrestore(&area->lock, flags);
 687	return -1;
 688
 689found:
 690	for (i = slot_index; i < slot_index + nslots; i++) {
 691		mem->slots[i].list = 0;
 692		mem->slots[i].alloc_size = alloc_size - (offset +
 693				((i - slot_index) << IO_TLB_SHIFT));
 694	}
 695	for (i = slot_index - 1;
 696	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
 697	     mem->slots[i].list; i--)
 698		mem->slots[i].list = ++count;
 699
 700	/*
 701	 * Update the indices to avoid searching in the next round.
 702	 */
 703	if (index + nslots < mem->area_nslabs)
 704		area->index = index + nslots;
 705	else
 706		area->index = 0;
 707	area->used += nslots;
 708	spin_unlock_irqrestore(&area->lock, flags);
 709	return slot_index;
 710}
 711
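     /*
      * Search the whole pool for free slots: start with the area matching the
      * calling CPU and fall back to the remaining areas round-robin if that
      * area is full (nareas is always a power of two, so the CPU id can simply
      * be masked).
      */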
 712static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 713		size_t alloc_size, unsigned int alloc_align_mask)
 714{
 715	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 716	int start = raw_smp_processor_id() & (mem->nareas - 1);
 717	int i = start, index;
 718
 719	do {
 720		index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
 721					      alloc_align_mask);
 722		if (index >= 0)
 723			return index;
 724		if (++i >= mem->nareas)
 725			i = 0;
 726	} while (i != start);
 727
 728	return -1;
 729}
 730
 731static unsigned long mem_used(struct io_tlb_mem *mem)
 732{
 733	int i;
 734	unsigned long used = 0;
 735
 736	for (i = 0; i < mem->nareas; i++)
 737		used += mem->areas[i].used;
 738	return used;
 739}
 740
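     /*
      * Bounce-map a single physically contiguous buffer: find free slots, record
      * the original address for later syncs, and copy the data into the bounce
      * buffer. @mapping_size is the number of bytes actually transferred, while
      * @alloc_size (>= @mapping_size) is the size of the slot allocation
      * requested by the caller. Returns the physical address of the bounce
      * buffer, or (phys_addr_t)DMA_MAPPING_ERROR on failure.
      */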
 741phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 742		size_t mapping_size, size_t alloc_size,
 743		unsigned int alloc_align_mask, enum dma_data_direction dir,
 744		unsigned long attrs)
 745{
 746	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 747	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
 748	unsigned int i;
 749	int index;
 750	phys_addr_t tlb_addr;
 751
 752	if (!mem || !mem->nslabs) {
 753		dev_warn_ratelimited(dev,
 754			"Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 755		return (phys_addr_t)DMA_MAPPING_ERROR;
 756	}
 757
 758	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 759		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
 760
 761	if (mapping_size > alloc_size) {
 762		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
 763			      mapping_size, alloc_size);
 764		return (phys_addr_t)DMA_MAPPING_ERROR;
 765	}
 766
 767	index = swiotlb_find_slots(dev, orig_addr,
 768				   alloc_size + offset, alloc_align_mask);
 769	if (index == -1) {
 770		if (!(attrs & DMA_ATTR_NO_WARN))
 771			dev_warn_ratelimited(dev,
 772	"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
 773				 alloc_size, mem->nslabs, mem_used(mem));
 774		return (phys_addr_t)DMA_MAPPING_ERROR;
 775	}
 776
 777	/*
 778	 * Save away the mapping from the original address to the DMA address.
 779	 * This is needed when we sync the memory.  Then we sync the buffer if
 780	 * needed.
 781	 */
 782	for (i = 0; i < nr_slots(alloc_size + offset); i++)
 783		mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
 784	tlb_addr = slot_addr(mem->start, index) + offset;
 785	/*
 786	 * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
 787	 * to the tlb buffer, if we knew for sure the device will
 788	 * overwrite the entire current content. But we don't. Thus
 789	 * unconditional bounce may prevent leaking swiotlb content (i.e.
 790	 * kernel memory) to user-space.
 791	 */
 792	swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
 793	return tlb_addr;
 794}
 795
 796static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
 797{
 798	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 799	unsigned long flags;
 800	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
 801	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
 802	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
 803	int aindex = index / mem->area_nslabs;
 804	struct io_tlb_area *area = &mem->areas[aindex];
 805	int count, i;
 806
 807	/*
 808	 * Return the buffer to the free list by setting the corresponding
 809	 * entries to indicate the number of contiguous entries available.
 810	 * While returning the entries to the free list, we merge the entries
 811	 * with slots below and above the pool being returned.
 812	 */
 813	BUG_ON(aindex >= mem->nareas);
 814
 815	spin_lock_irqsave(&area->lock, flags);
 816	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
 817		count = mem->slots[index + nslots].list;
 818	else
 819		count = 0;
 820
 821	/*
  822	 * Step 1: return the slots to the free list, merging them with the
  823	 * succeeding slots
 824	 */
 825	for (i = index + nslots - 1; i >= index; i--) {
 826		mem->slots[i].list = ++count;
 827		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
 828		mem->slots[i].alloc_size = 0;
 829	}
 830
 831	/*
 832	 * Step 2: merge the returned slots with the preceding slots, if
 833	 * available (non zero)
 834	 */
 835	for (i = index - 1;
 836	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
 837	     i--)
 838		mem->slots[i].list = ++count;
 839	area->used -= nslots;
 840	spin_unlock_irqrestore(&area->lock, flags);
 841}
 842
 843/*
 844 * tlb_addr is the physical address of the bounce buffer to unmap.
 845 */
 846void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
 847			      size_t mapping_size, enum dma_data_direction dir,
 848			      unsigned long attrs)
 849{
 850	/*
 851	 * First, sync the memory before unmapping the entry
 852	 */
 853	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 854	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
 855		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
 856
 857	swiotlb_release_slots(dev, tlb_addr);
 858}
 859
 860void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
 861		size_t size, enum dma_data_direction dir)
 862{
 863	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
 864		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
 865	else
 866		BUG_ON(dir != DMA_FROM_DEVICE);
 867}
 868
 869void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
 870		size_t size, enum dma_data_direction dir)
 871{
 872	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 873		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
 874	else
 875		BUG_ON(dir != DMA_TO_DEVICE);
 876}
 877
 878/*
 879 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 880 * to the device copy the data into it as well.
 881 */
 882dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
 883		enum dma_data_direction dir, unsigned long attrs)
 884{
 885	phys_addr_t swiotlb_addr;
 886	dma_addr_t dma_addr;
 887
 888	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);
 889
 890	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
 891			attrs);
 892	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
 893		return DMA_MAPPING_ERROR;
 894
 895	/* Ensure that the address returned is DMA'ble */
 896	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
 897	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
 898		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
 899			attrs | DMA_ATTR_SKIP_CPU_SYNC);
 900		dev_WARN_ONCE(dev, 1,
 901			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
 902			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
 903		return DMA_MAPPING_ERROR;
 904	}
 905
 906	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 907		arch_sync_dma_for_device(swiotlb_addr, size, dir);
 908	return dma_addr;
 909}
 910
 911size_t swiotlb_max_mapping_size(struct device *dev)
 912{
 913	int min_align_mask = dma_get_min_align_mask(dev);
 914	int min_align = 0;
 915
 916	/*
  917	 * swiotlb_find_slots() skips slots according to the
  918	 * min align mask. This affects the maximum mapping size.
  919	 * Take it into account here.
 920	 */
 921	if (min_align_mask)
 922		min_align = roundup(min_align_mask, IO_TLB_SIZE);
 923
 924	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
 925}
 926
 927bool is_swiotlb_active(struct device *dev)
 928{
 929	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 930
 931	return mem && mem->nslabs;
 932}
 933EXPORT_SYMBOL_GPL(is_swiotlb_active);
 934
 935static int io_tlb_used_get(void *data, u64 *val)
 936{
 937	*val = mem_used(&io_tlb_default_mem);
 938	return 0;
 939}
 940DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
 941
 942static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
 943					 const char *dirname)
 944{
 945	mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
 946	if (!mem->nslabs)
 947		return;
 948
 949	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
 950	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL,
 951			&fops_io_tlb_used);
 952}
 953
 954static int __init __maybe_unused swiotlb_create_default_debugfs(void)
 955{
 956	swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
 957	return 0;
 958}
 959
 960#ifdef CONFIG_DEBUG_FS
 961late_initcall(swiotlb_create_default_debugfs);
 962#endif
 963
 964#ifdef CONFIG_DMA_RESTRICTED_POOL
 965
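     /*
      * Allocate a buffer of @size bytes from the device's restricted DMA pool
      * and return its first page, or NULL if the pool is missing or exhausted.
      */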
 966struct page *swiotlb_alloc(struct device *dev, size_t size)
 967{
 968	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
 969	phys_addr_t tlb_addr;
 970	int index;
 971
 972	if (!mem)
 973		return NULL;
 974
 975	index = swiotlb_find_slots(dev, 0, size, 0);
 976	if (index == -1)
 977		return NULL;
 978
 979	tlb_addr = slot_addr(mem->start, index);
 980
 981	return pfn_to_page(PFN_DOWN(tlb_addr));
 982}
 983
 984bool swiotlb_free(struct device *dev, struct page *page, size_t size)
 985{
 986	phys_addr_t tlb_addr = page_to_phys(page);
 987
 988	if (!is_swiotlb_buffer(dev, tlb_addr))
 989		return false;
 990
 991	swiotlb_release_slots(dev, tlb_addr);
 992
 993	return true;
 994}
 995
 996static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
 997				    struct device *dev)
 998{
 999	struct io_tlb_mem *mem = rmem->priv;
1000	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
1001
 1002	/* Set the per-device IO TLB area count to one. */
1003	unsigned int nareas = 1;
1004
1005	/*
 1006	 * Since multiple devices can share the same pool, the private data
 1007	 * (the io_tlb_mem struct) is initialized by the first device attached
 1008	 * to it.
1009	 */
1010	if (!mem) {
1011		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
1012		if (!mem)
1013			return -ENOMEM;
1014
1015		mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL);
1016		if (!mem->slots) {
1017			kfree(mem);
1018			return -ENOMEM;
1019		}
1020
1021		mem->areas = kcalloc(nareas, sizeof(*mem->areas),
1022				GFP_KERNEL);
1023		if (!mem->areas) {
1024			kfree(mem->slots);
1025			kfree(mem);
1026			return -ENOMEM;
1027		}
1028
1029		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
1030				     rmem->size >> PAGE_SHIFT);
1031		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
1032					false, nareas);
1033		mem->for_alloc = true;
1034
1035		rmem->priv = mem;
1036
1037		swiotlb_create_debugfs_files(mem, rmem->name);
1038	}
1039
1040	dev->dma_io_tlb_mem = mem;
1041
1042	return 0;
1043}
1044
1045static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
1046					struct device *dev)
1047{
1048	dev->dma_io_tlb_mem = &io_tlb_default_mem;
1049}
1050
1051static const struct reserved_mem_ops rmem_swiotlb_ops = {
1052	.device_init = rmem_swiotlb_device_init,
1053	.device_release = rmem_swiotlb_device_release,
1054};
1055
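     /*
      * Set up a restricted DMA pool described by a reserved-memory node with
      * compatible "restricted-dma-pool". The region must not be marked reusable,
      * no-map or as the default CMA/DMA pool, and it has to live within the
      * kernel's linear mapping.
      */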
1056static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
1057{
1058	unsigned long node = rmem->fdt_node;
1059
1060	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
1061	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
1062	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
1063	    of_get_flat_dt_prop(node, "no-map", NULL))
1064		return -EINVAL;
1065
1066	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
1067		pr_err("Restricted DMA pool must be accessible within the linear mapping.");
1068		return -EINVAL;
1069	}
1070
1071	rmem->ops = &rmem_swiotlb_ops;
1072	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
1073		&rmem->base, (unsigned long)rmem->size / SZ_1M);
1074	return 0;
1075}
1076
1077RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
1078#endif /* CONFIG_DMA_RESTRICTED_POOL */