Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Dynamic DMA mapping support.
4 *
5 * This implementation is a fallback for platforms that do not support
6 * I/O TLBs (aka DMA address translation hardware).
7 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
8 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
9 * Copyright (C) 2000, 2003 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
11 *
12 * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
13 * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
14 * unnecessary i-cache flushing.
15 * 04/07/.. ak Better overflow handling. Assorted fixes.
16 * 05/09/10 linville Add support for syncing ranges, support syncing for
17 * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
18 * 08/12/11 beckyb Add highmem support
19 */
20
21#define pr_fmt(fmt) "software IO TLB: " fmt
22
23#include <linux/cache.h>
24#include <linux/cc_platform.h>
25#include <linux/ctype.h>
26#include <linux/debugfs.h>
27#include <linux/dma-direct.h>
28#include <linux/dma-map-ops.h>
29#include <linux/export.h>
30#include <linux/gfp.h>
31#include <linux/highmem.h>
32#include <linux/io.h>
33#include <linux/iommu-helper.h>
34#include <linux/init.h>
35#include <linux/memblock.h>
36#include <linux/mm.h>
37#include <linux/pfn.h>
38#include <linux/scatterlist.h>
39#include <linux/set_memory.h>
40#include <linux/spinlock.h>
41#include <linux/string.h>
42#include <linux/swiotlb.h>
43#include <linux/types.h>
44#ifdef CONFIG_DMA_RESTRICTED_POOL
45#include <linux/of.h>
46#include <linux/of_fdt.h>
47#include <linux/of_reserved_mem.h>
48#include <linux/slab.h>
49#endif
50
51#define CREATE_TRACE_POINTS
52#include <trace/events/swiotlb.h>
53
54#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
55
56/*
57 * Minimum IO TLB size to bother booting with. Systems with mainly
58 * 64bit capable cards will only lightly use the swiotlb. If we can't
59 * allocate a contiguous 1MB, we're probably in trouble anyway.
60 */
61#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
62
63#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
64
65struct io_tlb_slot {
66 phys_addr_t orig_addr;
67 size_t alloc_size;
68 unsigned int list;
69};
70
71static bool swiotlb_force_bounce;
72static bool swiotlb_force_disable;
73
74struct io_tlb_mem io_tlb_default_mem;
75
76phys_addr_t swiotlb_unencrypted_base;
77
78static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
79static unsigned long default_nareas;
80
81/**
82 * struct io_tlb_area - IO TLB memory area descriptor
83 *
84 * This is a single area with a single lock.
85 *
86 * @used: The number of used IO TLB block.
87 * @index: The slot index to start searching in this area for next round.
88 * @lock: The lock to protect the above data structures in the map and
89 * unmap calls.
90 */
91struct io_tlb_area {
92 unsigned long used;
93 unsigned int index;
94 spinlock_t lock;
95};
96
97/*
98 * Round up number of slabs to the next power of 2. The last area is going
99 * be smaller than the rest if default_nslabs is not power of two.
100 * The number of slot in an area should be a multiple of IO_TLB_SEGSIZE,
101 * otherwise a segment may span two or more areas. It conflicts with free
102 * contiguous slots tracking: free slots are treated contiguous no matter
103 * whether they cross an area boundary.
104 *
105 * Return true if default_nslabs is rounded up.
106 */
107static bool round_up_default_nslabs(void)
108{
109 if (!default_nareas)
110 return false;
111
112 if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
113 default_nslabs = IO_TLB_SEGSIZE * default_nareas;
114 else if (is_power_of_2(default_nslabs))
115 return false;
116 default_nslabs = roundup_pow_of_two(default_nslabs);
117 return true;
118}
119
120static void swiotlb_adjust_nareas(unsigned int nareas)
121{
122 /* use a single area when non is specified */
123 if (!nareas)
124 nareas = 1;
125 else if (!is_power_of_2(nareas))
126 nareas = roundup_pow_of_two(nareas);
127
128 default_nareas = nareas;
129
130 pr_info("area num %d.\n", nareas);
131 if (round_up_default_nslabs())
132 pr_info("SWIOTLB bounce buffer size roundup to %luMB",
133 (default_nslabs << IO_TLB_SHIFT) >> 20);
134}
135
136static int __init
137setup_io_tlb_npages(char *str)
138{
139 if (isdigit(*str)) {
140 /* avoid tail segment of size < IO_TLB_SEGSIZE */
141 default_nslabs =
142 ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
143 }
144 if (*str == ',')
145 ++str;
146 if (isdigit(*str))
147 swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
148 if (*str == ',')
149 ++str;
150 if (!strcmp(str, "force"))
151 swiotlb_force_bounce = true;
152 else if (!strcmp(str, "noforce"))
153 swiotlb_force_disable = true;
154
155 return 0;
156}
157early_param("swiotlb", setup_io_tlb_npages);
158
159unsigned int swiotlb_max_segment(void)
160{
161 if (!io_tlb_default_mem.nslabs)
162 return 0;
163 return rounddown(io_tlb_default_mem.nslabs << IO_TLB_SHIFT, PAGE_SIZE);
164}
165EXPORT_SYMBOL_GPL(swiotlb_max_segment);
166
167unsigned long swiotlb_size_or_default(void)
168{
169 return default_nslabs << IO_TLB_SHIFT;
170}
171
172void __init swiotlb_adjust_size(unsigned long size)
173{
174 /*
175 * If swiotlb parameter has not been specified, give a chance to
176 * architectures such as those supporting memory encryption to
177 * adjust/expand SWIOTLB size for their use.
178 */
179 if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
180 return;
181
182 size = ALIGN(size, IO_TLB_SIZE);
183 default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
184 if (round_up_default_nslabs())
185 size = default_nslabs << IO_TLB_SHIFT;
186 pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
187}
188
189void swiotlb_print_info(void)
190{
191 struct io_tlb_mem *mem = &io_tlb_default_mem;
192
193 if (!mem->nslabs) {
194 pr_warn("No low mem\n");
195 return;
196 }
197
198 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
199 (mem->nslabs << IO_TLB_SHIFT) >> 20);
200}
201
202static inline unsigned long io_tlb_offset(unsigned long val)
203{
204 return val & (IO_TLB_SEGSIZE - 1);
205}
206
207static inline unsigned long nr_slots(u64 val)
208{
209 return DIV_ROUND_UP(val, IO_TLB_SIZE);
210}
211
212/*
213 * Remap swioltb memory in the unencrypted physical address space
214 * when swiotlb_unencrypted_base is set. (e.g. for Hyper-V AMD SEV-SNP
215 * Isolation VMs).
216 */
217#ifdef CONFIG_HAS_IOMEM
218static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
219{
220 void *vaddr = NULL;
221
222 if (swiotlb_unencrypted_base) {
223 phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;
224
225 vaddr = memremap(paddr, bytes, MEMREMAP_WB);
226 if (!vaddr)
227 pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
228 &paddr, bytes);
229 }
230
231 return vaddr;
232}
233#else
234static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
235{
236 return NULL;
237}
238#endif
239
240/*
241 * Early SWIOTLB allocation may be too early to allow an architecture to
242 * perform the desired operations. This function allows the architecture to
243 * call SWIOTLB when the operations are possible. It needs to be called
244 * before the SWIOTLB memory is used.
245 */
246void __init swiotlb_update_mem_attributes(void)
247{
248 struct io_tlb_mem *mem = &io_tlb_default_mem;
249 void *vaddr;
250 unsigned long bytes;
251
252 if (!mem->nslabs || mem->late_alloc)
253 return;
254 vaddr = phys_to_virt(mem->start);
255 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
256 set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
257
258 mem->vaddr = swiotlb_mem_remap(mem, bytes);
259 if (!mem->vaddr)
260 mem->vaddr = vaddr;
261}
262
263static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
264 unsigned long nslabs, unsigned int flags,
265 bool late_alloc, unsigned int nareas)
266{
267 void *vaddr = phys_to_virt(start);
268 unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
269
270 mem->nslabs = nslabs;
271 mem->start = start;
272 mem->end = mem->start + bytes;
273 mem->late_alloc = late_alloc;
274 mem->nareas = nareas;
275 mem->area_nslabs = nslabs / mem->nareas;
276
277 mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
278
279 for (i = 0; i < mem->nareas; i++) {
280 spin_lock_init(&mem->areas[i].lock);
281 mem->areas[i].index = 0;
282 mem->areas[i].used = 0;
283 }
284
285 for (i = 0; i < mem->nslabs; i++) {
286 mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
287 mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
288 mem->slots[i].alloc_size = 0;
289 }
290
291 /*
292 * If swiotlb_unencrypted_base is set, the bounce buffer memory will
293 * be remapped and cleared in swiotlb_update_mem_attributes.
294 */
295 if (swiotlb_unencrypted_base)
296 return;
297
298 memset(vaddr, 0, bytes);
299 mem->vaddr = vaddr;
300 return;
301}
302
303static void *swiotlb_memblock_alloc(unsigned long nslabs, unsigned int flags,
304 int (*remap)(void *tlb, unsigned long nslabs))
305{
306 size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
307 void *tlb;
308
309 /*
310 * By default allocate the bounce buffer memory from low memory, but
311 * allow to pick a location everywhere for hypervisors with guest
312 * memory encryption.
313 */
314 if (flags & SWIOTLB_ANY)
315 tlb = memblock_alloc(bytes, PAGE_SIZE);
316 else
317 tlb = memblock_alloc_low(bytes, PAGE_SIZE);
318
319 if (!tlb) {
320 pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
321 __func__, bytes);
322 return NULL;
323 }
324
325 if (remap && remap(tlb, nslabs) < 0) {
326 memblock_free(tlb, PAGE_ALIGN(bytes));
327 pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
328 return NULL;
329 }
330
331 return tlb;
332}
333
334/*
335 * Statically reserve bounce buffer space and initialize bounce buffer data
336 * structures for the software IO TLB used to implement the DMA API.
337 */
338void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
339 int (*remap)(void *tlb, unsigned long nslabs))
340{
341 struct io_tlb_mem *mem = &io_tlb_default_mem;
342 unsigned long nslabs;
343 size_t alloc_size;
344 void *tlb;
345
346 if (!addressing_limit && !swiotlb_force_bounce)
347 return;
348 if (swiotlb_force_disable)
349 return;
350
351 /*
352 * default_nslabs maybe changed when adjust area number.
353 * So allocate bounce buffer after adjusting area number.
354 */
355 if (!default_nareas)
356 swiotlb_adjust_nareas(num_possible_cpus());
357
358 nslabs = default_nslabs;
359 while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
360 if (nslabs <= IO_TLB_MIN_SLABS)
361 return;
362 nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
363 }
364
365 if (default_nslabs != nslabs) {
366 pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
367 default_nslabs, nslabs);
368 default_nslabs = nslabs;
369 }
370
371 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
372 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
373 if (!mem->slots) {
374 pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
375 __func__, alloc_size, PAGE_SIZE);
376 return;
377 }
378
379 mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
380 default_nareas), SMP_CACHE_BYTES);
381 if (!mem->areas) {
382 pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
383 return;
384 }
385
386 swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
387 default_nareas);
388
389 if (flags & SWIOTLB_VERBOSE)
390 swiotlb_print_info();
391}
392
393void __init swiotlb_init(bool addressing_limit, unsigned int flags)
394{
395 swiotlb_init_remap(addressing_limit, flags, NULL);
396}
397
398/*
399 * Systems with larger DMA zones (those that don't support ISA) can
400 * initialize the swiotlb later using the slab allocator if needed.
401 * This should be just like above, but with some error catching.
402 */
403int swiotlb_init_late(size_t size, gfp_t gfp_mask,
404 int (*remap)(void *tlb, unsigned long nslabs))
405{
406 struct io_tlb_mem *mem = &io_tlb_default_mem;
407 unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
408 unsigned char *vstart = NULL;
409 unsigned int order, area_order;
410 bool retried = false;
411 int rc = 0;
412
413 if (swiotlb_force_disable)
414 return 0;
415
416retry:
417 order = get_order(nslabs << IO_TLB_SHIFT);
418 nslabs = SLABS_PER_PAGE << order;
419
420 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
421 vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
422 order);
423 if (vstart)
424 break;
425 order--;
426 nslabs = SLABS_PER_PAGE << order;
427 retried = true;
428 }
429
430 if (!vstart)
431 return -ENOMEM;
432
433 if (remap)
434 rc = remap(vstart, nslabs);
435 if (rc) {
436 free_pages((unsigned long)vstart, order);
437
438 nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
439 if (nslabs < IO_TLB_MIN_SLABS)
440 return rc;
441 retried = true;
442 goto retry;
443 }
444
445 if (retried) {
446 pr_warn("only able to allocate %ld MB\n",
447 (PAGE_SIZE << order) >> 20);
448 }
449
450 if (!default_nareas)
451 swiotlb_adjust_nareas(num_possible_cpus());
452
453 area_order = get_order(array_size(sizeof(*mem->areas),
454 default_nareas));
455 mem->areas = (struct io_tlb_area *)
456 __get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
457 if (!mem->areas)
458 goto error_area;
459
460 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
461 get_order(array_size(sizeof(*mem->slots), nslabs)));
462 if (!mem->slots)
463 goto error_slots;
464
465 set_memory_decrypted((unsigned long)vstart,
466 (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
467 swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
468 default_nareas);
469
470 swiotlb_print_info();
471 return 0;
472
473error_slots:
474 free_pages((unsigned long)mem->areas, area_order);
475error_area:
476 free_pages((unsigned long)vstart, order);
477 return -ENOMEM;
478}
479
480void __init swiotlb_exit(void)
481{
482 struct io_tlb_mem *mem = &io_tlb_default_mem;
483 unsigned long tbl_vaddr;
484 size_t tbl_size, slots_size;
485 unsigned int area_order;
486
487 if (swiotlb_force_bounce)
488 return;
489
490 if (!mem->nslabs)
491 return;
492
493 pr_info("tearing down default memory pool\n");
494 tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
495 tbl_size = PAGE_ALIGN(mem->end - mem->start);
496 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));
497
498 set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
499 if (mem->late_alloc) {
500 area_order = get_order(array_size(sizeof(*mem->areas),
501 mem->nareas));
502 free_pages((unsigned long)mem->areas, area_order);
503 free_pages(tbl_vaddr, get_order(tbl_size));
504 free_pages((unsigned long)mem->slots, get_order(slots_size));
505 } else {
506 memblock_free_late(__pa(mem->areas),
507 array_size(sizeof(*mem->areas), mem->nareas));
508 memblock_free_late(mem->start, tbl_size);
509 memblock_free_late(__pa(mem->slots), slots_size);
510 }
511
512 memset(mem, 0, sizeof(*mem));
513}
514
515/*
516 * Return the offset into a iotlb slot required to keep the device happy.
517 */
518static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
519{
520 return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
521}
522
523/*
524 * Bounce: copy the swiotlb buffer from or back to the original dma location
525 */
526static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
527 enum dma_data_direction dir)
528{
529 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
530 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
531 phys_addr_t orig_addr = mem->slots[index].orig_addr;
532 size_t alloc_size = mem->slots[index].alloc_size;
533 unsigned long pfn = PFN_DOWN(orig_addr);
534 unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
535 unsigned int tlb_offset, orig_addr_offset;
536
537 if (orig_addr == INVALID_PHYS_ADDR)
538 return;
539
540 tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
541 orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
542 if (tlb_offset < orig_addr_offset) {
543 dev_WARN_ONCE(dev, 1,
544 "Access before mapping start detected. orig offset %u, requested offset %u.\n",
545 orig_addr_offset, tlb_offset);
546 return;
547 }
548
549 tlb_offset -= orig_addr_offset;
550 if (tlb_offset > alloc_size) {
551 dev_WARN_ONCE(dev, 1,
552 "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
553 alloc_size, size, tlb_offset);
554 return;
555 }
556
557 orig_addr += tlb_offset;
558 alloc_size -= tlb_offset;
559
560 if (size > alloc_size) {
561 dev_WARN_ONCE(dev, 1,
562 "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
563 alloc_size, size);
564 size = alloc_size;
565 }
566
567 if (PageHighMem(pfn_to_page(pfn))) {
568 unsigned int offset = orig_addr & ~PAGE_MASK;
569 struct page *page;
570 unsigned int sz = 0;
571 unsigned long flags;
572
573 while (size) {
574 sz = min_t(size_t, PAGE_SIZE - offset, size);
575
576 local_irq_save(flags);
577 page = pfn_to_page(pfn);
578 if (dir == DMA_TO_DEVICE)
579 memcpy_from_page(vaddr, page, offset, sz);
580 else
581 memcpy_to_page(page, offset, vaddr, sz);
582 local_irq_restore(flags);
583
584 size -= sz;
585 pfn++;
586 vaddr += sz;
587 offset = 0;
588 }
589 } else if (dir == DMA_TO_DEVICE) {
590 memcpy(vaddr, phys_to_virt(orig_addr), size);
591 } else {
592 memcpy(phys_to_virt(orig_addr), vaddr, size);
593 }
594}
595
596static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
597{
598 return start + (idx << IO_TLB_SHIFT);
599}
600
601/*
602 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
603 */
604static inline unsigned long get_max_slots(unsigned long boundary_mask)
605{
606 if (boundary_mask == ~0UL)
607 return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
608 return nr_slots(boundary_mask + 1);
609}
610
611static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index)
612{
613 if (index >= mem->area_nslabs)
614 return 0;
615 return index;
616}
617
618/*
619 * Find a suitable number of IO TLB entries size that will fit this request and
620 * allocate a buffer from that IO TLB pool.
621 */
622static int swiotlb_do_find_slots(struct device *dev, int area_index,
623 phys_addr_t orig_addr, size_t alloc_size,
624 unsigned int alloc_align_mask)
625{
626 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
627 struct io_tlb_area *area = mem->areas + area_index;
628 unsigned long boundary_mask = dma_get_seg_boundary(dev);
629 dma_addr_t tbl_dma_addr =
630 phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
631 unsigned long max_slots = get_max_slots(boundary_mask);
632 unsigned int iotlb_align_mask =
633 dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
634 unsigned int nslots = nr_slots(alloc_size), stride;
635 unsigned int index, wrap, count = 0, i;
636 unsigned int offset = swiotlb_align_offset(dev, orig_addr);
637 unsigned long flags;
638 unsigned int slot_base;
639 unsigned int slot_index;
640
641 BUG_ON(!nslots);
642 BUG_ON(area_index >= mem->nareas);
643
644 /*
645 * For mappings with an alignment requirement don't bother looping to
646 * unaligned slots once we found an aligned one. For allocations of
647 * PAGE_SIZE or larger only look for page aligned allocations.
648 */
649 stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
650 if (alloc_size >= PAGE_SIZE)
651 stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
652 stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);
653
654 spin_lock_irqsave(&area->lock, flags);
655 if (unlikely(nslots > mem->area_nslabs - area->used))
656 goto not_found;
657
658 slot_base = area_index * mem->area_nslabs;
659 index = wrap = wrap_area_index(mem, ALIGN(area->index, stride));
660
661 do {
662 slot_index = slot_base + index;
663
664 if (orig_addr &&
665 (slot_addr(tbl_dma_addr, slot_index) &
666 iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
667 index = wrap_area_index(mem, index + 1);
668 continue;
669 }
670
671 /*
672 * If we find a slot that indicates we have 'nslots' number of
673 * contiguous buffers, we allocate the buffers from that slot
674 * and mark the entries as '0' indicating unavailable.
675 */
676 if (!iommu_is_span_boundary(slot_index, nslots,
677 nr_slots(tbl_dma_addr),
678 max_slots)) {
679 if (mem->slots[slot_index].list >= nslots)
680 goto found;
681 }
682 index = wrap_area_index(mem, index + stride);
683 } while (index != wrap);
684
685not_found:
686 spin_unlock_irqrestore(&area->lock, flags);
687 return -1;
688
689found:
690 for (i = slot_index; i < slot_index + nslots; i++) {
691 mem->slots[i].list = 0;
692 mem->slots[i].alloc_size = alloc_size - (offset +
693 ((i - slot_index) << IO_TLB_SHIFT));
694 }
695 for (i = slot_index - 1;
696 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
697 mem->slots[i].list; i--)
698 mem->slots[i].list = ++count;
699
700 /*
701 * Update the indices to avoid searching in the next round.
702 */
703 if (index + nslots < mem->area_nslabs)
704 area->index = index + nslots;
705 else
706 area->index = 0;
707 area->used += nslots;
708 spin_unlock_irqrestore(&area->lock, flags);
709 return slot_index;
710}
711
712static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
713 size_t alloc_size, unsigned int alloc_align_mask)
714{
715 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
716 int start = raw_smp_processor_id() & (mem->nareas - 1);
717 int i = start, index;
718
719 do {
720 index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
721 alloc_align_mask);
722 if (index >= 0)
723 return index;
724 if (++i >= mem->nareas)
725 i = 0;
726 } while (i != start);
727
728 return -1;
729}
730
731static unsigned long mem_used(struct io_tlb_mem *mem)
732{
733 int i;
734 unsigned long used = 0;
735
736 for (i = 0; i < mem->nareas; i++)
737 used += mem->areas[i].used;
738 return used;
739}
740
741phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
742 size_t mapping_size, size_t alloc_size,
743 unsigned int alloc_align_mask, enum dma_data_direction dir,
744 unsigned long attrs)
745{
746 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
747 unsigned int offset = swiotlb_align_offset(dev, orig_addr);
748 unsigned int i;
749 int index;
750 phys_addr_t tlb_addr;
751
752 if (!mem || !mem->nslabs) {
753 dev_warn_ratelimited(dev,
754 "Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
755 return (phys_addr_t)DMA_MAPPING_ERROR;
756 }
757
758 if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
759 pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
760
761 if (mapping_size > alloc_size) {
762 dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
763 mapping_size, alloc_size);
764 return (phys_addr_t)DMA_MAPPING_ERROR;
765 }
766
767 index = swiotlb_find_slots(dev, orig_addr,
768 alloc_size + offset, alloc_align_mask);
769 if (index == -1) {
770 if (!(attrs & DMA_ATTR_NO_WARN))
771 dev_warn_ratelimited(dev,
772 "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
773 alloc_size, mem->nslabs, mem_used(mem));
774 return (phys_addr_t)DMA_MAPPING_ERROR;
775 }
776
777 /*
778 * Save away the mapping from the original address to the DMA address.
779 * This is needed when we sync the memory. Then we sync the buffer if
780 * needed.
781 */
782 for (i = 0; i < nr_slots(alloc_size + offset); i++)
783 mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
784 tlb_addr = slot_addr(mem->start, index) + offset;
785 /*
786 * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
787 * to the tlb buffer, if we knew for sure the device will
788 * overwrite the entire current content. But we don't. Thus
789 * unconditional bounce may prevent leaking swiotlb content (i.e.
790 * kernel memory) to user-space.
791 */
792 swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
793 return tlb_addr;
794}
795
796static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
797{
798 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
799 unsigned long flags;
800 unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
801 int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
802 int nslots = nr_slots(mem->slots[index].alloc_size + offset);
803 int aindex = index / mem->area_nslabs;
804 struct io_tlb_area *area = &mem->areas[aindex];
805 int count, i;
806
807 /*
808 * Return the buffer to the free list by setting the corresponding
809 * entries to indicate the number of contiguous entries available.
810 * While returning the entries to the free list, we merge the entries
811 * with slots below and above the pool being returned.
812 */
813 BUG_ON(aindex >= mem->nareas);
814
815 spin_lock_irqsave(&area->lock, flags);
816 if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
817 count = mem->slots[index + nslots].list;
818 else
819 count = 0;
820
821 /*
822 * Step 1: return the slots to the free list, merging the slots with
823 * superceeding slots
824 */
825 for (i = index + nslots - 1; i >= index; i--) {
826 mem->slots[i].list = ++count;
827 mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
828 mem->slots[i].alloc_size = 0;
829 }
830
831 /*
832 * Step 2: merge the returned slots with the preceding slots, if
833 * available (non zero)
834 */
835 for (i = index - 1;
836 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
837 i--)
838 mem->slots[i].list = ++count;
839 area->used -= nslots;
840 spin_unlock_irqrestore(&area->lock, flags);
841}
842
843/*
844 * tlb_addr is the physical address of the bounce buffer to unmap.
845 */
846void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
847 size_t mapping_size, enum dma_data_direction dir,
848 unsigned long attrs)
849{
850 /*
851 * First, sync the memory before unmapping the entry
852 */
853 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
854 (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
855 swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
856
857 swiotlb_release_slots(dev, tlb_addr);
858}
859
860void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
861 size_t size, enum dma_data_direction dir)
862{
863 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
864 swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
865 else
866 BUG_ON(dir != DMA_FROM_DEVICE);
867}
868
869void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
870 size_t size, enum dma_data_direction dir)
871{
872 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
873 swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
874 else
875 BUG_ON(dir != DMA_TO_DEVICE);
876}
877
878/*
879 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
880 * to the device copy the data into it as well.
881 */
882dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
883 enum dma_data_direction dir, unsigned long attrs)
884{
885 phys_addr_t swiotlb_addr;
886 dma_addr_t dma_addr;
887
888 trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);
889
890 swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
891 attrs);
892 if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
893 return DMA_MAPPING_ERROR;
894
895 /* Ensure that the address returned is DMA'ble */
896 dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
897 if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
898 swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
899 attrs | DMA_ATTR_SKIP_CPU_SYNC);
900 dev_WARN_ONCE(dev, 1,
901 "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
902 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
903 return DMA_MAPPING_ERROR;
904 }
905
906 if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
907 arch_sync_dma_for_device(swiotlb_addr, size, dir);
908 return dma_addr;
909}
910
911size_t swiotlb_max_mapping_size(struct device *dev)
912{
913 int min_align_mask = dma_get_min_align_mask(dev);
914 int min_align = 0;
915
916 /*
917 * swiotlb_find_slots() skips slots according to
918 * min align mask. This affects max mapping size.
919 * Take it into acount here.
920 */
921 if (min_align_mask)
922 min_align = roundup(min_align_mask, IO_TLB_SIZE);
923
924 return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
925}
926
927bool is_swiotlb_active(struct device *dev)
928{
929 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
930
931 return mem && mem->nslabs;
932}
933EXPORT_SYMBOL_GPL(is_swiotlb_active);
934
935static int io_tlb_used_get(void *data, u64 *val)
936{
937 *val = mem_used(&io_tlb_default_mem);
938 return 0;
939}
940DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
941
942static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
943 const char *dirname)
944{
945 mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
946 if (!mem->nslabs)
947 return;
948
949 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
950 debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL,
951 &fops_io_tlb_used);
952}
953
954static int __init __maybe_unused swiotlb_create_default_debugfs(void)
955{
956 swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
957 return 0;
958}
959
960#ifdef CONFIG_DEBUG_FS
961late_initcall(swiotlb_create_default_debugfs);
962#endif
963
964#ifdef CONFIG_DMA_RESTRICTED_POOL
965
966struct page *swiotlb_alloc(struct device *dev, size_t size)
967{
968 struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
969 phys_addr_t tlb_addr;
970 int index;
971
972 if (!mem)
973 return NULL;
974
975 index = swiotlb_find_slots(dev, 0, size, 0);
976 if (index == -1)
977 return NULL;
978
979 tlb_addr = slot_addr(mem->start, index);
980
981 return pfn_to_page(PFN_DOWN(tlb_addr));
982}
983
984bool swiotlb_free(struct device *dev, struct page *page, size_t size)
985{
986 phys_addr_t tlb_addr = page_to_phys(page);
987
988 if (!is_swiotlb_buffer(dev, tlb_addr))
989 return false;
990
991 swiotlb_release_slots(dev, tlb_addr);
992
993 return true;
994}
995
996static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
997 struct device *dev)
998{
999 struct io_tlb_mem *mem = rmem->priv;
1000 unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
1001
1002 /* Set Per-device io tlb area to one */
1003 unsigned int nareas = 1;
1004
1005 /*
1006 * Since multiple devices can share the same pool, the private data,
1007 * io_tlb_mem struct, will be initialized by the first device attached
1008 * to it.
1009 */
1010 if (!mem) {
1011 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
1012 if (!mem)
1013 return -ENOMEM;
1014
1015 mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL);
1016 if (!mem->slots) {
1017 kfree(mem);
1018 return -ENOMEM;
1019 }
1020
1021 mem->areas = kcalloc(nareas, sizeof(*mem->areas),
1022 GFP_KERNEL);
1023 if (!mem->areas) {
1024 kfree(mem->slots);
1025 kfree(mem);
1026 return -ENOMEM;
1027 }
1028
1029 set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
1030 rmem->size >> PAGE_SHIFT);
1031 swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
1032 false, nareas);
1033 mem->for_alloc = true;
1034
1035 rmem->priv = mem;
1036
1037 swiotlb_create_debugfs_files(mem, rmem->name);
1038 }
1039
1040 dev->dma_io_tlb_mem = mem;
1041
1042 return 0;
1043}
1044
1045static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
1046 struct device *dev)
1047{
1048 dev->dma_io_tlb_mem = &io_tlb_default_mem;
1049}
1050
1051static const struct reserved_mem_ops rmem_swiotlb_ops = {
1052 .device_init = rmem_swiotlb_device_init,
1053 .device_release = rmem_swiotlb_device_release,
1054};
1055
1056static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
1057{
1058 unsigned long node = rmem->fdt_node;
1059
1060 if (of_get_flat_dt_prop(node, "reusable", NULL) ||
1061 of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
1062 of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
1063 of_get_flat_dt_prop(node, "no-map", NULL))
1064 return -EINVAL;
1065
1066 if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
1067 pr_err("Restricted DMA pool must be accessible within the linear mapping.");
1068 return -EINVAL;
1069 }
1070
1071 rmem->ops = &rmem_swiotlb_ops;
1072 pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
1073 &rmem->base, (unsigned long)rmem->size / SZ_1M);
1074 return 0;
1075}
1076
1077RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
1078#endif /* CONFIG_DMA_RESTRICTED_POOL */
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Dynamic DMA mapping support.
4 *
5 * This implementation is a fallback for platforms that do not support
6 * I/O TLBs (aka DMA address translation hardware).
7 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
8 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
9 * Copyright (C) 2000, 2003 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
11 *
12 * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
13 * 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
14 * unnecessary i-cache flushing.
15 * 04/07/.. ak Better overflow handling. Assorted fixes.
16 * 05/09/10 linville Add support for syncing ranges, support syncing for
17 * DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
18 * 08/12/11 beckyb Add highmem support
19 */
20
21#define pr_fmt(fmt) "software IO TLB: " fmt
22
23#include <linux/cache.h>
24#include <linux/dma-direct.h>
25#include <linux/dma-noncoherent.h>
26#include <linux/mm.h>
27#include <linux/export.h>
28#include <linux/spinlock.h>
29#include <linux/string.h>
30#include <linux/swiotlb.h>
31#include <linux/pfn.h>
32#include <linux/types.h>
33#include <linux/ctype.h>
34#include <linux/highmem.h>
35#include <linux/gfp.h>
36#include <linux/scatterlist.h>
37#include <linux/mem_encrypt.h>
38#include <linux/set_memory.h>
39#ifdef CONFIG_DEBUG_FS
40#include <linux/debugfs.h>
41#endif
42
43#include <asm/io.h>
44#include <asm/dma.h>
45
46#include <linux/init.h>
47#include <linux/memblock.h>
48#include <linux/iommu-helper.h>
49
50#define CREATE_TRACE_POINTS
51#include <trace/events/swiotlb.h>
52
53#define OFFSET(val,align) ((unsigned long) \
54 ( (val) & ( (align) - 1)))
55
56#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
57
58/*
59 * Minimum IO TLB size to bother booting with. Systems with mainly
60 * 64bit capable cards will only lightly use the swiotlb. If we can't
61 * allocate a contiguous 1MB, we're probably in trouble anyway.
62 */
63#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
64
65enum swiotlb_force swiotlb_force;
66
67/*
68 * Used to do a quick range check in swiotlb_tbl_unmap_single and
69 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
70 * API.
71 */
72phys_addr_t io_tlb_start, io_tlb_end;
73
74/*
75 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
76 * io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
77 */
78static unsigned long io_tlb_nslabs;
79
80/*
81 * The number of used IO TLB block
82 */
83static unsigned long io_tlb_used;
84
85/*
86 * This is a free list describing the number of free entries available from
87 * each index
88 */
89static unsigned int *io_tlb_list;
90static unsigned int io_tlb_index;
91
92/*
93 * Max segment that we can provide which (if pages are contingous) will
94 * not be bounced (unless SWIOTLB_FORCE is set).
95 */
96unsigned int max_segment;
97
98/*
99 * We need to save away the original address corresponding to a mapped entry
100 * for the sync operations.
101 */
102#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
103static phys_addr_t *io_tlb_orig_addr;
104
105/*
106 * Protect the above data structures in the map and unmap calls
107 */
108static DEFINE_SPINLOCK(io_tlb_lock);
109
110static int late_alloc;
111
112static int __init
113setup_io_tlb_npages(char *str)
114{
115 if (isdigit(*str)) {
116 io_tlb_nslabs = simple_strtoul(str, &str, 0);
117 /* avoid tail segment of size < IO_TLB_SEGSIZE */
118 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
119 }
120 if (*str == ',')
121 ++str;
122 if (!strcmp(str, "force")) {
123 swiotlb_force = SWIOTLB_FORCE;
124 } else if (!strcmp(str, "noforce")) {
125 swiotlb_force = SWIOTLB_NO_FORCE;
126 io_tlb_nslabs = 1;
127 }
128
129 return 0;
130}
131early_param("swiotlb", setup_io_tlb_npages);
132
133static bool no_iotlb_memory;
134
135unsigned long swiotlb_nr_tbl(void)
136{
137 return unlikely(no_iotlb_memory) ? 0 : io_tlb_nslabs;
138}
139EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
140
141unsigned int swiotlb_max_segment(void)
142{
143 return unlikely(no_iotlb_memory) ? 0 : max_segment;
144}
145EXPORT_SYMBOL_GPL(swiotlb_max_segment);
146
147void swiotlb_set_max_segment(unsigned int val)
148{
149 if (swiotlb_force == SWIOTLB_FORCE)
150 max_segment = 1;
151 else
152 max_segment = rounddown(val, PAGE_SIZE);
153}
154
155/* default to 64MB */
156#define IO_TLB_DEFAULT_SIZE (64UL<<20)
157unsigned long swiotlb_size_or_default(void)
158{
159 unsigned long size;
160
161 size = io_tlb_nslabs << IO_TLB_SHIFT;
162
163 return size ? size : (IO_TLB_DEFAULT_SIZE);
164}
165
166void swiotlb_print_info(void)
167{
168 unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
169
170 if (no_iotlb_memory) {
171 pr_warn("No low mem\n");
172 return;
173 }
174
175 pr_info("mapped [mem %#010llx-%#010llx] (%luMB)\n",
176 (unsigned long long)io_tlb_start,
177 (unsigned long long)io_tlb_end,
178 bytes >> 20);
179}
180
181/*
182 * Early SWIOTLB allocation may be too early to allow an architecture to
183 * perform the desired operations. This function allows the architecture to
184 * call SWIOTLB when the operations are possible. It needs to be called
185 * before the SWIOTLB memory is used.
186 */
187void __init swiotlb_update_mem_attributes(void)
188{
189 void *vaddr;
190 unsigned long bytes;
191
192 if (no_iotlb_memory || late_alloc)
193 return;
194
195 vaddr = phys_to_virt(io_tlb_start);
196 bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
197 set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
198 memset(vaddr, 0, bytes);
199}
200
201int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
202{
203 unsigned long i, bytes;
204 size_t alloc_size;
205
206 bytes = nslabs << IO_TLB_SHIFT;
207
208 io_tlb_nslabs = nslabs;
209 io_tlb_start = __pa(tlb);
210 io_tlb_end = io_tlb_start + bytes;
211
212 /*
213 * Allocate and initialize the free list array. This array is used
214 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
215 * between io_tlb_start and io_tlb_end.
216 */
217 alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(int));
218 io_tlb_list = memblock_alloc(alloc_size, PAGE_SIZE);
219 if (!io_tlb_list)
220 panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
221 __func__, alloc_size, PAGE_SIZE);
222
223 alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t));
224 io_tlb_orig_addr = memblock_alloc(alloc_size, PAGE_SIZE);
225 if (!io_tlb_orig_addr)
226 panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
227 __func__, alloc_size, PAGE_SIZE);
228
229 for (i = 0; i < io_tlb_nslabs; i++) {
230 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
231 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
232 }
233 io_tlb_index = 0;
234
235 if (verbose)
236 swiotlb_print_info();
237
238 swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
239 return 0;
240}
241
242/*
243 * Statically reserve bounce buffer space and initialize bounce buffer data
244 * structures for the software IO TLB used to implement the DMA API.
245 */
246void __init
247swiotlb_init(int verbose)
248{
249 size_t default_size = IO_TLB_DEFAULT_SIZE;
250 unsigned char *vstart;
251 unsigned long bytes;
252
253 if (!io_tlb_nslabs) {
254 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
255 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
256 }
257
258 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
259
260 /* Get IO TLB memory from the low pages */
261 vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE);
262 if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
263 return;
264
265 if (io_tlb_start)
266 memblock_free_early(io_tlb_start,
267 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
268 pr_warn("Cannot allocate buffer");
269 no_iotlb_memory = true;
270}
271
272/*
273 * Systems with larger DMA zones (those that don't support ISA) can
274 * initialize the swiotlb later using the slab allocator if needed.
275 * This should be just like above, but with some error catching.
276 */
277int
278swiotlb_late_init_with_default_size(size_t default_size)
279{
280 unsigned long bytes, req_nslabs = io_tlb_nslabs;
281 unsigned char *vstart = NULL;
282 unsigned int order;
283 int rc = 0;
284
285 if (!io_tlb_nslabs) {
286 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
287 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
288 }
289
290 /*
291 * Get IO TLB memory from the low pages
292 */
293 order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
294 io_tlb_nslabs = SLABS_PER_PAGE << order;
295 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
296
297 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
298 vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
299 order);
300 if (vstart)
301 break;
302 order--;
303 }
304
305 if (!vstart) {
306 io_tlb_nslabs = req_nslabs;
307 return -ENOMEM;
308 }
309 if (order != get_order(bytes)) {
310 pr_warn("only able to allocate %ld MB\n",
311 (PAGE_SIZE << order) >> 20);
312 io_tlb_nslabs = SLABS_PER_PAGE << order;
313 }
314 rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
315 if (rc)
316 free_pages((unsigned long)vstart, order);
317
318 return rc;
319}
320
321static void swiotlb_cleanup(void)
322{
323 io_tlb_end = 0;
324 io_tlb_start = 0;
325 io_tlb_nslabs = 0;
326 max_segment = 0;
327}
328
329int
330swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
331{
332 unsigned long i, bytes;
333
334 bytes = nslabs << IO_TLB_SHIFT;
335
336 io_tlb_nslabs = nslabs;
337 io_tlb_start = virt_to_phys(tlb);
338 io_tlb_end = io_tlb_start + bytes;
339
340 set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
341 memset(tlb, 0, bytes);
342
343 /*
344 * Allocate and initialize the free list array. This array is used
345 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
346 * between io_tlb_start and io_tlb_end.
347 */
348 io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
349 get_order(io_tlb_nslabs * sizeof(int)));
350 if (!io_tlb_list)
351 goto cleanup3;
352
353 io_tlb_orig_addr = (phys_addr_t *)
354 __get_free_pages(GFP_KERNEL,
355 get_order(io_tlb_nslabs *
356 sizeof(phys_addr_t)));
357 if (!io_tlb_orig_addr)
358 goto cleanup4;
359
360 for (i = 0; i < io_tlb_nslabs; i++) {
361 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
362 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
363 }
364 io_tlb_index = 0;
365
366 swiotlb_print_info();
367
368 late_alloc = 1;
369
370 swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
371
372 return 0;
373
374cleanup4:
375 free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
376 sizeof(int)));
377 io_tlb_list = NULL;
378cleanup3:
379 swiotlb_cleanup();
380 return -ENOMEM;
381}
382
383void __init swiotlb_exit(void)
384{
385 if (!io_tlb_orig_addr)
386 return;
387
388 if (late_alloc) {
389 free_pages((unsigned long)io_tlb_orig_addr,
390 get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
391 free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
392 sizeof(int)));
393 free_pages((unsigned long)phys_to_virt(io_tlb_start),
394 get_order(io_tlb_nslabs << IO_TLB_SHIFT));
395 } else {
396 memblock_free_late(__pa(io_tlb_orig_addr),
397 PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
398 memblock_free_late(__pa(io_tlb_list),
399 PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
400 memblock_free_late(io_tlb_start,
401 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
402 }
403 swiotlb_cleanup();
404}
405
406/*
407 * Bounce: copy the swiotlb buffer from or back to the original dma location
408 */
409static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
410 size_t size, enum dma_data_direction dir)
411{
412 unsigned long pfn = PFN_DOWN(orig_addr);
413 unsigned char *vaddr = phys_to_virt(tlb_addr);
414
415 if (PageHighMem(pfn_to_page(pfn))) {
416 /* The buffer does not have a mapping. Map it in and copy */
417 unsigned int offset = orig_addr & ~PAGE_MASK;
418 char *buffer;
419 unsigned int sz = 0;
420 unsigned long flags;
421
422 while (size) {
423 sz = min_t(size_t, PAGE_SIZE - offset, size);
424
425 local_irq_save(flags);
426 buffer = kmap_atomic(pfn_to_page(pfn));
427 if (dir == DMA_TO_DEVICE)
428 memcpy(vaddr, buffer + offset, sz);
429 else
430 memcpy(buffer + offset, vaddr, sz);
431 kunmap_atomic(buffer);
432 local_irq_restore(flags);
433
434 size -= sz;
435 pfn++;
436 vaddr += sz;
437 offset = 0;
438 }
439 } else if (dir == DMA_TO_DEVICE) {
440 memcpy(vaddr, phys_to_virt(orig_addr), size);
441 } else {
442 memcpy(phys_to_virt(orig_addr), vaddr, size);
443 }
444}
445
446phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
447 dma_addr_t tbl_dma_addr,
448 phys_addr_t orig_addr,
449 size_t mapping_size,
450 size_t alloc_size,
451 enum dma_data_direction dir,
452 unsigned long attrs)
453{
454 unsigned long flags;
455 phys_addr_t tlb_addr;
456 unsigned int nslots, stride, index, wrap;
457 int i;
458 unsigned long mask;
459 unsigned long offset_slots;
460 unsigned long max_slots;
461 unsigned long tmp_io_tlb_used;
462
463 if (no_iotlb_memory)
464 panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
465
466 if (mem_encrypt_active())
467 pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
468
469 if (mapping_size > alloc_size) {
470 dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
471 mapping_size, alloc_size);
472 return (phys_addr_t)DMA_MAPPING_ERROR;
473 }
474
475 mask = dma_get_seg_boundary(hwdev);
476
477 tbl_dma_addr &= mask;
478
479 offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
480
481 /*
482 * Carefully handle integer overflow which can occur when mask == ~0UL.
483 */
484 max_slots = mask + 1
485 ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
486 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
487
488 /*
489 * For mappings greater than or equal to a page, we limit the stride
490 * (and hence alignment) to a page size.
491 */
492 nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
493 if (alloc_size >= PAGE_SIZE)
494 stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
495 else
496 stride = 1;
497
498 BUG_ON(!nslots);
499
500 /*
501 * Find suitable number of IO TLB entries size that will fit this
502 * request and allocate a buffer from that IO TLB pool.
503 */
504 spin_lock_irqsave(&io_tlb_lock, flags);
505
506 if (unlikely(nslots > io_tlb_nslabs - io_tlb_used))
507 goto not_found;
508
509 index = ALIGN(io_tlb_index, stride);
510 if (index >= io_tlb_nslabs)
511 index = 0;
512 wrap = index;
513
514 do {
515 while (iommu_is_span_boundary(index, nslots, offset_slots,
516 max_slots)) {
517 index += stride;
518 if (index >= io_tlb_nslabs)
519 index = 0;
520 if (index == wrap)
521 goto not_found;
522 }
523
524 /*
525 * If we find a slot that indicates we have 'nslots' number of
526 * contiguous buffers, we allocate the buffers from that slot
527 * and mark the entries as '0' indicating unavailable.
528 */
529 if (io_tlb_list[index] >= nslots) {
530 int count = 0;
531
532 for (i = index; i < (int) (index + nslots); i++)
533 io_tlb_list[i] = 0;
534 for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
535 io_tlb_list[i] = ++count;
536 tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
537
538 /*
539 * Update the indices to avoid searching in the next
540 * round.
541 */
542 io_tlb_index = ((index + nslots) < io_tlb_nslabs
543 ? (index + nslots) : 0);
544
545 goto found;
546 }
547 index += stride;
548 if (index >= io_tlb_nslabs)
549 index = 0;
550 } while (index != wrap);
551
552not_found:
553 tmp_io_tlb_used = io_tlb_used;
554
555 spin_unlock_irqrestore(&io_tlb_lock, flags);
556 if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
557 dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
558 alloc_size, io_tlb_nslabs, tmp_io_tlb_used);
559 return (phys_addr_t)DMA_MAPPING_ERROR;
560found:
561 io_tlb_used += nslots;
562 spin_unlock_irqrestore(&io_tlb_lock, flags);
563
564 /*
565 * Save away the mapping from the original address to the DMA address.
566 * This is needed when we sync the memory. Then we sync the buffer if
567 * needed.
568 */
569 for (i = 0; i < nslots; i++)
570 io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
571 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
572 (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
573 swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
574
575 return tlb_addr;
576}
577
578/*
579 * tlb_addr is the physical address of the bounce buffer to unmap.
580 */
581void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
582 size_t mapping_size, size_t alloc_size,
583 enum dma_data_direction dir, unsigned long attrs)
584{
585 unsigned long flags;
586 int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
587 int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
588 phys_addr_t orig_addr = io_tlb_orig_addr[index];
589
590 /*
591 * First, sync the memory before unmapping the entry
592 */
593 if (orig_addr != INVALID_PHYS_ADDR &&
594 !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
595 ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
596 swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);
597
598 /*
599 * Return the buffer to the free list by setting the corresponding
600 * entries to indicate the number of contiguous entries available.
601 * While returning the entries to the free list, we merge the entries
602 * with slots below and above the pool being returned.
603 */
604 spin_lock_irqsave(&io_tlb_lock, flags);
605 {
606 count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
607 io_tlb_list[index + nslots] : 0);
608 /*
609 * Step 1: return the slots to the free list, merging the
610 * slots with superceeding slots
611 */
612 for (i = index + nslots - 1; i >= index; i--) {
613 io_tlb_list[i] = ++count;
614 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
615 }
616 /*
617 * Step 2: merge the returned slots with the preceding slots,
618 * if available (non zero)
619 */
620 for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
621 io_tlb_list[i] = ++count;
622
623 io_tlb_used -= nslots;
624 }
625 spin_unlock_irqrestore(&io_tlb_lock, flags);
626}
627
628void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
629 size_t size, enum dma_data_direction dir,
630 enum dma_sync_target target)
631{
632 int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
633 phys_addr_t orig_addr = io_tlb_orig_addr[index];
634
635 if (orig_addr == INVALID_PHYS_ADDR)
636 return;
637 orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
638
639 switch (target) {
640 case SYNC_FOR_CPU:
641 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
642 swiotlb_bounce(orig_addr, tlb_addr,
643 size, DMA_FROM_DEVICE);
644 else
645 BUG_ON(dir != DMA_TO_DEVICE);
646 break;
647 case SYNC_FOR_DEVICE:
648 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
649 swiotlb_bounce(orig_addr, tlb_addr,
650 size, DMA_TO_DEVICE);
651 else
652 BUG_ON(dir != DMA_FROM_DEVICE);
653 break;
654 default:
655 BUG();
656 }
657}
658
659/*
660 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
661 * to the device copy the data into it as well.
662 */
663dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
664 enum dma_data_direction dir, unsigned long attrs)
665{
666 phys_addr_t swiotlb_addr;
667 dma_addr_t dma_addr;
668
669 trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
670 swiotlb_force);
671
672 swiotlb_addr = swiotlb_tbl_map_single(dev,
673 __phys_to_dma(dev, io_tlb_start),
674 paddr, size, size, dir, attrs);
675 if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
676 return DMA_MAPPING_ERROR;
677
678 /* Ensure that the address returned is DMA'ble */
679 dma_addr = __phys_to_dma(dev, swiotlb_addr);
680 if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
681 swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir,
682 attrs | DMA_ATTR_SKIP_CPU_SYNC);
683 dev_WARN_ONCE(dev, 1,
684 "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
685 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
686 return DMA_MAPPING_ERROR;
687 }
688
689 if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
690 arch_sync_dma_for_device(swiotlb_addr, size, dir);
691 return dma_addr;
692}
693
694size_t swiotlb_max_mapping_size(struct device *dev)
695{
696 return ((size_t)1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
697}
698
699bool is_swiotlb_active(void)
700{
701 /*
702 * When SWIOTLB is initialized, even if io_tlb_start points to physical
703 * address zero, io_tlb_end surely doesn't.
704 */
705 return io_tlb_end != 0;
706}
707
708#ifdef CONFIG_DEBUG_FS
709
710static int __init swiotlb_create_debugfs(void)
711{
712 struct dentry *root;
713
714 root = debugfs_create_dir("swiotlb", NULL);
715 debugfs_create_ulong("io_tlb_nslabs", 0400, root, &io_tlb_nslabs);
716 debugfs_create_ulong("io_tlb_used", 0400, root, &io_tlb_used);
717 return 0;
718}
719
720late_initcall(swiotlb_create_debugfs);
721
722#endif