// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa, and
 * also a mechanism to provide contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

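/*
 * Address translation helpers. The frame number is cast to the target
 * address type before shifting, so no high bits are lost when unsigned
 * long is narrower than the address type.
 */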
static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

	baddr |= paddr & ~XEN_PAGE_MASK;
	return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
					  phys_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
			    (baddr & ~XEN_PAGE_MASK);

	return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
					  dma_addr_t dma_addr)
{
	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}

static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
{
	return xen_phys_to_dma(dev, virt_to_phys(address));
}

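/*
 * Return 1 if the buffer spans multiple XEN_PAGEs whose backing machine
 * frames are not contiguous, i.e. the range cannot be handed to a device
 * as a single DMA segment.
 */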
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}

static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

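/*
 * Exchange the pseudo-physical pages backing the buffer for machine pages
 * that are contiguous and addressable within dma_bits, one IO_TLB_SEGSIZE
 * chunk at a time. If Xen cannot satisfy the current restriction, the
 * address limit is widened, up to MAX_DMA_BITS.
 */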
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
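/*
 * Pick the default of 64MB worth of IO TLB slabs when no slab count was
 * passed in; returns the resulting buffer size in bytes.
 */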
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}
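/*
 * Allocate (or adopt an already allocated) IO TLB, exchange it for memory
 * below 4GB via xen_swiotlb_fixup(), and hand it to the core swiotlb code.
 * On failure the requested size is halved (down to a 2MB minimum) and the
 * whole sequence is retried a few times before giving up.
 */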
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);

	/*
	 * IO TLB memory already allocated. Just use it.
	 */
	if (io_tlb_start != 0) {
		xen_io_tlb_start = phys_to_virt(io_tlb_start);
		goto end;
	}

	/*
	 * Get IO TLB memory from any location.
	 */
	if (early) {
		xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
						  PAGE_SIZE);
		if (!xen_io_tlb_start)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
	} else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			memblock_free(__pa(xen_io_tlb_start),
				      PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

end:
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}

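/*
 * A coherent allocation must be machine-contiguous and fall within the
 * device's coherent DMA mask. If the pages Xen handed us do not qualify,
 * exchange them via xen_create_contiguous_region() and mark the page
 * PageXenRemapped so the free path knows to undo the exchange.
 */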
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout. We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the dma address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = dma_to_phys(hwdev, *dma_handle);
	dev_addr = xen_phys_to_dma(hwdev, phys);
	if (((dev_addr + size - 1 <= dma_mask)) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
		*dma_handle = phys_to_dma(hwdev, *dma_handle);
		SetPageXenRemapped(virt_to_page(ret));
	}
	memset(ret, 0, size);
	return ret;
}

static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);
	struct page *page;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* Do not use virt_to_phys because on ARM it doesn't return the
	 * physical address. */
	phys = xen_dma_to_phys(hwdev, dev_addr);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (is_vmalloc_addr(vaddr))
		page = vmalloc_to_page(vaddr);
	else
		page = virt_to_page(vaddr);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)) &&
	    TestClearPageXenRemapped(page))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
				attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    swiotlb_force != SWIOTLB_FORCE)
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, virt_to_phys(xen_io_tlb_start),
				     phys, size, size, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_dma(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
			arch_sync_dma_for_device(phys, size, dir);
		else
			xen_dma_sync_for_device(dev, dev_addr, size, dir);
	}
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}

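/*
 * The sync helpers keep the CPU caches and, when the buffer was bounced
 * through the swiotlb, the bounce copy coherent with the device around a
 * transfer.
 */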
static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_device(paddr, size, dir);
		else
			xen_dma_sync_for_device(dev, dma_addr, size, dir);
	}
}

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);

}

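/*
 * Map each scatterlist entry with xen_swiotlb_map_page(). On failure, undo
 * the entries mapped so far and return 0, as the DMA API expects.
 */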
static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return 0;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
						sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
						   sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
};