// SPDX-License-Identifier: GPL-2.0-only
#include <linux/cpu.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

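/*
 * Pick the GFP zone for the swiotlb bounce buffer: if any RAM sits below
 * 4GB, allocate from ZONE_DMA32 (or ZONE_DMA when DMA32 is not configured)
 * so the buffer stays reachable by devices with 32-bit DMA limits;
 * otherwise plain GFP_KERNEL is fine.
 */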
static gfp_t xen_swiotlb_gfp(void)
{
	phys_addr_t base;
	u64 i;

	for_each_mem_range(i, &base, NULL) {
		if (base < (phys_addr_t)0xffffffff) {
			if (IS_ENABLED(CONFIG_ZONE_DMA32))
				return __GFP_DMA32;
			return __GFP_DMA;
		}
	}

	return GFP_KERNEL;
}

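/* set at boot by xen_mm_init() if Xen implements GNTTABOP_cache_flush */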
static bool hypercall_cflush = false;

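/*
 * Issue GNTTABOP_cache_flush for each Xen page covered by the range
 * [handle, handle + size), translating the dma address back to a physical
 * address for the hypervisor.
 */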
/* buffers in highmem or foreign pages cannot cross page boundaries */
static void dma_cache_maint(struct device *dev, dma_addr_t handle,
			    size_t size, u32 op)
{
	struct gnttab_cache_flush cflush;

	cflush.offset = xen_offset_in_page(handle);
	cflush.op = op;
	handle &= XEN_PAGE_MASK;

	do {
		cflush.a.dev_bus_addr = dma_to_phys(dev, handle);

		if (size + cflush.offset > XEN_PAGE_SIZE)
			cflush.length = XEN_PAGE_SIZE - cflush.offset;
		else
			cflush.length = size;

		HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		cflush.offset = 0;
		handle += cflush.length;
		size -= cflush.length;
	} while (size);
}

/*
 * Dom0 is mapped 1:1, and while the Linux page can span across multiple Xen
 * pages, it is not possible for it to contain a mix of local and foreign Xen
 * pages. Calling pfn_valid on a foreign mfn will always return false, so if
 * pfn_valid returns true the page is local and we can use the native
 * dma-direct functions, otherwise we call the Xen-specific version.
 */
void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
			  size_t size, enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
}

void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
			     size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE)
		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
	else
		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_CLEAN);
}

bool xen_arch_need_swiotlb(struct device *dev,
			   phys_addr_t phys,
			   dma_addr_t dev_addr)
{
	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
	unsigned int bfn = XEN_PFN_DOWN(dma_to_phys(dev, dev_addr));

	/*
	 * The swiotlb buffer should be used if
	 *	- Xen doesn't have the cache flush hypercall
	 *	- The Linux page refers to foreign memory
	 *	- The device doesn't support coherent DMA requests
	 *
	 * The Linux page may span multiple Xen pages, although it is not
	 * possible to have a mix of local and foreign Xen pages. Furthermore,
	 * range_straddles_page_boundary already checks whether the buffer is
	 * physically contiguous in host RAM.
	 *
	 * Therefore we only need to check the first Xen page to know if we
	 * require a bounce buffer because the device doesn't support coherent
	 * memory and we are not able to flush the cache.
	 */
	return (!hypercall_cflush && (xen_pfn != bfn) &&
		!dev_is_dma_coherent(dev));
}

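/*
 * Sketch of how the check above is consumed (assumed, not part of this
 * file): drivers/xen/swiotlb-xen.c decides roughly as follows when mapping
 * a page, bouncing only when none of the fast-path conditions hold:
 *
 *	if (dma_capable(dev, dev_addr, size, true) &&
 *	    !range_straddles_page_boundary(phys, size) &&
 *	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
 *	    !is_swiotlb_force_bounce(dev))
 *		goto done;
 */

/*
 * Dom0 late init: set up the swiotlb bounce buffer in a DMA-reachable zone
 * and probe whether Xen implements the cache flush hypercall.
 */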
static int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;
	int rc;

	if (!xen_swiotlb_detect())
		return 0;

	/* we can work with the default swiotlb */
	rc = swiotlb_init_late(swiotlb_size_or_default(),
			       xen_swiotlb_gfp(), NULL);
	if (rc < 0)
		return rc;

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);
#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

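/*
 * Allocate pages for the swiotlb buffer, preferring ZONE_DMA when any RAM
 * lives below the 4GB boundary so the buffer stays addressable by devices
 * with 32-bit DMA limits.
 */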
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	struct memblock_region *reg;
	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;

	for_each_memblock(memory, reg) {
		if (reg->base < (phys_addr_t)0xffffffff) {
			flags |= __GFP_DMA;
			break;
		}
	}
	return __get_free_pages(flags, order);
}

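/*
 * Which cache maintenance a DMA transition needs: DMA_MAP is the
 * cpu-to-device direction (clean, or invalidate for DMA_FROM_DEVICE),
 * DMA_UNMAP the device-to-cpu direction (invalidate unless DMA_TO_DEVICE).
 */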
enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */

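/*
 * Walk the range one Xen page at a time, issuing a GNTTAB_CACHE_CLEAN or
 * GNTTAB_CACHE_INVAL hypercall for each piece as dictated by @dir and @op.
 */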
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	struct gnttab_cache_flush cflush;
	unsigned long xen_pfn;
	size_t left = size;

	xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
	offset %= XEN_PAGE_SIZE;

	do {
		size_t len = left;

		/* buffers in highmem or foreign pages cannot cross page
		 * boundaries */
		if (len + offset > XEN_PAGE_SIZE)
			len = XEN_PAGE_SIZE - offset;

		cflush.op = 0;
		cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
		cflush.offset = offset;
		cflush.length = len;

		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
			cflush.op = GNTTAB_CACHE_INVAL;
		if (op == DMA_MAP) {
			if (dir == DMA_FROM_DEVICE)
				cflush.op = GNTTAB_CACHE_INVAL;
			else
				cflush.op = GNTTAB_CACHE_CLEAN;
		}
		if (cflush.op)
			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		offset = 0;
		xen_pfn++;
		left -= len;
	} while (left);
}

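/*
 * Split the dma address into a page-aligned base and offset before handing
 * it to dma_cache_maint for the device-to-cpu and cpu-to-device directions.
 */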
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

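/*
 * Map/unmap hooks invoked by the swiotlb-xen glue: cache maintenance is
 * skipped entirely for coherent devices and when the caller passes
 * DMA_ATTR_SKIP_CPU_SYNC.
 */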
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, unsigned long attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

bool xen_arch_need_swiotlb(struct device *dev,
			   phys_addr_t phys,
			   dma_addr_t dev_addr)
{
	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
	unsigned int bfn = XEN_PFN_DOWN(dev_addr);

	/*
	 * The swiotlb buffer should be used if
	 *	- Xen doesn't have the cache flush hypercall
	 *	- The Linux page refers to foreign memory
	 *	- The device doesn't support coherent DMA requests
	 *
	 * The Linux page may span multiple Xen pages, although it is not
	 * possible to have a mix of local and foreign Xen pages. Furthermore,
	 * range_straddles_page_boundary already checks whether the buffer is
	 * physically contiguous in host RAM.
	 *
	 * Therefore we only need to check the first Xen page to know if we
	 * require a bounce buffer because the device doesn't support coherent
	 * memory and we are not able to flush the cache.
	 */
	return (!hypercall_cflush && (xen_pfn != bfn) &&
		!is_device_dma_coherent(dev));
}

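/*
 * On Arm, Dom0 is mapped 1:1, so the region is already bus-contiguous and
 * no exchange with the hypervisor is needed: the physical address doubles
 * as the DMA handle.
 */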
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

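/*
 * Published DMA ops pointer; when running as Dom0 the arch dma-mapping glue
 * is expected to pick this up (an assumption about how xen_dma_ops is
 * consumed elsewhere in the tree).
 */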
struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);

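/* swiotlb-backed implementation installed for Dom0 by xen_mm_init() */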
static struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.set_dma_mask = xen_swiotlb_set_dma_mask,
};

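/*
 * Dom0 init: bring up the Xen swiotlb, install the Xen DMA ops, and probe
 * for the cache flush hypercall by issuing a zero-length flush.
 */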
int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;
	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);