v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
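To make the PFN/MFN split above concrete: a guest physical address is a frame number plus an in-page offset, and only the frame number goes through the lookup. A minimal sketch of that translation, with a made-up three-entry table standing in for the real P2M machinery (fake_p2m and toy_phys_to_machine are illustrative names, not kernel interfaces):

/* Illustrative only: fake_p2m[] stands in for the real PFN->MFN lookup. */
static const unsigned long fake_p2m[] = { 0x9000, 0x4000, 0x7000 };

static unsigned long toy_phys_to_machine(unsigned long paddr)
{
	unsigned long pfn    = paddr >> 12;	/* 4 KiB frames assumed */
	unsigned long offset = paddr & 0xfff;	/* in-page offset is kept */

	/* PFN 0 maps to MFN 0x9000, PFN 1 to 0x4000: no contiguity. */
	return (fake_p2m[pfn] << 12) | offset;
}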

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>

#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32
 42
 43/*
 44 * Quick lookup value of the bus address of the IOTLB.
 45 */
 46
 47static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
 48{
 49	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
 50	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;
 51
 52	baddr |= paddr & ~XEN_PAGE_MASK;
 53	return baddr;
 54}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
					  phys_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
			    (baddr & ~XEN_PAGE_MASK);

	return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
					  dma_addr_t dma_addr)
{
	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}
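Composed, these helpers round-trip cleanly for frames owned by this domain, where bfn_to_pfn() inverts pfn_to_bfn(). A hedged sanity-check sketch (check_round_trip is illustrative, not part of the file):

static void check_round_trip(struct device *dev, phys_addr_t paddr)
{
	dma_addr_t handle = xen_phys_to_dma(dev, paddr);

	/* The identity holds only when bfn_to_pfn(pfn_to_bfn(x)) == x. */
	WARN_ON(xen_dma_to_phys(dev, handle) != paddr);
}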

static inline bool range_requires_alignment(phys_addr_t p, size_t size)
{
	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
	phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;

	return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}
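The arithmetic above is easiest to see with numbers. A worked example, assuming 4 KiB Xen pages (toy_buffer_is_dma_safe is an illustrative wrapper):

/*
 * p = 0x10800, size = 0x1000: xen_offset_in_page(p) = 0x800, so
 * nr_pages = XEN_PFN_UP(0x1800) = 2 and the buffer spans PFNs 0x10
 * and 0x11.  It is machine-contiguous only if
 * pfn_to_bfn(0x11) == pfn_to_bfn(0x10) + 1.
 */
static bool toy_buffer_is_dma_safe(phys_addr_t p, size_t size)
{
	return !range_straddles_page_boundary(p, size);
}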

static struct io_tlb_pool *xen_swiotlb_find_pool(struct device *dev,
						 dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/*
	 * If the address is outside our domain, it CAN have the same virtual
	 * address as another address in our domain. Therefore _only_ check
	 * addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr)))
		return swiotlb_find_pool(dev, paddr);
	return NULL;
}

#ifdef CONFIG_X86
int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
	int rc;
	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
	unsigned int i, dma_bits = order + PAGE_SHIFT;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
	BUG_ON(nslabs % IO_TLB_SEGSIZE);

	i = 0;
	do {
		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT), order,
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += IO_TLB_SEGSIZE;
	} while (i < nslabs);
	return 0;
}
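The inner loop retries the exchange hypercall while relaxing the addressability constraint one bit at a time, up to MAX_DMA_BITS. The same widening-retry pattern in isolation, with a hypothetical stub in place of xen_create_contiguous_region():

/* Hypothetical stub: pretend frames become available under 1 << 24. */
static int try_exchange(unsigned int dma_bits)
{
	return dma_bits >= 24 ? 0 : -ENOMEM;
}

static int widen_until_success(unsigned int order)
{
	unsigned int dma_bits = order + PAGE_SHIFT;
	int rc;

	do {
		rc = try_exchange(dma_bits);
	} while (rc && dma_bits++ < MAX_DMA_BITS);
	return rc;	/* 0 on success, last error if even 32 bits failed */
}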

static void *
xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	u64 dma_mask = dev->coherent_dma_mask;
	int order = get_order(size);
	phys_addr_t phys;
	void *ret;

	/* Align the allocation to the Xen page size */
	size = ALIGN(size, XEN_PAGE_SIZE);

	ret = (void *)__get_free_pages(flags, get_order(size));
	if (!ret)
		return ret;
	phys = virt_to_phys(ret);

	*dma_handle = xen_phys_to_dma(dev, phys);
	if (*dma_handle + size - 1 > dma_mask ||
	    range_straddles_page_boundary(phys, size) ||
	    range_requires_alignment(phys, size)) {
		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
				dma_handle) != 0)
			goto out_free_pages;
		SetPageXenRemapped(virt_to_page(ret));
	}

	memset(ret, 0, size);
	return ret;

out_free_pages:
	free_pages((unsigned long)ret, get_order(size));
	return NULL;
}
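Callers never see any of this directly: a driver's dma_alloc_coherent() lands here through the ops table at the end of the file. A hedged sketch of such a caller (alloc_ring and the 4 KiB ring size are hypothetical):

/* Hypothetical driver code: allocate a DMA-coherent descriptor ring. */
static void *alloc_ring(struct pci_dev *pdev, dma_addr_t *ring_dma)
{
	/* Dispatches to xen_swiotlb_alloc_coherent() on x86 PV guests. */
	return dma_alloc_coherent(&pdev->dev, SZ_4K, ring_dma, GFP_KERNEL);
}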

static void
xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t phys = virt_to_phys(vaddr);
	int order = get_order(size);

	/* Convert the size to actually allocated. */
	size = ALIGN(size, XEN_PAGE_SIZE);

	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
			 range_requires_alignment(phys, size)))
		return;

	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
		xen_destroy_contiguous_region(phys, order);
	free_pages((unsigned long)vaddr, get_order(size));
}
#endif /* CONFIG_X86 */

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_sync_single_for_cpu is
 * performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    !is_swiotlb_force_bounce(dev))
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size);

	map = swiotlb_tbl_map_single(dev, phys, size, 0, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_dma(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble.
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		__swiotlb_tbl_unmap_single(dev, map, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC,
				swiotlb_find_pool(dev, map));
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
			arch_sync_dma_for_device(phys, size, dir);
		else
			xen_dma_sync_for_device(dev, dev_addr, size, dir);
	}
	return dev_addr;
}
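From the driver side the bounce decision is invisible: dma_map_page() either returns a directly usable machine address or the address of a bounce slot. A hedged usage sketch (map_for_device is an illustrative name):

/* Hypothetical driver code: map one page for device reads. */
static dma_addr_t map_for_device(struct device *dev, struct page *page)
{
	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE,
					 DMA_TO_DEVICE);

	/* DMA_MAPPING_ERROR here usually means the bounce pool is full. */
	if (dma_mapping_error(dev, handle))
		return DMA_MAPPING_ERROR;
	return handle;
}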

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
	struct io_tlb_pool *pool;

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	pool = xen_swiotlb_find_pool(hwdev, dev_addr);
	if (pool)
		__swiotlb_tbl_unmap_single(hwdev, paddr, size, dir,
					   attrs, pool);
}

static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
	struct io_tlb_pool *pool;

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}

	pool = xen_swiotlb_find_pool(dev, dma_addr);
	if (pool)
		__swiotlb_sync_single_for_cpu(dev, paddr, size, dir, pool);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
	struct io_tlb_pool *pool;

	pool = xen_swiotlb_find_pool(dev, dma_addr);
	if (pool)
		__swiotlb_sync_single_for_device(dev, paddr, size, dir, pool);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_device(paddr, size, dir);
		else
			xen_dma_sync_for_device(dev, dma_addr, size, dir);
	}
}

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);
}

static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return -EIO;
}
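Note there is no attempt to merge entries: each scatterlist element is mapped (and possibly bounced) on its own. A hedged caller sketch (map_table is illustrative):

/* Hypothetical driver code: map, program, then unmap a scatterlist. */
static int map_table(struct device *dev, struct scatterlist *sgl, int n)
{
	int count = dma_map_sg(dev, sgl, n, DMA_BIDIRECTIONAL);

	if (!count)
		return -EIO;
	/* ... hand sg_dma_address()/sg_dma_len() of each entry to the HW ... */
	dma_unmap_sg(dev, sgl, n, DMA_BIDIRECTIONAL);
	return 0;
}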

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
				sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
				sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_phys_to_dma(hwdev, default_swiotlb_limit()) <= mask;
}
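Drivers reach this hook indirectly through dma_set_mask(); the test only asks whether the top of the bounce pool is reachable under the proposed mask, since anything unreachable can be bounced into the pool. A hedged probe-time sketch (negotiate_dma_mask is illustrative):

/* Hypothetical probe code for a device limited to 32-bit DMA. */
static int negotiate_dma_mask(struct device *dev)
{
	/* Ends up in xen_swiotlb_dma_supported() via the ops table below. */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}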

const struct dma_map_ops xen_swiotlb_dma_ops = {
#ifdef CONFIG_X86
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
#else
	.alloc = dma_direct_alloc,
	.free = dma_direct_free,
#endif
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages_op = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
	.max_mapping_size = swiotlb_max_mapping_size,
};
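The table is attached per device by arch code, which differs between x86 and Arm; a minimal sketch of the wiring, assuming the generic set_dma_ops() helper is appropriate for the platform (use_xen_swiotlb is illustrative):

/* Illustrative only: attach the Xen swiotlb ops to a passed-through dev. */
static void use_xen_swiotlb(struct device *dev)
{
	set_dma_ops(dev, &xen_swiotlb_dma_ops);
}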
v6.9.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>

#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32

/*
 * Quick lookup of the bus (machine) address that corresponds to a guest
 * physical address.
 */

static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

	baddr |= paddr & ~XEN_PAGE_MASK;
	return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
					  phys_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
			    (baddr & ~XEN_PAGE_MASK);

	return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
					  dma_addr_t dma_addr)
{
	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}

static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/*
	 * If the address is outside our domain, it CAN have the same virtual
	 * address as another address in our domain. Therefore _only_ check
	 * addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr)))
		return is_swiotlb_buffer(dev, paddr);
	return 0;
}

#ifdef CONFIG_X86
int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
	int rc;
	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
	unsigned int i, dma_bits = order + PAGE_SHIFT;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
	BUG_ON(nslabs % IO_TLB_SEGSIZE);

	i = 0;
	do {
		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT), order,
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += IO_TLB_SEGSIZE;
	} while (i < nslabs);
	return 0;
}

static void *
xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	u64 dma_mask = dev->coherent_dma_mask;
	int order = get_order(size);
	phys_addr_t phys;
	void *ret;

	/* Align the allocation to the Xen page size */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	ret = (void *)__get_free_pages(flags, get_order(size));
	if (!ret)
		return ret;
	phys = virt_to_phys(ret);

	*dma_handle = xen_phys_to_dma(dev, phys);
	if (*dma_handle + size - 1 > dma_mask ||
	    range_straddles_page_boundary(phys, size)) {
		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
				dma_handle) != 0)
			goto out_free_pages;
		SetPageXenRemapped(virt_to_page(ret));
	}

	memset(ret, 0, size);
	return ret;

out_free_pages:
	free_pages((unsigned long)ret, get_order(size));
	return NULL;
}

static void
xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t phys = virt_to_phys(vaddr);
	int order = get_order(size);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
		return;

	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
		xen_destroy_contiguous_region(phys, order);
	free_pages((unsigned long)vaddr, get_order(size));
}
#endif /* CONFIG_X86 */

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_sync_single_for_cpu is
 * performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    !is_swiotlb_force_bounce(dev))
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size);

	map = swiotlb_tbl_map_single(dev, phys, size, size, 0, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_dma(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble.
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, map, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
			arch_sync_dma_for_device(phys, size, dir);
		else
			xen_dma_sync_for_device(dev, dev_addr, size, dir);
	}
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
}

static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_device(paddr, size, dir);
		else
			xen_dma_sync_for_device(dev, dma_addr, size, dir);
	}
}

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);
}

static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return -EIO;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
				sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
				sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_phys_to_dma(hwdev, default_swiotlb_limit()) <= mask;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
#ifdef CONFIG_X86
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
#else
	.alloc = dma_direct_alloc,
	.free = dma_direct_free,
#endif
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
	.max_mapping_size = swiotlb_max_mapping_size,
};