v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  Copyright 2010
  4 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
  5 *
  6 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
  7 *
  8 * PV guests under Xen are running in a non-contiguous memory architecture.
  9 *
 10 * When PCI pass-through is utilized, this necessitates an IOMMU for
 11 * translating bus (DMA) to virtual and vice-versa and also providing a
 12 * mechanism to have contiguous pages for device drivers operations (say DMA
 13 * operations).
 14 *
 15 * Specifically, under Xen the Linux idea of pages is an illusion. It
 16 * assumes that pages start at zero and go up to the available memory. To
 17 * help with that, the Linux Xen MMU provides a lookup mechanism to
 18 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 19 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 20 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 21 * from different pools, which means there is no guarantee that PFN==MFN
 22 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 23 * allocated in descending order (high to low), meaning the guest might
 24 * never get any MFNs under the 4GB mark.
 25 */
 26
 27#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 28
 29#include <linux/memblock.h>
 30#include <linux/dma-direct.h>
 31#include <linux/dma-map-ops.h>
 32#include <linux/export.h>
 33#include <xen/swiotlb-xen.h>
 34#include <xen/page.h>
 35#include <xen/xen-ops.h>
 36#include <xen/hvc-console.h>
 37
 38#include <asm/dma-mapping.h>
 39
 40#include <trace/events/swiotlb.h>
 41#define MAX_DMA_BITS 32
 42
 43/*
 44 * Quick lookup value of the bus address of the IOTLB.
 45 */
 46
 47static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
 48{
 49	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
 50	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;
 51
 52	baddr |= paddr & ~XEN_PAGE_MASK;
 53	return baddr;
 54}
 55
 56static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
 57{
 58	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
 59}
 60
 61static inline phys_addr_t xen_bus_to_phys(struct device *dev,
 62					  phys_addr_t baddr)
 63{
 64	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
 65	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
 66			    (baddr & ~XEN_PAGE_MASK);
 67
 68	return paddr;
 69}
 70
 71static inline phys_addr_t xen_dma_to_phys(struct device *dev,
 72					  dma_addr_t dma_addr)
 73{
 74	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
 75}
 76
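/*
 * An allocation can be naturally aligned in pseudo-physical space yet lose
 * that alignment once translated to machine (bus) addresses; detect that
 * case so callers know the buffer has to be exchanged for a properly
 * aligned contiguous region before it is handed to a device.
 */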
 77static inline bool range_requires_alignment(phys_addr_t p, size_t size)
 78{
 79	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
 80	phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
 81
 82	return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
 83}
 84
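/*
 * Return 1 if the machine frames backing [p, p + size) are not contiguous,
 * i.e. the buffer crosses a Xen page boundary where PFN + 1 does not map
 * to MFN + 1 and therefore cannot be used for a single DMA transfer as-is.
 */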
 85static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 86{
 87	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
 88	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
 89
 90	next_bfn = pfn_to_bfn(xen_pfn);
 91
 92	for (i = 1; i < nr_pages; i++)
 93		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
 94			return 1;
 95
 96	return 0;
 97}
 98
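/*
 * Find the swiotlb pool a bounced DMA address belongs to, or NULL if the
 * address does not translate to a local page backed by a swiotlb pool.
 */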
 99static struct io_tlb_pool *xen_swiotlb_find_pool(struct device *dev,
100						 dma_addr_t dma_addr)
101{
102	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
103	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
104	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;
105
106	/* If the address is outside our domain, it CAN
107	 * have the same virtual address as another address
108	 * in our domain. Therefore _only_ check address within our domain.
109	 */
110	if (pfn_valid(PFN_DOWN(paddr)))
111		return swiotlb_find_pool(dev, paddr);
112	return NULL;
113}
114
115#ifdef CONFIG_X86
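/*
 * Exchange the pages backing the swiotlb buffer for machine-contiguous
 * memory the device can reach, one IO_TLB_SEGSIZE-sized chunk at a time,
 * widening the requested DMA address width up to MAX_DMA_BITS whenever the
 * hypervisor cannot satisfy the request.
 */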
116int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
117{
118	int rc;
119	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
120	unsigned int i, dma_bits = order + PAGE_SHIFT;
121	dma_addr_t dma_handle;
122	phys_addr_t p = virt_to_phys(buf);
123
124	BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
125	BUG_ON(nslabs % IO_TLB_SEGSIZE);
126
127	i = 0;
128	do {
129		do {
130			rc = xen_create_contiguous_region(
131				p + (i << IO_TLB_SHIFT), order,
132				dma_bits, &dma_handle);
133		} while (rc && dma_bits++ < MAX_DMA_BITS);
134		if (rc)
135			return rc;
136
137		i += IO_TLB_SEGSIZE;
138	} while (i < nslabs);
139	return 0;
140}
141
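/*
 * Coherent allocations must be machine-contiguous and reachable through the
 * device's coherent DMA mask.  When the freshly allocated pages do not meet
 * those constraints, exchange them for a contiguous region below the mask
 * and mark the page so the remapping can be undone on free.
 */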
142static void *
143xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
144		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
145{
146	u64 dma_mask = dev->coherent_dma_mask;
147	int order = get_order(size);
148	phys_addr_t phys;
149	void *ret;
150
151	/* Align the allocation to the Xen page size */
152	size = ALIGN(size, XEN_PAGE_SIZE);
153
154	ret = (void *)__get_free_pages(flags, get_order(size));
155	if (!ret)
156		return ret;
157	phys = virt_to_phys(ret);
158
159	*dma_handle = xen_phys_to_dma(dev, phys);
160	if (*dma_handle + size - 1 > dma_mask ||
161	    range_straddles_page_boundary(phys, size) ||
162	    range_requires_alignment(phys, size)) {
163		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
164				dma_handle) != 0)
165			goto out_free_pages;
166		SetPageXenRemapped(virt_to_page(ret));
167	}
168
169	memset(ret, 0, size);
170	return ret;
171
172out_free_pages:
173	free_pages((unsigned long)ret, get_order(size));
174	return NULL;
175}
176
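/*
 * Undo xen_swiotlb_alloc_coherent(): tear down the contiguous region if one
 * was created for this buffer, then give the pages back to the allocator.
 */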
177static void
178xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
179		dma_addr_t dma_handle, unsigned long attrs)
180{
181	phys_addr_t phys = virt_to_phys(vaddr);
182	int order = get_order(size);
183
184	/* Convert the size to actually allocated. */
185	size = ALIGN(size, XEN_PAGE_SIZE);
186
187	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
188	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
189			 range_requires_alignment(phys, size)))
190	    	return;
191
192	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
193		xen_destroy_contiguous_region(phys, order);
194	free_pages((unsigned long)vaddr, get_order(size));
195}
196#endif /* CONFIG_X86 */
197
198/*
199 * Map a single buffer of the indicated size for DMA in streaming mode.  The
200 * physical address to use is returned.
201 *
202 * Once the device is given the dma address, the device owns this memory until
203 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
204 */
205static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
206				unsigned long offset, size_t size,
207				enum dma_data_direction dir,
208				unsigned long attrs)
209{
210	phys_addr_t map, phys = page_to_phys(page) + offset;
211	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
212
213	BUG_ON(dir == DMA_NONE);
214	/*
215	 * If the address happens to be in the device's DMA window,
216	 * we can safely return the device addr and not worry about bounce
217	 * buffering it.
218	 */
219	if (dma_capable(dev, dev_addr, size, true) &&
220	    !range_straddles_page_boundary(phys, size) &&
221		!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
222		!is_swiotlb_force_bounce(dev))
223		goto done;
224
225	/*
226	 * Oh well, have to allocate and map a bounce buffer.
227	 */
228	trace_swiotlb_bounced(dev, dev_addr, size);
229
230	map = swiotlb_tbl_map_single(dev, phys, size, 0, dir, attrs);
231	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
232		return DMA_MAPPING_ERROR;
233
234	phys = map;
235	dev_addr = xen_phys_to_dma(dev, map);
236
237	/*
238	 * Ensure that the address returned is DMA'ble
239	 */
240	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
241		__swiotlb_tbl_unmap_single(dev, map, size, dir,
242				attrs | DMA_ATTR_SKIP_CPU_SYNC,
243				swiotlb_find_pool(dev, map));
244		return DMA_MAPPING_ERROR;
245	}
246
247done:
248	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
249		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
250			arch_sync_dma_for_device(phys, size, dir);
251		else
252			xen_dma_sync_for_device(dev, dev_addr, size, dir);
253	}
254	return dev_addr;
255}
256
257/*
258 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
259 * match what was provided for in a previous xen_swiotlb_map_page call.  All
260 * other usages are undefined.
261 *
262 * After this call, reads by the cpu to the buffer are guaranteed to see
263 * whatever the device wrote there.
264 */
265static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
266		size_t size, enum dma_data_direction dir, unsigned long attrs)
267{
268	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
269	struct io_tlb_pool *pool;
270
271	BUG_ON(dir == DMA_NONE);
272
273	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
274		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
275			arch_sync_dma_for_cpu(paddr, size, dir);
276		else
277			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
278	}
279
280	/* NOTE: We use dev_addr here, not paddr! */
281	pool = xen_swiotlb_find_pool(hwdev, dev_addr);
282	if (pool)
283		__swiotlb_tbl_unmap_single(hwdev, paddr, size, dir,
284					   attrs, pool);
285}
286
287static void
288xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
289		size_t size, enum dma_data_direction dir)
290{
291	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
292	struct io_tlb_pool *pool;
293
294	if (!dev_is_dma_coherent(dev)) {
295		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
296			arch_sync_dma_for_cpu(paddr, size, dir);
297		else
298			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
299	}
300
301	pool = xen_swiotlb_find_pool(dev, dma_addr);
302	if (pool)
303		__swiotlb_sync_single_for_cpu(dev, paddr, size, dir, pool);
304}
305
306static void
307xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
308		size_t size, enum dma_data_direction dir)
309{
310	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
311	struct io_tlb_pool *pool;
312
313	pool = xen_swiotlb_find_pool(dev, dma_addr);
314	if (pool)
315		__swiotlb_sync_single_for_device(dev, paddr, size, dir, pool);
316
317	if (!dev_is_dma_coherent(dev)) {
318		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
319			arch_sync_dma_for_device(paddr, size, dir);
320		else
321			xen_dma_sync_for_device(dev, dma_addr, size, dir);
322	}
323}
324
325/*
326 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
327 * concerning calls here are the same as for swiotlb_unmap_page() above.
328 */
329static void
330xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
331		enum dma_data_direction dir, unsigned long attrs)
332{
333	struct scatterlist *sg;
334	int i;
335
336	BUG_ON(dir == DMA_NONE);
337
338	for_each_sg(sgl, sg, nelems, i)
339		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
340				dir, attrs);
341
342}
343
344static int
345xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
346		enum dma_data_direction dir, unsigned long attrs)
347{
348	struct scatterlist *sg;
349	int i;
350
351	BUG_ON(dir == DMA_NONE);
352
353	for_each_sg(sgl, sg, nelems, i) {
354		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
355				sg->offset, sg->length, dir, attrs);
356		if (sg->dma_address == DMA_MAPPING_ERROR)
357			goto out_unmap;
358		sg_dma_len(sg) = sg->length;
359	}
360
361	return nelems;
362out_unmap:
363	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
364	sg_dma_len(sgl) = 0;
365	return -EIO;
366}
367
368static void
369xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
370			    int nelems, enum dma_data_direction dir)
371{
372	struct scatterlist *sg;
373	int i;
374
375	for_each_sg(sgl, sg, nelems, i) {
376		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
377				sg->length, dir);
378	}
379}
380
381static void
382xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
383			       int nelems, enum dma_data_direction dir)
384{
385	struct scatterlist *sg;
386	int i;
387
388	for_each_sg(sgl, sg, nelems, i) {
389		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
390				sg->length, dir);
391	}
392}
393
394/*
395 * Return whether the given device DMA address mask can be supported
396 * properly.  For example, if your device can only drive the low 24-bits
397 * during bus mastering, then you would pass 0x00ffffff as the mask to
398 * this function.
399 */
400static int
401xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
402{
403	return xen_phys_to_dma(hwdev, default_swiotlb_limit()) <= mask;
404}
405
406const struct dma_map_ops xen_swiotlb_dma_ops = {
407#ifdef CONFIG_X86
408	.alloc = xen_swiotlb_alloc_coherent,
409	.free = xen_swiotlb_free_coherent,
410#else
411	.alloc = dma_direct_alloc,
412	.free = dma_direct_free,
413#endif
414	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
415	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
416	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
417	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
418	.map_sg = xen_swiotlb_map_sg,
419	.unmap_sg = xen_swiotlb_unmap_sg,
420	.map_page = xen_swiotlb_map_page,
421	.unmap_page = xen_swiotlb_unmap_page,
422	.dma_supported = xen_swiotlb_dma_supported,
423	.mmap = dma_common_mmap,
424	.get_sgtable = dma_common_get_sgtable,
425	.alloc_pages_op = dma_common_alloc_pages,
426	.free_pages = dma_common_free_pages,
427	.max_mapping_size = swiotlb_max_mapping_size,
428};
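For orientation, the sketch below shows how a driver typically reaches these ops once xen_swiotlb_dma_ops is installed as the device's dma_map_ops: the generic DMA API entry points dispatch to xen_swiotlb_map_page() and xen_swiotlb_unmap_page(). The helper name and flow are illustrative only, not taken from the kernel tree.

/* Hypothetical driver-side usage, for illustration only. */
#include <linux/dma-mapping.h>

static int example_dma_one_page(struct device *dev, struct page *page)
{
	dma_addr_t handle;

	/* Dispatches to xen_swiotlb_map_page() through xen_swiotlb_dma_ops. */
	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and wait for completion ... */

	/* Dispatches to xen_swiotlb_unmap_page(). */
	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}

The v5.4 listing that follows is the older implementation of the same code: it still keeps its own bounce-buffer bookkeeping (xen_io_tlb_start, xen_io_tlb_nslabs) and initialization path (xen_swiotlb_init), which the v6.13.7 version above no longer carries.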
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  Copyright 2010
  4 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
  5 *
  6 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
  7 *
  8 * PV guests under Xen are running in a non-contiguous memory architecture.
  9 *
 10 * When PCI pass-through is utilized, this necessitates an IOMMU for
 11 * translating bus (DMA) to virtual and vice-versa and also providing a
 12 * mechanism to have contiguous pages for device drivers operations (say DMA
 13 * operations).
 14 *
 15 * Specifically, under Xen the Linux idea of pages is an illusion. It
 16 * assumes that pages start at zero and go up to the available memory. To
 17 * help with that, the Linux Xen MMU provides a lookup mechanism to
 18 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 19 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 20 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 21 * from different pools, which means there is no guarantee that PFN==MFN
 22 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 23 * allocated in descending order (high to low), meaning the guest might
 24 * never get any MFNs under the 4GB mark.
 25 */
 26
 27#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 28
 29#include <linux/memblock.h>
 30#include <linux/dma-direct.h>
 31#include <linux/dma-noncoherent.h>
 32#include <linux/export.h>
 33#include <xen/swiotlb-xen.h>
 34#include <xen/page.h>
 35#include <xen/xen-ops.h>
 36#include <xen/hvc-console.h>
 37
 38#include <asm/dma-mapping.h>
 39#include <asm/xen/page-coherent.h>
 40
 41#include <trace/events/swiotlb.h>
 42#define MAX_DMA_BITS 32
 43/*
 44 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 45 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 46 * API.
 47 */
 48
 49static char *xen_io_tlb_start, *xen_io_tlb_end;
 50static unsigned long xen_io_tlb_nslabs;
 51/*
 52 * Quick lookup value of the bus address of the IOTLB.
 53 */
 54
 55static u64 start_dma_addr;
 56
 57/*
 58 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 59 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 60 * information if the shift is done before casting to 64bit.
 61 */
 62static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 63{
 64	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
 65	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;
 66
 67	dma |= paddr & ~XEN_PAGE_MASK;
 68
 69	return dma;
 70}
 71
 72static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 73{
 74	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
 75	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
 76	phys_addr_t paddr = dma;
 77
 78	paddr |= baddr & ~XEN_PAGE_MASK;
 79
 80	return paddr;
 81}
 82
 83static inline dma_addr_t xen_virt_to_bus(void *address)
 84{
 85	return xen_phys_to_bus(virt_to_phys(address));
 86}
 87
 88static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 89{
 90	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
 91	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
 92
 93	next_bfn = pfn_to_bfn(xen_pfn);
 94
 95	for (i = 1; i < nr_pages; i++)
 96		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
 97			return 1;
 98
 99	return 0;
100}
101
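/*
 * Check whether a bus address lies inside the Xen swiotlb bounce buffer.
 * Only addresses that translate back to a local PFN are considered, since
 * foreign pages can alias local ones and must never be treated as swiotlb
 * memory.
 */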
102static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
103{
104	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
105	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
106	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);
107
108	/* If the address is outside our domain, it CAN
109	 * have the same virtual address as another address
110	 * in our domain. Therefore _only_ check address within our domain.
111	 */
112	if (pfn_valid(PFN_DOWN(paddr))) {
113		return paddr >= virt_to_phys(xen_io_tlb_start) &&
114		       paddr < virt_to_phys(xen_io_tlb_end);
115	}
116	return 0;
117}
118
119static int
120xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
121{
122	int i, rc;
123	int dma_bits;
124	dma_addr_t dma_handle;
125	phys_addr_t p = virt_to_phys(buf);
126
127	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
128
129	i = 0;
130	do {
131		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);
132
133		do {
134			rc = xen_create_contiguous_region(
135				p + (i << IO_TLB_SHIFT),
136				get_order(slabs << IO_TLB_SHIFT),
137				dma_bits, &dma_handle);
138		} while (rc && dma_bits++ < MAX_DMA_BITS);
139		if (rc)
140			return rc;
141
142		i += slabs;
143	} while (i < nslabs);
144	return 0;
145}
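/*
 * Pick the number of swiotlb slabs: honour a caller-supplied value when one
 * is set, otherwise default to 64MB worth of slabs, and return the resulting
 * buffer size in bytes.
 */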
146static unsigned long xen_set_nslabs(unsigned long nr_tbl)
147{
148	if (!nr_tbl) {
149		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
150		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
151	} else
152		xen_io_tlb_nslabs = nr_tbl;
153
154	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
155}
156
157enum xen_swiotlb_err {
158	XEN_SWIOTLB_UNKNOWN = 0,
159	XEN_SWIOTLB_ENOMEM,
160	XEN_SWIOTLB_EFIXUP
161};
162
163static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
164{
165	switch (err) {
166	case XEN_SWIOTLB_ENOMEM:
167		return "Cannot allocate Xen-SWIOTLB buffer\n";
168	case XEN_SWIOTLB_EFIXUP:
169		return "Failed to get contiguous memory for DMA from Xen!\n"\
170		    "You either: don't have the permissions, do not have"\
171		    " enough free memory under 4GB, or the hypervisor memory"\
172		    " is too fragmented!";
173	default:
174		break;
175	}
176	return "";
177}
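/*
 * Allocate the IO TLB buffer (from memblock during early boot, from the page
 * allocator afterwards), exchange it for memory below 4GB through
 * xen_swiotlb_fixup(), and hand it to the core swiotlb code.  On failure the
 * requested size is halved and the whole sequence retried a few times before
 * giving up.
 */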
178int __ref xen_swiotlb_init(int verbose, bool early)
179{
180	unsigned long bytes, order;
181	int rc = -ENOMEM;
182	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
183	unsigned int repeat = 3;
184
185	xen_io_tlb_nslabs = swiotlb_nr_tbl();
186retry:
187	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
188	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
189
190	/*
191	 * IO TLB memory already allocated. Just use it.
192	 */
193	if (io_tlb_start != 0) {
194		xen_io_tlb_start = phys_to_virt(io_tlb_start);
195		goto end;
196	}
197
198	/*
199	 * Get IO TLB memory from any location.
200	 */
201	if (early) {
202		xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
203						  PAGE_SIZE);
204		if (!xen_io_tlb_start)
205			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
206			      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
207	} else {
208#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
209#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
210		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
211			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
212			if (xen_io_tlb_start)
213				break;
214			order--;
215		}
216		if (order != get_order(bytes)) {
217			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
218				(PAGE_SIZE << order) >> 20);
219			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
220			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
221		}
222	}
223	if (!xen_io_tlb_start) {
224		m_ret = XEN_SWIOTLB_ENOMEM;
225		goto error;
226	}
227	/*
228	 * And replace that memory with pages under 4GB.
229	 */
230	rc = xen_swiotlb_fixup(xen_io_tlb_start,
231			       bytes,
232			       xen_io_tlb_nslabs);
233	if (rc) {
234		if (early)
235			memblock_free(__pa(xen_io_tlb_start),
236				      PAGE_ALIGN(bytes));
237		else {
238			free_pages((unsigned long)xen_io_tlb_start, order);
239			xen_io_tlb_start = NULL;
240		}
241		m_ret = XEN_SWIOTLB_EFIXUP;
242		goto error;
243	}
244	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
245	if (early) {
246		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
247			 verbose))
248			panic("Cannot allocate SWIOTLB buffer");
249		rc = 0;
250	} else
251		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
252
253end:
254	xen_io_tlb_end = xen_io_tlb_start + bytes;
255	if (!rc)
256		swiotlb_set_max_segment(PAGE_SIZE);
257
258	return rc;
259error:
260	if (repeat--) {
261		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
262					(xen_io_tlb_nslabs >> 1));
263		pr_info("Lowering to %luMB\n",
264			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
265		goto retry;
266	}
267	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
268	if (early)
269		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
270	else
271		free_pages((unsigned long)xen_io_tlb_start, order);
272	return rc;
273}
274
275static void *
276xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
277			   dma_addr_t *dma_handle, gfp_t flags,
278			   unsigned long attrs)
279{
280	void *ret;
281	int order = get_order(size);
282	u64 dma_mask = DMA_BIT_MASK(32);
283	phys_addr_t phys;
284	dma_addr_t dev_addr;
285
286	/*
287	* Ignore region specifiers - the kernel's ideas of
288	* pseudo-phys memory layout has nothing to do with the
289	* machine physical layout.  We can't allocate highmem
290	* because we can't return a pointer to it.
291	*/
292	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
293
294	/* Convert the size to actually allocated. */
295	size = 1UL << (order + XEN_PAGE_SHIFT);
296
297	/* On ARM this function returns an ioremap'ped virtual address for
298	 * which virt_to_phys doesn't return the corresponding physical
299	 * address. In fact on ARM virt_to_phys only works for kernel direct
300	 * mapped RAM memory. Also see comment below.
301	 */
302	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
303
304	if (!ret)
305		return ret;
306
307	if (hwdev && hwdev->coherent_dma_mask)
308		dma_mask = hwdev->coherent_dma_mask;
309
310	/* At this point dma_handle is the physical address, next we are
311	 * going to set it to the machine address.
312	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
313	 * to *dma_handle. */
314	phys = *dma_handle;
315	dev_addr = xen_phys_to_bus(phys);
316	if (((dev_addr + size - 1 <= dma_mask)) &&
317	    !range_straddles_page_boundary(phys, size))
318		*dma_handle = dev_addr;
319	else {
320		if (xen_create_contiguous_region(phys, order,
321						 fls64(dma_mask), dma_handle) != 0) {
322			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
323			return NULL;
324		}
325		SetPageXenRemapped(virt_to_page(ret));
326	}
327	memset(ret, 0, size);
328	return ret;
329}
330
331static void
332xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
333			  dma_addr_t dev_addr, unsigned long attrs)
334{
335	int order = get_order(size);
336	phys_addr_t phys;
337	u64 dma_mask = DMA_BIT_MASK(32);
338
339	if (hwdev && hwdev->coherent_dma_mask)
340		dma_mask = hwdev->coherent_dma_mask;
341
342	/* do not use virt_to_phys because on ARM it doesn't return you the
343	 * physical address */
344	phys = xen_bus_to_phys(dev_addr);
345
346	/* Convert the size to actually allocated. */
347	size = 1UL << (order + XEN_PAGE_SHIFT);
348
349	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
350		     range_straddles_page_boundary(phys, size)) &&
351	    TestClearPageXenRemapped(virt_to_page(vaddr)))
352		xen_destroy_contiguous_region(phys, order);
353
354	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
355}
356
357/*
358 * Map a single buffer of the indicated size for DMA in streaming mode.  The
359 * physical address to use is returned.
360 *
361 * Once the device is given the dma address, the device owns this memory until
362 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
363 */
364static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
365				unsigned long offset, size_t size,
366				enum dma_data_direction dir,
367				unsigned long attrs)
368{
369	phys_addr_t map, phys = page_to_phys(page) + offset;
370	dma_addr_t dev_addr = xen_phys_to_bus(phys);
371
372	BUG_ON(dir == DMA_NONE);
373	/*
374	 * If the address happens to be in the device's DMA window,
375	 * we can safely return the device addr and not worry about bounce
376	 * buffering it.
377	 */
378	if (dma_capable(dev, dev_addr, size) &&
379	    !range_straddles_page_boundary(phys, size) &&
380		!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
381		swiotlb_force != SWIOTLB_FORCE)
382		goto done;
383
384	/*
385	 * Oh well, have to allocate and map a bounce buffer.
386	 */
387	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
388
389	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
390				     size, size, dir, attrs);
391	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
392		return DMA_MAPPING_ERROR;
393
394	phys = map;
395	dev_addr = xen_phys_to_bus(map);
396
397	/*
398	 * Ensure that the address returned is DMA'ble
399	 */
400	if (unlikely(!dma_capable(dev, dev_addr, size))) {
401		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
402				attrs | DMA_ATTR_SKIP_CPU_SYNC);
403		return DMA_MAPPING_ERROR;
404	}
405
406done:
407	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
408		xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
409	return dev_addr;
410}
411
412/*
413 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
414 * match what was provided for in a previous xen_swiotlb_map_page call.  All
415 * other usages are undefined.
416 *
417 * After this call, reads by the cpu to the buffer are guaranteed to see
418 * whatever the device wrote there.
419 */
420static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
421		size_t size, enum dma_data_direction dir, unsigned long attrs)
422{
423	phys_addr_t paddr = xen_bus_to_phys(dev_addr);
424
425	BUG_ON(dir == DMA_NONE);
426
427	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
428		xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
429
430	/* NOTE: We use dev_addr here, not paddr! */
431	if (is_xen_swiotlb_buffer(dev_addr))
432		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
433}
434
435static void
436xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
437		size_t size, enum dma_data_direction dir)
438{
439	phys_addr_t paddr = xen_bus_to_phys(dma_addr);
440
441	if (!dev_is_dma_coherent(dev))
442		xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
443
444	if (is_xen_swiotlb_buffer(dma_addr))
445		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
446}
447
448static void
449xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
450		size_t size, enum dma_data_direction dir)
451{
452	phys_addr_t paddr = xen_bus_to_phys(dma_addr);
453
454	if (is_xen_swiotlb_buffer(dma_addr))
455		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
456
457	if (!dev_is_dma_coherent(dev))
458		xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
459}
460
461/*
462 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
463 * concerning calls here are the same as for swiotlb_unmap_page() above.
464 */
465static void
466xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
467		enum dma_data_direction dir, unsigned long attrs)
468{
469	struct scatterlist *sg;
470	int i;
471
472	BUG_ON(dir == DMA_NONE);
473
474	for_each_sg(sgl, sg, nelems, i)
475		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
476				dir, attrs);
477
478}
479
480static int
481xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
482		enum dma_data_direction dir, unsigned long attrs)
483{
484	struct scatterlist *sg;
485	int i;
486
487	BUG_ON(dir == DMA_NONE);
488
489	for_each_sg(sgl, sg, nelems, i) {
490		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
491				sg->offset, sg->length, dir, attrs);
492		if (sg->dma_address == DMA_MAPPING_ERROR)
493			goto out_unmap;
494		sg_dma_len(sg) = sg->length;
495	}
496
497	return nelems;
498out_unmap:
499	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
500	sg_dma_len(sgl) = 0;
501	return 0;
502}
503
504static void
505xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
506			    int nelems, enum dma_data_direction dir)
507{
508	struct scatterlist *sg;
509	int i;
510
511	for_each_sg(sgl, sg, nelems, i) {
512		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
513				sg->length, dir);
514	}
515}
516
517static void
518xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
519			       int nelems, enum dma_data_direction dir)
520{
521	struct scatterlist *sg;
522	int i;
523
524	for_each_sg(sgl, sg, nelems, i) {
525		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
526				sg->length, dir);
527	}
528}
529
530/*
531 * Return whether the given device DMA address mask can be supported
532 * properly.  For example, if your device can only drive the low 24-bits
533 * during bus mastering, then you would pass 0x00ffffff as the mask to
534 * this function.
535 */
536static int
537xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
538{
539	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
540}
541
542const struct dma_map_ops xen_swiotlb_dma_ops = {
543	.alloc = xen_swiotlb_alloc_coherent,
544	.free = xen_swiotlb_free_coherent,
545	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
546	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
547	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
548	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
549	.map_sg = xen_swiotlb_map_sg,
550	.unmap_sg = xen_swiotlb_unmap_sg,
551	.map_page = xen_swiotlb_map_page,
552	.unmap_page = xen_swiotlb_unmap_page,
553	.dma_supported = xen_swiotlb_dma_supported,
554	.mmap = dma_common_mmap,
555	.get_sgtable = dma_common_get_sgtable,
556};