v3.1
/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa, and
 * also a mechanism to provide contiguous pages for device driver
 * operations (say, DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFNs) to machine frame numbers (MFNs)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
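/*
 * Illustration (hypothetical numbers, not from this file): suppose the
 * guest's PFNs 0x100 and 0x101 map to MFNs 0x7f3 and 0x2c8. A buffer
 * spanning those two guest pages looks virtually contiguous, but its
 * machine (bus) addresses are not adjacent, so a device DMA'ing to it
 * would scribble on the wrong machine pages. The helpers below detect
 * exactly this case:
 *
 *	pfn_to_mfn(0x100) == 0x7f3
 *	pfn_to_mfn(0x101) == 0x2c8	!= 0x7f3 + 1, so not contiguous
 */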

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

u64 start_dma_addr;

static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	return phys_to_machine(XPADDR(paddr)).maddr;
}

static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	return machine_to_phys(XMADDR(baddr)).paddr;
}

static dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

static int check_pages_physically_contiguous(unsigned long pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_mfn;
	int i;
	int nr_pages;

	next_mfn = pfn_to_mfn(pfn);
	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_mfn(++pfn) != ++next_mfn)
			return 0;
	}
	return 1;
}

static int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long pfn = PFN_DOWN(p);
	unsigned int offset = p & ~PAGE_MASK;

	if (offset + size <= PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(pfn, offset, size))
		return 0;
	return 1;
}

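/*
 * Worked example (assuming 4 KiB pages): for p = 0x1080 and size = 0x100,
 * offset is 0x80 and offset + size = 0x180 <= PAGE_SIZE, so the buffer
 * sits entirely inside one page, trivially cannot straddle a machine-
 * frame discontinuity, and 0 is returned without any pfn_to_mfn lookups.
 * Only buffers that cross a page boundary pay for the contiguity walk
 * above.
 */
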
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long mfn = PFN_DOWN(dma_addr);
	unsigned long pfn = mfn_to_local_pfn(mfn);
	phys_addr_t paddr;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(pfn)) {
		paddr = PFN_PHYS(pfn);
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				(unsigned long)buf + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}

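/*
 * Sizing sketch (constants taken from <linux/swiotlb.h>, where
 * IO_TLB_SHIFT is 11 and IO_TLB_SEGSIZE is 128): each slab is 2 KiB, so
 * one segment is 128 << 11 = 256 KiB, and the initial dma_bits above
 * works out to get_order(256 KiB) + PAGE_SHIFT = 6 + 12 = 18 bits on a
 * 4 KiB-page build. The inner loop then relaxes the address restriction
 * one bit at a time (up to max_dma_bits) if Xen cannot exchange pages
 * that low.
 */
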
void __init xen_swiotlb_init(int verbose)
{
	unsigned long bytes;
	int rc;
	unsigned long nr_tbl;

	nr_tbl = swioltb_nr_tbl();
	if (nr_tbl)
		xen_io_tlb_nslabs = nr_tbl;
	else {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from any location.
	 */
	xen_io_tlb_start = alloc_bootmem(bytes);
	if (!xen_io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");

	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc)
		goto error;

	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);

	return;
error:
	panic("DMA(%d): Failed to exchange pages allocated for DMA with Xen! "\
	      "We either don't have the permission or you do not have enough "\
	      "free memory under 4GB!\n", rc);
}

void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	unsigned long vstart;

	/*
	* Ignore region specifiers - the kernel's ideas of
	* pseudo-phys memory layout has nothing to do with the
	* machine physical layout.  We can't allocate highmem
	* because we can't return a pointer to it.
	*/
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
		return ret;

	vstart = __get_free_pages(flags, order);
	ret = (void *)vstart;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = dma_alloc_coherent_mask(hwdev, flags);

	if (ret) {
		if (xen_create_contiguous_region(vstart, order,
						 fls64(dma_mask)) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		memset(ret, 0, size);
		*dma_handle = virt_to_machine(ret).maddr;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr)
{
	int order = get_order(size);

	if (dma_release_from_coherent(hwdev, order, vaddr))
		return;

	xen_destroy_contiguous_region((unsigned long)vaddr, order);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);


/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	if (!map)
		return DMA_ERROR_CODE;

	dev_addr = xen_virt_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is a no-op on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
				       target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			void *map = swiotlb_tbl_map_single(hwdev,
							   start_dma_addr,
							   sg_phys(sg),
							   sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sgl[0].dma_length = 0;
				return DMA_ERROR_CODE;
			}
			sg->dma_address = xen_virt_to_bus(map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

int
xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		   enum dma_data_direction dir)
{
	return xen_swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);

}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir)
{
	return xen_swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg->dma_length, dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
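
How a driver ends up in these routines (a minimal sketch, not from this file): on a Xen PV guest the xen_swiotlb_* functions are wired into the architecture's dma_map_ops, so an ordinary PCI driver reaches them through the generic DMA API rather than calling them directly. The device, page, and length below are hypothetical.

	/* Hypothetical driver fragment, assuming xen-swiotlb backs dma_map_ops. */
	dma_addr_t bus;

	bus = dma_map_page(&pdev->dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, bus))
		return -ENOMEM;	/* bounce pool exhausted or range unmappable */
	/* ... program the device with 'bus' and run the transfer ... */
	dma_unmap_page(&pdev->dev, bus, len, DMA_TO_DEVICE);
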
v4.10.11
/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa, and
 * also a mechanism to provide contiguous pages for device driver
 * operations (say, DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFNs) to machine frame numbers (MFNs)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

#ifndef CONFIG_X86
static unsigned long dma_alloc_coherent_mask(struct device *dev,
					    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}
#endif

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 * information if the shift is done before casting to 64bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

static int check_pages_physically_contiguous(unsigned long xen_pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_bfn;
	int i;
	int nr_pages;

	next_bfn = pfn_to_bfn(xen_pfn);
	nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 0;
	}
	return 1;
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long xen_pfn = XEN_PFN_DOWN(p);
	unsigned int offset = p & ~XEN_PAGE_MASK;

	if (offset + size <= XEN_PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(xen_pfn, offset, size))
		return 0;
	return 1;
}

static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

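/*
 * Default-size check (IO_TLB_SHIFT is 11 in <linux/swiotlb.h>): with no
 * override, 64 MiB >> 11 = 32768 slabs of 2 KiB each, and 32768 is
 * already a multiple of IO_TLB_SEGSIZE (128), so the ALIGN above is a
 * no-op in the default case and only matters for odd caller-supplied
 * slab counts.
 */
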
enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"\
		    "You either: don't have the permissions, do not have"\
		    " enough free memory under 4GB, or the hypervisor memory"\
		    " is too fragmented!";
	default:
		break;
	}
	return "";
}
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
	/*
	 * Get IO TLB memory from any location.
	 */
	if (early)
		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
			 verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	* Ignore region specifiers - the kernel's ideas of
	* pseudo-phys memory layout has nothing to do with the
	* machine physical layout.  We can't allocate highmem
	* because we can't return a pointer to it.
	*/
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = dma_alloc_coherent_mask(hwdev, flags);

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if (((dev_addr + size - 1 <= dma_mask)) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
	}
	memset(ret, 0, size);
	return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return you the
	 * physical address */
	phys = xen_bus_to_phys(dev_addr);

	if (((dev_addr + size - 1 > dma_mask)) ||
	    range_straddles_page_boundary(phys, size))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);


/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
		!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
		(swiotlb_force != SWIOTLB_FORCE)) {
		/* we are not interested in the dma_addr returned by
		 * xen_dma_map_page, only in the potential cache flushes executed
		 * by the function. */
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
				     attrs);
	if (map == SWIOTLB_MAP_ERROR)
		return DMA_ERROR_CODE;

	dev_addr = xen_phys_to_bus(map);
	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
					dev_addr, map & ~PAGE_MASK, size, dir, attrs);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (dma_capable(dev, dev_addr, size))
		return dev_addr;

	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

	return DMA_ERROR_CODE;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is a no-op on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (target == SYNC_FOR_CPU)
		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	if (target == SYNC_FOR_DEVICE)
		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force == SWIOTLB_FORCE ||
		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
								 start_dma_addr,
								 sg_phys(sg),
								 sg->length,
								 dir, attrs);
			if (map == SWIOTLB_MAP_ERROR) {
				dev_warn(hwdev, "swiotlb buffer is full\n");
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			dev_addr = xen_phys_to_bus(map);
			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
						dev_addr,
						map & ~PAGE_MASK,
						sg->length,
						dir,
						attrs);
			sg->dma_address = dev_addr;
		} else {
			/* we are not interested in the dma_addr returned by
			 * xen_dma_map_page, only in the potential cache flushes executed
			 * by the function. */
			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
						dev_addr,
						paddr & ~PAGE_MASK,
						sg->length,
						dir,
						attrs);
			sg->dma_address = dev_addr;
		}
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);

}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg_dma_len(sg), dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);

int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
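
Mask-setting sketch (hypothetical driver code, not from this file): a driver that knows its device can only address, say, 40 bits would hand that mask to the helper above, which rejects it if the bounce pool itself sits beyond the mask.

	/* Hypothetical: restrict a device to 40-bit DMA via the Xen helper. */
	if (xen_swiotlb_set_dma_mask(&pdev->dev, DMA_BIT_MASK(40)))
		dev_warn(&pdev->dev, "40-bit DMA mask rejected, keeping the old mask\n");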