/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

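/*
 * Set from arm64_dma_init(): non-zero once we know bouncing may actually
 * be needed, i.e. swiotlb=force was passed or RAM extends beyond
 * arm64_dma_phys_limit.
 */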
static int swiotlb __ro_after_init;

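/*
 * Non-coherent devices, and any request carrying DMA_ATTR_WRITE_COMBINE,
 * get a write-combine (Normal non-cacheable) mapping; coherent devices
 * keep the cacheable attributes they were given.
 */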
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

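/* The default pool size can be overridden at boot, e.g. "coherent_pool=2M" */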
static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

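/*
 * Hand out a zeroed, non-cacheable buffer from the preallocated atomic
 * pool; this is the only option for non-coherent allocations in contexts
 * that cannot sleep, since no remapping can happen there.
 */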
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

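/*
 * Coherent allocation: atomic requests for non-coherent devices come from
 * the atomic pool; everything else goes through swiotlb_alloc(), with a
 * non-cacheable remap of the buffer for non-coherent devices.
 */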
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, __builtin_return_address(0));
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	swiotlb_free(dev, size, ptr, *dma_handle, attrs);
no_mem:
	return NULL;
}

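/*
 * Undo __dma_alloc(): release the non-cacheable alias (atomic pool or
 * vmap area) before returning the pages themselves via swiotlb_free().
 */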
static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
}

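/*
 * Streaming DMA: the wrappers below add the cache maintenance that
 * non-coherent devices require around the generic swiotlb hooks, unless
 * the caller opted out with DMA_ATTR_SKIP_CPU_SYNC.
 */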
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

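/*
 * Remap a buffer into userspace, after checking that the requested
 * range (vm_pgoff plus the VMA length) actually fits inside it.
 */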
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
			      unsigned long pfn, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return __swiotlb_mmap_pfn(vma, pfn, size);
}

static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));

	return __swiotlb_get_sgtable_page(sgt, page, size);
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(hwdev, addr);
	return 0;
}

static const struct dma_map_ops arm64_swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = __swiotlb_dma_mapping_error,
};

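/*
 * Set up the atomic pool at boot: grab a physically contiguous chunk
 * (from CMA when available, GFP_DMA32 pages otherwise), remap it with
 * non-cacheable attributes and hand it to a genpool for later carving
 * by __alloc_from_pool().
 */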
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, GFP_KERNEL);
	else
		page = alloc_pages(GFP_DMA32, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  NULL);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

const struct dma_map_ops dummy_dma_ops = {
	.alloc                  = __dummy_alloc,
	.free                   = __dummy_free,
	.mmap                   = __dummy_mmap,
	.map_page               = __dummy_map_page,
	.unmap_page             = __dummy_unmap_page,
	.map_sg                 = __dummy_map_sg,
	.unmap_sg               = __dummy_unmap_sg,
	.sync_single_for_cpu    = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu        = __dummy_sync_sg,
	.sync_sg_for_device     = __dummy_sync_sg,
	.mapping_error          = __dummy_mapping_error,
	.dma_supported          = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

static int __init arm64_dma_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

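/*
 * Coherent allocation behind an IOMMU, with three strategies: atomic
 * callers get a physically contiguous buffer (lowmem pages or the atomic
 * pool), DMA_ATTR_FORCE_CONTIGUOUS allocations come from CMA, and the
 * general case builds a scatter-gather buffer via iommu_dma_alloc() and
 * remaps it into a contiguous vmap.
 */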
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), gfp);
		if (!page)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
			return NULL;
		}
		if (!coherent)
			__dma_flush_area(page_to_virt(page), iosize);

		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot,
						   __builtin_return_address(0));
		if (!addr) {
			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
			dma_release_from_contiguous(dev, page,
						    size >> PAGE_SHIFT);
		}
	} else {
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
		struct page **pages;

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 4 things depending on how it was allocated:
	 * - A remapped array of pages for contiguous allocations.
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		struct page *page = vmalloc_to_page(cpu_addr);

		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

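/*
 * mmap and get_sgtable must recover the underlying pages from whichever
 * of the allocation paths above produced @cpu_addr.
 */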
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
		return __swiotlb_mmap_pfn(vma, pfn, size);
	}

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		/*
		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
		 * hence in the vmalloc space.
		 */
		struct page *page = vmalloc_to_page(cpu_addr);
		return __swiotlb_get_sgtable_page(sgt, page, size);
	}

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

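/*
 * With an IOMMU in the way, the IOVA has to be translated back to a
 * physical address before any cache maintenance can be done on it.
 */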
static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

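/*
 * The sg list is synced through its CPU addresses (sg_virt()), so the
 * maintenance can safely run before iommu_dma_map_sg() has assigned
 * the DMA addresses.
 */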
static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.mapping_error = iommu_dma_mapping_error,
};

static int __init __iommu_dma_init(void)
{
	return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

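/*
 * Switch a device over to iommu_dma_ops if its IOMMU exposes a usable
 * default DMA domain; on any failure the existing (swiotlb) ops are kept.
 */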
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_domain *domain;

	if (!ops)
		return;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return;

out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */

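/*
 * Entry point from the bus code for each device: start with the
 * swiotlb-based ops, record coherency, then let the IOMMU layer (and
 * Xen, when running as the initial domain) override them.
 */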
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->dma_ops)
		dev->dma_ops = &arm64_swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
	if (xen_initial_domain()) {
		dev->archdata.dev_dma_ops = dev->dma_ops;
		dev->dma_ops = xen_dma_ops;
	}
#endif
}