// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for DMA ops implementations. These generally rely on the fact that
 * the allocated memory contains normal pages in the direct kernel mapping.
 */
#include <linux/dma-map-ops.h>
#include <linux/iommu-dma.h>

static struct page *dma_common_vaddr_to_page(void *cpu_addr)
{
        if (is_vmalloc_addr(cpu_addr))
                return vmalloc_to_page(cpu_addr);
        return virt_to_page(cpu_addr);
}

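/*
 * Note: remapped allocations (e.g. from dma_common_contiguous_remap(), as
 * used by non-coherent DMA allocators) hand out vmalloc-area addresses,
 * while most allocations come straight from the direct mapping; the helper
 * above covers both cases.
 */
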
/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page = dma_common_vaddr_to_page(cpu_addr);
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}

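/*
 * Illustrative sketch (not part of this file): callers normally reach this
 * helper through the dma_get_sgtable() wrapper rather than calling it
 * directly, e.g. when exporting an already allocated coherent buffer:
 *
 *      struct sg_table sgt;
 *      int err;
 *
 *      err = dma_get_sgtable(dev, &sgt, cpu_addr, dma_addr, size);
 *      if (err)
 *              return err;
 *      ... hand sgt.sgl to the consumer, then sg_free_table(&sgt) ...
 */
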
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
#ifdef CONFIG_MMU
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;
        struct page *page = dma_common_vaddr_to_page(cpu_addr);
        int ret = -ENXIO;

        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off >= count || user_count > count - off)
                return -ENXIO;

        return remap_pfn_range(vma, vma->vm_start,
                        page_to_pfn(page) + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
        return -ENXIO;
#endif /* CONFIG_MMU */
}

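/*
 * Illustrative sketch (hypothetical driver, not part of this file): a char
 * device can expose a buffer from dma_alloc_coherent() to userspace by
 * letting dma_mmap_coherent() dispatch to this helper from its ->mmap file
 * operation:
 *
 *      static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct mydrv *drv = file->private_data;
 *
 *              return dma_mmap_coherent(drv->dev, vma, drv->cpu_addr,
 *                                       drv->dma_addr, drv->size);
 *      }
 */
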
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        struct page *page;

        page = dma_alloc_contiguous(dev, size, gfp);
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));
        if (!page)
                return NULL;

        if (use_dma_iommu(dev))
                *dma_handle = iommu_dma_map_page(dev, page, 0, size, dir,
                                                 DMA_ATTR_SKIP_CPU_SYNC);
        else
                *dma_handle = ops->map_page(dev, page, 0, size, dir,
                                            DMA_ATTR_SKIP_CPU_SYNC);
        if (*dma_handle == DMA_MAPPING_ERROR) {
                dma_free_contiguous(dev, page, size);
                return NULL;
        }

        memset(page_address(page), 0, size);
        return page;
}

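/*
 * Illustrative sketch: this helper is normally reached via the public
 * dma_alloc_pages() API rather than called directly. A typical caller
 * (placeholder names) looks like:
 *
 *      struct page *page;
 *      dma_addr_t dma;
 *
 *      page = dma_alloc_pages(dev, size, &dma, DMA_TO_DEVICE, GFP_KERNEL);
 *      if (!page)
 *              return -ENOMEM;
 *
 * The returned memory is zeroed and already mapped for streaming DMA in
 * the given direction.
 */
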
void dma_common_free_pages(struct device *dev, size_t size, struct page *page,
                dma_addr_t dma_handle, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (use_dma_iommu(dev))
                iommu_dma_unmap_page(dev, dma_handle, size, dir,
                                     DMA_ATTR_SKIP_CPU_SYNC);
        else if (ops->unmap_page)
                ops->unmap_page(dev, dma_handle, size, dir,
                                DMA_ATTR_SKIP_CPU_SYNC);
        dma_free_contiguous(dev, page, size);
}
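
/*
 * Illustrative sketch: the matching free path for the example above, again
 * through the public API:
 *
 *      dma_free_pages(dev, size, page, dma, DMA_TO_DEVICE);
 *
 * A dma_map_ops implementation with no special requirements can also plug
 * the helpers in this file straight into its ops table, e.g. (hypothetical
 * ops struct):
 *
 *      static const struct dma_map_ops my_dma_ops = {
 *              .mmap           = dma_common_mmap,
 *              .get_sgtable    = dma_common_get_sgtable,
 *              ...
 *      };
 */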