// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

struct dma_coherent_mem {
	void		*virt_base;	/* kernel mapping of the pool */
	dma_addr_t	device_base;	/* bus address the device is programmed with */
	unsigned long	pfn_base;	/* first CPU page frame of the pool */
	int		size;		/* pool size in pages */
	unsigned long	*bitmap;	/* one bit per page; set = allocated */
	spinlock_t	spinlock;	/* protects @bitmap */
	bool		use_dev_dma_pfn_offset;	/* derive bus address via phys_to_dma() */
};

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
	return mem->device_base;
}

static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset)
{
	struct dma_coherent_mem *dma_mem;
	int pages = size >> PAGE_SHIFT;
	void *mem_base;

	if (!size)
		return ERR_PTR(-EINVAL);

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base)
		return ERR_PTR(-EINVAL);

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out_unmap_membase;
	dma_mem->bitmap = bitmap_zalloc(pages, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out_free_dma_mem;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset;
	spin_lock_init(&dma_mem->spinlock);

	return dma_mem;

out_free_dma_mem:
	kfree(dma_mem);
out_unmap_membase:
	memunmap(mem_base);
	pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zd MiB\n",
	       &phys_addr, size / SZ_1M);
	return ERR_PTR(-ENOMEM);
}

static void _dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	bitmap_free(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

/*
 * Declare a region of memory to be handed out by dma_alloc_coherent() when it
 * is asked for coherent memory for this device. This shall only be used
 * from platform code, usually based on the device tree description.
 *
 * phys_addr is the CPU physical address to which the memory is currently
 * assigned (this will be memremapped so the CPU can access the region).
 *
 * device_addr is the DMA address the device needs to be programmed with to
 * actually address this memory (this will be handed out as the dma_addr_t in
 * dma_alloc_coherent()).
 *
 * size is the size of the area (must be a multiple of PAGE_SIZE).
 *
 * As a simplification for the platforms, only *one* such region of memory may
 * be declared per device.
 */
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem;
	int ret;

	mem = dma_init_coherent_memory(phys_addr, device_addr, size, false);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		_dma_release_coherent_memory(mem);
	return ret;
}
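
/*
 * Example (illustrative sketch, not part of this file): platform code might
 * carve out such a window at probe time. foo_probe() and the addresses are
 * hypothetical; real callers take them from the platform description.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		// CPU address 0x90000000, device bus address 0x80000000
 *		int rc = dma_declare_coherent_memory(&pdev->dev, 0x90000000,
 *						     0x80000000, SZ_1M);
 *		if (rc)
 *			return rc;
 *		// dma_alloc_coherent(&pdev->dev, ...) is now served from
 *		// this window instead of the generic allocator.
 *		return 0;
 *	}
 */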

void dma_release_coherent_memory(struct device *dev)
{
	if (dev) {
		_dma_release_coherent_memory(dev->dma_mem);
		/*
		 * Clear the stale pointer: it now refers to freed memory and
		 * would otherwise make a later dma_declare_coherent_memory()
		 * fail with -EBUSY.
		 */
		dev->dma_mem = NULL;
	}
}

static void *__dma_alloc_from_coherent(struct device *dev,
				       struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) +
			((dma_addr_t)pageno << PAGE_SHIFT);
	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev: device from which we allocate memory
 * @size: size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret: This pointer will be filled with the virtual address of the
 *	 allocated area.
 *
 * This function should only be called from the per-arch dma_alloc_coherent()
 * path to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
	return 1;
}
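
/*
 * Illustrative caller (hypothetical, for orientation): an arch allocation
 * hook is expected to try the per-device pool first and fall back to its
 * generic path only when this helper returns 0. alloc_from_generic_pool()
 * is a stand-in name, not a real kernel function.
 *
 *	void *arch_alloc(struct device *dev, size_t size,
 *			 dma_addr_t *handle, gfp_t gfp)
 *	{
 *		void *vaddr;
 *
 *		if (dma_alloc_from_dev_coherent(dev, size, handle, &vaddr))
 *			return vaddr;	// may be NULL if the pool is full
 *		return alloc_from_generic_pool(dev, size, handle, gfp);
 *	}
 *
 * Note that a return of 1 with *ret == NULL means the device has a pool but
 * it is exhausted; the caller must still not fall back to generic memory.
 */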

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev: device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
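
/*
 * Matching free-side sketch (hypothetical, mirroring the allocation example
 * above): free_to_generic_pool() is a stand-in name.
 *
 *	void arch_free(struct device *dev, size_t size, void *vaddr,
 *		       dma_addr_t handle)
 *	{
 *		if (dma_release_from_dev_coherent(dev, get_order(size), vaddr))
 *			return;		// pages went back to the device pool
 *		free_to_generic_pool(dev, size, vaddr, handle);
 *	}
 */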

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev: device from which the memory was allocated
 * @vma: vm_area for the userspace memory
 * @vaddr: cpu address returned by dma_alloc_from_dev_coherent
 * @size: size of the memory buffer allocated
 * @ret: result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
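
/*
 * Illustrative caller (hypothetical): an mmap hook first offers the vma to
 * the per-device pool and falls through only when it returns 0.
 * generic_dma_mmap() is a stand-in name.
 *
 *	int arch_mmap(struct device *dev, struct vm_area_struct *vma,
 *		      void *cpu_addr, dma_addr_t handle, size_t size)
 *	{
 *		int ret;
 *
 *		if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *			return ret;	// pool handled it; may be -ENXIO
 *		return generic_dma_mmap(dev, vma, cpu_addr, handle, size);
 *	}
 */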

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
				     dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
					 dma_handle);
}

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
					   vaddr);
}

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				  size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

int dma_init_global_coherent(phys_addr_t phys_addr, size_t size)
{
	struct dma_coherent_mem *mem;

	mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true);
	if (IS_ERR(mem))
		return PTR_ERR(mem);
	dma_coherent_default_memory = mem;
	pr_info("DMA: default coherent area is set\n");
	return 0;
}
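
/*
 * Illustrative use (hypothetical values): early platform code can seed the
 * global pool from a region it reserved itself:
 *
 *	if (dma_init_global_coherent(reserved_phys_base, SZ_4M))
 *		pr_warn("no global coherent DMA pool\n");
 *
 * In-tree the usual path is the device tree one below, where a
 * "linux,dma-default" reserved-memory region is fed here by
 * dma_init_reserved_memory().
 */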
#endif /* CONFIG_DMA_GLOBAL_POOL */

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#ifdef CONFIG_DMA_GLOBAL_POOL
static struct reserved_mem *dma_reserved_default_memory __initdata;
#endif

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	if (!rmem->priv) {
		struct dma_coherent_mem *mem;

		mem = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, true);
		if (IS_ERR(mem))
			return PTR_ERR(mem);
		rmem->priv = mem;
	}
	dma_assign_coherent_memory(dev, rmem->priv);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init = rmem_dma_device_init,
	.device_release = rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

#ifdef CONFIG_DMA_GLOBAL_POOL
	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

#ifdef CONFIG_DMA_GLOBAL_POOL
static int __init dma_init_reserved_memory(void)
{
	if (!dma_reserved_default_memory)
		return -ENOMEM;
	return dma_init_global_coherent(dma_reserved_default_memory->base,
					dma_reserved_default_memory->size);
}
core_initcall(dma_init_reserved_memory);
#endif /* CONFIG_DMA_GLOBAL_POOL */

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
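
/*
 * Illustrative device tree snippet (addresses and labels hypothetical):
 * a node compatible with "shared-dma-pool" under /reserved-memory is routed
 * to rmem_dma_setup() above; "linux,dma-default" additionally marks it as
 * the global fallback pool when CONFIG_DMA_GLOBAL_POOL is enabled.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@90000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x90000000 0x100000>;	// 1 MiB
 *			no-map;
 *			linux,dma-default;
 *		};
 *	};
 */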
#endif