v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem * mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	else
		return mem->device_base;
}

static int dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	int ret;

	if (!size) {
		ret = -EINVAL;
		goto out;
	}

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base) {
		ret = -EINVAL;
		goto out;
	}
	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem) {
		ret = -ENOMEM;
		goto out;
	}
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return 0;

out:
	kfree(dma_mem);
	if (mem_base)
		memunmap(mem_base);
	return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;
	int ret;

	ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
	if (ret)
		return ret;

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
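
A minimal usage sketch, not part of the file: a platform driver with a dedicated on-chip SRAM window might declare it as this device's coherent pool from probe(), so later dma_alloc_coherent() calls for the device are served from that window. The driver, device, and probe function below are hypothetical.

/* Hypothetical probe(); needs <linux/platform_device.h> and <linux/dma-mapping.h>. */
static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* In v4.17 a return of 0 means success; DMA_MEMORY_EXCLUSIVE
	 * forbids falling back to the generic allocator. */
	return dma_declare_coherent_memory(&pdev->dev, res->start, res->start,
					   resource_size(res),
					   DMA_MEMORY_EXCLUSIVE);
}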

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
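
Continuing the hypothetical probe sketched earlier: if part of the declared window belongs to someone else (say, a mailbox page shared with firmware at the base of the pool), it can be pinned so the bitmap allocator never hands it out. The mailbox layout is invented for illustration; IS_ERR()/PTR_ERR() come from <linux/err.h>, SZ_4K from <linux/sizes.h>.

	/* Reserve the first 4 KiB of the pool; on success we get its
	 * kernel virtual address inside the remapped pool. */
	void *mbox = dma_mark_declared_memory_occupied(&pdev->dev,
						       res->start, SZ_4K);
	if (IS_ERR(mbox))
		return PTR_ERR(mbox);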

static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
		ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	ret = mem->virt_base + (pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		of the allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
	if (*ret)
		return 1;

	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
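
A sketch of the calling convention the kernel-doc above describes, written as a simplified arch-level allocator. The function and fallback names are hypothetical; real arch code also handles attrs and GFP flags.

static void *arch_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* Non-zero means "handled here": return ret even if it is NULL,
	 * which happens when an exhausted pool is DMA_MEMORY_EXCLUSIVE. */
	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &ret))
		return ret;

	return arch_alloc_from_generic_pool(dev, size, dma_handle, gfp);
}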

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
			dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
EXPORT_SYMBOL(dma_release_from_dev_coherent);
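
The matching free path under the same convention (again with hypothetical names): a return of 1 tells the arch code the buffer lived in the per-device pool and has already been returned to the bitmap.

static void arch_dma_free(struct device *dev, size_t size, void *vaddr)
{
	if (dma_release_from_dev_coherent(dev, get_order(size), vaddr))
		return;

	arch_free_to_generic_pool(dev, size, vaddr);
}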

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
			vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
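
And the mmap hook, completing the trio (hypothetical wrapper name): when the helper returns 1, *ret already carries the remap_pfn_range() result and must be returned as-is.

static int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			 void *cpu_addr, size_t size)
{
	int ret;

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return arch_mmap_from_generic_pool(vma, cpu_addr, size);
}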

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				   size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;
	int ret;

	if (!mem) {
		ret = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size,
					       DMA_MEMORY_EXCLUSIVE, &mem);
		if (ret) {
			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
				&rmem->base, (unsigned long)rmem->size / SZ_1M);
			return ret;
		}
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating the error from
	 * dma_assign_coherent_memory() for a NULL device.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);

	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}

	return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif
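
For reference, an illustrative device-tree fragment that rmem_dma_setup() would match, shown here as a C comment; the addresses and sizes are made up, and the authoritative format is the reserved-memory binding documentation.

/* Example (illustrative) reserved-memory node for a shared DMA pool:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@60000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x60000000 0x400000>;
 *			no-map;
 *		};
 *	};
 *
 * A device node then claims the pool with "memory-region = <&dma_pool>;",
 * which triggers rmem_dma_device_init() for that device. Adding the
 * "linux,dma-default" property (honoured on ARM) makes the region the
 * global default pool set up by dma_init_reserved_memory() above.
 */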
v4.6
 
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
};

static bool dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;

	if (flags & DMA_MEMORY_MAP)
		mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	else
		mem_base = ioremap(phys_addr, size);
	if (!mem_base)
		goto out;

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return true;

out:
	kfree(dma_mem);
	if (mem_base) {
		if (flags & DMA_MEMORY_MAP)
			memunmap(mem_base);
		else
			iounmap(mem_base);
	}
	return false;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	if (mem->flags & DMA_MEMORY_MAP)
		memunmap(mem->virt_base);
	else
		iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;

	if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
				      &mem))
		return 0;

	if (dma_assign_coherent_memory(dev, mem) == 0)
		return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;

	dma_release_coherent_memory(mem);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
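
Note the older return convention here: success is signalled by returning the mapping type (DMA_MEMORY_MAP or DMA_MEMORY_IO) and failure by 0, the opposite polarity of the v4.17 variant above. A hypothetical v4.6-era caller therefore checks the flag bits rather than testing for zero:

	/* Hypothetical v4.6-era probe snippet (same pdev/res as earlier). */
	if ((dma_declare_coherent_memory(&pdev->dev, res->start, res->start,
					 resource_size(res),
					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)
	     & DMA_MEMORY_MAP) == 0)
		return -ENOMEM;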

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		of the allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	unsigned long flags;
	int pageno;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;
	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	if (mem->flags & DMA_MEMORY_MAP)
		memset(*ret, 0, size);
	else
		memset_io(*ret, 0, size);
	spin_unlock_irqrestore(&mem->spinlock, flags);

	return 1;

err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);

/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);

/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_coherent
 * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
		int count = size >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;

	if (!mem &&
	    !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
				      DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
				      &mem)) {
		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			&rmem->base, (unsigned long)rmem->size / SZ_1M);
		return -ENODEV;
	}
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif