v3.1
  1/*
  2 * DMA Pool allocator
  3 *
  4 * Copyright 2001 David Brownell
  5 * Copyright 2007 Intel Corporation
  6 *   Author: Matthew Wilcox <willy@linux.intel.com>
  7 *
  8 * This software may be redistributed and/or modified under the terms of
  9 * the GNU General Public License ("GPL") version 2 as published by the
 10 * Free Software Foundation.
 11 *
 12 * This allocator returns small blocks of a given size which are DMA-able by
 13 * the given device.  It uses the dma_alloc_coherent page allocator to get
 14 * new pages, then splits them up into blocks of the required size.
 15 * Many older drivers still have their own code to do this.
 16 *
 17 * The current design of this allocator is fairly simple.  The pool is
 18 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 19 * allocated pages.  Each page in the page_list is split into blocks of at
 20 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 21 * list of free blocks within the page.  Used blocks aren't tracked, but we
 22 * keep a count of how many are currently allocated from each page.
 23 */
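A minimal usage sketch (not part of this file) showing how a driver typically consumes this allocator; the device pointer, block size, alignment, and helper name below are illustrative assumptions:

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_setup(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *buf;

	/* 64-byte blocks, 16-byte aligned, never crossing a 4 KiB boundary */
	pool = dma_pool_create("example", dev, 64, 16, 4096);
	if (!pool)
		return -ENOMEM;

	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand 'dma' to the hardware, touch 'buf' from the CPU ... */

	dma_pool_free(pool, buf, dma);
	dma_pool_destroy(pool);
	return 0;
}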
 24
 25#include <linux/device.h>
 26#include <linux/dma-mapping.h>
 27#include <linux/dmapool.h>
 28#include <linux/kernel.h>
 29#include <linux/list.h>
 30#include <linux/module.h>
 31#include <linux/mutex.h>
 32#include <linux/poison.h>
 33#include <linux/sched.h>
 34#include <linux/slab.h>
 35#include <linux/spinlock.h>
 36#include <linux/string.h>
 37#include <linux/types.h>
 38#include <linux/wait.h>
 39
 40#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
 41#define DMAPOOL_DEBUG 1
 42#endif
 43
 44struct dma_pool {		/* the pool */
 45	struct list_head page_list;
 46	spinlock_t lock;
 47	size_t size;
 48	struct device *dev;
 49	size_t allocation;
 50	size_t boundary;
 51	char name[32];
 52	wait_queue_head_t waitq;
 53	struct list_head pools;
 54};
 55
 56struct dma_page {		/* cacheable header for 'allocation' bytes */
 57	struct list_head page_list;
 58	void *vaddr;
 59	dma_addr_t dma;
 60	unsigned int in_use;
 61	unsigned int offset;
 62};
 63
 64#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
 65
 66static DEFINE_MUTEX(pools_lock);
 67
 68static ssize_t
 69show_pools(struct device *dev, struct device_attribute *attr, char *buf)
 70{
 71	unsigned temp;
 72	unsigned size;
 73	char *next;
 74	struct dma_page *page;
 75	struct dma_pool *pool;
 76
 77	next = buf;
 78	size = PAGE_SIZE;
 79
 80	temp = scnprintf(next, size, "poolinfo - 0.1\n");
 81	size -= temp;
 82	next += temp;
 83
 84	mutex_lock(&pools_lock);
 85	list_for_each_entry(pool, &dev->dma_pools, pools) {
 86		unsigned pages = 0;
 87		unsigned blocks = 0;
 88
 89		spin_lock_irq(&pool->lock);
 90		list_for_each_entry(page, &pool->page_list, page_list) {
 91			pages++;
 92			blocks += page->in_use;
 93		}
 94		spin_unlock_irq(&pool->lock);
 95
 96		/* per-pool info, no real statistics yet */
 97		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
 98				 pool->name, blocks,
 99				 pages * (pool->allocation / pool->size),
100				 pool->size, pages);
101		size -= temp;
102		next += temp;
103	}
104	mutex_unlock(&pools_lock);
105
106	return PAGE_SIZE - size;
107}
108
109static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
110
111/**
112 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
113 * @name: name of pool, for diagnostics
114 * @dev: device that will be doing the DMA
115 * @size: size of the blocks in this pool.
116 * @align: alignment requirement for blocks; must be a power of two
117 * @boundary: returned blocks won't cross this power of two boundary
118 * Context: !in_interrupt()
119 *
120 * Returns a dma allocation pool with the requested characteristics, or
121 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
122 * may be used to allocate memory.  Such memory will all have "consistent"
123 * DMA mappings, accessible by the device and its driver without using
124 * cache flushing primitives.  The actual size of blocks allocated may be
125 * larger than requested because of alignment.
126 *
127 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
128 * cross that size boundary.  This is useful for devices which have
129 * addressing restrictions on individual DMA transfers, such as not crossing
130 * boundaries of 4KBytes.
131 */
132struct dma_pool *dma_pool_create(const char *name, struct device *dev,
133				 size_t size, size_t align, size_t boundary)
134{
135	struct dma_pool *retval;
136	size_t allocation;
137
138	if (align == 0) {
139		align = 1;
140	} else if (align & (align - 1)) {
141		return NULL;
142	}
143
144	if (size == 0) {
145		return NULL;
146	} else if (size < 4) {
147		size = 4;
148	}
149
150	if ((size % align) != 0)
151		size = ALIGN(size, align);
152
153	allocation = max_t(size_t, size, PAGE_SIZE);
154
155	if (!boundary) {
156		boundary = allocation;
157	} else if ((boundary < size) || (boundary & (boundary - 1))) {
158		return NULL;
159	}
160
161	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
162	if (!retval)
163		return retval;
164
165	strlcpy(retval->name, name, sizeof(retval->name));
166
167	retval->dev = dev;
168
169	INIT_LIST_HEAD(&retval->page_list);
170	spin_lock_init(&retval->lock);
171	retval->size = size;
172	retval->boundary = boundary;
173	retval->allocation = allocation;
174	init_waitqueue_head(&retval->waitq);
175
176	if (dev) {
177		int ret;
178
179		mutex_lock(&pools_lock);
180		if (list_empty(&dev->dma_pools))
181			ret = device_create_file(dev, &dev_attr_pools);
182		else
183			ret = 0;
184		/* note:  not currently insisting "name" be unique */
185		if (!ret)
186			list_add(&retval->pools, &dev->dma_pools);
187		else {
188			kfree(retval);
189			retval = NULL;
190		}
191		mutex_unlock(&pools_lock);
192	} else
193		INIT_LIST_HEAD(&retval->pools);
194
195	return retval;
196}
197EXPORT_SYMBOL(dma_pool_create);
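A worked example of the argument normalisation above, with hypothetical numbers (not taken from any caller):

/*
 * Hypothetical values, purely to illustrate the checks in dma_pool_create():
 *
 *   dma_pool_create("ex", dev, 20, 16, 0)
 *     size       = ALIGN(20, 16)       = 32
 *     allocation = max(32, PAGE_SIZE)  = 4096 (on a 4 KiB-page arch)
 *     boundary   = 0, so it becomes 'allocation' = 4096
 *
 *   dma_pool_create("ex", dev, 100, 64, 32)
 *     size is first rounded to ALIGN(100, 64) = 128, then the call
 *     returns NULL because boundary (32) is smaller than that size.
 */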
198
199static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
200{
201	unsigned int offset = 0;
202	unsigned int next_boundary = pool->boundary;
203
204	do {
205		unsigned int next = offset + pool->size;
206		if (unlikely((next + pool->size) >= next_boundary)) {
207			next = next_boundary;
208			next_boundary += pool->boundary;
209		}
210		*(int *)(page->vaddr + offset) = next;
211		offset = next;
212	} while (offset < pool->allocation);
213}
214
215static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
216{
217	struct dma_page *page;
218
219	page = kmalloc(sizeof(*page), mem_flags);
220	if (!page)
221		return NULL;
222	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
223					 &page->dma, mem_flags);
224	if (page->vaddr) {
225#ifdef	DMAPOOL_DEBUG
226		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
227#endif
228		pool_initialise_page(pool, page);
229		list_add(&page->page_list, &pool->page_list);
230		page->in_use = 0;
231		page->offset = 0;
232	} else {
233		kfree(page);
234		page = NULL;
235	}
236	return page;
237}
238
239static inline int is_page_busy(struct dma_page *page)
240{
241	return page->in_use != 0;
242}
243
244static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
245{
246	dma_addr_t dma = page->dma;
247
248#ifdef	DMAPOOL_DEBUG
249	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
250#endif
251	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
252	list_del(&page->page_list);
253	kfree(page);
254}
255
256/**
257 * dma_pool_destroy - destroys a pool of dma memory blocks.
258 * @pool: dma pool that will be destroyed
259 * Context: !in_interrupt()
260 *
261 * Caller guarantees that no more memory from the pool is in use,
262 * and that nothing will try to use the pool after this call.
263 */
264void dma_pool_destroy(struct dma_pool *pool)
265{
266	mutex_lock(&pools_lock);
267	list_del(&pool->pools);
268	if (pool->dev && list_empty(&pool->dev->dma_pools))
269		device_remove_file(pool->dev, &dev_attr_pools);
270	mutex_unlock(&pools_lock);
271
272	while (!list_empty(&pool->page_list)) {
273		struct dma_page *page;
274		page = list_entry(pool->page_list.next,
275				  struct dma_page, page_list);
276		if (is_page_busy(page)) {
277			if (pool->dev)
278				dev_err(pool->dev,
279					"dma_pool_destroy %s, %p busy\n",
280					pool->name, page->vaddr);
281			else
282				printk(KERN_ERR
283				       "dma_pool_destroy %s, %p busy\n",
284				       pool->name, page->vaddr);
285			/* leak the still-in-use consistent memory */
286			list_del(&page->page_list);
287			kfree(page);
288		} else
289			pool_free_page(pool, page);
290	}
291
292	kfree(pool);
293}
294EXPORT_SYMBOL(dma_pool_destroy);
295
296/**
297 * dma_pool_alloc - get a block of consistent memory
298 * @pool: dma pool that will produce the block
299 * @mem_flags: GFP_* bitmask
300 * @handle: pointer to dma address of block
301 *
302 * This returns the kernel virtual address of a currently unused block,
303 * and reports its dma address through the handle.
304 * If such a memory block can't be allocated, %NULL is returned.
305 */
306void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
307		     dma_addr_t *handle)
308{
309	unsigned long flags;
310	struct dma_page *page;
311	size_t offset;
312	void *retval;
313
314	might_sleep_if(mem_flags & __GFP_WAIT);
315
316	spin_lock_irqsave(&pool->lock, flags);
317 restart:
318	list_for_each_entry(page, &pool->page_list, page_list) {
319		if (page->offset < pool->allocation)
320			goto ready;
321	}
322	page = pool_alloc_page(pool, GFP_ATOMIC);
323	if (!page) {
324		if (mem_flags & __GFP_WAIT) {
325			DECLARE_WAITQUEUE(wait, current);
326
327			__set_current_state(TASK_UNINTERRUPTIBLE);
328			__add_wait_queue(&pool->waitq, &wait);
329			spin_unlock_irqrestore(&pool->lock, flags);
330
331			schedule_timeout(POOL_TIMEOUT_JIFFIES);
332
333			spin_lock_irqsave(&pool->lock, flags);
334			__remove_wait_queue(&pool->waitq, &wait);
335			goto restart;
336		}
337		retval = NULL;
338		goto done;
339	}
340
341 ready:
342	page->in_use++;
343	offset = page->offset;
344	page->offset = *(int *)(page->vaddr + offset);
345	retval = offset + page->vaddr;
346	*handle = offset + page->dma;
347#ifdef	DMAPOOL_DEBUG
348	memset(retval, POOL_POISON_ALLOCATED, pool->size);
349#endif
350 done:
351	spin_unlock_irqrestore(&pool->lock, flags);
352	return retval;
353}
354EXPORT_SYMBOL(dma_pool_alloc);
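A hedged sketch of a caller in atomic context; the descriptor layout and helper names are hypothetical, not defined by this allocator:

#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical hardware descriptor carved out of the pool. */
struct example_desc {
	__le32 status;
	__le32 buf_addr;
};

static struct example_desc *example_get_desc(struct dma_pool *pool,
					     dma_addr_t *dma)
{
	/*
	 * GFP_ATOMIC is safe under spinlocks, but then the pool cannot
	 * sleep waiting for a fresh page, so this can fail under pressure.
	 */
	return dma_pool_alloc(pool, GFP_ATOMIC, dma);
}

static void example_put_desc(struct dma_pool *pool,
			     struct example_desc *desc, dma_addr_t dma)
{
	dma_pool_free(pool, desc, dma);
}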
355
356static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
357{
358	struct dma_page *page;
359
360	list_for_each_entry(page, &pool->page_list, page_list) {
361		if (dma < page->dma)
362			continue;
363		if (dma < (page->dma + pool->allocation))
364			return page;
365	}
366	return NULL;
367}
368
369/**
370 * dma_pool_free - put block back into dma pool
371 * @pool: the dma pool holding the block
372 * @vaddr: virtual address of block
373 * @dma: dma address of block
374 *
375 * Caller promises neither device nor driver will again touch this block
376 * unless it is first re-allocated.
377 */
378void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
379{
380	struct dma_page *page;
381	unsigned long flags;
382	unsigned int offset;
383
384	spin_lock_irqsave(&pool->lock, flags);
385	page = pool_find_page(pool, dma);
386	if (!page) {
387		spin_unlock_irqrestore(&pool->lock, flags);
388		if (pool->dev)
389			dev_err(pool->dev,
390				"dma_pool_free %s, %p/%lx (bad dma)\n",
391				pool->name, vaddr, (unsigned long)dma);
392		else
393			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
394			       pool->name, vaddr, (unsigned long)dma);
395		return;
396	}
397
398	offset = vaddr - page->vaddr;
399#ifdef	DMAPOOL_DEBUG
400	if ((dma - page->dma) != offset) {
401		spin_unlock_irqrestore(&pool->lock, flags);
402		if (pool->dev)
403			dev_err(pool->dev,
404				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
405				pool->name, vaddr, (unsigned long long)dma);
406		else
407			printk(KERN_ERR
408			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
409			       pool->name, vaddr, (unsigned long long)dma);
410		return;
411	}
412	{
413		unsigned int chain = page->offset;
414		while (chain < pool->allocation) {
415			if (chain != offset) {
416				chain = *(int *)(page->vaddr + chain);
417				continue;
418			}
419			spin_unlock_irqrestore(&pool->lock, flags);
420			if (pool->dev)
421				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
422					"already free\n", pool->name,
423					(unsigned long long)dma);
424			else
425				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
426					"already free\n", pool->name,
427					(unsigned long long)dma);
428			return;
429		}
430	}
431	memset(vaddr, POOL_POISON_FREED, pool->size);
432#endif
433
434	page->in_use--;
435	*(int *)vaddr = page->offset;
436	page->offset = offset;
437	if (waitqueue_active(&pool->waitq))
438		wake_up_locked(&pool->waitq);
439	/*
440	 * Resist a temptation to do
441	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
442	 * Better have a few empty pages hang around.
443	 */
444	spin_unlock_irqrestore(&pool->lock, flags);
445}
446EXPORT_SYMBOL(dma_pool_free);
447
448/*
449 * Managed DMA pool
450 */
451static void dmam_pool_release(struct device *dev, void *res)
452{
453	struct dma_pool *pool = *(struct dma_pool **)res;
454
455	dma_pool_destroy(pool);
456}
457
458static int dmam_pool_match(struct device *dev, void *res, void *match_data)
459{
460	return *(struct dma_pool **)res == match_data;
461}
462
463/**
464 * dmam_pool_create - Managed dma_pool_create()
465 * @name: name of pool, for diagnostics
466 * @dev: device that will be doing the DMA
467 * @size: size of the blocks in this pool.
468 * @align: alignment requirement for blocks; must be a power of two
469 * @allocation: returned blocks won't cross this boundary (or zero)
470 *
471 * Managed dma_pool_create().  DMA pool created with this function is
472 * automatically destroyed on driver detach.
473 */
474struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
475				  size_t size, size_t align, size_t allocation)
476{
477	struct dma_pool **ptr, *pool;
478
479	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
480	if (!ptr)
481		return NULL;
482
483	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
484	if (pool)
485		devres_add(dev, ptr);
486	else
487		devres_free(ptr);
488
489	return pool;
490}
491EXPORT_SYMBOL(dmam_pool_create);
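A sketch, assuming a hypothetical probe() routine, of how the managed variant removes the need for explicit cleanup:

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>

static int example_probe(struct device *dev)
{
	struct dma_pool *pool;

	/* 64-byte blocks, 8-byte aligned, no boundary restriction */
	pool = dmam_pool_create("example", dev, 64, 8, 0);
	if (!pool)
		return -ENOMEM;

	/* no matching destroy needed: devres frees the pool on detach */
	return 0;
}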
492
493/**
494 * dmam_pool_destroy - Managed dma_pool_destroy()
495 * @pool: dma pool that will be destroyed
496 *
497 * Managed dma_pool_destroy().
498 */
499void dmam_pool_destroy(struct dma_pool *pool)
500{
501	struct device *dev = pool->dev;
502
503	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
504	dma_pool_destroy(pool);
505}
506EXPORT_SYMBOL(dmam_pool_destroy);
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * DMA Pool allocator
  4 *
  5 * Copyright 2001 David Brownell
  6 * Copyright 2007 Intel Corporation
  7 *   Author: Matthew Wilcox <willy@linux.intel.com>
  8 *
  9 * This allocator returns small blocks of a given size which are DMA-able by
 10 * the given device.  It uses the dma_alloc_coherent page allocator to get
 11 * new pages, then splits them up into blocks of the required size.
 12 * Many older drivers still have their own code to do this.
 13 *
 14 * The current design of this allocator is fairly simple.  The pool is
 15 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 16 * allocated pages.  Each page in the page_list is split into blocks of at
 17 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 18 * list of free blocks across all pages.  Used blocks aren't tracked, but we
 19 * keep a count of how many are currently allocated from each page.
 20 */
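A simplified sketch (not the kernel code) of the intrusive free list this comment describes: every free block stores the pointer to the next free block in its own first bytes, mirroring pool_block_pop()/pool_block_push() further down.

struct example_block {
	struct example_block *next;
};

static struct example_block *example_free_list;

static void example_push(void *block)
{
	struct example_block *b = block;

	/* LIFO: the block just freed becomes the next one handed out */
	b->next = example_free_list;
	example_free_list = b;
}

static void *example_pop(void)
{
	struct example_block *b = example_free_list;

	if (b)
		example_free_list = b->next;
	return b;
}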
 21
 22#include <linux/device.h>
 23#include <linux/dma-mapping.h>
 24#include <linux/dmapool.h>
 25#include <linux/kernel.h>
 26#include <linux/list.h>
 27#include <linux/export.h>
 28#include <linux/mutex.h>
 29#include <linux/poison.h>
 30#include <linux/sched.h>
 31#include <linux/sched/mm.h>
 32#include <linux/slab.h>
 33#include <linux/stat.h>
 34#include <linux/spinlock.h>
 35#include <linux/string.h>
 36#include <linux/types.h>
 37#include <linux/wait.h>
 38
 39#ifdef CONFIG_SLUB_DEBUG_ON
 40#define DMAPOOL_DEBUG 1
 41#endif
 42
 43struct dma_block {
 44	struct dma_block *next_block;
 45	dma_addr_t dma;
 46};
 47
 48struct dma_pool {		/* the pool */
 49	struct list_head page_list;
 50	spinlock_t lock;
 51	struct dma_block *next_block;
 52	size_t nr_blocks;
 53	size_t nr_active;
 54	size_t nr_pages;
 55	struct device *dev;
 56	unsigned int size;
 57	unsigned int allocation;
 58	unsigned int boundary;
 59	char name[32];
 60	struct list_head pools;
 61};
 62
 63struct dma_page {		/* cacheable header for 'allocation' bytes */
 64	struct list_head page_list;
 65	void *vaddr;
 66	dma_addr_t dma;
 67};
 68
 69static DEFINE_MUTEX(pools_lock);
 70static DEFINE_MUTEX(pools_reg_lock);
 71
 72static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
 73{
 74	struct dma_pool *pool;
 75	unsigned size;
 76
 77	size = sysfs_emit(buf, "poolinfo - 0.1\n");
 78
 79	mutex_lock(&pools_lock);
 80	list_for_each_entry(pool, &dev->dma_pools, pools) {
 81		/* per-pool info, no real statistics yet */
 82		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2zu\n",
 83				      pool->name, pool->nr_active,
 84				      pool->nr_blocks, pool->size,
 85				      pool->nr_pages);
 86	}
 87	mutex_unlock(&pools_lock);
 88
 89	return size;
 90}
 91
 92static DEVICE_ATTR_RO(pools);
 93
 94#ifdef DMAPOOL_DEBUG
 95static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
 96			     gfp_t mem_flags)
 97{
 98	u8 *data = (void *)block;
 99	int i;
100
101	for (i = sizeof(struct dma_block); i < pool->size; i++) {
102		if (data[i] == POOL_POISON_FREED)
103			continue;
104		dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
105			pool->name, block);
106
107		/*
108		 * Dump the first 4 bytes even if they are not
109		 * POOL_POISON_FREED
110		 */
111		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
112				data, pool->size, 1);
113		break;
114	}
115
116	if (!want_init_on_alloc(mem_flags))
117		memset(block, POOL_POISON_ALLOCATED, pool->size);
118}
119
120static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
121{
122	struct dma_page *page;
123
124	list_for_each_entry(page, &pool->page_list, page_list) {
125		if (dma < page->dma)
126			continue;
127		if ((dma - page->dma) < pool->allocation)
128			return page;
129	}
130	return NULL;
131}
132
133static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
134{
135	struct dma_block *block = pool->next_block;
136	struct dma_page *page;
137
138	page = pool_find_page(pool, dma);
139	if (!page) {
140		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
141			__func__, pool->name, vaddr, &dma);
142		return true;
143	}
144
145	while (block) {
146		if (block != vaddr) {
147			block = block->next_block;
148			continue;
149		}
150		dev_err(pool->dev, "%s %s, dma %pad already free\n",
151			__func__, pool->name, &dma);
152		return true;
153	}
154
155	memset(vaddr, POOL_POISON_FREED, pool->size);
156	return false;
157}
158
159static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
160{
161	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
162}
163#else
164static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
165			     gfp_t mem_flags)
166{
167}
168
169static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
170{
171	if (want_init_on_free())
172		memset(vaddr, 0, pool->size);
173	return false;
174}
175
176static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
177{
178}
179#endif
180
181static struct dma_block *pool_block_pop(struct dma_pool *pool)
182{
183	struct dma_block *block = pool->next_block;
184
185	if (block) {
186		pool->next_block = block->next_block;
187		pool->nr_active++;
188	}
189	return block;
190}
191
192static void pool_block_push(struct dma_pool *pool, struct dma_block *block,
193			    dma_addr_t dma)
194{
195	block->dma = dma;
196	block->next_block = pool->next_block;
197	pool->next_block = block;
198}
199
200
201/**
202 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
203 * @name: name of pool, for diagnostics
204 * @dev: device that will be doing the DMA
205 * @size: size of the blocks in this pool.
206 * @align: alignment requirement for blocks; must be a power of two
207 * @boundary: returned blocks won't cross this power of two boundary
208 * Context: not in_interrupt()
209 *
210 * Given one of these pools, dma_pool_alloc()
211 * may be used to allocate memory.  Such memory will all have "consistent"
212 * DMA mappings, accessible by the device and its driver without using
213 * cache flushing primitives.  The actual size of blocks allocated may be
214 * larger than requested because of alignment.
215 *
216 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
217 * cross that size boundary.  This is useful for devices which have
218 * addressing restrictions on individual DMA transfers, such as not crossing
219 * boundaries of 4KBytes.
220 *
221 * Return: a dma allocation pool with the requested characteristics, or
222 * %NULL if one can't be created.
223 */
224struct dma_pool *dma_pool_create(const char *name, struct device *dev,
225				 size_t size, size_t align, size_t boundary)
226{
227	struct dma_pool *retval;
228	size_t allocation;
229	bool empty;
230
231	if (!dev)
232		return NULL;
233
234	if (align == 0)
235		align = 1;
236	else if (align & (align - 1))
237		return NULL;
238
239	if (size == 0 || size > INT_MAX)
240		return NULL;
241	if (size < sizeof(struct dma_block))
242		size = sizeof(struct dma_block);
243
244	size = ALIGN(size, align);
245	allocation = max_t(size_t, size, PAGE_SIZE);
246
247	if (!boundary)
248		boundary = allocation;
249	else if ((boundary < size) || (boundary & (boundary - 1)))
250		return NULL;
251
252	boundary = min(boundary, allocation);
253
254	retval = kzalloc(sizeof(*retval), GFP_KERNEL);
255	if (!retval)
256		return retval;
257
258	strscpy(retval->name, name, sizeof(retval->name));
259
260	retval->dev = dev;
261
262	INIT_LIST_HEAD(&retval->page_list);
263	spin_lock_init(&retval->lock);
264	retval->size = size;
265	retval->boundary = boundary;
266	retval->allocation = allocation;
267	INIT_LIST_HEAD(&retval->pools);
268
269	/*
270	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
271	 * pools_reg_lock ensures that there is not a race between
272	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
273	 * when the first invocation of dma_pool_create() failed on
274	 * device_create_file() and the second assumes that it has been done (I
275	 * know it is a short window).
276	 */
277	mutex_lock(&pools_reg_lock);
278	mutex_lock(&pools_lock);
279	empty = list_empty(&dev->dma_pools);
280	list_add(&retval->pools, &dev->dma_pools);
281	mutex_unlock(&pools_lock);
282	if (empty) {
283		int err;
284
285		err = device_create_file(dev, &dev_attr_pools);
286		if (err) {
287			mutex_lock(&pools_lock);
288			list_del(&retval->pools);
289			mutex_unlock(&pools_lock);
290			mutex_unlock(&pools_reg_lock);
291			kfree(retval);
292			return NULL;
293		}
294	}
295	mutex_unlock(&pools_reg_lock);
296	return retval;
297}
298EXPORT_SYMBOL(dma_pool_create);
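Hypothetical numbers, chosen only for illustration, walking through pool_initialise_page() below:

/*
 * Example: allocation = 4096, size = 96, boundary = 1024.
 *
 * Within each 1024-byte segment the loop carves blocks at offsets
 * 0, 96, ..., 864 (10 blocks); the next block would end at 1056 and
 * cross the boundary, so the cursor jumps to 1024 and 64 bytes per
 * segment stay unused.  A 4096-byte page therefore yields 4 * 10 = 40
 * blocks, none of which straddles a 1024-byte boundary.
 */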
299
300static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
301{
302	unsigned int next_boundary = pool->boundary, offset = 0;
303	struct dma_block *block, *first = NULL, *last = NULL;
304
305	pool_init_page(pool, page);
306	while (offset + pool->size <= pool->allocation) {
307		if (offset + pool->size > next_boundary) {
308			offset = next_boundary;
309			next_boundary += pool->boundary;
310			continue;
311		}
312
313		block = page->vaddr + offset;
314		block->dma = page->dma + offset;
315		block->next_block = NULL;
316
317		if (last)
318			last->next_block = block;
319		else
320			first = block;
321		last = block;
322
323		offset += pool->size;
324		pool->nr_blocks++;
325	}
326
327	last->next_block = pool->next_block;
328	pool->next_block = first;
329
330	list_add(&page->page_list, &pool->page_list);
331	pool->nr_pages++;
332}
333
334static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
335{
336	struct dma_page *page;
337
338	page = kmalloc(sizeof(*page), mem_flags);
339	if (!page)
340		return NULL;
341
342	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
343					 &page->dma, mem_flags);
344	if (!page->vaddr) {
345		kfree(page);
346		return NULL;
347	}
348
349	return page;
350}
351
352/**
353 * dma_pool_destroy - destroys a pool of dma memory blocks.
354 * @pool: dma pool that will be destroyed
355 * Context: !in_interrupt()
356 *
357 * Caller guarantees that no more memory from the pool is in use,
358 * and that nothing will try to use the pool after this call.
359 */
360void dma_pool_destroy(struct dma_pool *pool)
361{
362	struct dma_page *page, *tmp;
363	bool empty, busy = false;
364
365	if (unlikely(!pool))
366		return;
367
368	mutex_lock(&pools_reg_lock);
369	mutex_lock(&pools_lock);
370	list_del(&pool->pools);
371	empty = list_empty(&pool->dev->dma_pools);
372	mutex_unlock(&pools_lock);
373	if (empty)
374		device_remove_file(pool->dev, &dev_attr_pools);
375	mutex_unlock(&pools_reg_lock);
376
377	if (pool->nr_active) {
378		dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
379		busy = true;
380	}
381
382	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
383		if (!busy)
384			dma_free_coherent(pool->dev, pool->allocation,
385					  page->vaddr, page->dma);
386		list_del(&page->page_list);
387		kfree(page);
388	}
389
390	kfree(pool);
391}
392EXPORT_SYMBOL(dma_pool_destroy);
393
394/**
395 * dma_pool_alloc - get a block of consistent memory
396 * @pool: dma pool that will produce the block
397 * @mem_flags: GFP_* bitmask
398 * @handle: pointer to dma address of block
399 *
400 * Return: the kernel virtual address of a currently unused block,
401 * and reports its dma address through the handle.
402 * If such a memory block can't be allocated, %NULL is returned.
403 */
404void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
405		     dma_addr_t *handle)
406{
407	struct dma_block *block;
408	struct dma_page *page;
409	unsigned long flags;
410
411	might_alloc(mem_flags);
412
413	spin_lock_irqsave(&pool->lock, flags);
414	block = pool_block_pop(pool);
415	if (!block) {
416		/*
417		 * pool_alloc_page() might sleep, so temporarily drop
418		 * &pool->lock
419		 */
420		spin_unlock_irqrestore(&pool->lock, flags);
421
422		page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
423		if (!page)
424			return NULL;
425
426		spin_lock_irqsave(&pool->lock, flags);
427		pool_initialise_page(pool, page);
428		block = pool_block_pop(pool);
429	}
430	spin_unlock_irqrestore(&pool->lock, flags);
431
432	*handle = block->dma;
433	pool_check_block(pool, block, mem_flags);
434	if (want_init_on_alloc(mem_flags))
435		memset(block, 0, pool->size);
436
437	return block;
438}
439EXPORT_SYMBOL(dma_pool_alloc);
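Callers that need zeroed blocks can use the dma_pool_zalloc() wrapper from <linux/dmapool.h>, which ORs in __GFP_ZERO so the want_init_on_alloc() branch above clears the block; a minimal sketch with a hypothetical helper name:

#include <linux/dmapool.h>
#include <linux/gfp.h>

static void *example_get_cleared(struct dma_pool *pool, dma_addr_t *dma)
{
	/* __GFP_ZERO is added by dma_pool_zalloc(), so the block comes back zeroed */
	return dma_pool_zalloc(pool, GFP_KERNEL, dma);
}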
440
441/**
442 * dma_pool_free - put block back into dma pool
443 * @pool: the dma pool holding the block
444 * @vaddr: virtual address of block
445 * @dma: dma address of block
446 *
447 * Caller promises neither device nor driver will again touch this block
448 * unless it is first re-allocated.
449 */
450void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
451{
452	struct dma_block *block = vaddr;
453	unsigned long flags;
454
455	spin_lock_irqsave(&pool->lock, flags);
456	if (!pool_block_err(pool, vaddr, dma)) {
457		pool_block_push(pool, block, dma);
458		pool->nr_active--;
459	}
460	spin_unlock_irqrestore(&pool->lock, flags);
461}
462EXPORT_SYMBOL(dma_pool_free);
463
464/*
465 * Managed DMA pool
466 */
467static void dmam_pool_release(struct device *dev, void *res)
468{
469	struct dma_pool *pool = *(struct dma_pool **)res;
470
471	dma_pool_destroy(pool);
472}
473
474static int dmam_pool_match(struct device *dev, void *res, void *match_data)
475{
476	return *(struct dma_pool **)res == match_data;
477}
478
479/**
480 * dmam_pool_create - Managed dma_pool_create()
481 * @name: name of pool, for diagnostics
482 * @dev: device that will be doing the DMA
483 * @size: size of the blocks in this pool.
484 * @align: alignment requirement for blocks; must be a power of two
485 * @allocation: returned blocks won't cross this boundary (or zero)
486 *
487 * Managed dma_pool_create().  DMA pool created with this function is
488 * automatically destroyed on driver detach.
489 *
490 * Return: a managed dma allocation pool with the requested
491 * characteristics, or %NULL if one can't be created.
492 */
493struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
494				  size_t size, size_t align, size_t allocation)
495{
496	struct dma_pool **ptr, *pool;
497
498	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
499	if (!ptr)
500		return NULL;
501
502	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
503	if (pool)
504		devres_add(dev, ptr);
505	else
506		devres_free(ptr);
507
508	return pool;
509}
510EXPORT_SYMBOL(dmam_pool_create);
511
512/**
513 * dmam_pool_destroy - Managed dma_pool_destroy()
514 * @pool: dma pool that will be destroyed
515 *
516 * Managed dma_pool_destroy().
517 */
518void dmam_pool_destroy(struct dma_pool *pool)
519{
520	struct device *dev = pool->dev;
521
522	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
523}
524EXPORT_SYMBOL(dmam_pool_destroy);