mm/dmapool.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * DMA Pool allocator
  4 *
  5 * Copyright 2001 David Brownell
  6 * Copyright 2007 Intel Corporation
  7 *   Author: Matthew Wilcox <willy@linux.intel.com>
  8 *
  9 * This allocator returns small blocks of a given size which are DMA-able by
 10 * the given device.  It uses the dma_alloc_coherent page allocator to get
 11 * new pages, then splits them up into blocks of the required size.
 12 * Many older drivers still have their own code to do this.
 13 *
 14 * The current design of this allocator is fairly simple.  The pool is
 15 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 16 * allocated pages.  Each page in the page_list is split into blocks of at
 17 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 18 * list of free blocks within the page.  Used blocks aren't tracked, but we
 19 * keep a count of how many are currently allocated from each page.
 20 */
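/*
 * Illustrative layout sketch, not part of mm/dmapool.c itself,
 * summarising the structures described above:
 *
 *	struct dma_pool                  struct dma_page (one per page)
 *	  .page_list  ----------------->   .page_list -> next dma_page
 *	  .size / .allocation / .boundary  .vaddr / .dma (CPU / bus views)
 *	  .lock                            .offset -> first free block
 *	                                   .in_use = blocks handed out
 */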
 21
 22#include <linux/device.h>
 23#include <linux/dma-mapping.h>
 24#include <linux/dmapool.h>
 25#include <linux/kernel.h>
 26#include <linux/list.h>
 27#include <linux/export.h>
 28#include <linux/mutex.h>
 29#include <linux/poison.h>
 30#include <linux/sched.h>
 31#include <linux/slab.h>
 32#include <linux/stat.h>
 33#include <linux/spinlock.h>
 34#include <linux/string.h>
 35#include <linux/types.h>
 36#include <linux/wait.h>
 37
 38#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
 39#define DMAPOOL_DEBUG 1
 40#endif
 41
 42struct dma_pool {		/* the pool */
 43	struct list_head page_list;
 44	spinlock_t lock;
 45	size_t size;
 46	struct device *dev;
 47	size_t allocation;
 48	size_t boundary;
 49	char name[32];
 50	struct list_head pools;
 51};
 52
 53struct dma_page {		/* cacheable header for 'allocation' bytes */
 54	struct list_head page_list;
 55	void *vaddr;
 56	dma_addr_t dma;
 57	unsigned int in_use;
 58	unsigned int offset;
 59};
 60
 61static DEFINE_MUTEX(pools_lock);
 62static DEFINE_MUTEX(pools_reg_lock);
 63
 64static ssize_t
 65show_pools(struct device *dev, struct device_attribute *attr, char *buf)
 66{
 67	unsigned temp;
 68	unsigned size;
 69	char *next;
 70	struct dma_page *page;
 71	struct dma_pool *pool;
 72
 73	next = buf;
 74	size = PAGE_SIZE;
 75
 76	temp = scnprintf(next, size, "poolinfo - 0.1\n");
 77	size -= temp;
 78	next += temp;
 79
 80	mutex_lock(&pools_lock);
 81	list_for_each_entry(pool, &dev->dma_pools, pools) {
 82		unsigned pages = 0;
 83		unsigned blocks = 0;
 84
 85		spin_lock_irq(&pool->lock);
 86		list_for_each_entry(page, &pool->page_list, page_list) {
 87			pages++;
 88			blocks += page->in_use;
 89		}
 90		spin_unlock_irq(&pool->lock);
 91
 92		/* per-pool info, no real statistics yet */
 93		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
 94				 pool->name, blocks,
 95				 pages * (pool->allocation / pool->size),
 96				 pool->size, pages);
 97		size -= temp;
 98		next += temp;
 99	}
100	mutex_unlock(&pools_lock);
101
102	return PAGE_SIZE - size;
103}
104
105static DEVICE_ATTR(pools, 0444, show_pools, NULL);
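/*
 * Illustrative sample of the sysfs file produced by show_pools() above.
 * The pool name and counts are hypothetical; the columns follow the
 * scnprintf() format string: name, blocks in use, total blocks
 * (pages * allocation / size), block size, pages:
 *
 *	poolinfo - 0.1
 *	buffer-2048         3    8 2048  4
 */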
106
107/**
108 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
109 * @name: name of pool, for diagnostics
110 * @dev: device that will be doing the DMA
111 * @size: size of the blocks in this pool.
112 * @align: alignment requirement for blocks; must be a power of two
113 * @boundary: returned blocks won't cross this power of two boundary
114 * Context: not in_interrupt()
115 *
116 * Given one of these pools, dma_pool_alloc()
117 * may be used to allocate memory.  Such memory will all have "consistent"
118 * DMA mappings, accessible by the device and its driver without using
119 * cache flushing primitives.  The actual size of blocks allocated may be
120 * larger than requested because of alignment.
121 *
122 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
123 * cross that size boundary.  This is useful for devices which have
124 * addressing restrictions on individual DMA transfers, such as not crossing
125 * boundaries of 4KBytes.
126 *
127 * Return: a dma allocation pool with the requested characteristics, or
128 * %NULL if one can't be created.
129 */
130struct dma_pool *dma_pool_create(const char *name, struct device *dev,
131				 size_t size, size_t align, size_t boundary)
132{
133	struct dma_pool *retval;
134	size_t allocation;
135	bool empty = false;
136
137	if (align == 0)
138		align = 1;
139	else if (align & (align - 1))
140		return NULL;
141
142	if (size == 0)
143		return NULL;
144	else if (size < 4)
145		size = 4;
146
147	if ((size % align) != 0)
148		size = ALIGN(size, align);
149
150	allocation = max_t(size_t, size, PAGE_SIZE);
151
152	if (!boundary)
153		boundary = allocation;
154	else if ((boundary < size) || (boundary & (boundary - 1)))
155		return NULL;
156
157	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
158	if (!retval)
159		return retval;
160
161	strlcpy(retval->name, name, sizeof(retval->name));
162
163	retval->dev = dev;
164
165	INIT_LIST_HEAD(&retval->page_list);
166	spin_lock_init(&retval->lock);
167	retval->size = size;
168	retval->boundary = boundary;
169	retval->allocation = allocation;
170
171	INIT_LIST_HEAD(&retval->pools);
172
173	/*
174	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
175	 * pools_reg_lock ensures that there is not a race between
176	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
177	 * when the first invocation of dma_pool_create() failed on
178	 * device_create_file() and the second assumes that it has been done (I
179	 * know it is a short window).
180	 */
181	mutex_lock(&pools_reg_lock);
182	mutex_lock(&pools_lock);
183	if (list_empty(&dev->dma_pools))
184		empty = true;
185	list_add(&retval->pools, &dev->dma_pools);
186	mutex_unlock(&pools_lock);
187	if (empty) {
188		int err;
189
190		err = device_create_file(dev, &dev_attr_pools);
191		if (err) {
192			mutex_lock(&pools_lock);
193			list_del(&retval->pools);
194			mutex_unlock(&pools_lock);
195			mutex_unlock(&pools_reg_lock);
196			kfree(retval);
197			return NULL;
198		}
199	}
200	mutex_unlock(&pools_reg_lock);
201	return retval;
202}
203EXPORT_SYMBOL(dma_pool_create);
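/*
 * Illustrative usage sketch, not part of this file.  A driver usually
 * creates one pool per fixed-size DMA object type; "mydev" and the
 * sizes below are hypothetical.  Blocks are 64 bytes, 8-byte aligned,
 * and never cross a 4096-byte boundary:
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("mydev-descs", &mydev->dev, 64, 8, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	dma_pool_destroy(pool);		(only after every block is freed)
 */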
204
205static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
206{
207	unsigned int offset = 0;
208	unsigned int next_boundary = pool->boundary;
209
210	do {
211		unsigned int next = offset + pool->size;
212		if (unlikely((next + pool->size) >= next_boundary)) {
213			next = next_boundary;
214			next_boundary += pool->boundary;
215		}
216		*(int *)(page->vaddr + offset) = next;
217		offset = next;
218	} while (offset < pool->allocation);
219}
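/*
 * Worked example of the loop above, with illustrative numbers: for
 * size = 64, boundary = 256 and allocation = 4096, the chain written
 * into the page reads
 *
 *	offset:    0    64   128   256   320   384  ...
 *	stored:   64   128   256   320   384   512  ...
 *
 * Offset 192 is never linked in: the conservative ">=" test drops the
 * last slot before each boundary, even though a block there would only
 * touch the boundary rather than cross it.
 */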
220
221static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
222{
223	struct dma_page *page;
224
225	page = kmalloc(sizeof(*page), mem_flags);
226	if (!page)
227		return NULL;
228	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
229					 &page->dma, mem_flags);
230	if (page->vaddr) {
231#ifdef	DMAPOOL_DEBUG
232		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
233#endif
234		pool_initialise_page(pool, page);
235		page->in_use = 0;
236		page->offset = 0;
237	} else {
238		kfree(page);
239		page = NULL;
240	}
241	return page;
242}
243
244static inline bool is_page_busy(struct dma_page *page)
245{
246	return page->in_use != 0;
247}
248
249static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
250{
251	dma_addr_t dma = page->dma;
252
253#ifdef	DMAPOOL_DEBUG
254	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
255#endif
256	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
257	list_del(&page->page_list);
258	kfree(page);
259}
260
261/**
262 * dma_pool_destroy - destroys a pool of dma memory blocks.
263 * @pool: dma pool that will be destroyed
264 * Context: !in_interrupt()
265 *
266 * Caller guarantees that no more memory from the pool is in use,
267 * and that nothing will try to use the pool after this call.
268 */
269void dma_pool_destroy(struct dma_pool *pool)
270{
271	bool empty = false;
272
273	if (unlikely(!pool))
274		return;
275
276	mutex_lock(&pools_reg_lock);
277	mutex_lock(&pools_lock);
278	list_del(&pool->pools);
279	if (pool->dev && list_empty(&pool->dev->dma_pools))
280		empty = true;
281	mutex_unlock(&pools_lock);
282	if (empty)
283		device_remove_file(pool->dev, &dev_attr_pools);
284	mutex_unlock(&pools_reg_lock);
285
286	while (!list_empty(&pool->page_list)) {
287		struct dma_page *page;
288		page = list_entry(pool->page_list.next,
289				  struct dma_page, page_list);
290		if (is_page_busy(page)) {
291			if (pool->dev)
292				dev_err(pool->dev,
293					"dma_pool_destroy %s, %p busy\n",
294					pool->name, page->vaddr);
295			else
296				pr_err("dma_pool_destroy %s, %p busy\n",
297				       pool->name, page->vaddr);
298			/* leak the still-in-use consistent memory */
299			list_del(&page->page_list);
300			kfree(page);
301		} else
302			pool_free_page(pool, page);
303	}
304
305	kfree(pool);
306}
307EXPORT_SYMBOL(dma_pool_destroy);
308
309/**
310 * dma_pool_alloc - get a block of consistent memory
311 * @pool: dma pool that will produce the block
312 * @mem_flags: GFP_* bitmask
313 * @handle: pointer to dma address of block
314 *
315 * Return: the kernel virtual address of a currently unused block,
316 * and reports its dma address through the handle.
317 * If such a memory block can't be allocated, %NULL is returned.
318 */
319void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
320		     dma_addr_t *handle)
321{
322	unsigned long flags;
323	struct dma_page *page;
324	size_t offset;
325	void *retval;
326
327	might_sleep_if(gfpflags_allow_blocking(mem_flags));
328
329	spin_lock_irqsave(&pool->lock, flags);
330	list_for_each_entry(page, &pool->page_list, page_list) {
331		if (page->offset < pool->allocation)
332			goto ready;
333	}
334
335	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
336	spin_unlock_irqrestore(&pool->lock, flags);
337
338	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
339	if (!page)
340		return NULL;
341
342	spin_lock_irqsave(&pool->lock, flags);
343
344	list_add(&page->page_list, &pool->page_list);
345 ready:
346	page->in_use++;
347	offset = page->offset;
348	page->offset = *(int *)(page->vaddr + offset);
349	retval = offset + page->vaddr;
350	*handle = offset + page->dma;
351#ifdef	DMAPOOL_DEBUG
352	{
353		int i;
354		u8 *data = retval;
355		/* page->offset is stored in first 4 bytes */
356		for (i = sizeof(page->offset); i < pool->size; i++) {
357			if (data[i] == POOL_POISON_FREED)
358				continue;
359			if (pool->dev)
360				dev_err(pool->dev,
361					"dma_pool_alloc %s, %p (corrupted)\n",
362					pool->name, retval);
363			else
364				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
365					pool->name, retval);
366
367			/*
368			 * Dump the first 4 bytes even if they are not
369			 * POOL_POISON_FREED
370			 */
371			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
372					data, pool->size, 1);
373			break;
374		}
375	}
376	if (!(mem_flags & __GFP_ZERO))
377		memset(retval, POOL_POISON_ALLOCATED, pool->size);
378#endif
379	spin_unlock_irqrestore(&pool->lock, flags);
380
381	if (want_init_on_alloc(mem_flags))
382		memset(retval, 0, pool->size);
383
384	return retval;
385}
386EXPORT_SYMBOL(dma_pool_alloc);
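/*
 * Illustrative alloc/free sketch, not part of this file; "pool" is a
 * pool created as in the dma_pool_create() example above:
 *
 *	dma_addr_t dma;
 *	void *cpu;
 *
 *	cpu = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!cpu)
 *		return -ENOMEM;
 *	(program the device with "dma"; the driver reads/writes "cpu")
 *	dma_pool_free(pool, cpu, dma);
 */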
387
388static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
389{
390	struct dma_page *page;
391
392	list_for_each_entry(page, &pool->page_list, page_list) {
393		if (dma < page->dma)
394			continue;
395		if ((dma - page->dma) < pool->allocation)
396			return page;
397	}
398	return NULL;
399}
400
401/**
402 * dma_pool_free - put block back into dma pool
403 * @pool: the dma pool holding the block
404 * @vaddr: virtual address of block
405 * @dma: dma address of block
406 *
407 * Caller promises neither device nor driver will again touch this block
408 * unless it is first re-allocated.
409 */
410void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
411{
412	struct dma_page *page;
413	unsigned long flags;
414	unsigned int offset;
415
416	spin_lock_irqsave(&pool->lock, flags);
417	page = pool_find_page(pool, dma);
418	if (!page) {
419		spin_unlock_irqrestore(&pool->lock, flags);
420		if (pool->dev)
421			dev_err(pool->dev,
422				"dma_pool_free %s, %p/%lx (bad dma)\n",
423				pool->name, vaddr, (unsigned long)dma);
424		else
425			pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
426			       pool->name, vaddr, (unsigned long)dma);
427		return;
428	}
429
430	offset = vaddr - page->vaddr;
431	if (want_init_on_free())
432		memset(vaddr, 0, pool->size);
433#ifdef	DMAPOOL_DEBUG
434	if ((dma - page->dma) != offset) {
435		spin_unlock_irqrestore(&pool->lock, flags);
436		if (pool->dev)
437			dev_err(pool->dev,
438				"dma_pool_free %s, %p (bad vaddr)/%pad\n",
439				pool->name, vaddr, &dma);
440		else
441			pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
442			       pool->name, vaddr, &dma);
443		return;
444	}
445	{
446		unsigned int chain = page->offset;
447		while (chain < pool->allocation) {
448			if (chain != offset) {
449				chain = *(int *)(page->vaddr + chain);
450				continue;
451			}
452			spin_unlock_irqrestore(&pool->lock, flags);
453			if (pool->dev)
454				dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
455					pool->name, &dma);
456			else
457				pr_err("dma_pool_free %s, dma %pad already free\n",
458				       pool->name, &dma);
459			return;
460		}
461	}
462	memset(vaddr, POOL_POISON_FREED, pool->size);
463#endif
464
465	page->in_use--;
466	*(int *)vaddr = page->offset;
467	page->offset = offset;
468	/*
469	 * Resist a temptation to do
470	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
471	 * Better have a few empty pages hang around.
472	 */
473	spin_unlock_irqrestore(&pool->lock, flags);
474}
475EXPORT_SYMBOL(dma_pool_free);
476
477/*
478 * Managed DMA pool
479 */
480static void dmam_pool_release(struct device *dev, void *res)
481{
482	struct dma_pool *pool = *(struct dma_pool **)res;
483
484	dma_pool_destroy(pool);
485}
486
487static int dmam_pool_match(struct device *dev, void *res, void *match_data)
488{
489	return *(struct dma_pool **)res == match_data;
490}
491
492/**
493 * dmam_pool_create - Managed dma_pool_create()
494 * @name: name of pool, for diagnostics
495 * @dev: device that will be doing the DMA
496 * @size: size of the blocks in this pool.
497 * @align: alignment requirement for blocks; must be a power of two
498 * @allocation: returned blocks won't cross this boundary (or zero)
499 *
500 * Managed dma_pool_create().  DMA pool created with this function is
501 * automatically destroyed on driver detach.
502 *
503 * Return: a managed dma allocation pool with the requested
504 * characteristics, or %NULL if one can't be created.
505 */
506struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
507				  size_t size, size_t align, size_t allocation)
508{
509	struct dma_pool **ptr, *pool;
510
511	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
512	if (!ptr)
513		return NULL;
514
515	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
516	if (pool)
517		devres_add(dev, ptr);
518	else
519		devres_free(ptr);
520
521	return pool;
522}
523EXPORT_SYMBOL(dmam_pool_create);
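/*
 * Illustrative sketch, not part of this file; "my_probe" and "pdev"
 * are hypothetical.  Because the pool is devres-managed, no error-path
 * or remove-path dma_pool_destroy() call is needed:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("my-descs", &pdev->dev, 64, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		return 0;	(pool is destroyed on driver detach)
 *	}
 */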
524
525/**
526 * dmam_pool_destroy - Managed dma_pool_destroy()
527 * @pool: dma pool that will be destroyed
528 *
529 * Managed dma_pool_destroy().
530 */
531void dmam_pool_destroy(struct dma_pool *pool)
532{
533	struct device *dev = pool->dev;
534
535	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
536}
537EXPORT_SYMBOL(dmam_pool_destroy);
mm/dmapool.c (v3.15)
  1/*
  2 * DMA Pool allocator
  3 *
  4 * Copyright 2001 David Brownell
  5 * Copyright 2007 Intel Corporation
  6 *   Author: Matthew Wilcox <willy@linux.intel.com>
  7 *
  8 * This software may be redistributed and/or modified under the terms of
  9 * the GNU General Public License ("GPL") version 2 as published by the
 10 * Free Software Foundation.
 11 *
 12 * This allocator returns small blocks of a given size which are DMA-able by
 13 * the given device.  It uses the dma_alloc_coherent page allocator to get
 14 * new pages, then splits them up into blocks of the required size.
 15 * Many older drivers still have their own code to do this.
 16 *
 17 * The current design of this allocator is fairly simple.  The pool is
 18 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 19 * allocated pages.  Each page in the page_list is split into blocks of at
 20 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 21 * list of free blocks within the page.  Used blocks aren't tracked, but we
 22 * keep a count of how many are currently allocated from each page.
 23 */
 24
 25#include <linux/device.h>
 26#include <linux/dma-mapping.h>
 27#include <linux/dmapool.h>
 28#include <linux/kernel.h>
 29#include <linux/list.h>
 30#include <linux/export.h>
 31#include <linux/mutex.h>
 32#include <linux/poison.h>
 33#include <linux/sched.h>
 34#include <linux/slab.h>
 35#include <linux/stat.h>
 36#include <linux/spinlock.h>
 37#include <linux/string.h>
 38#include <linux/types.h>
 39#include <linux/wait.h>
 40
 41#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
 42#define DMAPOOL_DEBUG 1
 43#endif
 44
 45struct dma_pool {		/* the pool */
 46	struct list_head page_list;
 47	spinlock_t lock;
 48	size_t size;
 49	struct device *dev;
 50	size_t allocation;
 51	size_t boundary;
 52	char name[32];
 53	struct list_head pools;
 54};
 55
 56struct dma_page {		/* cacheable header for 'allocation' bytes */
 57	struct list_head page_list;
 58	void *vaddr;
 59	dma_addr_t dma;
 60	unsigned int in_use;
 61	unsigned int offset;
 62};
 63
 64static DEFINE_MUTEX(pools_lock);
 65
 66static ssize_t
 67show_pools(struct device *dev, struct device_attribute *attr, char *buf)
 68{
 69	unsigned temp;
 70	unsigned size;
 71	char *next;
 72	struct dma_page *page;
 73	struct dma_pool *pool;
 74
 75	next = buf;
 76	size = PAGE_SIZE;
 77
 78	temp = scnprintf(next, size, "poolinfo - 0.1\n");
 79	size -= temp;
 80	next += temp;
 81
 82	mutex_lock(&pools_lock);
 83	list_for_each_entry(pool, &dev->dma_pools, pools) {
 84		unsigned pages = 0;
 85		unsigned blocks = 0;
 86
 87		spin_lock_irq(&pool->lock);
 88		list_for_each_entry(page, &pool->page_list, page_list) {
 89			pages++;
 90			blocks += page->in_use;
 91		}
 92		spin_unlock_irq(&pool->lock);
 93
 94		/* per-pool info, no real statistics yet */
 95		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
 96				 pool->name, blocks,
 97				 pages * (pool->allocation / pool->size),
 98				 pool->size, pages);
 99		size -= temp;
100		next += temp;
101	}
102	mutex_unlock(&pools_lock);
103
104	return PAGE_SIZE - size;
105}
106
107static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
108
109/**
110 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
111 * @name: name of pool, for diagnostics
112 * @dev: device that will be doing the DMA
113 * @size: size of the blocks in this pool.
114 * @align: alignment requirement for blocks; must be a power of two
115 * @boundary: returned blocks won't cross this power of two boundary
116 * Context: !in_interrupt()
117 *
118 * Returns a dma allocation pool with the requested characteristics, or
119 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
120 * may be used to allocate memory.  Such memory will all have "consistent"
121 * DMA mappings, accessible by the device and its driver without using
122 * cache flushing primitives.  The actual size of blocks allocated may be
123 * larger than requested because of alignment.
124 *
125 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
126 * cross that size boundary.  This is useful for devices which have
127 * addressing restrictions on individual DMA transfers, such as not crossing
128 * boundaries of 4KBytes.
129 */
130struct dma_pool *dma_pool_create(const char *name, struct device *dev,
131				 size_t size, size_t align, size_t boundary)
132{
133	struct dma_pool *retval;
134	size_t allocation;
135
136	if (align == 0) {
137		align = 1;
138	} else if (align & (align - 1)) {
139		return NULL;
140	}
141
142	if (size == 0) {
143		return NULL;
144	} else if (size < 4) {
145		size = 4;
146	}
147
148	if ((size % align) != 0)
149		size = ALIGN(size, align);
150
151	allocation = max_t(size_t, size, PAGE_SIZE);
152
153	if (!boundary) {
154		boundary = allocation;
155	} else if ((boundary < size) || (boundary & (boundary - 1))) {
156		return NULL;
157	}
158
159	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
160	if (!retval)
161		return retval;
162
163	strlcpy(retval->name, name, sizeof(retval->name));
164
165	retval->dev = dev;
166
167	INIT_LIST_HEAD(&retval->page_list);
168	spin_lock_init(&retval->lock);
169	retval->size = size;
170	retval->boundary = boundary;
171	retval->allocation = allocation;
172
173	if (dev) {
174		int ret;
175
176		mutex_lock(&pools_lock);
177		if (list_empty(&dev->dma_pools))
178			ret = device_create_file(dev, &dev_attr_pools);
179		else
180			ret = 0;
181		/* note:  not currently insisting "name" be unique */
182		if (!ret)
183			list_add(&retval->pools, &dev->dma_pools);
184		else {
185			kfree(retval);
186			retval = NULL;
187		}
188		mutex_unlock(&pools_lock);
189	} else
190		INIT_LIST_HEAD(&retval->pools);
191
192	return retval;
193}
194EXPORT_SYMBOL(dma_pool_create);
195
196static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
197{
198	unsigned int offset = 0;
199	unsigned int next_boundary = pool->boundary;
200
201	do {
202		unsigned int next = offset + pool->size;
203		if (unlikely((next + pool->size) >= next_boundary)) {
204			next = next_boundary;
205			next_boundary += pool->boundary;
206		}
207		*(int *)(page->vaddr + offset) = next;
208		offset = next;
209	} while (offset < pool->allocation);
210}
211
212static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
213{
214	struct dma_page *page;
215
216	page = kmalloc(sizeof(*page), mem_flags);
217	if (!page)
218		return NULL;
219	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
220					 &page->dma, mem_flags);
221	if (page->vaddr) {
222#ifdef	DMAPOOL_DEBUG
223		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
224#endif
225		pool_initialise_page(pool, page);
226		page->in_use = 0;
227		page->offset = 0;
228	} else {
229		kfree(page);
230		page = NULL;
231	}
232	return page;
233}
234
235static inline int is_page_busy(struct dma_page *page)
236{
237	return page->in_use != 0;
238}
239
240static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
241{
242	dma_addr_t dma = page->dma;
243
244#ifdef	DMAPOOL_DEBUG
245	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
246#endif
247	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
248	list_del(&page->page_list);
249	kfree(page);
250}
251
252/**
253 * dma_pool_destroy - destroys a pool of dma memory blocks.
254 * @pool: dma pool that will be destroyed
255 * Context: !in_interrupt()
256 *
257 * Caller guarantees that no more memory from the pool is in use,
258 * and that nothing will try to use the pool after this call.
259 */
260void dma_pool_destroy(struct dma_pool *pool)
261{
262	mutex_lock(&pools_lock);
263	list_del(&pool->pools);
264	if (pool->dev && list_empty(&pool->dev->dma_pools))
265		device_remove_file(pool->dev, &dev_attr_pools);
266	mutex_unlock(&pools_lock);
267
268	while (!list_empty(&pool->page_list)) {
269		struct dma_page *page;
270		page = list_entry(pool->page_list.next,
271				  struct dma_page, page_list);
272		if (is_page_busy(page)) {
273			if (pool->dev)
274				dev_err(pool->dev,
275					"dma_pool_destroy %s, %p busy\n",
276					pool->name, page->vaddr);
277			else
278				printk(KERN_ERR
279				       "dma_pool_destroy %s, %p busy\n",
280				       pool->name, page->vaddr);
281			/* leak the still-in-use consistent memory */
282			list_del(&page->page_list);
283			kfree(page);
284		} else
285			pool_free_page(pool, page);
286	}
287
288	kfree(pool);
289}
290EXPORT_SYMBOL(dma_pool_destroy);
291
292/**
293 * dma_pool_alloc - get a block of consistent memory
294 * @pool: dma pool that will produce the block
295 * @mem_flags: GFP_* bitmask
296 * @handle: pointer to dma address of block
297 *
298 * This returns the kernel virtual address of a currently unused block,
299 * and reports its dma address through the handle.
300 * If such a memory block can't be allocated, %NULL is returned.
301 */
302void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
303		     dma_addr_t *handle)
304{
305	unsigned long flags;
306	struct dma_page *page;
307	size_t offset;
308	void *retval;
309
310	might_sleep_if(mem_flags & __GFP_WAIT);
311
312	spin_lock_irqsave(&pool->lock, flags);
313	list_for_each_entry(page, &pool->page_list, page_list) {
314		if (page->offset < pool->allocation)
315			goto ready;
316	}
317
318	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
319	spin_unlock_irqrestore(&pool->lock, flags);
320
321	page = pool_alloc_page(pool, mem_flags);
322	if (!page)
323		return NULL;
324
325	spin_lock_irqsave(&pool->lock, flags);
326
327	list_add(&page->page_list, &pool->page_list);
328 ready:
329	page->in_use++;
330	offset = page->offset;
331	page->offset = *(int *)(page->vaddr + offset);
332	retval = offset + page->vaddr;
333	*handle = offset + page->dma;
334#ifdef	DMAPOOL_DEBUG
335	{
336		int i;
337		u8 *data = retval;
338		/* page->offset is stored in first 4 bytes */
339		for (i = sizeof(page->offset); i < pool->size; i++) {
340			if (data[i] == POOL_POISON_FREED)
341				continue;
342			if (pool->dev)
343				dev_err(pool->dev,
 344					"dma_pool_alloc %s, %p (corrupted)\n",
 345					pool->name, retval);
 346			else
 347				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
348					pool->name, retval);
349
350			/*
351			 * Dump the first 4 bytes even if they are not
352			 * POOL_POISON_FREED
353			 */
354			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
355					data, pool->size, 1);
356			break;
357		}
358	}
359	memset(retval, POOL_POISON_ALLOCATED, pool->size);
360#endif
361	spin_unlock_irqrestore(&pool->lock, flags);
362	return retval;
363}
364EXPORT_SYMBOL(dma_pool_alloc);
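/*
 * Illustrative sketch, not part of this file.  In this v3.15 code the
 * sleep check is keyed on __GFP_WAIT, so a caller in atomic context
 * (under a spinlock, or in an interrupt handler) must pass a mask
 * without it, conventionally GFP_ATOMIC:
 *
 *	dma_addr_t dma;
 *	void *buf = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 *
 * A NULL return here must then be handled without sleeping.
 */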
365
366static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
367{
368	struct dma_page *page;
369
370	list_for_each_entry(page, &pool->page_list, page_list) {
371		if (dma < page->dma)
372			continue;
373		if (dma < (page->dma + pool->allocation))
374			return page;
375	}
376	return NULL;
377}
378
379/**
380 * dma_pool_free - put block back into dma pool
381 * @pool: the dma pool holding the block
382 * @vaddr: virtual address of block
383 * @dma: dma address of block
384 *
385 * Caller promises neither device nor driver will again touch this block
386 * unless it is first re-allocated.
387 */
388void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
389{
390	struct dma_page *page;
391	unsigned long flags;
392	unsigned int offset;
393
394	spin_lock_irqsave(&pool->lock, flags);
395	page = pool_find_page(pool, dma);
396	if (!page) {
397		spin_unlock_irqrestore(&pool->lock, flags);
398		if (pool->dev)
399			dev_err(pool->dev,
400				"dma_pool_free %s, %p/%lx (bad dma)\n",
401				pool->name, vaddr, (unsigned long)dma);
402		else
403			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
404			       pool->name, vaddr, (unsigned long)dma);
405		return;
406	}
407
408	offset = vaddr - page->vaddr;
409#ifdef	DMAPOOL_DEBUG
410	if ((dma - page->dma) != offset) {
411		spin_unlock_irqrestore(&pool->lock, flags);
412		if (pool->dev)
413			dev_err(pool->dev,
414				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
415				pool->name, vaddr, (unsigned long long)dma);
416		else
417			printk(KERN_ERR
418			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
419			       pool->name, vaddr, (unsigned long long)dma);
420		return;
421	}
422	{
423		unsigned int chain = page->offset;
424		while (chain < pool->allocation) {
425			if (chain != offset) {
426				chain = *(int *)(page->vaddr + chain);
427				continue;
428			}
429			spin_unlock_irqrestore(&pool->lock, flags);
430			if (pool->dev)
431				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
432					"already free\n", pool->name,
433					(unsigned long long)dma);
434			else
435				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
436					"already free\n", pool->name,
437					(unsigned long long)dma);
438			return;
439		}
440	}
441	memset(vaddr, POOL_POISON_FREED, pool->size);
442#endif
443
444	page->in_use--;
445	*(int *)vaddr = page->offset;
446	page->offset = offset;
447	/*
448	 * Resist a temptation to do
449	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
450	 * Better have a few empty pages hang around.
451	 */
452	spin_unlock_irqrestore(&pool->lock, flags);
453}
454EXPORT_SYMBOL(dma_pool_free);
455
456/*
457 * Managed DMA pool
458 */
459static void dmam_pool_release(struct device *dev, void *res)
460{
461	struct dma_pool *pool = *(struct dma_pool **)res;
462
463	dma_pool_destroy(pool);
464}
465
466static int dmam_pool_match(struct device *dev, void *res, void *match_data)
467{
468	return *(struct dma_pool **)res == match_data;
469}
470
471/**
472 * dmam_pool_create - Managed dma_pool_create()
473 * @name: name of pool, for diagnostics
474 * @dev: device that will be doing the DMA
475 * @size: size of the blocks in this pool.
476 * @align: alignment requirement for blocks; must be a power of two
477 * @allocation: returned blocks won't cross this boundary (or zero)
478 *
479 * Managed dma_pool_create().  DMA pool created with this function is
480 * automatically destroyed on driver detach.
481 */
482struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
483				  size_t size, size_t align, size_t allocation)
484{
485	struct dma_pool **ptr, *pool;
486
487	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
488	if (!ptr)
489		return NULL;
490
491	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
492	if (pool)
493		devres_add(dev, ptr);
494	else
495		devres_free(ptr);
496
497	return pool;
498}
499EXPORT_SYMBOL(dmam_pool_create);
500
501/**
502 * dmam_pool_destroy - Managed dma_pool_destroy()
503 * @pool: dma pool that will be destroyed
504 *
505 * Managed dma_pool_destroy().
506 */
507void dmam_pool_destroy(struct dma_pool *pool)
508{
509	struct device *dev = pool->dev;
510
511	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
512	dma_pool_destroy(pool);
513}
514EXPORT_SYMBOL(dmam_pool_destroy);