v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * DMA Pool allocator
  4 *
  5 * Copyright 2001 David Brownell
  6 * Copyright 2007 Intel Corporation
  7 *   Author: Matthew Wilcox <willy@linux.intel.com>
  8 *
  9 * This allocator returns small blocks of a given size which are DMA-able by
 10 * the given device.  It uses the dma_alloc_coherent page allocator to get
 11 * new pages, then splits them up into blocks of the required size.
 12 * Many older drivers still have their own code to do this.
 13 *
 14 * The current design of this allocator is fairly simple.  The pool is
 15 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 16 * allocated pages.  Each page in the page_list is split into blocks of at
 17 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 18 * list of free blocks within the page.  Used blocks aren't tracked, but we
 19 * keep a count of how many are currently allocated from each page.
 20 */
 21
 22#include <linux/device.h>
 23#include <linux/dma-mapping.h>
 24#include <linux/dmapool.h>
 25#include <linux/kernel.h>
 26#include <linux/list.h>
 27#include <linux/export.h>
 28#include <linux/mutex.h>
 29#include <linux/poison.h>
 30#include <linux/sched.h>
 31#include <linux/slab.h>
 32#include <linux/stat.h>
 33#include <linux/spinlock.h>
 34#include <linux/string.h>
 35#include <linux/types.h>
 36#include <linux/wait.h>
 37
 38#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
 39#define DMAPOOL_DEBUG 1
 40#endif
 41
 42struct dma_pool {		/* the pool */
 43	struct list_head page_list;
 44	spinlock_t lock;
 45	size_t size;
 46	struct device *dev;
 47	size_t allocation;
 48	size_t boundary;
 49	char name[32];
 50	struct list_head pools;
 51};
 52
 53struct dma_page {		/* cacheable header for 'allocation' bytes */
 54	struct list_head page_list;
 55	void *vaddr;
 56	dma_addr_t dma;
 57	unsigned int in_use;
 58	unsigned int offset;
 59};
 60
 61static DEFINE_MUTEX(pools_lock);
 62static DEFINE_MUTEX(pools_reg_lock);
 63
 64static ssize_t
 65show_pools(struct device *dev, struct device_attribute *attr, char *buf)
 66{
 67	unsigned temp;
 68	unsigned size;
 69	char *next;
 70	struct dma_page *page;
 71	struct dma_pool *pool;
 72
 73	next = buf;
 74	size = PAGE_SIZE;
 75
 76	temp = scnprintf(next, size, "poolinfo - 0.1\n");
 77	size -= temp;
 78	next += temp;
 79
 80	mutex_lock(&pools_lock);
 81	list_for_each_entry(pool, &dev->dma_pools, pools) {
 82		unsigned pages = 0;
 83		unsigned blocks = 0;
 84
 85		spin_lock_irq(&pool->lock);
 86		list_for_each_entry(page, &pool->page_list, page_list) {
 87			pages++;
 88			blocks += page->in_use;
 89		}
 90		spin_unlock_irq(&pool->lock);
 91
 92		/* per-pool info, no real statistics yet */
 93		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
 94				 pool->name, blocks,
 95				 pages * (pool->allocation / pool->size),
 96				 pool->size, pages);
 97		size -= temp;
 98		next += temp;
 99	}
100	mutex_unlock(&pools_lock);
101
102	return PAGE_SIZE - size;
103}
104
105static DEVICE_ATTR(pools, 0444, show_pools, NULL);
106
107/**
108 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
109 * @name: name of pool, for diagnostics
110 * @dev: device that will be doing the DMA
111 * @size: size of the blocks in this pool.
112 * @align: alignment requirement for blocks; must be a power of two
113 * @boundary: returned blocks won't cross this power of two boundary
114 * Context: not in_interrupt()
115 *
116 * Given one of these pools, dma_pool_alloc()
117 * may be used to allocate memory.  Such memory will all have "consistent"
118 * DMA mappings, accessible by the device and its driver without using
119 * cache flushing primitives.  The actual size of blocks allocated may be
120 * larger than requested because of alignment.
121 *
122 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
123 * cross that size boundary.  This is useful for devices which have
124 * addressing restrictions on individual DMA transfers, such as not crossing
125 * boundaries of 4KBytes.
126 *
127 * Return: a dma allocation pool with the requested characteristics, or
128 * %NULL if one can't be created.
129 */
130struct dma_pool *dma_pool_create(const char *name, struct device *dev,
131				 size_t size, size_t align, size_t boundary)
132{
133	struct dma_pool *retval;
134	size_t allocation;
135	bool empty = false;
136
137	if (align == 0)
138		align = 1;
139	else if (align & (align - 1))
140		return NULL;
141
142	if (size == 0)
143		return NULL;
144	else if (size < 4)
145		size = 4;
146
147	if ((size % align) != 0)
148		size = ALIGN(size, align);
149
150	allocation = max_t(size_t, size, PAGE_SIZE);
151
152	if (!boundary)
153		boundary = allocation;
154	else if ((boundary < size) || (boundary & (boundary - 1)))
155		return NULL;
156
157	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
158	if (!retval)
159		return retval;
160
161	strlcpy(retval->name, name, sizeof(retval->name));
162
163	retval->dev = dev;
164
165	INIT_LIST_HEAD(&retval->page_list);
166	spin_lock_init(&retval->lock);
167	retval->size = size;
168	retval->boundary = boundary;
169	retval->allocation = allocation;
170
171	INIT_LIST_HEAD(&retval->pools);
172
173	/*
174	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
175	 * pools_reg_lock ensures that there is not a race between
176	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
177	 * when the first invocation of dma_pool_create() failed on
178	 * device_create_file() and the second assumes that it has been done (I
179	 * know it is a short window).
180	 */
181	mutex_lock(&pools_reg_lock);
182	mutex_lock(&pools_lock);
183	if (list_empty(&dev->dma_pools))
184		empty = true;
185	list_add(&retval->pools, &dev->dma_pools);
186	mutex_unlock(&pools_lock);
187	if (empty) {
188		int err;
189
190		err = device_create_file(dev, &dev_attr_pools);
191		if (err) {
192			mutex_lock(&pools_lock);
193			list_del(&retval->pools);
194			mutex_unlock(&pools_lock);
195			mutex_unlock(&pools_reg_lock);
196			kfree(retval);
197			return NULL;
198		}
199	}
200	mutex_unlock(&pools_reg_lock);
201	return retval;
202}
203EXPORT_SYMBOL(dma_pool_create);
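/*
 * Example (not part of dmapool.c): a minimal sketch of how a driver might
 * create a pool at probe time.  The "foo" driver, its descriptor size and
 * the 4 KiB boundary below are illustrative assumptions, not taken from
 * this file.
 */
#include <linux/device.h>
#include <linux/dmapool.h>

struct foo_dev {
	struct dma_pool *desc_pool;	/* hypothetical per-device pool */
};

static int foo_setup_pool(struct foo_dev *foo, struct device *dev)
{
	/* 64-byte blocks, 64-byte aligned, never crossing a 4 KiB boundary */
	foo->desc_pool = dma_pool_create("foo-desc", dev, 64, 64, 4096);
	if (!foo->desc_pool)
		return -ENOMEM;
	return 0;
}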
204
205static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
206{
207	unsigned int offset = 0;
208	unsigned int next_boundary = pool->boundary;
209
210	do {
211		unsigned int next = offset + pool->size;
212		if (unlikely((next + pool->size) >= next_boundary)) {
213			next = next_boundary;
214			next_boundary += pool->boundary;
215		}
216		*(int *)(page->vaddr + offset) = next;
217		offset = next;
218	} while (offset < pool->allocation);
219}
220
221static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
222{
223	struct dma_page *page;
224
225	page = kmalloc(sizeof(*page), mem_flags);
226	if (!page)
227		return NULL;
228	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
229					 &page->dma, mem_flags);
230	if (page->vaddr) {
231#ifdef	DMAPOOL_DEBUG
232		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
233#endif
234		pool_initialise_page(pool, page);
235		page->in_use = 0;
236		page->offset = 0;
237	} else {
238		kfree(page);
239		page = NULL;
240	}
241	return page;
242}
243
244static inline bool is_page_busy(struct dma_page *page)
245{
246	return page->in_use != 0;
247}
248
249static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
250{
251	dma_addr_t dma = page->dma;
252
253#ifdef	DMAPOOL_DEBUG
254	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
255#endif
256	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
257	list_del(&page->page_list);
258	kfree(page);
259}
260
261/**
262 * dma_pool_destroy - destroys a pool of dma memory blocks.
263 * @pool: dma pool that will be destroyed
264 * Context: !in_interrupt()
265 *
266 * Caller guarantees that no more memory from the pool is in use,
267 * and that nothing will try to use the pool after this call.
268 */
269void dma_pool_destroy(struct dma_pool *pool)
270{
271	bool empty = false;
272
273	if (unlikely(!pool))
274		return;
275
276	mutex_lock(&pools_reg_lock);
277	mutex_lock(&pools_lock);
278	list_del(&pool->pools);
279	if (pool->dev && list_empty(&pool->dev->dma_pools))
280		empty = true;
281	mutex_unlock(&pools_lock);
282	if (empty)
283		device_remove_file(pool->dev, &dev_attr_pools);
284	mutex_unlock(&pools_reg_lock);
285
286	while (!list_empty(&pool->page_list)) {
287		struct dma_page *page;
288		page = list_entry(pool->page_list.next,
289				  struct dma_page, page_list);
290		if (is_page_busy(page)) {
291			if (pool->dev)
292				dev_err(pool->dev,
293					"dma_pool_destroy %s, %p busy\n",
294					pool->name, page->vaddr);
295			else
296				pr_err("dma_pool_destroy %s, %p busy\n",
297				       pool->name, page->vaddr);
298			/* leak the still-in-use consistent memory */
299			list_del(&page->page_list);
300			kfree(page);
301		} else
302			pool_free_page(pool, page);
303	}
304
305	kfree(pool);
306}
307EXPORT_SYMBOL(dma_pool_destroy);
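/*
 * Example (not part of dmapool.c): tearing the pool down on driver remove.
 * Every block must already have been returned with dma_pool_free(); the
 * "foo" structure is the hypothetical one from the creation sketch above.
 */
static void foo_teardown_pool(struct foo_dev *foo)
{
	dma_pool_destroy(foo->desc_pool);	/* a NULL pool is tolerated */
	foo->desc_pool = NULL;
}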
308
309/**
310 * dma_pool_alloc - get a block of consistent memory
311 * @pool: dma pool that will produce the block
312 * @mem_flags: GFP_* bitmask
313 * @handle: pointer to dma address of block
314 *
315 * Return: the kernel virtual address of a currently unused block,
316 * and reports its dma address through the handle.
317 * If such a memory block can't be allocated, %NULL is returned.
318 */
319void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
320		     dma_addr_t *handle)
321{
322	unsigned long flags;
323	struct dma_page *page;
324	size_t offset;
325	void *retval;
326
327	might_sleep_if(gfpflags_allow_blocking(mem_flags));
328
329	spin_lock_irqsave(&pool->lock, flags);
330	list_for_each_entry(page, &pool->page_list, page_list) {
331		if (page->offset < pool->allocation)
332			goto ready;
333	}
334
335	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
336	spin_unlock_irqrestore(&pool->lock, flags);
337
338	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
339	if (!page)
340		return NULL;
341
342	spin_lock_irqsave(&pool->lock, flags);
343
344	list_add(&page->page_list, &pool->page_list);
345 ready:
346	page->in_use++;
347	offset = page->offset;
348	page->offset = *(int *)(page->vaddr + offset);
349	retval = offset + page->vaddr;
350	*handle = offset + page->dma;
351#ifdef	DMAPOOL_DEBUG
352	{
353		int i;
354		u8 *data = retval;
355		/* page->offset is stored in first 4 bytes */
356		for (i = sizeof(page->offset); i < pool->size; i++) {
357			if (data[i] == POOL_POISON_FREED)
358				continue;
359			if (pool->dev)
360				dev_err(pool->dev,
361					"dma_pool_alloc %s, %p (corrupted)\n",
362					pool->name, retval);
363			else
364				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
365					pool->name, retval);
366
367			/*
368			 * Dump the first 4 bytes even if they are not
369			 * POOL_POISON_FREED
370			 */
371			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
372					data, pool->size, 1);
373			break;
374		}
375	}
376	if (!(mem_flags & __GFP_ZERO))
377		memset(retval, POOL_POISON_ALLOCATED, pool->size);
378#endif
379	spin_unlock_irqrestore(&pool->lock, flags);
380
381	if (want_init_on_alloc(mem_flags))
382		memset(retval, 0, pool->size);
383
384	return retval;
385}
386EXPORT_SYMBOL(dma_pool_alloc);
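/*
 * Example (not part of dmapool.c): allocating one block.  The CPU uses the
 * returned virtual address while the hardware is handed the dma_addr_t
 * written through @handle.  The descriptor layout and names are
 * illustrative assumptions.
 */
struct foo_desc {			/* hypothetical 64-byte descriptor */
	__le32 ctrl;
	__le32 len;
	__le64 buf;
	u8 pad[48];
};

static struct foo_desc *foo_get_desc(struct foo_dev *foo, dma_addr_t *dma)
{
	struct foo_desc *desc;

	/* GFP_KERNEL may sleep; see the GFP_ATOMIC note in the v6.2 listing */
	desc = dma_pool_alloc(foo->desc_pool, GFP_KERNEL, dma);
	if (!desc)
		return NULL;

	desc->ctrl = 0;			/* the caller initialises the block */
	return desc;
}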
387
388static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
389{
390	struct dma_page *page;
391
392	list_for_each_entry(page, &pool->page_list, page_list) {
393		if (dma < page->dma)
394			continue;
395		if ((dma - page->dma) < pool->allocation)
396			return page;
397	}
398	return NULL;
399}
400
401/**
402 * dma_pool_free - put block back into dma pool
403 * @pool: the dma pool holding the block
404 * @vaddr: virtual address of block
405 * @dma: dma address of block
406 *
407 * Caller promises neither device nor driver will again touch this block
408 * unless it is first re-allocated.
409 */
410void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
411{
412	struct dma_page *page;
413	unsigned long flags;
414	unsigned int offset;
415
416	spin_lock_irqsave(&pool->lock, flags);
417	page = pool_find_page(pool, dma);
418	if (!page) {
419		spin_unlock_irqrestore(&pool->lock, flags);
420		if (pool->dev)
421			dev_err(pool->dev,
422				"dma_pool_free %s, %p/%lx (bad dma)\n",
423				pool->name, vaddr, (unsigned long)dma);
424		else
425			pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
426			       pool->name, vaddr, (unsigned long)dma);
427		return;
428	}
429
430	offset = vaddr - page->vaddr;
431	if (want_init_on_free())
432		memset(vaddr, 0, pool->size);
433#ifdef	DMAPOOL_DEBUG
434	if ((dma - page->dma) != offset) {
435		spin_unlock_irqrestore(&pool->lock, flags);
436		if (pool->dev)
437			dev_err(pool->dev,
438				"dma_pool_free %s, %p (bad vaddr)/%pad\n",
439				pool->name, vaddr, &dma);
440		else
441			pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
442			       pool->name, vaddr, &dma);
443		return;
444	}
445	{
446		unsigned int chain = page->offset;
447		while (chain < pool->allocation) {
448			if (chain != offset) {
449				chain = *(int *)(page->vaddr + chain);
450				continue;
451			}
452			spin_unlock_irqrestore(&pool->lock, flags);
453			if (pool->dev)
454				dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
455					pool->name, &dma);
456			else
457				pr_err("dma_pool_free %s, dma %pad already free\n",
458				       pool->name, &dma);
459			return;
460		}
461	}
462	memset(vaddr, POOL_POISON_FREED, pool->size);
463#endif
464
465	page->in_use--;
466	*(int *)vaddr = page->offset;
467	page->offset = offset;
468	/*
469	 * Resist a temptation to do
470	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
471	 * Better have a few empty pages hang around.
472	 */
473	spin_unlock_irqrestore(&pool->lock, flags);
474}
475EXPORT_SYMBOL(dma_pool_free);
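/*
 * Example (not part of dmapool.c): returning a block.  The virtual and dma
 * addresses passed here must be the exact pair that dma_pool_alloc() handed
 * out; names are illustrative.
 */
static void foo_put_desc(struct foo_dev *foo, struct foo_desc *desc,
			 dma_addr_t dma)
{
	dma_pool_free(foo->desc_pool, desc, dma);
}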
476
477/*
478 * Managed DMA pool
479 */
480static void dmam_pool_release(struct device *dev, void *res)
481{
482	struct dma_pool *pool = *(struct dma_pool **)res;
483
484	dma_pool_destroy(pool);
485}
486
487static int dmam_pool_match(struct device *dev, void *res, void *match_data)
488{
489	return *(struct dma_pool **)res == match_data;
490}
491
492/**
493 * dmam_pool_create - Managed dma_pool_create()
494 * @name: name of pool, for diagnostics
495 * @dev: device that will be doing the DMA
496 * @size: size of the blocks in this pool.
497 * @align: alignment requirement for blocks; must be a power of two
498 * @allocation: returned blocks won't cross this boundary (or zero)
499 *
500 * Managed dma_pool_create().  DMA pool created with this function is
501 * automatically destroyed on driver detach.
502 *
503 * Return: a managed dma allocation pool with the requested
504 * characteristics, or %NULL if one can't be created.
505 */
506struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
507				  size_t size, size_t align, size_t allocation)
508{
509	struct dma_pool **ptr, *pool;
510
511	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
512	if (!ptr)
513		return NULL;
514
515	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
516	if (pool)
517		devres_add(dev, ptr);
518	else
519		devres_free(ptr);
520
521	return pool;
522}
523EXPORT_SYMBOL(dmam_pool_create);
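/*
 * Example (not part of dmapool.c): the managed variant.  A pool created with
 * dmam_pool_create() is released by devres when the driver detaches, so the
 * remove path needs no explicit dma_pool_destroy().  Passing 0 as the last
 * argument means "no boundary restriction"; names and sizes are illustrative
 * assumptions.
 */
static int foo_probe_pool(struct device *dev, struct foo_dev *foo)
{
	foo->desc_pool = dmam_pool_create("foo-desc", dev, 64, 64, 0);
	return foo->desc_pool ? 0 : -ENOMEM;
}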
524
525/**
526 * dmam_pool_destroy - Managed dma_pool_destroy()
527 * @pool: dma pool that will be destroyed
528 *
529 * Managed dma_pool_destroy().
530 */
531void dmam_pool_destroy(struct dma_pool *pool)
532{
533	struct device *dev = pool->dev;
534
535	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
536}
537EXPORT_SYMBOL(dmam_pool_destroy);
v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * DMA Pool allocator
  4 *
  5 * Copyright 2001 David Brownell
  6 * Copyright 2007 Intel Corporation
  7 *   Author: Matthew Wilcox <willy@linux.intel.com>
  8 *
  9 * This allocator returns small blocks of a given size which are DMA-able by
 10 * the given device.  It uses the dma_alloc_coherent page allocator to get
 11 * new pages, then splits them up into blocks of the required size.
 12 * Many older drivers still have their own code to do this.
 13 *
 14 * The current design of this allocator is fairly simple.  The pool is
 15 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 16 * allocated pages.  Each page in the page_list is split into blocks of at
 17 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 18 * list of free blocks within the page.  Used blocks aren't tracked, but we
 19 * keep a count of how many are currently allocated from each page.
 20 */
 21
 22#include <linux/device.h>
 23#include <linux/dma-mapping.h>
 24#include <linux/dmapool.h>
 25#include <linux/kernel.h>
 26#include <linux/list.h>
 27#include <linux/export.h>
 28#include <linux/mutex.h>
 29#include <linux/poison.h>
 30#include <linux/sched.h>
 31#include <linux/sched/mm.h>
 32#include <linux/slab.h>
 33#include <linux/stat.h>
 34#include <linux/spinlock.h>
 35#include <linux/string.h>
 36#include <linux/types.h>
 37#include <linux/wait.h>
 38
 39#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
 40#define DMAPOOL_DEBUG 1
 41#endif
 42
 43struct dma_pool {		/* the pool */
 44	struct list_head page_list;
 45	spinlock_t lock;
 46	size_t size;
 47	struct device *dev;
 48	size_t allocation;
 49	size_t boundary;
 50	char name[32];
 51	struct list_head pools;
 52};
 53
 54struct dma_page {		/* cacheable header for 'allocation' bytes */
 55	struct list_head page_list;
 56	void *vaddr;
 57	dma_addr_t dma;
 58	unsigned int in_use;
 59	unsigned int offset;
 60};
 61
 62static DEFINE_MUTEX(pools_lock);
 63static DEFINE_MUTEX(pools_reg_lock);
 64
 65static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
 66{
 67	unsigned temp;
 68	unsigned size;
 69	char *next;
 70	struct dma_page *page;
 71	struct dma_pool *pool;
 72
 73	next = buf;
 74	size = PAGE_SIZE;
 75
 76	temp = scnprintf(next, size, "poolinfo - 0.1\n");
 77	size -= temp;
 78	next += temp;
 79
 80	mutex_lock(&pools_lock);
 81	list_for_each_entry(pool, &dev->dma_pools, pools) {
 82		unsigned pages = 0;
 83		unsigned blocks = 0;
 84
 85		spin_lock_irq(&pool->lock);
 86		list_for_each_entry(page, &pool->page_list, page_list) {
 87			pages++;
 88			blocks += page->in_use;
 89		}
 90		spin_unlock_irq(&pool->lock);
 91
 92		/* per-pool info, no real statistics yet */
 93		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
 94				 pool->name, blocks,
 95				 pages * (pool->allocation / pool->size),
 96				 pool->size, pages);
 97		size -= temp;
 98		next += temp;
 99	}
100	mutex_unlock(&pools_lock);
101
102	return PAGE_SIZE - size;
103}
104
105static DEVICE_ATTR_RO(pools);
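/*
 * Illustration (not part of dmapool.c): pools_show() emits one header line
 * and then one line per pool, using the scnprintf() formats above.  Reading
 * the "pools" attribute of a device might therefore produce something like
 * the following (pool names and numbers are hypothetical):
 *
 *	poolinfo - 0.1
 *	foo-desc           12  128   64  2
 *	buffer-2048         3    6 2048  3
 *
 * The columns are: pool name, blocks currently in use, total blocks
 * (pages * allocation / size), block size in bytes, and pages held.
 */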
106
107/**
108 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
109 * @name: name of pool, for diagnostics
110 * @dev: device that will be doing the DMA
111 * @size: size of the blocks in this pool.
112 * @align: alignment requirement for blocks; must be a power of two
113 * @boundary: returned blocks won't cross this power of two boundary
114 * Context: not in_interrupt()
115 *
116 * Given one of these pools, dma_pool_alloc()
117 * may be used to allocate memory.  Such memory will all have "consistent"
118 * DMA mappings, accessible by the device and its driver without using
119 * cache flushing primitives.  The actual size of blocks allocated may be
120 * larger than requested because of alignment.
121 *
122 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
123 * cross that size boundary.  This is useful for devices which have
124 * addressing restrictions on individual DMA transfers, such as not crossing
125 * boundaries of 4KBytes.
126 *
127 * Return: a dma allocation pool with the requested characteristics, or
128 * %NULL if one can't be created.
129 */
130struct dma_pool *dma_pool_create(const char *name, struct device *dev,
131				 size_t size, size_t align, size_t boundary)
132{
133	struct dma_pool *retval;
134	size_t allocation;
135	bool empty = false;
136
137	if (align == 0)
138		align = 1;
139	else if (align & (align - 1))
140		return NULL;
141
142	if (size == 0)
143		return NULL;
144	else if (size < 4)
145		size = 4;
146
147	size = ALIGN(size, align);
148	allocation = max_t(size_t, size, PAGE_SIZE);
149
150	if (!boundary)
151		boundary = allocation;
152	else if ((boundary < size) || (boundary & (boundary - 1)))
153		return NULL;
154
155	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
156	if (!retval)
157		return retval;
158
159	strscpy(retval->name, name, sizeof(retval->name));
160
161	retval->dev = dev;
162
163	INIT_LIST_HEAD(&retval->page_list);
164	spin_lock_init(&retval->lock);
165	retval->size = size;
166	retval->boundary = boundary;
167	retval->allocation = allocation;
168
169	INIT_LIST_HEAD(&retval->pools);
170
171	/*
172	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
173	 * pools_reg_lock ensures that there is not a race between
174	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
175	 * when the first invocation of dma_pool_create() failed on
176	 * device_create_file() and the second assumes that it has been done (I
177	 * know it is a short window).
178	 */
179	mutex_lock(&pools_reg_lock);
180	mutex_lock(&pools_lock);
181	if (list_empty(&dev->dma_pools))
182		empty = true;
183	list_add(&retval->pools, &dev->dma_pools);
184	mutex_unlock(&pools_lock);
185	if (empty) {
186		int err;
187
188		err = device_create_file(dev, &dev_attr_pools);
189		if (err) {
190			mutex_lock(&pools_lock);
191			list_del(&retval->pools);
192			mutex_unlock(&pools_lock);
193			mutex_unlock(&pools_reg_lock);
194			kfree(retval);
195			return NULL;
196		}
197	}
198	mutex_unlock(&pools_reg_lock);
199	return retval;
200}
201EXPORT_SYMBOL(dma_pool_create);
202
203static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
204{
205	unsigned int offset = 0;
206	unsigned int next_boundary = pool->boundary;
207
208	do {
209		unsigned int next = offset + pool->size;
210		if (unlikely((next + pool->size) >= next_boundary)) {
211			next = next_boundary;
212			next_boundary += pool->boundary;
213		}
214		*(int *)(page->vaddr + offset) = next;
215		offset = next;
216	} while (offset < pool->allocation);
217}
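/*
 * Worked illustration (not part of dmapool.c): assume size = 512,
 * boundary = 2048 and allocation = 4096.  The loop above stores the offset
 * of the next free block in the first bytes of each block, producing the
 * chain
 *
 *	0 -> 512 -> 1024 -> 2048 -> 2560 -> 3072 -> 4096 (end, == allocation)
 *
 * Whenever the block that would start at 'next' would run up to or past the
 * upcoming 2048-byte boundary, the chain jumps straight to that boundary
 * instead, so no free block in the list straddles it (the slots at 1536 and
 * 3584 are simply never linked in).  dma_pool_alloc() later pops blocks off
 * this chain through page->offset.
 */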
218
219static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
220{
221	struct dma_page *page;
222
223	page = kmalloc(sizeof(*page), mem_flags);
224	if (!page)
225		return NULL;
226	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
227					 &page->dma, mem_flags);
228	if (page->vaddr) {
229#ifdef	DMAPOOL_DEBUG
230		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
231#endif
232		pool_initialise_page(pool, page);
233		page->in_use = 0;
234		page->offset = 0;
235	} else {
236		kfree(page);
237		page = NULL;
238	}
239	return page;
240}
241
242static inline bool is_page_busy(struct dma_page *page)
243{
244	return page->in_use != 0;
245}
246
247static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
248{
249	dma_addr_t dma = page->dma;
250
251#ifdef	DMAPOOL_DEBUG
252	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
253#endif
254	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
255	list_del(&page->page_list);
256	kfree(page);
257}
258
259/**
260 * dma_pool_destroy - destroys a pool of dma memory blocks.
261 * @pool: dma pool that will be destroyed
262 * Context: !in_interrupt()
263 *
264 * Caller guarantees that no more memory from the pool is in use,
265 * and that nothing will try to use the pool after this call.
266 */
267void dma_pool_destroy(struct dma_pool *pool)
268{
269	struct dma_page *page, *tmp;
270	bool empty = false;
271
272	if (unlikely(!pool))
273		return;
274
275	mutex_lock(&pools_reg_lock);
276	mutex_lock(&pools_lock);
277	list_del(&pool->pools);
278	if (pool->dev && list_empty(&pool->dev->dma_pools))
279		empty = true;
280	mutex_unlock(&pools_lock);
281	if (empty)
282		device_remove_file(pool->dev, &dev_attr_pools);
283	mutex_unlock(&pools_reg_lock);
284
285	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
286		if (is_page_busy(page)) {
287			if (pool->dev)
288				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
289					pool->name, page->vaddr);
290			else
291				pr_err("%s %s, %p busy\n", __func__,
292				       pool->name, page->vaddr);
293			/* leak the still-in-use consistent memory */
294			list_del(&page->page_list);
295			kfree(page);
296		} else
297			pool_free_page(pool, page);
298	}
299
300	kfree(pool);
301}
302EXPORT_SYMBOL(dma_pool_destroy);
303
304/**
305 * dma_pool_alloc - get a block of consistent memory
306 * @pool: dma pool that will produce the block
307 * @mem_flags: GFP_* bitmask
308 * @handle: pointer to dma address of block
309 *
310 * Return: the kernel virtual address of a currently unused block,
311 * and reports its dma address through the handle.
312 * If such a memory block can't be allocated, %NULL is returned.
313 */
314void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
315		     dma_addr_t *handle)
316{
317	unsigned long flags;
318	struct dma_page *page;
319	size_t offset;
320	void *retval;
321
322	might_alloc(mem_flags);
323
324	spin_lock_irqsave(&pool->lock, flags);
325	list_for_each_entry(page, &pool->page_list, page_list) {
326		if (page->offset < pool->allocation)
327			goto ready;
328	}
329
330	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
331	spin_unlock_irqrestore(&pool->lock, flags);
332
333	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
334	if (!page)
335		return NULL;
336
337	spin_lock_irqsave(&pool->lock, flags);
338
339	list_add(&page->page_list, &pool->page_list);
340 ready:
341	page->in_use++;
342	offset = page->offset;
343	page->offset = *(int *)(page->vaddr + offset);
344	retval = offset + page->vaddr;
345	*handle = offset + page->dma;
346#ifdef	DMAPOOL_DEBUG
347	{
348		int i;
349		u8 *data = retval;
350		/* page->offset is stored in first 4 bytes */
351		for (i = sizeof(page->offset); i < pool->size; i++) {
352			if (data[i] == POOL_POISON_FREED)
353				continue;
354			if (pool->dev)
355				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
356					__func__, pool->name, retval);
357			else
358				pr_err("%s %s, %p (corrupted)\n",
359					__func__, pool->name, retval);
360
361			/*
362			 * Dump the first 4 bytes even if they are not
363			 * POOL_POISON_FREED
364			 */
365			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
366					data, pool->size, 1);
367			break;
368		}
369	}
370	if (!(mem_flags & __GFP_ZERO))
371		memset(retval, POOL_POISON_ALLOCATED, pool->size);
372#endif
373	spin_unlock_irqrestore(&pool->lock, flags);
374
375	if (want_init_on_alloc(mem_flags))
376		memset(retval, 0, pool->size);
377
378	return retval;
379}
380EXPORT_SYMBOL(dma_pool_alloc);
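/*
 * Example (not part of dmapool.c): the pool lock is taken with
 * spin_lock_irqsave() and might_alloc() only complains if a sleeping GFP
 * mask is used where sleeping is forbidden, so blocks can also be grabbed
 * from atomic context by passing GFP_ATOMIC (the allocation may then fail
 * more easily).  The helper name is an illustrative assumption.
 */
static void *foo_get_desc_atomic(struct dma_pool *pool, dma_addr_t *dma)
{
	return dma_pool_alloc(pool, GFP_ATOMIC, dma);
}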
381
382static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
383{
384	struct dma_page *page;
385
386	list_for_each_entry(page, &pool->page_list, page_list) {
387		if (dma < page->dma)
388			continue;
389		if ((dma - page->dma) < pool->allocation)
390			return page;
391	}
392	return NULL;
393}
394
395/**
396 * dma_pool_free - put block back into dma pool
397 * @pool: the dma pool holding the block
398 * @vaddr: virtual address of block
399 * @dma: dma address of block
400 *
401 * Caller promises neither device nor driver will again touch this block
402 * unless it is first re-allocated.
403 */
404void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
405{
406	struct dma_page *page;
407	unsigned long flags;
408	unsigned int offset;
409
410	spin_lock_irqsave(&pool->lock, flags);
411	page = pool_find_page(pool, dma);
412	if (!page) {
413		spin_unlock_irqrestore(&pool->lock, flags);
414		if (pool->dev)
415			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
416				__func__, pool->name, vaddr, &dma);
417		else
418			pr_err("%s %s, %p/%pad (bad dma)\n",
419			       __func__, pool->name, vaddr, &dma);
420		return;
421	}
422
423	offset = vaddr - page->vaddr;
424	if (want_init_on_free())
425		memset(vaddr, 0, pool->size);
426#ifdef	DMAPOOL_DEBUG
427	if ((dma - page->dma) != offset) {
428		spin_unlock_irqrestore(&pool->lock, flags);
429		if (pool->dev)
430			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
431				__func__, pool->name, vaddr, &dma);
432		else
433			pr_err("%s %s, %p (bad vaddr)/%pad\n",
434			       __func__, pool->name, vaddr, &dma);
435		return;
436	}
437	{
438		unsigned int chain = page->offset;
439		while (chain < pool->allocation) {
440			if (chain != offset) {
441				chain = *(int *)(page->vaddr + chain);
442				continue;
443			}
444			spin_unlock_irqrestore(&pool->lock, flags);
445			if (pool->dev)
446				dev_err(pool->dev, "%s %s, dma %pad already free\n",
447					__func__, pool->name, &dma);
448			else
449				pr_err("%s %s, dma %pad already free\n",
450				       __func__, pool->name, &dma);
451			return;
452		}
453	}
454	memset(vaddr, POOL_POISON_FREED, pool->size);
455#endif
456
457	page->in_use--;
458	*(int *)vaddr = page->offset;
459	page->offset = offset;
460	/*
461	 * Resist a temptation to do
462	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
463	 * Better have a few empty pages hang around.
464	 */
465	spin_unlock_irqrestore(&pool->lock, flags);
466}
467EXPORT_SYMBOL(dma_pool_free);
468
469/*
470 * Managed DMA pool
471 */
472static void dmam_pool_release(struct device *dev, void *res)
473{
474	struct dma_pool *pool = *(struct dma_pool **)res;
475
476	dma_pool_destroy(pool);
477}
478
479static int dmam_pool_match(struct device *dev, void *res, void *match_data)
480{
481	return *(struct dma_pool **)res == match_data;
482}
483
484/**
485 * dmam_pool_create - Managed dma_pool_create()
486 * @name: name of pool, for diagnostics
487 * @dev: device that will be doing the DMA
488 * @size: size of the blocks in this pool.
489 * @align: alignment requirement for blocks; must be a power of two
490 * @allocation: returned blocks won't cross this boundary (or zero)
491 *
492 * Managed dma_pool_create().  DMA pool created with this function is
493 * automatically destroyed on driver detach.
494 *
495 * Return: a managed dma allocation pool with the requested
496 * characteristics, or %NULL if one can't be created.
497 */
498struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
499				  size_t size, size_t align, size_t allocation)
500{
501	struct dma_pool **ptr, *pool;
502
503	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
504	if (!ptr)
505		return NULL;
506
507	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
508	if (pool)
509		devres_add(dev, ptr);
510	else
511		devres_free(ptr);
512
513	return pool;
514}
515EXPORT_SYMBOL(dmam_pool_create);
516
517/**
518 * dmam_pool_destroy - Managed dma_pool_destroy()
519 * @pool: dma pool that will be destroyed
520 *
521 * Managed dma_pool_destroy().
522 */
523void dmam_pool_destroy(struct dma_pool *pool)
524{
525	struct device *dev = pool->dev;
526
527	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
528}
529EXPORT_SYMBOL(dmam_pool_destroy);