// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
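
/*
 * A minimal usage sketch, for orientation only.  The "foo" names, the
 * platform device, the block size (64), and the alignment (8) are all
 * hypothetical; error handling is trimmed:
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	pool = dma_pool_create("foo-descs", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (cpu_addr) {
 *		... hand 'dma' to the device, use 'cpu_addr' from the CPU ...
 *		dma_pool_free(pool, cpu_addr, dma);
 *	}
 *	dma_pool_destroy(pool);
 */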

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR_RO(pools);
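
/*
 * The read-only "pools" attribute declared above is registered per device
 * (see dma_pool_create()) and shows up in sysfs, e.g. /sys/devices/.../pools.
 * An illustrative reading, following the format string in pools_show() (the
 * pool name and all counts below are made up):
 *
 *	poolinfo - 0.1
 *	buffer-2048        12   32 2048 16
 *
 * i.e. pool name, blocks in use, total blocks, block size, pages.
 */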

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

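/*
 * Free blocks are chained through the blocks themselves: the first bytes of
 * each free block store the page-relative offset of the next free block, and
 * page->offset is the head of that list.  A purely illustrative walk,
 * assuming 64-byte blocks in a 4096-byte allocation: the block at offset 0
 * stores 64, the block at offset 64 stores 128, and so on; the list ends once
 * a stored offset reaches pool->allocation.  When @boundary is smaller than
 * the allocation, pool_initialise_page() below skips ahead at each boundary
 * so that no block in the chain crosses it.
 */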
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
					pool->name, page->vaddr);
			else
				pr_err("%s %s, %p busy\n", __func__,
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);
			else
				pr_err("%s %s, %p (corrupted)\n",
				       __func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p/%pad (bad dma)\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p (bad vaddr)/%pad\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "%s %s, dma %pad already free\n",
					__func__, pool->name, &dma);
			else
				pr_err("%s %s, dma %pad already free\n",
				       __func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
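
/*
 * A minimal sketch of the managed variant (the probe function, device, names
 * and sizes below are hypothetical): a pool created with dmam_pool_create()
 * is torn down by the devres machinery on driver detach, so the driver needs
 * no explicit destroy call on its error or remove paths.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("foo-bufs", &pdev->dev, 256, 16, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		... use dma_pool_alloc() and dma_pool_free() as usual ...
 *		return 0;
 *	}
 */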

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);