// SPDX-License-Identifier: GPL-2.0-only
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory, etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses the
 * allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>

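/*
 * Illustrative usage sketch (not part of the allocator): a minimal pool
 * lifecycle, assuming a driver-provided region at hypothetical addresses
 * "vaddr"/"paddr" of SZ_64K bytes, managed in 32-byte units.
 * gen_pool_alloc() and gen_pool_free() are assumed to be the usual
 * <linux/genalloc.h> wrappers around the *_owner variants below.
 * Error handling is abbreviated.
 *
 *	struct gen_pool *pool;
 *	unsigned long buf;
 *
 *	pool = gen_pool_create(ilog2(32), NUMA_NO_NODE);
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add_owner(pool, vaddr, paddr, SZ_64K,
 *			       NUMA_NO_NODE, NULL))
 *		goto destroy;
 *
 *	buf = gen_pool_alloc(pool, 256);
 *	if (buf)
 *		gen_pool_free(pool, buf, 256);
 * destroy:
 *	gen_pool_destroy(pool);
 */
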
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one of them will return the number of
 * remaining bits, otherwise 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one of them will return the number of
 * remaining bits, otherwise 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		       size_t size, int nid, void *owner)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	chunk->owner = owner;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
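
/*
 * Illustrative sketch (not part of the allocator): translating a pool
 * allocation to the physical address a device expects, e.g. for a
 * descriptor field. "pool" and the 128-byte allocation are assumed to
 * exist already; a full driver would also check for allocation failure.
 *
 *	unsigned long va = gen_pool_alloc(pool, 128);
 *	phys_addr_t pa = gen_pool_virt_to_phys(pool, va);
 *
 *	if (pa == (phys_addr_t)-1)
 *		pr_err("address not from this pool\n");
 */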

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool, chunk->start_addr);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		if (owner)
			*owner = chunk->owner;
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
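
/*
 * Illustrative sketch (not part of the allocator): recovering the
 * per-chunk owner cookie at allocation time. The owner pointer is
 * whatever was passed to gen_pool_add_owner() for the chunk that
 * satisfied the request; "my_chunk_ctx" is a hypothetical driver type.
 *
 *	void *owner;
 *	unsigned long va;
 *
 *	va = gen_pool_alloc_algo_owner(pool, 512, gen_pool_first_fit,
 *				       NULL, &owner);
 *	if (va) {
 *		struct my_chunk_ctx *ctx = owner;
 *		... use ctx to find per-chunk resources ...
 *	}
 */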

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use %NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc);

/**
 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
 * usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool. Uses the
 * given pool allocation function. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc_algo(pool, size, algo, data);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc_algo);

/**
 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
 * usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of bytes from the specified pool, with the
 * given alignment restriction. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_alloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc_align);
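
/*
 * Illustrative sketch (not part of the allocator): carving a 64-byte
 * aligned buffer out of on-chip SRAM that was previously added to a
 * hypothetical "sram_pool", and handing the DMA address to hardware.
 * The register name and write are made up for the example.
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = gen_pool_dma_alloc_align(sram_pool, 256, &dma, 64);
 *	if (!buf)
 *		return -ENOMEM;
 *	writel(lower_32_bits(dma), base + HYPOTHETICAL_DESC_REG);
 */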

/**
 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
 * DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use %NULL if unneeded.
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc);

/**
 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
 * DMA usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of zeroed bytes from the specified pool. Uses
 * the given pool allocation function. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);

	if (vaddr)
		memset(vaddr, 0, size);

	return vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);

/**
 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
 * DMA usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of zeroed bytes from the specified pool,
 * with the given alignment restriction. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_zalloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);

/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: optionally retrieve the private data stashed at
 *         gen_pool_add_owner() time
 *
 * Free previously allocated special memory back to the specified
 * pool. Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
		void **owner)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			if (owner)
				*owner = chunk->owner;
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);
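
/*
 * Illustrative sketch (not part of the allocator): freeing an
 * allocation and recovering the owning chunk's cookie in one call.
 * "va" is assumed to be a live 512-byte allocation from "pool".
 *
 *	void *owner;
 *
 *	gen_pool_free_owner(pool, va, 512, &owner);
 *	if (owner)
 *		... release per-chunk bookkeeping tied to the cookie ...
 */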

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

/**
 * gen_pool_has_addr - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}
EXPORT_SYMBOL(gen_pool_has_addr);

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, use gen_pool_first_fit as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
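
/*
 * Illustrative sketch (not part of the allocator): switching a pool
 * from the default first-fit policy to best-fit, so that subsequent
 * gen_pool_alloc() calls prefer the smallest free region that still
 * fits the request.
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 *	buf = gen_pool_alloc(pool, 96);
 */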

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 * @start_addr: start address of the chunk being searched
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask, align_off;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	align_off = (start_addr & (alignment->align - 1)) >> order;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);
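
/*
 * Illustrative sketch (not part of the allocator): requesting a
 * 4 KiB-aligned allocation through the generic algo interface rather
 * than the DMA helpers above. gen_pool_alloc_algo() is assumed to be
 * the <linux/genalloc.h> wrapper around gen_pool_alloc_algo_owner().
 *
 *	struct genpool_data_align align_data = { .align = SZ_4K };
 *	unsigned long va;
 *
 *	va = gen_pool_alloc_algo(pool, SZ_4K, gen_pool_first_fit_align,
 *				 &align_data);
 */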

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for the fixed region offset (struct genpool_data_fixed)
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
			start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);
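
/*
 * Illustrative sketch (not part of the allocator): pinning an
 * allocation at a known offset inside the first chunk, e.g. to reuse a
 * firmware-defined layout. The offset must be a multiple of the pool's
 * minimum allocation size.
 *
 *	struct genpool_data_fixed fixed_data = { .offset = 0x100 };
 *	unsigned long va;
 *
 *	va = gen_pool_alloc_algo(pool, 64, gen_pool_fixed_alloc,
 *				 &fixed_data);
 */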

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool,
		unsigned long start_addr)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
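
/*
 * Illustrative sketch (not part of the allocator): a device-managed
 * pool created from a driver's probe routine. The "sram" name, the
 * probe function, and the vaddr/paddr region are hypothetical; the pool
 * itself is destroyed automatically when the device is unbound.
 *
 *	static int hypothetical_probe(struct platform_device *pdev)
 *	{
 *		struct gen_pool *pool;
 *
 *		pool = devm_gen_pool_create(&pdev->dev, ilog2(64),
 *					    NUMA_NO_NODE, "sram");
 *		if (IS_ERR(pool))
 *			return PTR_ERR(pool);
 *
 *		return gen_pool_add_owner(pool, vaddr, paddr, SZ_16K,
 *					  NUMA_NO_NODE, NULL);
 *	}
 */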

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
				 const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
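
/*
 * Illustrative sketch (not part of the allocator): a consumer looking
 * up a pool through a hypothetical "sram" phandle property in its
 * device tree node at probe time, deferring if the pool provider has
 * not been bound yet.
 *
 *	struct gen_pool *pool;
 *
 *	pool = of_gen_pool_get(pdev->dev.of_node, "sram", 0);
 *	if (!pool)
 *		return -EPROBE_DEFER;
 */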
#endif /* CONFIG_OF */