// SPDX-License-Identifier: GPL-2.0-only
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool a lock still has to
 * be taken.  So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in NMI handlers should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 */

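/*
 * Example (illustrative, not part of the original file): a driver that
 * allocates from a gen_pool in NMI context should express the cmpxchg
 * requirement in its Kconfig entry; the config symbol below is
 * hypothetical:
 *
 *	config MY_NMI_USER
 *		depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
 */
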
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>

/* Number of bytes covered by a chunk; both end addresses are inclusive. */
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

/*
 * Atomically set @mask_to_set in *@addr, retrying on cmpxchg conflicts.
 * Fails with -EBUSY if any of the requested bits is already set.
 */
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

/*
 * Atomically clear @mask_to_clear in *@addr, retrying on cmpxchg
 * conflicts.  Fails with -EBUSY if any of the requested bits is
 * already clear.
 */
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one of them will return the number of bits
 * still to be set; otherwise 0 is returned.
 */
static unsigned long
bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_set) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one of them will return the number of bits
 * still to be cleared; otherwise 0 is returned.
 */
static unsigned long
bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const unsigned long size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr >= bits_to_clear) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

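/*
 * Example (illustrative sketch, not part of the original file): create
 * a pool whose smallest allocation unit is 32 bytes (order 5), with no
 * NUMA node preference:
 *
 *	struct gen_pool *pool = gen_pool_create(5, -1);
 *
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	gen_pool_destroy(pool);	(only once no allocations are outstanding)
 */
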
/**
 * gen_pool_add_owner - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 * @owner: private data the publisher would like to recall at alloc time
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid, void *owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long nbits = size >> pool->min_alloc_order;
	unsigned long nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = vzalloc_node(nbytes, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	chunk->owner = owner;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_owner);

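/*
 * Example (illustrative sketch, not part of the original file): publish
 * a page of on-device memory through the gen_pool_add_virt() wrapper,
 * which calls this function with a NULL owner; vaddr and paddr are
 * placeholders for addresses obtained elsewhere:
 *
 *	int ret = gen_pool_add_virt(pool, (unsigned long)vaddr, paddr,
 *				    SZ_4K, -1);
 *	if (ret)
 *		return ret;
 */
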
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		vfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc_algo_owner - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 * @owner: optionally retrieve the chunk owner
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data, void **owner)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	unsigned long nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		start_bit = 0;
		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool, chunk->start_addr);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		if (owner)
			*owner = chunk->owner;
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo_owner);

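/*
 * Example (illustrative sketch, not part of the original file): most
 * callers go through the gen_pool_alloc()/gen_pool_free() wrappers,
 * which land here with the pool's default algorithm and a NULL owner:
 *
 *	unsigned long addr = gen_pool_alloc(pool, 256);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	gen_pool_free(pool, addr, 256);
 */
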
/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use %NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc);

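/*
 * Example (illustrative sketch, not part of the original file):
 * allocate a buffer and retrieve its DMA-view address in one call:
 *
 *	dma_addr_t dma;
 *	void *buf = gen_pool_dma_alloc(pool, SZ_1K, &dma);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	(hand "dma" to the device, use "buf" from the CPU side)
 */
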
/**
 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
 * usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool. Uses the
 * given pool allocation function. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc_algo(pool, size, algo, data);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc_algo);

/**
 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
 * usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of bytes from the specified pool, with the
 * given alignment restriction. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated memory, or %NULL on failure
 */
void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_alloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_alloc_align);

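/*
 * Example (illustrative sketch, not part of the original file): request
 * a 64-byte-aligned buffer; pass %NULL for @dma if the bus address is
 * not needed:
 *
 *	void *buf = gen_pool_dma_alloc_align(pool, 128, NULL, 64);
 */
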
/**
 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
 * DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value.  Use %NULL if unneeded.
 *
 * Allocate the requested number of zeroed bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc);

/**
 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
 * DMA usage with the given pool algorithm
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of zeroed bytes from the specified pool. Uses
 * the given pool allocation function. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, genpool_algo_t algo, void *data)
{
	void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);

	if (vaddr)
		memset(vaddr, 0, size);

	return vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);

/**
 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
 * DMA usage with the given alignment
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
 * @align: alignment in bytes for starting address
 *
 * Allocate the requested number of zeroed bytes from the specified pool,
 * with the given alignment restriction. Cannot be used in an NMI handler
 * on architectures without an NMI-safe cmpxchg implementation.
 *
 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
 */
void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
		dma_addr_t *dma, int align)
{
	struct genpool_data_align data = { .align = align };

	return gen_pool_dma_zalloc_algo(pool, size, dma,
			gen_pool_first_fit_align, &data);
}
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);

/**
 * gen_pool_free_owner - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 * @owner: private data stashed at gen_pool_add() time
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
		void **owner)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	unsigned long start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (owner)
		*owner = NULL;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			if (owner)
				*owner = chunk->owner;
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free_owner);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:	the generic memory pool
 * @func:	func to call
 * @data:	additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

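/*
 * Example (illustrative sketch, not part of the original file): a
 * hypothetical callback that tallies the size of every chunk:
 *
 *	static void count_chunk(struct gen_pool *pool,
 *				struct gen_pool_chunk *chunk, void *data)
 *	{
 *		*(size_t *)data += chunk->end_addr - chunk->start_addr + 1;
 *	}
 *
 *	size_t total = 0;
 *	gen_pool_for_each_chunk(pool, count_chunk, &total);
 */
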
/**
 * gen_pool_has_addr - checks if an address falls within the range of a pool
 * @pool:	the generic memory pool
 * @start:	start address
 * @size:	size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}
EXPORT_SYMBOL(gen_pool_has_addr);

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);

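/*
 * Example (illustrative sketch, not part of the original file): switch
 * a pool from the default first-fit policy to best-fit, ideally before
 * it serves any allocations:
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */
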
/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 * @start_addr: start address of the allocation chunk
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask, align_off;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	align_off = (start_addr & (alignment->align - 1)) >> order;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);

/**
 * gen_pool_fixed_alloc - reserve a specific region
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data specifying the fixed offset (struct genpool_data_fixed)
 * @pool: pool to get order from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_fixed *fixed_data;
	int order;
	unsigned long offset_bit;
	unsigned long start_bit;

	fixed_data = data;
	order = pool->min_alloc_order;
	offset_bit = fixed_data->offset >> order;
	if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
		return size;

	start_bit = bitmap_find_next_zero_area(map, size,
			start + offset_bit, nr, 0);
	if (start_bit != offset_bit)
		start_bit = size;
	return start_bit;
}
EXPORT_SYMBOL(gen_pool_fixed_alloc);

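/*
 * Example (illustrative sketch, not part of the original file): reserve
 * the region starting at byte offset 0x100 from the chunk base; the
 * offset must be a multiple of the pool's minimum allocation unit:
 *
 *	struct genpool_data_fixed fixed = { .offset = 0x100 };
 *	unsigned long addr = gen_pool_alloc_algo(pool, SZ_256,
 *						 gen_pool_fixed_alloc,
 *						 &fixed);
 */
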
/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool,
		unsigned long start_addr)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 * @start_addr: not used in this function
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		unsigned long next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

/* devres release callback: destroy the managed pool on driver detach */
static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

/* devres match callback: find a managed pool by name */
static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);

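/*
 * Example (illustrative sketch, not part of the original file): in a
 * platform driver's probe(), create a managed pool; devres tears it
 * down on detach, so no explicit gen_pool_destroy() is needed. The
 * pool name "sram" is made up:
 *
 *	struct gen_pool *pool;
 *
 *	pool = devm_gen_pool_create(&pdev->dev, ilog2(64),
 *				    NUMA_NO_NODE, "sram");
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */
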
#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
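
/*
 * Example (illustrative sketch, not part of the original file): given a
 * device-tree fragment along these lines (node and property names are
 * made up):
 *
 *	sram: sram@10000000 { ... };
 *
 *	mydev@20000000 {
 *		mypool = <&sram>;
 *	};
 *
 * a driver could look up the pool with:
 *
 *	struct gen_pool *pool = of_gen_pool_get(dev->of_node, "mypool", 0);
 */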
#endif /* CONFIG_OF */