mm/mempool.c (v4.6)
 
  1/*
  2 *  linux/mm/mempool.c
  3 *
  4 *  memory buffer pool support. Such pools are mostly used
  5 *  for guaranteed, deadlock-free memory allocations during
  6 *  extreme VM load.
  7 *
  8 *  started by Ingo Molnar, Copyright (C) 2001
  9 *  debugging by David Rientjes, Copyright (C) 2015
 10 */
 11
 12#include <linux/mm.h>
 13#include <linux/slab.h>
 14#include <linux/highmem.h>
 15#include <linux/kasan.h>
 16#include <linux/kmemleak.h>
 17#include <linux/export.h>
 18#include <linux/mempool.h>
 19#include <linux/blkdev.h>
 20#include <linux/writeback.h>
 21#include "slab.h"
 22
 23#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
 24static void poison_error(mempool_t *pool, void *element, size_t size,
 25			 size_t byte)
 26{
 27	const int nr = pool->curr_nr;
 28	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
 29	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
 30	int i;
 31
 32	pr_err("BUG: mempool element poison mismatch\n");
 33	pr_err("Mempool %p size %zu\n", pool, size);
 34	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
 35	for (i = start; i < end; i++)
 36		pr_cont("%x ", *(u8 *)(element + i));
 37	pr_cont("%s\n", end < size ? "..." : "");
 38	dump_stack();
 39}
 40
 41static void __check_element(mempool_t *pool, void *element, size_t size)
 42{
 43	u8 *obj = element;
 44	size_t i;
 45
 46	for (i = 0; i < size; i++) {
 47		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;
 48
 49		if (obj[i] != exp) {
 50			poison_error(pool, element, size, i);
 51			return;
 52		}
 53	}
 54	memset(obj, POISON_INUSE, size);
 55}
 56
 57static void check_element(mempool_t *pool, void *element)
 58{
 59	/* Mempools backed by slab allocator */
 60	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
 61		__check_element(pool, element, ksize(element));
 62
 63	/* Mempools backed by page allocator */
 64	if (pool->free == mempool_free_pages) {
 65		int order = (int)(long)pool->pool_data;
 66		void *addr = kmap_atomic((struct page *)element);
 67
 68		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
 69		kunmap_atomic(addr);
 70	}
 71}
 72
 73static void __poison_element(void *element, size_t size)
 74{
 75	u8 *obj = element;
 76
 77	memset(obj, POISON_FREE, size - 1);
 78	obj[size - 1] = POISON_END;
 79}
 80
 81static void poison_element(mempool_t *pool, void *element)
 82{
 83	/* Mempools backed by slab allocator */
 84	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 85		__poison_element(element, ksize(element));
 86
 87	/* Mempools backed by page allocator */
 88	if (pool->alloc == mempool_alloc_pages) {
 89		int order = (int)(long)pool->pool_data;
 90		void *addr = kmap_atomic((struct page *)element);
 91
 92		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
 93		kunmap_atomic(addr);
 94	}
 95}
 96#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
 97static inline void check_element(mempool_t *pool, void *element)
 98{
 99}
100static inline void poison_element(mempool_t *pool, void *element)
101{
102}
103#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
104
105static void kasan_poison_element(mempool_t *pool, void *element)
106{
107	if (pool->alloc == mempool_alloc_slab)
108		kasan_slab_free(pool->pool_data, element);
109	if (pool->alloc == mempool_kmalloc)
110		kasan_kfree(element);
111	if (pool->alloc == mempool_alloc_pages)
112		kasan_free_pages(element, (unsigned long)pool->pool_data);
113}
114
115static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
116{
117	if (pool->alloc == mempool_alloc_slab)
118		kasan_slab_alloc(pool->pool_data, element, flags);
119	if (pool->alloc == mempool_kmalloc)
120		kasan_krealloc(element, (size_t)pool->pool_data, flags);
121	if (pool->alloc == mempool_alloc_pages)
122		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
123}
124
125static void add_element(mempool_t *pool, void *element)
126{
127	BUG_ON(pool->curr_nr >= pool->min_nr);
128	poison_element(pool, element);
129	kasan_poison_element(pool, element);
130	pool->elements[pool->curr_nr++] = element;
131}
132
133static void *remove_element(mempool_t *pool, gfp_t flags)
134{
135	void *element = pool->elements[--pool->curr_nr];
136
137	BUG_ON(pool->curr_nr < 0);
138	kasan_unpoison_element(pool, element, flags);
139	check_element(pool, element);
140	return element;
141}
142
143/**
144 * mempool_destroy - deallocate a memory pool
145 * @pool:      pointer to the memory pool which was allocated via
146 *             mempool_create().
147 *
148 * Free all reserved elements in @pool and @pool itself.  This function
149 * only sleeps if the free_fn() function sleeps.
150 */
151void mempool_destroy(mempool_t *pool)
152{
153	if (unlikely(!pool))
154		return;
155
156	while (pool->curr_nr) {
157		void *element = remove_element(pool, GFP_KERNEL);
158		pool->free(element, pool->pool_data);
159	}
160	kfree(pool->elements);
161	kfree(pool);
162}
163EXPORT_SYMBOL(mempool_destroy);
164
165/**
166 * mempool_create - create a memory pool
167 * @min_nr:    the minimum number of elements guaranteed to be
168 *             allocated for this pool.
169 * @alloc_fn:  user-defined element-allocation function.
170 * @free_fn:   user-defined element-freeing function.
171 * @pool_data: optional private data available to the user-defined functions.
172 *
173 * this function creates and allocates a guaranteed size, preallocated
174 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
175 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
176 * functions might sleep - as long as the mempool_alloc() function is not called
177 * from IRQ contexts.
178 */
179mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
180				mempool_free_t *free_fn, void *pool_data)
181{
182	return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,
183				   GFP_KERNEL, NUMA_NO_NODE);
184}
185EXPORT_SYMBOL(mempool_create);
186
187mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
188			       mempool_free_t *free_fn, void *pool_data,
189			       gfp_t gfp_mask, int node_id)
190{
191	mempool_t *pool;
192	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
193	if (!pool)
194		return NULL;
195	pool->elements = kmalloc_node(min_nr * sizeof(void *),
196				      gfp_mask, node_id);
197	if (!pool->elements) {
198		kfree(pool);
199		return NULL;
200	}
201	spin_lock_init(&pool->lock);
202	pool->min_nr = min_nr;
203	pool->pool_data = pool_data;
204	init_waitqueue_head(&pool->wait);
205	pool->alloc = alloc_fn;
206	pool->free = free_fn;
207
208	/*
209	 * First pre-allocate the guaranteed number of buffers.
210	 */
211	while (pool->curr_nr < pool->min_nr) {
212		void *element;
213
214		element = pool->alloc(gfp_mask, pool->pool_data);
215		if (unlikely(!element)) {
216			mempool_destroy(pool);
217			return NULL;
218		}
219		add_element(pool, element);
220	}
221	return pool;
222}
223EXPORT_SYMBOL(mempool_create_node);
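
A minimal usage sketch, not part of mempool.c: a driver that must always be able to build a handful of request structures, even under heavy memory pressure, typically pairs a kmem_cache with a mempool created as above. The names struct io_request, io_req_cache and io_req_pool are hypothetical and used only for illustration.

#include <linux/mempool.h>
#include <linux/slab.h>

/* Hypothetical request structure; only its size matters to the pool. */
struct io_request {
	void	*data;
	size_t	len;
};

static struct kmem_cache *io_req_cache;
static mempool_t *io_req_pool;

static int io_req_pool_setup(void)
{
	io_req_cache = KMEM_CACHE(io_request, SLAB_HWCACHE_ALIGN);
	if (!io_req_cache)
		return -ENOMEM;

	/*
	 * Reserve four elements up front.  mempool_create() may sleep and
	 * returns NULL on failure; the reserve is topped up again whenever
	 * elements come back through mempool_free().
	 */
	io_req_pool = mempool_create(4, mempool_alloc_slab,
				     mempool_free_slab, io_req_cache);
	if (!io_req_pool) {
		kmem_cache_destroy(io_req_cache);
		return -ENOMEM;
	}
	return 0;
}

static void io_req_pool_teardown(void)
{
	mempool_destroy(io_req_pool);	/* frees the reserve and the pool itself */
	kmem_cache_destroy(io_req_cache);
}

For this particular alloc/free pair, include/linux/mempool.h also provides the mempool_create_slab_pool() shorthand.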
224
225/**
226 * mempool_resize - resize an existing memory pool
227 * @pool:       pointer to the memory pool which was allocated via
228 *              mempool_create().
229 * @new_min_nr: the new minimum number of elements guaranteed to be
230 *              allocated for this pool.
231 *
232 * This function shrinks/grows the pool. In the case of growing,
233 * it cannot be guaranteed that the pool will be grown to the new
234 * size immediately, but new mempool_free() calls will refill it.
235 * This function may sleep.
236 *
237 * Note, the caller must guarantee that no mempool_destroy is called
238 * while this function is running. mempool_alloc() & mempool_free()
239 * might be called (eg. from IRQ contexts) while this function executes.
240 */
241int mempool_resize(mempool_t *pool, int new_min_nr)
242{
243	void *element;
244	void **new_elements;
245	unsigned long flags;
246
247	BUG_ON(new_min_nr <= 0);
248	might_sleep();
249
250	spin_lock_irqsave(&pool->lock, flags);
251	if (new_min_nr <= pool->min_nr) {
252		while (new_min_nr < pool->curr_nr) {
253			element = remove_element(pool, GFP_KERNEL);
254			spin_unlock_irqrestore(&pool->lock, flags);
255			pool->free(element, pool->pool_data);
256			spin_lock_irqsave(&pool->lock, flags);
257		}
258		pool->min_nr = new_min_nr;
259		goto out_unlock;
260	}
261	spin_unlock_irqrestore(&pool->lock, flags);
262
263	/* Grow the pool */
264	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
265				     GFP_KERNEL);
266	if (!new_elements)
267		return -ENOMEM;
268
269	spin_lock_irqsave(&pool->lock, flags);
270	if (unlikely(new_min_nr <= pool->min_nr)) {
271		/* Raced, other resize will do our work */
272		spin_unlock_irqrestore(&pool->lock, flags);
273		kfree(new_elements);
274		goto out;
275	}
276	memcpy(new_elements, pool->elements,
277			pool->curr_nr * sizeof(*new_elements));
278	kfree(pool->elements);
279	pool->elements = new_elements;
280	pool->min_nr = new_min_nr;
281
282	while (pool->curr_nr < pool->min_nr) {
283		spin_unlock_irqrestore(&pool->lock, flags);
284		element = pool->alloc(GFP_KERNEL, pool->pool_data);
285		if (!element)
286			goto out;
287		spin_lock_irqsave(&pool->lock, flags);
288		if (pool->curr_nr < pool->min_nr) {
289			add_element(pool, element);
290		} else {
291			spin_unlock_irqrestore(&pool->lock, flags);
292			pool->free(element, pool->pool_data);	/* Raced */
293			goto out;
294		}
295	}
296out_unlock:
297	spin_unlock_irqrestore(&pool->lock, flags);
298out:
299	return 0;
300}
301EXPORT_SYMBOL(mempool_resize);
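
A short sketch of how a caller might use mempool_resize(), under the constraints stated in the comment above: the pool must not be destroyed concurrently and the new minimum must stay positive. The pool pointer and nr_devices are illustrative names only.

/* Keep roughly two guaranteed elements per attached device (nr_devices >= 1). */
static int io_req_pool_rescale(mempool_t *pool, int nr_devices)
{
	/*
	 * May sleep.  Shrinking frees surplus elements immediately; growing
	 * allocates new elements with GFP_KERNEL and, if that fails, relies
	 * on later mempool_free() calls to refill up to the new minimum.
	 */
	return mempool_resize(pool, 2 * nr_devices);
}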
302
303/**
304 * mempool_alloc - allocate an element from a specific memory pool
305 * @pool:      pointer to the memory pool which was allocated via
306 *             mempool_create().
307 * @gfp_mask:  the usual allocation bitmask.
308 *
309 * this function only sleeps if the alloc_fn() function sleeps or
310 * returns NULL. Note that due to preallocation, this function
311 * *never* fails when called from process contexts. (it might
312 * fail if called from an IRQ context.)
313 * Note: neither __GFP_NOMEMALLOC nor __GFP_ZERO are supported.
314 */
315void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
316{
317	void *element;
318	unsigned long flags;
319	wait_queue_t wait;
320	gfp_t gfp_temp;
321
322	/* If oom killed, memory reserves are essential to prevent livelock */
323	VM_WARN_ON_ONCE(gfp_mask & __GFP_NOMEMALLOC);
324	/* No element size to zero on allocation */
325	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
326
327	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
328
329	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
330	gfp_mask |= __GFP_NOWARN;	/* failures are OK */
331
332	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);
333
334repeat_alloc:
335	if (likely(pool->curr_nr)) {
336		/*
337		 * Don't allocate from emergency reserves if there are
338		 * elements available.  This check is racy, but it will
339		 * be rechecked each loop.
340		 */
341		gfp_temp |= __GFP_NOMEMALLOC;
342	}
343
344	element = pool->alloc(gfp_temp, pool->pool_data);
345	if (likely(element != NULL))
346		return element;
347
348	spin_lock_irqsave(&pool->lock, flags);
349	if (likely(pool->curr_nr)) {
350		element = remove_element(pool, gfp_temp);
351		spin_unlock_irqrestore(&pool->lock, flags);
352		/* paired with rmb in mempool_free(), read comment there */
353		smp_wmb();
354		/*
355		 * Update the allocation stack trace as this is more useful
356		 * for debugging.
357		 */
358		kmemleak_update_trace(element);
359		return element;
360	}
361
362	/*
363	 * We use gfp mask w/o direct reclaim or IO for the first round.  If
364	 * alloc failed with that and @pool was empty, retry immediately.
365	 */
366	if ((gfp_temp & ~__GFP_NOMEMALLOC) != gfp_mask) {
367		spin_unlock_irqrestore(&pool->lock, flags);
368		gfp_temp = gfp_mask;
369		goto repeat_alloc;
370	}
371	gfp_temp = gfp_mask;
372
373	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
374	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
375		spin_unlock_irqrestore(&pool->lock, flags);
376		return NULL;
377	}
378
379	/* Let's wait for someone else to return an element to @pool */
380	init_wait(&wait);
381	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
382
383	spin_unlock_irqrestore(&pool->lock, flags);
384
385	/*
386	 * FIXME: this should be io_schedule().  The timeout is there as a
387	 * workaround for some DM problems in 2.6.18.
388	 */
389	io_schedule_timeout(5*HZ);
390
391	finish_wait(&pool->wait, &wait);
392	goto repeat_alloc;
393}
394EXPORT_SYMBOL(mempool_alloc);
395
396/**
397 * mempool_free - return an element to the pool.
398 * @element:   pool element pointer.
399 * @pool:      pointer to the memory pool which was allocated via
400 *             mempool_create().
401 *
402 * this function only sleeps if the free_fn() function sleeps.
403 */
404void mempool_free(void *element, mempool_t *pool)
405{
406	unsigned long flags;
407
408	if (unlikely(element == NULL))
409		return;
410
411	/*
412	 * Paired with the wmb in mempool_alloc().  The preceding read is
413	 * for @element and the following @pool->curr_nr.  This ensures
414	 * that the visible value of @pool->curr_nr is from after the
415	 * allocation of @element.  This is necessary for fringe cases
416	 * where @element was passed to this task without going through
417	 * barriers.
418	 *
419	 * For example, assume @p is %NULL at the beginning and one task
420	 * performs "p = mempool_alloc(...);" while another task is doing
421	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
422	 * may end up using curr_nr value which is from before allocation
423	 * of @p without the following rmb.
424	 */
425	smp_rmb();
426
427	/*
428	 * For correctness, we need a test which is guaranteed to trigger
429	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
430	 * without locking achieves that and refilling as soon as possible
431	 * is desirable.
432	 *
433	 * Because curr_nr visible here is always a value after the
434	 * allocation of @element, any task which decremented curr_nr below
435	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
436	 * incremented to min_nr afterwards.  If curr_nr gets incremented
437	 * to min_nr after the allocation of @element, the elements
438	 * allocated after that are subject to the same guarantee.
439	 *
440	 * Waiters happen iff curr_nr is 0 and the above guarantee also
441	 * ensures that there will be frees which return elements to the
442	 * pool waking up the waiters.
443	 */
444	if (unlikely(pool->curr_nr < pool->min_nr)) {
445		spin_lock_irqsave(&pool->lock, flags);
446		if (likely(pool->curr_nr < pool->min_nr)) {
447			add_element(pool, element);
448			spin_unlock_irqrestore(&pool->lock, flags);
449			wake_up(&pool->wait);
450			return;
451		}
452		spin_unlock_irqrestore(&pool->lock, flags);
453	}
454	pool->free(element, pool->pool_data);
455}
456EXPORT_SYMBOL(mempool_free);
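
A sketch of the allocate/use/free pattern that mempool_alloc() and mempool_free() are designed for, e.g. in an I/O submission path. It reuses the illustrative struct io_request from the sketch after mempool_create_node(); the key points are the GFP_NOIO mask and the no-failure guarantee in process context.

static void submit_one_request(mempool_t *pool, void *payload, size_t len)
{
	struct io_request *req;

	/*
	 * GFP_NOIO permits direct reclaim but no I/O, so the allocation
	 * cannot recurse into this driver.  In process context the call
	 * never returns NULL: if both the allocator and the reserve are
	 * empty, mempool_alloc() sleeps until another element is returned
	 * via mempool_free().
	 */
	req = mempool_alloc(pool, GFP_NOIO);
	req->data = payload;
	req->len  = len;

	/* ... submit req to the hardware ... */

	/* When the request completes, give the element back to the pool. */
	mempool_free(req, pool);
}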
457
458/*
459 * A commonly used alloc and free fn.
460 */
461void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
462{
463	struct kmem_cache *mem = pool_data;
464	VM_BUG_ON(mem->ctor);
465	return kmem_cache_alloc(mem, gfp_mask);
466}
467EXPORT_SYMBOL(mempool_alloc_slab);
468
469void mempool_free_slab(void *element, void *pool_data)
470{
471	struct kmem_cache *mem = pool_data;
472	kmem_cache_free(mem, element);
473}
474EXPORT_SYMBOL(mempool_free_slab);
475
476/*
477 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
478 * specified by pool_data
479 */
480void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
481{
482	size_t size = (size_t)pool_data;
483	return kmalloc(size, gfp_mask);
484}
485EXPORT_SYMBOL(mempool_kmalloc);
486
487void mempool_kfree(void *element, void *pool_data)
488{
489	kfree(element);
490}
491EXPORT_SYMBOL(mempool_kfree);
492
493/*
494 * A simple mempool-backed page allocator that allocates pages
495 * of the order specified by pool_data.
496 */
497void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
498{
499	int order = (int)(long)pool_data;
500	return alloc_pages(gfp_mask, order);
501}
502EXPORT_SYMBOL(mempool_alloc_pages);
503
504void mempool_free_pages(void *element, void *pool_data)
505{
506	int order = (int)(long)pool_data;
507	__free_pages(element, order);
508}
509EXPORT_SYMBOL(mempool_free_pages);
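
For completeness, a sketch using the other two stock backends defined above: a kmalloc()-backed pool whose pool_data carries the buffer size, and a page-allocator-backed pool whose pool_data carries the page order. The sizes, orders and variable names are arbitrary examples; include/linux/mempool.h also offers the mempool_create_kmalloc_pool() and mempool_create_page_pool() wrappers for these pairs.

static mempool_t *buf_pool;	/* illustrative */
static mempool_t *pg_pool;	/* illustrative */

static int make_helper_pools(void)
{
	/* Eight guaranteed 512-byte buffers; pool_data is the kmalloc size. */
	buf_pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
				  (void *)(size_t)512);
	if (!buf_pool)
		return -ENOMEM;

	/*
	 * Two guaranteed order-1 (two-page) blocks; pool_data is the order.
	 * Note that mempool_alloc() on this pool returns a struct page *,
	 * not a mapped kernel address.
	 */
	pg_pool = mempool_create(2, mempool_alloc_pages, mempool_free_pages,
				 (void *)(long)1);
	if (!pg_pool) {
		mempool_destroy(buf_pool);
		return -ENOMEM;
	}
	return 0;
}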

mm/mempool.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/mm/mempool.c
  4 *
  5 *  memory buffer pool support. Such pools are mostly used
  6 *  for guaranteed, deadlock-free memory allocations during
  7 *  extreme VM load.
  8 *
  9 *  started by Ingo Molnar, Copyright (C) 2001
 10 *  debugging by David Rientjes, Copyright (C) 2015
 11 */
 12
 13#include <linux/mm.h>
 14#include <linux/slab.h>
 15#include <linux/highmem.h>
 16#include <linux/kasan.h>
 17#include <linux/kmemleak.h>
 18#include <linux/export.h>
 19#include <linux/mempool.h>
 20#include <linux/writeback.h>
 21#include "slab.h"
 22
 23#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
 24static void poison_error(mempool_t *pool, void *element, size_t size,
 25			 size_t byte)
 26{
 27	const int nr = pool->curr_nr;
 28	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
 29	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
 30	int i;
 31
 32	pr_err("BUG: mempool element poison mismatch\n");
 33	pr_err("Mempool %p size %zu\n", pool, size);
 34	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
 35	for (i = start; i < end; i++)
 36		pr_cont("%x ", *(u8 *)(element + i));
 37	pr_cont("%s\n", end < size ? "..." : "");
 38	dump_stack();
 39}
 40
 41static void __check_element(mempool_t *pool, void *element, size_t size)
 42{
 43	u8 *obj = element;
 44	size_t i;
 45
 46	for (i = 0; i < size; i++) {
 47		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;
 48
 49		if (obj[i] != exp) {
 50			poison_error(pool, element, size, i);
 51			return;
 52		}
 53	}
 54	memset(obj, POISON_INUSE, size);
 55}
 56
 57static void check_element(mempool_t *pool, void *element)
 58{
 59	/* Mempools backed by slab allocator */
 60	if (pool->free == mempool_kfree) {
 61		__check_element(pool, element, (size_t)pool->pool_data);
 62	} else if (pool->free == mempool_free_slab) {
 63		__check_element(pool, element, kmem_cache_size(pool->pool_data));
 64	} else if (pool->free == mempool_free_pages) {
 65		/* Mempools backed by page allocator */
 66		int order = (int)(long)pool->pool_data;
 67		void *addr = kmap_atomic((struct page *)element);
 68
 69		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
 70		kunmap_atomic(addr);
 71	}
 72}
 73
 74static void __poison_element(void *element, size_t size)
 75{
 76	u8 *obj = element;
 77
 78	memset(obj, POISON_FREE, size - 1);
 79	obj[size - 1] = POISON_END;
 80}
 81
 82static void poison_element(mempool_t *pool, void *element)
 83{
 84	/* Mempools backed by slab allocator */
 85	if (pool->alloc == mempool_kmalloc) {
 86		__poison_element(element, (size_t)pool->pool_data);
 87	} else if (pool->alloc == mempool_alloc_slab) {
 88		__poison_element(element, kmem_cache_size(pool->pool_data));
 89	} else if (pool->alloc == mempool_alloc_pages) {
 90		/* Mempools backed by page allocator */
 91		int order = (int)(long)pool->pool_data;
 92		void *addr = kmap_atomic((struct page *)element);
 93
 94		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
 95		kunmap_atomic(addr);
 96	}
 97}
 98#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
 99static inline void check_element(mempool_t *pool, void *element)
100{
101}
102static inline void poison_element(mempool_t *pool, void *element)
103{
104}
105#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
106
107static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
108{
109	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
110		kasan_slab_free_mempool(element);
111	else if (pool->alloc == mempool_alloc_pages)
112		kasan_poison_pages(element, (unsigned long)pool->pool_data,
113				   false);
114}
115
116static void kasan_unpoison_element(mempool_t *pool, void *element)
117{
118	if (pool->alloc == mempool_kmalloc)
119		kasan_unpoison_range(element, (size_t)pool->pool_data);
120	else if (pool->alloc == mempool_alloc_slab)
121		kasan_unpoison_range(element, kmem_cache_size(pool->pool_data));
122	else if (pool->alloc == mempool_alloc_pages)
123		kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
124				     false);
125}
126
127static __always_inline void add_element(mempool_t *pool, void *element)
128{
129	BUG_ON(pool->curr_nr >= pool->min_nr);
130	poison_element(pool, element);
131	kasan_poison_element(pool, element);
132	pool->elements[pool->curr_nr++] = element;
133}
134
135static void *remove_element(mempool_t *pool)
136{
137	void *element = pool->elements[--pool->curr_nr];
138
139	BUG_ON(pool->curr_nr < 0);
140	kasan_unpoison_element(pool, element);
141	check_element(pool, element);
142	return element;
143}
144
145/**
146 * mempool_exit - exit a mempool initialized with mempool_init()
147 * @pool:      pointer to the memory pool which was initialized with
148 *             mempool_init().
149 *
150 * Free all reserved elements in @pool and @pool itself.  This function
151 * only sleeps if the free_fn() function sleeps.
152 *
153 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
154 * kzalloc()).
155 */
156void mempool_exit(mempool_t *pool)
157{
158	while (pool->curr_nr) {
159		void *element = remove_element(pool);
160		pool->free(element, pool->pool_data);
161	}
162	kfree(pool->elements);
163	pool->elements = NULL;
164}
165EXPORT_SYMBOL(mempool_exit);
166
167/**
168 * mempool_destroy - deallocate a memory pool
169 * @pool:      pointer to the memory pool which was allocated via
170 *             mempool_create().
171 *
172 * Free all reserved elements in @pool and @pool itself.  This function
173 * only sleeps if the free_fn() function sleeps.
174 */
175void mempool_destroy(mempool_t *pool)
176{
177	if (unlikely(!pool))
178		return;
179
180	mempool_exit(pool);
181	kfree(pool);
182}
183EXPORT_SYMBOL(mempool_destroy);
184
185int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
186		      mempool_free_t *free_fn, void *pool_data,
187		      gfp_t gfp_mask, int node_id)
188{
189	spin_lock_init(&pool->lock);
190	pool->min_nr	= min_nr;
191	pool->pool_data = pool_data;
192	pool->alloc	= alloc_fn;
193	pool->free	= free_fn;
194	init_waitqueue_head(&pool->wait);
195
196	pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
197					    gfp_mask, node_id);
198	if (!pool->elements)
199		return -ENOMEM;
200
201	/*
202	 * First pre-allocate the guaranteed number of buffers.
203	 */
204	while (pool->curr_nr < pool->min_nr) {
205		void *element;
206
207		element = pool->alloc(gfp_mask, pool->pool_data);
208		if (unlikely(!element)) {
209			mempool_exit(pool);
210			return -ENOMEM;
211		}
212		add_element(pool, element);
213	}
214
215	return 0;
216}
217EXPORT_SYMBOL(mempool_init_node);
218
219/**
220 * mempool_init - initialize a memory pool
221 * @pool:      pointer to the memory pool that should be initialized
222 * @min_nr:    the minimum number of elements guaranteed to be
223 *             allocated for this pool.
224 * @alloc_fn:  user-defined element-allocation function.
225 * @free_fn:   user-defined element-freeing function.
226 * @pool_data: optional private data available to the user-defined functions.
227 *
228 * Like mempool_create(), but initializes the pool in place (i.e. embedded in
229 * another structure).
230 *
231 * Return: %0 on success, negative error code otherwise.
232 */
233int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
234		 mempool_free_t *free_fn, void *pool_data)
235{
236	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
237				 pool_data, GFP_KERNEL, NUMA_NO_NODE);
238
239}
240EXPORT_SYMBOL(mempool_init);
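
A sketch of the embedded form that mempool_init()/mempool_exit() enable in this newer version: the pool lives inside the driver's own structure rather than being allocated separately, much as struct bio_set embeds its mempools. struct my_dev and its fields are illustrative.

struct my_dev {
	struct kmem_cache	*req_cache;	/* hypothetical backing cache */
	mempool_t		req_pool;	/* embedded, not a pointer */
};

static int my_dev_init_pool(struct my_dev *dev)
{
	/* Returns 0 or -ENOMEM; no separate allocation of the pool itself. */
	return mempool_init(&dev->req_pool, 4, mempool_alloc_slab,
			    mempool_free_slab, dev->req_cache);
}

static void my_dev_exit_pool(struct my_dev *dev)
{
	/* Frees the reserve; safe even on a zeroed, never-initialized pool. */
	mempool_exit(&dev->req_pool);
}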
241
242/**
243 * mempool_create - create a memory pool
244 * @min_nr:    the minimum number of elements guaranteed to be
245 *             allocated for this pool.
246 * @alloc_fn:  user-defined element-allocation function.
247 * @free_fn:   user-defined element-freeing function.
248 * @pool_data: optional private data available to the user-defined functions.
249 *
250 * this function creates and allocates a guaranteed size, preallocated
251 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
252 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
253 * functions might sleep - as long as the mempool_alloc() function is not called
254 * from IRQ contexts.
255 *
256 * Return: pointer to the created memory pool object or %NULL on error.
257 */
258mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
259				mempool_free_t *free_fn, void *pool_data)
260{
261	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
262				   GFP_KERNEL, NUMA_NO_NODE);
263}
264EXPORT_SYMBOL(mempool_create);
265
266mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
267			       mempool_free_t *free_fn, void *pool_data,
268			       gfp_t gfp_mask, int node_id)
269{
270	mempool_t *pool;
271
272	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
273	if (!pool)
274		return NULL;
275
276	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
277			      gfp_mask, node_id)) {
278		kfree(pool);
279		return NULL;
280	}
281
282	return pool;
283}
284EXPORT_SYMBOL(mempool_create_node);
285
286/**
287 * mempool_resize - resize an existing memory pool
288 * @pool:       pointer to the memory pool which was allocated via
289 *              mempool_create().
290 * @new_min_nr: the new minimum number of elements guaranteed to be
291 *              allocated for this pool.
292 *
293 * This function shrinks/grows the pool. In the case of growing,
294 * it cannot be guaranteed that the pool will be grown to the new
295 * size immediately, but new mempool_free() calls will refill it.
296 * This function may sleep.
297 *
298 * Note, the caller must guarantee that no mempool_destroy is called
299 * while this function is running. mempool_alloc() & mempool_free()
300 * might be called (eg. from IRQ contexts) while this function executes.
301 *
302 * Return: %0 on success, negative error code otherwise.
303 */
304int mempool_resize(mempool_t *pool, int new_min_nr)
305{
306	void *element;
307	void **new_elements;
308	unsigned long flags;
309
310	BUG_ON(new_min_nr <= 0);
311	might_sleep();
312
313	spin_lock_irqsave(&pool->lock, flags);
314	if (new_min_nr <= pool->min_nr) {
315		while (new_min_nr < pool->curr_nr) {
316			element = remove_element(pool);
317			spin_unlock_irqrestore(&pool->lock, flags);
318			pool->free(element, pool->pool_data);
319			spin_lock_irqsave(&pool->lock, flags);
320		}
321		pool->min_nr = new_min_nr;
322		goto out_unlock;
323	}
324	spin_unlock_irqrestore(&pool->lock, flags);
325
326	/* Grow the pool */
327	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
328				     GFP_KERNEL);
329	if (!new_elements)
330		return -ENOMEM;
331
332	spin_lock_irqsave(&pool->lock, flags);
333	if (unlikely(new_min_nr <= pool->min_nr)) {
334		/* Raced, other resize will do our work */
335		spin_unlock_irqrestore(&pool->lock, flags);
336		kfree(new_elements);
337		goto out;
338	}
339	memcpy(new_elements, pool->elements,
340			pool->curr_nr * sizeof(*new_elements));
341	kfree(pool->elements);
342	pool->elements = new_elements;
343	pool->min_nr = new_min_nr;
344
345	while (pool->curr_nr < pool->min_nr) {
346		spin_unlock_irqrestore(&pool->lock, flags);
347		element = pool->alloc(GFP_KERNEL, pool->pool_data);
348		if (!element)
349			goto out;
350		spin_lock_irqsave(&pool->lock, flags);
351		if (pool->curr_nr < pool->min_nr) {
352			add_element(pool, element);
353		} else {
354			spin_unlock_irqrestore(&pool->lock, flags);
355			pool->free(element, pool->pool_data);	/* Raced */
356			goto out;
357		}
358	}
359out_unlock:
360	spin_unlock_irqrestore(&pool->lock, flags);
361out:
362	return 0;
363}
364EXPORT_SYMBOL(mempool_resize);
365
366/**
367 * mempool_alloc - allocate an element from a specific memory pool
368 * @pool:      pointer to the memory pool which was allocated via
369 *             mempool_create().
370 * @gfp_mask:  the usual allocation bitmask.
371 *
372 * this function only sleeps if the alloc_fn() function sleeps or
373 * returns NULL. Note that due to preallocation, this function
374 * *never* fails when called from process contexts. (it might
375 * fail if called from an IRQ context.)
376 * Note: using __GFP_ZERO is not supported.
377 *
378 * Return: pointer to the allocated element or %NULL on error.
379 */
380void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
381{
382	void *element;
383	unsigned long flags;
384	wait_queue_entry_t wait;
385	gfp_t gfp_temp;
386
387	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
388	might_alloc(gfp_mask);
389
390	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
391	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
392	gfp_mask |= __GFP_NOWARN;	/* failures are OK */
393
394	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);
395
396repeat_alloc:
397
398	element = pool->alloc(gfp_temp, pool->pool_data);
399	if (likely(element != NULL))
400		return element;
401
402	spin_lock_irqsave(&pool->lock, flags);
403	if (likely(pool->curr_nr)) {
404		element = remove_element(pool);
405		spin_unlock_irqrestore(&pool->lock, flags);
406		/* paired with rmb in mempool_free(), read comment there */
407		smp_wmb();
408		/*
409		 * Update the allocation stack trace as this is more useful
410		 * for debugging.
411		 */
412		kmemleak_update_trace(element);
413		return element;
414	}
415
416	/*
417	 * We use gfp mask w/o direct reclaim or IO for the first round.  If
418	 * alloc failed with that and @pool was empty, retry immediately.
419	 */
420	if (gfp_temp != gfp_mask) {
421		spin_unlock_irqrestore(&pool->lock, flags);
422		gfp_temp = gfp_mask;
423		goto repeat_alloc;
424	}
425
426	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
427	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
428		spin_unlock_irqrestore(&pool->lock, flags);
429		return NULL;
430	}
431
432	/* Let's wait for someone else to return an element to @pool */
433	init_wait(&wait);
434	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
435
436	spin_unlock_irqrestore(&pool->lock, flags);
437
438	/*
439	 * FIXME: this should be io_schedule().  The timeout is there as a
440	 * workaround for some DM problems in 2.6.18.
441	 */
442	io_schedule_timeout(5*HZ);
443
444	finish_wait(&pool->wait, &wait);
445	goto repeat_alloc;
446}
447EXPORT_SYMBOL(mempool_alloc);
448
449/**
450 * mempool_free - return an element to the pool.
451 * @element:   pool element pointer.
452 * @pool:      pointer to the memory pool which was allocated via
453 *             mempool_create().
454 *
455 * this function only sleeps if the free_fn() function sleeps.
456 */
457void mempool_free(void *element, mempool_t *pool)
458{
459	unsigned long flags;
460
461	if (unlikely(element == NULL))
462		return;
463
464	/*
465	 * Paired with the wmb in mempool_alloc().  The preceding read is
466	 * for @element and the following @pool->curr_nr.  This ensures
467	 * that the visible value of @pool->curr_nr is from after the
468	 * allocation of @element.  This is necessary for fringe cases
469	 * where @element was passed to this task without going through
470	 * barriers.
471	 *
472	 * For example, assume @p is %NULL at the beginning and one task
473	 * performs "p = mempool_alloc(...);" while another task is doing
474	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
475	 * may end up using curr_nr value which is from before allocation
476	 * of @p without the following rmb.
477	 */
478	smp_rmb();
479
480	/*
481	 * For correctness, we need a test which is guaranteed to trigger
482	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
483	 * without locking achieves that and refilling as soon as possible
484	 * is desirable.
485	 *
486	 * Because curr_nr visible here is always a value after the
487	 * allocation of @element, any task which decremented curr_nr below
488	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
489	 * incremented to min_nr afterwards.  If curr_nr gets incremented
490	 * to min_nr after the allocation of @element, the elements
491	 * allocated after that are subject to the same guarantee.
492	 *
493	 * Waiters happen iff curr_nr is 0 and the above guarantee also
494	 * ensures that there will be frees which return elements to the
495	 * pool waking up the waiters.
496	 */
497	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
498		spin_lock_irqsave(&pool->lock, flags);
499		if (likely(pool->curr_nr < pool->min_nr)) {
500			add_element(pool, element);
501			spin_unlock_irqrestore(&pool->lock, flags);
502			wake_up(&pool->wait);
503			return;
504		}
505		spin_unlock_irqrestore(&pool->lock, flags);
506	}
507	pool->free(element, pool->pool_data);
508}
509EXPORT_SYMBOL(mempool_free);
510
511/*
512 * A commonly used alloc and free fn.
513 */
514void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
515{
516	struct kmem_cache *mem = pool_data;
517	VM_BUG_ON(mem->ctor);
518	return kmem_cache_alloc(mem, gfp_mask);
519}
520EXPORT_SYMBOL(mempool_alloc_slab);
521
522void mempool_free_slab(void *element, void *pool_data)
523{
524	struct kmem_cache *mem = pool_data;
525	kmem_cache_free(mem, element);
526}
527EXPORT_SYMBOL(mempool_free_slab);
528
529/*
530 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
531 * specified by pool_data
532 */
533void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
534{
535	size_t size = (size_t)pool_data;
536	return kmalloc(size, gfp_mask);
537}
538EXPORT_SYMBOL(mempool_kmalloc);
539
540void mempool_kfree(void *element, void *pool_data)
541{
542	kfree(element);
543}
544EXPORT_SYMBOL(mempool_kfree);
545
546/*
547 * A simple mempool-backed page allocator that allocates pages
548 * of the order specified by pool_data.
549 */
550void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
551{
552	int order = (int)(long)pool_data;
553	return alloc_pages(gfp_mask, order);
554}
555EXPORT_SYMBOL(mempool_alloc_pages);
556
557void mempool_free_pages(void *element, void *pool_data)
558{
559	int order = (int)(long)pool_data;
560	__free_pages(element, order);
561}
562EXPORT_SYMBOL(mempool_free_pages);