mm/slob.c (Linux v5.4):
// SPDX-License-Identifier: GPL-2.0
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, though most architectures will
 * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked.
 * These objects are detected in kfree() because PageSlab()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, __alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelists, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * a freelist will only be done on pages residing on the same node,
 * in order to prevent random node placement.
 */
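/*
 * Illustrative sketch (added comment, not part of the original source):
 * layout of a small kmalloc()'d block as described above, where
 * 'minalign' stands for max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN):
 *
 *	m                       m + minalign
 *	+-----------------------+---------------------------+
 *	| unsigned int size     | caller's object (size B)  |
 *	+-----------------------+---------------------------+
 *
 * kfree() steps back 'minalign' bytes from the object to recover the
 * size header; see __do_kmalloc_node() and kfree() below.
 */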

#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

#include "slab.h"
/*
 * slob_block has a field 'units', which indicates the size of the block
 * if positive, or the offset of the next block if negative (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;
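
/*
 * Worked example (added for illustration, not part of the original
 * source): a free block spanning 3 units whose next free block starts
 * at unit offset 20 within the page is encoded as s[0].units = 3 and
 * s[1].units = 20, whereas a 1-unit free block with the same successor
 * is encoded in a single word as s[0].units = -20. This matches
 * set_slob() and slob_next() below.
 */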

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
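
/*
 * For illustration (added comment): a 100-byte allocation is served
 * from free_slob_small, a 512-byte one from free_slob_medium, and a
 * 2000-byte one from free_slob_large; see the size checks in
 * slob_alloc() and slob_free() below.
 */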

/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->slab_list, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->slab_list);
	__ClearPageSlobFree(sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
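
/*
 * Example (added for illustration): with 4K pages, slobidx_t is an s16,
 * so SLOB_UNIT is 2 bytes and SLOB_UNITS(10) == 5; sizes are always
 * rounded up to whole units.
 */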

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};
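
/*
 * Sketch of the resulting layout (added for illustration):
 * __kmem_cache_create() grows c->size by sizeof(struct slob_rcu), and
 * kmem_cache_free() writes this footer at b + c->size - sizeof(struct
 * slob_rcu) before handing it to call_rcu(); kmem_rcu_free() then walks
 * back from the footer to the start of the block to free it.
 */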

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base + next;
}

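/*
 * (Added note.) The free list's terminating block stores a 'next' offset
 * exactly one page past the page base, so slob_next() on the last block
 * yields a page-aligned address; slob_last() below tests exactly that by
 * checking that the sub-page bits are zero.
 */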
/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	struct page *page;

#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		page = __alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
			    1 << order);
	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	struct page *sp = virt_to_page(b);

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;

	mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
			    -(1 << order));
	__free_pages(sp, order);
}

/*
 * slob_page_alloc() - Allocate a slob block within a given slob_page sp.
 * @sp: Page to look in.
 * @size: Size of the allocation.
 * @align: Allocation alignment.
 * @align_offset: Offset in the allocated block that will be aligned.
 * @page_removed_from_list: Return parameter.
 *
 * Tries to find a chunk of memory at least @size bytes big within @sp.
 *
 * Return: Pointer to memory if allocated, %NULL otherwise.  If the
 *         allocation fills up @sp then the page is removed from the
 *         freelist, in this case @page_removed_from_list will be set to
 *         true (set to false otherwise).
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align,
			      int align_offset, bool *page_removed_from_list)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	*page_removed_from_list = false;
	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		/*
		 * 'aligned' will hold the address of the slob block so that the
		 * address 'aligned'+'align_offset' is aligned according to the
		 * 'align' parameter. This is for kmalloc() which prepends the
		 * allocated block with its size, so that the block itself is
		 * aligned when needed.
		 */
		if (align) {
			aligned = (slob_t *)
				(ALIGN((unsigned long)cur + align_offset, align)
				 - align_offset);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units) {
				clear_slob_page_free(sp);
				*page_removed_from_list = true;
			}
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
							int align_offset)
{
	struct page *sp;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;
	bool _unused;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, slab_list) {
		bool page_removed_from_list = false;
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		b = slob_page_alloc(sp, size, align, align_offset, &page_removed_from_list);
		if (!b)
			continue;

		/*
		 * If slob_page_alloc() removed sp from the list then we
		 * cannot call list functions on sp.  In that case the
		 * allocation did not fragment the page anyway, so the
		 * optimisation below is unnecessary.
		 */
		if (!page_removed_from_list) {
			/*
			 * Improve fragment distribution and reduce our average
			 * search time by starting our next search here. (see
			 * Knuth vol 1, sec 2.5, pg 449)
			 */
			if (!list_is_first(&sp->slab_list, slob_list))
				list_rotate_to_front(&sp->slab_list, slob_list);
		}
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->slab_list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align, align_offset, &_unused);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely(gfp & __GFP_ZERO))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: release a block back to the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		page_mapcount_reset(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find the
	 * reinsertion point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

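		/*
		 * (Added comment.) Coalesce with the following free block
		 * if adjacent, then with the preceding one; 'units' grows
		 * to cover each merge.
		 */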
		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
	unsigned int *m;
	int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	fs_reclaim_acquire(gfp);
	fs_reclaim_release(gfp);

	if (size < PAGE_SIZE - minalign) {
		int align = minalign;

		/*
		 * For power of two sizes, guarantee natural alignment for
		 * kmalloc()'d objects.
		 */
		if (is_power_of_2(size))
			align = max(minalign, (int) size);

		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + minalign, gfp, align, node, minalign);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + minalign;

		trace_kmalloc_node(caller, ret,
				   size, size + minalign, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);

		trace_kmalloc_node(caller, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}

void *__kmalloc(size_t size, gfp_t gfp)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
					int node, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, node, caller);
}
#endif

void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else {
		unsigned int order = compound_order(sp);
		mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
				    -(1 << order));
		__free_pages(sp, order);
	}
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t __ksize(const void *block)
{
	struct page *sp;
	int align;
	unsigned int *m;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = virt_to_page(block);
	if (unlikely(!PageSlab(sp)))
		return page_size(sp);

	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	m = (unsigned int *)(block - align);
	return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(__ksize);

int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
{
	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}
	c->flags = flags;
	return 0;
}

static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node, 0);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (b && c->ctor) {
		WARN_ON_ONCE(flags & __GFP_ZERO);
		c->ctor(b);
	}

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}

void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
{
	return slob_alloc_node(cachep, gfp, node);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
	__kmem_cache_free_bulk(s, size, p);
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
								void **p)
{
	return __kmem_cache_alloc_bulk(s, flags, size, p);
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);

int __kmem_cache_shutdown(struct kmem_cache *c)
{
	/* No way to check for remaining objects */
	return 0;
}

void __kmem_cache_release(struct kmem_cache *c)
{
}

int __kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}

struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
	.flags = SLAB_PANIC,
	.align = ARCH_KMALLOC_MINALIGN,
};

void __init kmem_cache_init(void)
{
	kmem_cache = &kmem_cache_boot;
	slab_state = UP;
}

void __init kmem_cache_init_late(void)
{
	slab_state = FULL;
}
mm/slob.c (Linux v4.6):
 
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, though most architectures will
 * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked.
 * These objects are detected in kfree() because PageSlab()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, __alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelists, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * a freelist will only be done on pages residing on the same node,
 * in order to prevent random node placement.
 */

#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

#include "slab.h"
/*
 * slob_block has a field 'units', which indicates the size of the block
 * if positive, or the offset of the next block if negative (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->lru, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->lru);
	__ClearPageSlobFree(sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base + next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		page = __alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, lru) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->lru.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->lru);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: release a block back to the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		page_mapcount_reset(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find the
	 * reinsertion point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
	unsigned int *m;
	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(caller, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);

		trace_kmalloc_node(caller, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}

void *__kmalloc(size_t size, gfp_t gfp)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
					int node, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, node, caller);
}
#endif

void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		__free_pages(sp, compound_order(sp));
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct page *sp;
	int align;
	unsigned int *m;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = virt_to_page(block);
	if (unlikely(!PageSlab(sp)))
		return PAGE_SIZE << compound_order(sp);

	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	m = (unsigned int *)(block - align);
	return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(ksize);

int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
{
	if (flags & SLAB_DESTROY_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}
	c->flags = flags;
	return 0;
}

static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	lockdep_trace_alloc(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (b && c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}

void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
{
	return slob_alloc_node(cachep, gfp, node);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
	__kmem_cache_free_bulk(s, size, p);
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
								void **p)
{
	return __kmem_cache_alloc_bulk(s, flags, size, p);
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);

int __kmem_cache_shutdown(struct kmem_cache *c)
{
	/* No way to check for remaining objects */
	return 0;
}

void __kmem_cache_release(struct kmem_cache *c)
{
}

int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
{
	return 0;
}

struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
	.flags = SLAB_PANIC,
	.align = ARCH_KMALLOC_MINALIGN,
};

void __init kmem_cache_init(void)
{
	kmem_cache = &kmem_cache_boot;
	slab_state = UP;
}

void __init kmem_cache_init_late(void)
{
	slab_state = FULL;
}