mm/slob.c (Linux v6.2)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * SLOB Allocator: Simple List Of Blocks
  4 *
  5 * Matt Mackall <mpm@selenic.com> 12/30/03
  6 *
  7 * NUMA support by Paul Mundt, 2007.
  8 *
  9 * How SLOB works:
 10 *
 11 * The core of SLOB is a traditional K&R style heap allocator, with
 12 * support for returning aligned objects. The granularity of this
 13 * allocator is as little as 2 bytes, however typically most architectures
 14 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 15 *
 16 * The slob heap is a set of linked lists of pages from alloc_pages(),
 17 * and within each page, there is a singly-linked list of free blocks
 18 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 19 * heap pages are segregated into three lists, with objects less than
 20 * 256 bytes, objects less than 1024 bytes, and all other objects.
 21 *
 22 * Allocation from heap involves first searching for a page with
 23 * sufficient free blocks (using a next-fit-like approach) followed by
 24 * a first-fit scan of the page. Deallocation inserts objects back
 25 * into the free list in address order, so this is effectively an
 26 * address-ordered first fit.
 27 *
 28 * Above this is an implementation of kmalloc/kfree. Blocks returned
 29 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 30 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 31 * alloc_pages() directly, allocating compound pages so the page order
 32 * does not have to be separately tracked.
 33 * These objects are detected in kfree() because folio_test_slab()
 34 * is false for them.
 35 *
 36 * SLAB is emulated on top of SLOB by simply calling constructors and
 37 * destructors for every SLAB allocation. Objects are returned with the
 38 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 39 * case the low-level allocator will fragment blocks to create the proper
 40 * alignment. Again, objects of page-size or greater are allocated by
 41 * calling alloc_pages(). As SLAB objects know their size, no separate
 42 * size bookkeeping is necessary and there is essentially no allocation
 43 * space overhead, and compound pages aren't needed for multi-page
 44 * allocations.
 45 *
 46 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 47 * logic down to the page allocator, and simply doing the node accounting
 48 * on the upper levels. In the event that a node id is explicitly
 49 * provided, __alloc_pages_node() with the specified node id is used
 50 * instead. The common case (or when the node id isn't explicitly provided)
 51 * will default to the current node, as per numa_node_id().
 52 *
 53 * Node aware pages are still inserted into the global freelist, and
 54 * these are scanned for by matching against the node id encoded in the
 55 * page flags. As a result, block allocations that can be satisfied from
 56 * the freelist will only be done so on pages residing on the same node,
 57 * in order to prevent random node placement.
 58 */
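/*
 * Illustrative walk-through (not part of the original file): a
 * kmalloc(100) request falls in the "small" class, so slob_alloc()
 * scans free_slob_small for a partially free page with enough free
 * units (and, on NUMA, a matching node) before falling back to a fresh
 * page from the page allocator. The returned block carries a small
 * header recording the requested size, which kfree() and ksize() read
 * back later.
 */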
 59
 60#include <linux/kernel.h>
 61#include <linux/slab.h>
 62
 63#include <linux/mm.h>
 64#include <linux/swap.h> /* struct reclaim_state */
 65#include <linux/cache.h>
 66#include <linux/init.h>
 67#include <linux/export.h>
 68#include <linux/rcupdate.h>
 69#include <linux/list.h>
 70#include <linux/kmemleak.h>
 71
 72#include <trace/events/kmem.h>
 73
 74#include <linux/atomic.h>
 75
 76#include "slab.h"
 77/*
 78 * slob_block has a field 'units', which indicates size of block if +ve,
 79 * or offset of next block if -ve (in SLOB_UNITs).
 80 *
 81 * Free blocks of size 1 unit simply contain the offset of the next block.
 82 * Those with larger size contain their size in the first SLOB_UNIT of
 83 * memory, and the offset of the next free block in the second SLOB_UNIT.
 84 */
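/*
 * Worked example of the encoding above, assuming SLOB_UNIT == 2 bytes:
 * a 3-unit free block whose next free block sits 40 units from the
 * start of the page stores units = 3 in its first slob_t and 40 in its
 * second; a 1-unit free block in the same situation stores only -40.
 */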
 85#if PAGE_SIZE <= (32767 * 2)
 86typedef s16 slobidx_t;
 87#else
 88typedef s32 slobidx_t;
 89#endif
 90
 91struct slob_block {
 92	slobidx_t units;
 93};
 94typedef struct slob_block slob_t;
 95
 96/*
 97 * All partially free slob pages go on these lists.
 98 */
 99#define SLOB_BREAK1 256
100#define SLOB_BREAK2 1024
101static LIST_HEAD(free_slob_small);
102static LIST_HEAD(free_slob_medium);
103static LIST_HEAD(free_slob_large);
104
105/*
106 * slob_page_free: true for pages on free_slob_pages list.
107 */
108static inline int slob_page_free(struct slab *slab)
109{
110	return PageSlobFree(slab_page(slab));
111}
112
113static void set_slob_page_free(struct slab *slab, struct list_head *list)
114{
115	list_add(&slab->slab_list, list);
116	__SetPageSlobFree(slab_page(slab));
117}
118
119static inline void clear_slob_page_free(struct slab *slab)
120{
121	list_del(&slab->slab_list);
122	__ClearPageSlobFree(slab_page(slab));
123}
124
125#define SLOB_UNIT sizeof(slob_t)
126#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
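/*
 * For example, with 4 KiB pages slobidx_t is s16, so SLOB_UNIT is
 * 2 bytes, SLOB_UNITS(100) == 50, and a whole page holds
 * SLOB_UNITS(PAGE_SIZE) == 2048 units.
 */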
127
128/*
129 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
130 * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
131 * the block using call_rcu.
132 */
133struct slob_rcu {
134	struct rcu_head head;
135	int size;
136};
137
138/*
139 * slob_lock protects all slob allocator structures.
140 */
141static DEFINE_SPINLOCK(slob_lock);
142
143/*
144 * Encode the given size and next info into a free slob block s.
145 */
146static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
147{
148	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
149	slobidx_t offset = next - base;
150
151	if (size > 1) {
152		s[0].units = size;
153		s[1].units = offset;
154	} else
155		s[0].units = -offset;
156}
157
158/*
159 * Return the size of a slob block.
160 */
161static slobidx_t slob_units(slob_t *s)
162{
163	if (s->units > 0)
164		return s->units;
165	return 1;
166}
167
168/*
169 * Return the next free slob block pointer after this one.
170 */
171static slob_t *slob_next(slob_t *s)
172{
173	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
174	slobidx_t next;
175
176	if (s[0].units < 0)
177		next = -s[0].units;
178	else
179		next = s[1].units;
180	return base+next;
181}
182
183/*
184 * Returns true if s is the last free block in its page.
185 */
186static int slob_last(slob_t *s)
187{
188	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
189}
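/*
 * Note: the list terminator is a "next" offset that lands on the first
 * byte of the following page, so slob_next() of the last free block is
 * page aligned and the test above sees no low bits set.
 */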
190
191static void *slob_new_pages(gfp_t gfp, int order, int node)
192{
193	struct page *page;
194
195#ifdef CONFIG_NUMA
196	if (node != NUMA_NO_NODE)
197		page = __alloc_pages_node(node, gfp, order);
198	else
199#endif
200		page = alloc_pages(gfp, order);
201
202	if (!page)
203		return NULL;
204
205	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
206			    PAGE_SIZE << order);
207	return page_address(page);
208}
209
210static void slob_free_pages(void *b, int order)
211{
212	struct page *sp = virt_to_page(b);
213
214	if (current->reclaim_state)
215		current->reclaim_state->reclaimed_slab += 1 << order;
216
217	mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
218			    -(PAGE_SIZE << order));
219	__free_pages(sp, order);
220}
221
222/*
223 * slob_page_alloc() - Allocate a slob block within a given slob_page sp.
224 * @sp: Page to look in.
225 * @size: Size of the allocation.
226 * @align: Allocation alignment.
227 * @align_offset: Offset in the allocated block that will be aligned.
228 * @page_removed_from_list: Return parameter.
229 *
230 * Tries to find a chunk of memory at least @size bytes big within @page.
231 *
232 * Return: Pointer to memory if allocated, %NULL otherwise.  If the
233 *         allocation fills up @page then the page is removed from the
234 *         freelist, in this case @page_removed_from_list will be set to
235 *         true (set to false otherwise).
236 */
237static void *slob_page_alloc(struct slab *sp, size_t size, int align,
238			      int align_offset, bool *page_removed_from_list)
239{
240	slob_t *prev, *cur, *aligned = NULL;
241	int delta = 0, units = SLOB_UNITS(size);
242
243	*page_removed_from_list = false;
244	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
245		slobidx_t avail = slob_units(cur);
246
247		/*
248		 * 'aligned' will hold the address of the slob block so that the
249		 * address 'aligned'+'align_offset' is aligned according to the
250		 * 'align' parameter. This is for kmalloc() which prepends the
251		 * allocated block with its size, so that the block itself is
252		 * aligned when needed.
253		 */
254		if (align) {
255			aligned = (slob_t *)
256				(ALIGN((unsigned long)cur + align_offset, align)
257				 - align_offset);
258			delta = aligned - cur;
259		}
260		if (avail >= units + delta) { /* room enough? */
261			slob_t *next;
262
263			if (delta) { /* need to fragment head to align? */
264				next = slob_next(cur);
265				set_slob(aligned, avail - delta, next);
266				set_slob(cur, delta, aligned);
267				prev = cur;
268				cur = aligned;
269				avail = slob_units(cur);
270			}
271
272			next = slob_next(cur);
273			if (avail == units) { /* exact fit? unlink. */
274				if (prev)
275					set_slob(prev, slob_units(prev), next);
276				else
277					sp->freelist = next;
278			} else { /* fragment */
279				if (prev)
280					set_slob(prev, slob_units(prev), cur + units);
281				else
282					sp->freelist = cur + units;
283				set_slob(cur + units, avail - units, next);
284			}
285
286			sp->units -= units;
287			if (!sp->units) {
288				clear_slob_page_free(sp);
289				*page_removed_from_list = true;
290			}
291			return cur;
292		}
293		if (slob_last(cur))
294			return NULL;
295	}
296}
297
298/*
299 * slob_alloc: entry point into the slob allocator.
300 */
301static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
302							int align_offset)
303{
304	struct folio *folio;
305	struct slab *sp;
306	struct list_head *slob_list;
307	slob_t *b = NULL;
308	unsigned long flags;
309	bool _unused;
310
311	if (size < SLOB_BREAK1)
312		slob_list = &free_slob_small;
313	else if (size < SLOB_BREAK2)
314		slob_list = &free_slob_medium;
315	else
316		slob_list = &free_slob_large;
317
318	spin_lock_irqsave(&slob_lock, flags);
319	/* Iterate through each partially free page, try to find room */
320	list_for_each_entry(sp, slob_list, slab_list) {
321		bool page_removed_from_list = false;
322#ifdef CONFIG_NUMA
323		/*
324		 * If there's a node specification, search for a partial
325		 * page with a matching node id in the freelist.
326		 */
327		if (node != NUMA_NO_NODE && slab_nid(sp) != node)
328			continue;
329#endif
330		/* Enough room on this page? */
331		if (sp->units < SLOB_UNITS(size))
332			continue;
333
334		b = slob_page_alloc(sp, size, align, align_offset, &page_removed_from_list);
335		if (!b)
336			continue;
337
338		/*
339		 * If slob_page_alloc() removed sp from the list then we
340		 * cannot call list functions on sp.  If so allocation
341		 * did not fragment the page anyway so optimisation is
342		 * unnecessary.
343		 */
344		if (!page_removed_from_list) {
345			/*
346			 * Improve fragment distribution and reduce our average
347			 * search time by starting our next search here. (see
348			 * Knuth vol 1, sec 2.5, pg 449)
349			 */
350			if (!list_is_first(&sp->slab_list, slob_list))
351				list_rotate_to_front(&sp->slab_list, slob_list);
352		}
353		break;
354	}
355	spin_unlock_irqrestore(&slob_lock, flags);
356
357	/* Not enough space: must allocate a new page */
358	if (!b) {
359		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
360		if (!b)
361			return NULL;
362		folio = virt_to_folio(b);
363		__folio_set_slab(folio);
364		sp = folio_slab(folio);
365
366		spin_lock_irqsave(&slob_lock, flags);
367		sp->units = SLOB_UNITS(PAGE_SIZE);
368		sp->freelist = b;
369		INIT_LIST_HEAD(&sp->slab_list);
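		/*
		 * The new page starts as one free block spanning the whole
		 * page, with its "next" pointing at the start of the
		 * following page, which is the terminator recognised by
		 * slob_last().
		 */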
370		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
371		set_slob_page_free(sp, slob_list);
372		b = slob_page_alloc(sp, size, align, align_offset, &_unused);
373		BUG_ON(!b);
374		spin_unlock_irqrestore(&slob_lock, flags);
375	}
376	if (unlikely(gfp & __GFP_ZERO))
377		memset(b, 0, size);
378	return b;
379}
380
381/*
382 * slob_free: entry point into the slob allocator.
383 */
384static void slob_free(void *block, int size)
385{
386	struct slab *sp;
387	slob_t *prev, *next, *b = (slob_t *)block;
388	slobidx_t units;
389	unsigned long flags;
390	struct list_head *slob_list;
391
392	if (unlikely(ZERO_OR_NULL_PTR(block)))
393		return;
394	BUG_ON(!size);
395
396	sp = virt_to_slab(block);
397	units = SLOB_UNITS(size);
398
399	spin_lock_irqsave(&slob_lock, flags);
400
401	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
402		/* Go directly to page allocator. Do not pass slob allocator */
403		if (slob_page_free(sp))
404			clear_slob_page_free(sp);
405		spin_unlock_irqrestore(&slob_lock, flags);
406		__folio_clear_slab(slab_folio(sp));
407		slob_free_pages(b, 0);
408		return;
409	}
410
411	if (!slob_page_free(sp)) {
412		/* This slob page is about to become partially free. Easy! */
413		sp->units = units;
414		sp->freelist = b;
415		set_slob(b, units,
416			(void *)((unsigned long)(b +
417					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
418		if (size < SLOB_BREAK1)
419			slob_list = &free_slob_small;
420		else if (size < SLOB_BREAK2)
421			slob_list = &free_slob_medium;
422		else
423			slob_list = &free_slob_large;
424		set_slob_page_free(sp, slob_list);
425		goto out;
426	}
427
428	/*
429	 * Otherwise the page is already partially free, so find reinsertion
430	 * point.
431	 */
432	sp->units += units;
433
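	/*
	 * Insert b into the address-ordered free list, merging it with the
	 * following and/or preceding free block whenever they are
	 * contiguous with it.
	 */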
434	if (b < (slob_t *)sp->freelist) {
435		if (b + units == sp->freelist) {
436			units += slob_units(sp->freelist);
437			sp->freelist = slob_next(sp->freelist);
438		}
439		set_slob(b, units, sp->freelist);
440		sp->freelist = b;
441	} else {
442		prev = sp->freelist;
443		next = slob_next(prev);
444		while (b > next) {
445			prev = next;
446			next = slob_next(prev);
447		}
448
449		if (!slob_last(prev) && b + units == next) {
450			units += slob_units(next);
451			set_slob(b, units, slob_next(next));
452		} else
453			set_slob(b, units, next);
454
455		if (prev + slob_units(prev) == b) {
456			units = slob_units(b) + slob_units(prev);
457			set_slob(prev, units, slob_next(b));
458		} else
459			set_slob(prev, slob_units(prev), b);
460	}
461out:
462	spin_unlock_irqrestore(&slob_lock, flags);
463}
464
465#ifdef CONFIG_PRINTK
466void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
467{
468	kpp->kp_ptr = object;
469	kpp->kp_slab = slab;
470}
471#endif
472
473/*
474 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
475 */
476
477static __always_inline void *
478__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
479{
480	unsigned int *m;
481	unsigned int minalign;
482	void *ret;
483
484	minalign = max_t(unsigned int, ARCH_KMALLOC_MINALIGN,
485			 arch_slab_minalign());
486	gfp &= gfp_allowed_mask;
487
488	might_alloc(gfp);
489
490	if (size < PAGE_SIZE - minalign) {
491		int align = minalign;
492
493		/*
494		 * For power of two sizes, guarantee natural alignment for
495		 * kmalloc()'d objects.
496		 */
497		if (is_power_of_2(size))
498			align = max_t(unsigned int, minalign, size);
499
500		if (!size)
501			return ZERO_SIZE_PTR;
502
503		m = slob_alloc(size + minalign, gfp, align, node, minalign);
504
505		if (!m)
506			return NULL;
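		/*
		 * Record the requested size in the first word of the block;
		 * the pointer handed to the caller starts minalign bytes
		 * later, and kfree()/__ksize() step back by the same offset
		 * to find the size again.
		 */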
507		*m = size;
508		ret = (void *)m + minalign;
509
510		trace_kmalloc(caller, ret, size, size + minalign, gfp, node);
511	} else {
512		unsigned int order = get_order(size);
513
514		if (likely(order))
515			gfp |= __GFP_COMP;
516		ret = slob_new_pages(gfp, order, node);
517
518		trace_kmalloc(caller, ret, size, PAGE_SIZE << order, gfp, node);
519	}
520
521	kmemleak_alloc(ret, size, 1, gfp);
522	return ret;
523}
524
525void *__kmalloc(size_t size, gfp_t gfp)
526{
527	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
528}
529EXPORT_SYMBOL(__kmalloc);
530
531void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
532					int node, unsigned long caller)
533{
534	return __do_kmalloc_node(size, gfp, node, caller);
535}
536EXPORT_SYMBOL(__kmalloc_node_track_caller);
537
538void kfree(const void *block)
539{
540	struct folio *sp;
541
542	trace_kfree(_RET_IP_, block);
543
544	if (unlikely(ZERO_OR_NULL_PTR(block)))
545		return;
546	kmemleak_free(block);
547
548	sp = virt_to_folio(block);
549	if (folio_test_slab(sp)) {
550		unsigned int align = max_t(unsigned int,
551					   ARCH_KMALLOC_MINALIGN,
552					   arch_slab_minalign());
553		unsigned int *m = (unsigned int *)(block - align);
554
555		slob_free(m, *m + align);
556	} else {
557		unsigned int order = folio_order(sp);
558
559		mod_node_page_state(folio_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
560				    -(PAGE_SIZE << order));
561		__free_pages(folio_page(sp, 0), order);
562
563	}
564}
565EXPORT_SYMBOL(kfree);
566
567size_t kmalloc_size_roundup(size_t size)
568{
569	/* Short-circuit the 0 size case. */
570	if (unlikely(size == 0))
571		return 0;
572	/* Short-circuit saturated "too-large" case. */
573	if (unlikely(size == SIZE_MAX))
574		return SIZE_MAX;
575
576	return ALIGN(size, ARCH_KMALLOC_MINALIGN);
577}
578
579EXPORT_SYMBOL(kmalloc_size_roundup);
580
581/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
582size_t __ksize(const void *block)
583{
584	struct folio *folio;
585	unsigned int align;
586	unsigned int *m;
587
588	BUG_ON(!block);
589	if (unlikely(block == ZERO_SIZE_PTR))
590		return 0;
591
592	folio = virt_to_folio(block);
593	if (unlikely(!folio_test_slab(folio)))
594		return folio_size(folio);
595
596	align = max_t(unsigned int, ARCH_KMALLOC_MINALIGN,
597		      arch_slab_minalign());
598	m = (unsigned int *)(block - align);
599	return SLOB_UNITS(*m) * SLOB_UNIT;
600}
601
602int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
603{
604	if (flags & SLAB_TYPESAFE_BY_RCU) {
605		/* leave room for rcu footer at the end of object */
606		c->size += sizeof(struct slob_rcu);
607	}
608
609	/* Actual size allocated */
610	c->size = SLOB_UNITS(c->size) * SLOB_UNIT;
611	c->flags = flags;
612	return 0;
613}
614
615static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
616{
617	void *b;
618
619	flags &= gfp_allowed_mask;
620
621	might_alloc(flags);
622
623	if (c->size < PAGE_SIZE) {
624		b = slob_alloc(c->size, flags, c->align, node, 0);
625		trace_kmem_cache_alloc(_RET_IP_, b, c, flags, node);
626	} else {
627		b = slob_new_pages(flags, get_order(c->size), node);
628		trace_kmem_cache_alloc(_RET_IP_, b, c, flags, node);
629	}
630
631	if (b && c->ctor) {
632		WARN_ON_ONCE(flags & __GFP_ZERO);
633		c->ctor(b);
634	}
635
636	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
637	return b;
638}
639
640void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
641{
642	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
643}
644EXPORT_SYMBOL(kmem_cache_alloc);
645
646
647void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags)
648{
649	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
650}
651EXPORT_SYMBOL(kmem_cache_alloc_lru);
652
653void *__kmalloc_node(size_t size, gfp_t gfp, int node)
654{
655	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
656}
657EXPORT_SYMBOL(__kmalloc_node);
658
659void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
660{
661	return slob_alloc_node(cachep, gfp, node);
662}
663EXPORT_SYMBOL(kmem_cache_alloc_node);
664
665static void __kmem_cache_free(void *b, int size)
666{
667	if (size < PAGE_SIZE)
668		slob_free(b, size);
669	else
670		slob_free_pages(b, get_order(size));
671}
672
673static void kmem_rcu_free(struct rcu_head *head)
674{
675	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
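	/*
	 * The RCU footer was placed at the tail of the object (its size was
	 * reserved in __kmem_cache_create()), so step back by
	 * size - sizeof(struct slob_rcu) bytes to recover the start of the
	 * allocation.
	 */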
676	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
677
678	__kmem_cache_free(b, slob_rcu->size);
679}
680
681void kmem_cache_free(struct kmem_cache *c, void *b)
682{
683	kmemleak_free_recursive(b, c->flags);
684	trace_kmem_cache_free(_RET_IP_, b, c);
685	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
686		struct slob_rcu *slob_rcu;
687		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
688		slob_rcu->size = c->size;
689		call_rcu(&slob_rcu->head, kmem_rcu_free);
690	} else {
691		__kmem_cache_free(b, c->size);
692	}
693}
694EXPORT_SYMBOL(kmem_cache_free);
695
696void kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
697{
698	size_t i;
699
700	for (i = 0; i < nr; i++) {
701		if (s)
702			kmem_cache_free(s, p[i]);
703		else
704			kfree(p[i]);
705	}
706}
707EXPORT_SYMBOL(kmem_cache_free_bulk);
708
709int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
710								void **p)
711{
712	size_t i;
713
714	for (i = 0; i < nr; i++) {
715		void *x = p[i] = kmem_cache_alloc(s, flags);
716
717		if (!x) {
718			kmem_cache_free_bulk(s, i, p);
719			return 0;
720		}
721	}
722	return i;
723}
724EXPORT_SYMBOL(kmem_cache_alloc_bulk);
725
726int __kmem_cache_shutdown(struct kmem_cache *c)
727{
728	/* No way to check for remaining objects */
729	return 0;
730}
731
732void __kmem_cache_release(struct kmem_cache *c)
733{
734}
735
736int __kmem_cache_shrink(struct kmem_cache *d)
737{
738	return 0;
739}
740
741static struct kmem_cache kmem_cache_boot = {
742	.name = "kmem_cache",
743	.size = sizeof(struct kmem_cache),
744	.flags = SLAB_PANIC,
745	.align = ARCH_KMALLOC_MINALIGN,
746};
747
748void __init kmem_cache_init(void)
749{
750	kmem_cache = &kmem_cache_boot;
751	slab_state = UP;
752}
753
754void __init kmem_cache_init_late(void)
755{
756	slab_state = FULL;
757}
mm/slob.c (Linux v4.17)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * SLOB Allocator: Simple List Of Blocks
  4 *
  5 * Matt Mackall <mpm@selenic.com> 12/30/03
  6 *
  7 * NUMA support by Paul Mundt, 2007.
  8 *
  9 * How SLOB works:
 10 *
 11 * The core of SLOB is a traditional K&R style heap allocator, with
 12 * support for returning aligned objects. The granularity of this
 13 * allocator is as little as 2 bytes, however typically most architectures
 14 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 15 *
 16 * The slob heap is a set of linked lists of pages from alloc_pages(),
 17 * and within each page, there is a singly-linked list of free blocks
 18 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 19 * heap pages are segregated into three lists, with objects less than
 20 * 256 bytes, objects less than 1024 bytes, and all other objects.
 21 *
 22 * Allocation from heap involves first searching for a page with
 23 * sufficient free blocks (using a next-fit-like approach) followed by
 24 * a first-fit scan of the page. Deallocation inserts objects back
 25 * into the free list in address order, so this is effectively an
 26 * address-ordered first fit.
 27 *
 28 * Above this is an implementation of kmalloc/kfree. Blocks returned
 29 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 30 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 31 * alloc_pages() directly, allocating compound pages so the page order
 32 * does not have to be separately tracked.
 33 * These objects are detected in kfree() because PageSlab()
 34 * is false for them.
 35 *
 36 * SLAB is emulated on top of SLOB by simply calling constructors and
 37 * destructors for every SLAB allocation. Objects are returned with the
 38 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 39 * case the low-level allocator will fragment blocks to create the proper
 40 * alignment. Again, objects of page-size or greater are allocated by
 41 * calling alloc_pages(). As SLAB objects know their size, no separate
 42 * size bookkeeping is necessary and there is essentially no allocation
 43 * space overhead, and compound pages aren't needed for multi-page
 44 * allocations.
 45 *
 46 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 47 * logic down to the page allocator, and simply doing the node accounting
 48 * on the upper levels. In the event that a node id is explicitly
 49 * provided, __alloc_pages_node() with the specified node id is used
 50 * instead. The common case (or when the node id isn't explicitly provided)
 51 * will default to the current node, as per numa_node_id().
 52 *
 53 * Node aware pages are still inserted into the global freelist, and
 54 * these are scanned for by matching against the node id encoded in the
 55 * page flags. As a result, block allocations that can be satisfied from
 56 * the freelist will only be done so on pages residing on the same node,
 57 * in order to prevent random node placement.
 58 */
 59
 60#include <linux/kernel.h>
 61#include <linux/slab.h>
 62
 63#include <linux/mm.h>
 64#include <linux/swap.h> /* struct reclaim_state */
 65#include <linux/cache.h>
 66#include <linux/init.h>
 67#include <linux/export.h>
 68#include <linux/rcupdate.h>
 69#include <linux/list.h>
 70#include <linux/kmemleak.h>
 71
 72#include <trace/events/kmem.h>
 73
 74#include <linux/atomic.h>
 75
 76#include "slab.h"
 77/*
 78 * slob_block has a field 'units', which indicates size of block if +ve,
 79 * or offset of next block if -ve (in SLOB_UNITs).
 80 *
 81 * Free blocks of size 1 unit simply contain the offset of the next block.
 82 * Those with larger size contain their size in the first SLOB_UNIT of
 83 * memory, and the offset of the next free block in the second SLOB_UNIT.
 84 */
 85#if PAGE_SIZE <= (32767 * 2)
 86typedef s16 slobidx_t;
 87#else
 88typedef s32 slobidx_t;
 89#endif
 90
 91struct slob_block {
 92	slobidx_t units;
 93};
 94typedef struct slob_block slob_t;
 95
 96/*
 97 * All partially free slob pages go on these lists.
 98 */
 99#define SLOB_BREAK1 256
100#define SLOB_BREAK2 1024
101static LIST_HEAD(free_slob_small);
102static LIST_HEAD(free_slob_medium);
103static LIST_HEAD(free_slob_large);
104
105/*
106 * slob_page_free: true for pages on free_slob_pages list.
107 */
108static inline int slob_page_free(struct page *sp)
109{
110	return PageSlobFree(sp);
111}
112
113static void set_slob_page_free(struct page *sp, struct list_head *list)
114{
115	list_add(&sp->lru, list);
116	__SetPageSlobFree(sp);
117}
118
119static inline void clear_slob_page_free(struct page *sp)
120{
121	list_del(&sp->lru);
122	__ClearPageSlobFree(sp);
123}
124
125#define SLOB_UNIT sizeof(slob_t)
126#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
127
128/*
129 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
130 * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
131 * the block using call_rcu.
132 */
133struct slob_rcu {
134	struct rcu_head head;
135	int size;
136};
137
138/*
139 * slob_lock protects all slob allocator structures.
140 */
141static DEFINE_SPINLOCK(slob_lock);
142
143/*
144 * Encode the given size and next info into a free slob block s.
145 */
146static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
147{
148	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
149	slobidx_t offset = next - base;
150
151	if (size > 1) {
152		s[0].units = size;
153		s[1].units = offset;
154	} else
155		s[0].units = -offset;
156}
157
158/*
159 * Return the size of a slob block.
160 */
161static slobidx_t slob_units(slob_t *s)
162{
163	if (s->units > 0)
164		return s->units;
165	return 1;
166}
167
168/*
169 * Return the next free slob block pointer after this one.
170 */
171static slob_t *slob_next(slob_t *s)
172{
173	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
174	slobidx_t next;
175
176	if (s[0].units < 0)
177		next = -s[0].units;
178	else
179		next = s[1].units;
180	return base+next;
181}
182
183/*
184 * Returns true if s is the last free block in its page.
185 */
186static int slob_last(slob_t *s)
187{
188	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
189}
190
191static void *slob_new_pages(gfp_t gfp, int order, int node)
192{
193	void *page;
194
195#ifdef CONFIG_NUMA
196	if (node != NUMA_NO_NODE)
197		page = __alloc_pages_node(node, gfp, order);
198	else
199#endif
200		page = alloc_pages(gfp, order);
201
202	if (!page)
203		return NULL;
204
205	return page_address(page);
206}
207
208static void slob_free_pages(void *b, int order)
209{
210	if (current->reclaim_state)
211		current->reclaim_state->reclaimed_slab += 1 << order;
212	free_pages((unsigned long)b, order);
213}
214
215/*
216 * Allocate a slob block within a given slob_page sp.
217 */
218static void *slob_page_alloc(struct page *sp, size_t size, int align)
219{
220	slob_t *prev, *cur, *aligned = NULL;
221	int delta = 0, units = SLOB_UNITS(size);
222
223	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
224		slobidx_t avail = slob_units(cur);
225
226		if (align) {
227			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
228			delta = aligned - cur;
229		}
230		if (avail >= units + delta) { /* room enough? */
231			slob_t *next;
232
233			if (delta) { /* need to fragment head to align? */
234				next = slob_next(cur);
235				set_slob(aligned, avail - delta, next);
236				set_slob(cur, delta, aligned);
237				prev = cur;
238				cur = aligned;
239				avail = slob_units(cur);
240			}
241
242			next = slob_next(cur);
243			if (avail == units) { /* exact fit? unlink. */
244				if (prev)
245					set_slob(prev, slob_units(prev), next);
246				else
247					sp->freelist = next;
248			} else { /* fragment */
249				if (prev)
250					set_slob(prev, slob_units(prev), cur + units);
251				else
252					sp->freelist = cur + units;
253				set_slob(cur + units, avail - units, next);
254			}
255
256			sp->units -= units;
257			if (!sp->units)
258				clear_slob_page_free(sp);
259			return cur;
260		}
261		if (slob_last(cur))
262			return NULL;
263	}
264}
265
266/*
267 * slob_alloc: entry point into the slob allocator.
268 */
269static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
270{
271	struct page *sp;
272	struct list_head *prev;
273	struct list_head *slob_list;
274	slob_t *b = NULL;
275	unsigned long flags;
276
277	if (size < SLOB_BREAK1)
278		slob_list = &free_slob_small;
279	else if (size < SLOB_BREAK2)
280		slob_list = &free_slob_medium;
281	else
282		slob_list = &free_slob_large;
283
284	spin_lock_irqsave(&slob_lock, flags);
285	/* Iterate through each partially free page, try to find room */
286	list_for_each_entry(sp, slob_list, lru) {
287#ifdef CONFIG_NUMA
288		/*
289		 * If there's a node specification, search for a partial
290		 * page with a matching node id in the freelist.
291		 */
292		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
293			continue;
294#endif
295		/* Enough room on this page? */
296		if (sp->units < SLOB_UNITS(size))
297			continue;
298
299		/* Attempt to alloc */
300		prev = sp->lru.prev;
301		b = slob_page_alloc(sp, size, align);
302		if (!b)
303			continue;
304
305		/* Improve fragment distribution and reduce our average
306		 * search time by starting our next search here. (see
307		 * Knuth vol 1, sec 2.5, pg 449) */
308		if (prev != slob_list->prev &&
309				slob_list->next != prev->next)
310			list_move_tail(slob_list, prev->next);
311		break;
312	}
313	spin_unlock_irqrestore(&slob_lock, flags);
314
315	/* Not enough space: must allocate a new page */
316	if (!b) {
317		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
318		if (!b)
319			return NULL;
320		sp = virt_to_page(b);
321		__SetPageSlab(sp);
322
323		spin_lock_irqsave(&slob_lock, flags);
324		sp->units = SLOB_UNITS(PAGE_SIZE);
325		sp->freelist = b;
326		INIT_LIST_HEAD(&sp->lru);
327		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
328		set_slob_page_free(sp, slob_list);
329		b = slob_page_alloc(sp, size, align);
330		BUG_ON(!b);
331		spin_unlock_irqrestore(&slob_lock, flags);
332	}
333	if (unlikely(gfp & __GFP_ZERO))
334		memset(b, 0, size);
335	return b;
336}
337
338/*
339 * slob_free: entry point into the slob allocator.
340 */
341static void slob_free(void *block, int size)
342{
343	struct page *sp;
344	slob_t *prev, *next, *b = (slob_t *)block;
345	slobidx_t units;
346	unsigned long flags;
347	struct list_head *slob_list;
348
349	if (unlikely(ZERO_OR_NULL_PTR(block)))
350		return;
351	BUG_ON(!size);
352
353	sp = virt_to_page(block);
354	units = SLOB_UNITS(size);
355
356	spin_lock_irqsave(&slob_lock, flags);
357
358	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
359		/* Go directly to page allocator. Do not pass slob allocator */
360		if (slob_page_free(sp))
361			clear_slob_page_free(sp);
362		spin_unlock_irqrestore(&slob_lock, flags);
363		__ClearPageSlab(sp);
364		page_mapcount_reset(sp);
365		slob_free_pages(b, 0);
366		return;
367	}
368
369	if (!slob_page_free(sp)) {
370		/* This slob page is about to become partially free. Easy! */
371		sp->units = units;
372		sp->freelist = b;
373		set_slob(b, units,
374			(void *)((unsigned long)(b +
375					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
376		if (size < SLOB_BREAK1)
377			slob_list = &free_slob_small;
378		else if (size < SLOB_BREAK2)
379			slob_list = &free_slob_medium;
380		else
381			slob_list = &free_slob_large;
382		set_slob_page_free(sp, slob_list);
383		goto out;
384	}
385
386	/*
387	 * Otherwise the page is already partially free, so find reinsertion
388	 * point.
389	 */
390	sp->units += units;
391
392	if (b < (slob_t *)sp->freelist) {
393		if (b + units == sp->freelist) {
394			units += slob_units(sp->freelist);
395			sp->freelist = slob_next(sp->freelist);
396		}
397		set_slob(b, units, sp->freelist);
398		sp->freelist = b;
399	} else {
400		prev = sp->freelist;
401		next = slob_next(prev);
402		while (b > next) {
403			prev = next;
404			next = slob_next(prev);
405		}
406
407		if (!slob_last(prev) && b + units == next) {
408			units += slob_units(next);
409			set_slob(b, units, slob_next(next));
410		} else
411			set_slob(b, units, next);
412
413		if (prev + slob_units(prev) == b) {
414			units = slob_units(b) + slob_units(prev);
415			set_slob(prev, units, slob_next(b));
416		} else
417			set_slob(prev, slob_units(prev), b);
418	}
419out:
420	spin_unlock_irqrestore(&slob_lock, flags);
421}
422
423/*
424 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
425 */
426
427static __always_inline void *
428__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
429{
430	unsigned int *m;
431	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
432	void *ret;
433
434	gfp &= gfp_allowed_mask;
435
436	fs_reclaim_acquire(gfp);
437	fs_reclaim_release(gfp);
438
439	if (size < PAGE_SIZE - align) {
440		if (!size)
441			return ZERO_SIZE_PTR;
442
443		m = slob_alloc(size + align, gfp, align, node);
444
445		if (!m)
446			return NULL;
447		*m = size;
448		ret = (void *)m + align;
449
450		trace_kmalloc_node(caller, ret,
451				   size, size + align, gfp, node);
452	} else {
453		unsigned int order = get_order(size);
454
455		if (likely(order))
456			gfp |= __GFP_COMP;
457		ret = slob_new_pages(gfp, order, node);
458
459		trace_kmalloc_node(caller, ret,
460				   size, PAGE_SIZE << order, gfp, node);
461	}
462
463	kmemleak_alloc(ret, size, 1, gfp);
464	return ret;
465}
466
467void *__kmalloc(size_t size, gfp_t gfp)
468{
469	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
470}
471EXPORT_SYMBOL(__kmalloc);
472
473void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
474{
475	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
476}
477
478#ifdef CONFIG_NUMA
479void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
480					int node, unsigned long caller)
481{
482	return __do_kmalloc_node(size, gfp, node, caller);
483}
484#endif
485
486void kfree(const void *block)
487{
488	struct page *sp;
489
490	trace_kfree(_RET_IP_, block);
491
492	if (unlikely(ZERO_OR_NULL_PTR(block)))
493		return;
494	kmemleak_free(block);
495
496	sp = virt_to_page(block);
497	if (PageSlab(sp)) {
498		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
499		unsigned int *m = (unsigned int *)(block - align);
500		slob_free(m, *m + align);
501	} else
502		__free_pages(sp, compound_order(sp));
503}
504EXPORT_SYMBOL(kfree);
505
506/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
507size_t ksize(const void *block)
508{
509	struct page *sp;
510	int align;
511	unsigned int *m;
512
513	BUG_ON(!block);
514	if (unlikely(block == ZERO_SIZE_PTR))
515		return 0;
516
517	sp = virt_to_page(block);
518	if (unlikely(!PageSlab(sp)))
519		return PAGE_SIZE << compound_order(sp);
520
521	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
522	m = (unsigned int *)(block - align);
523	return SLOB_UNITS(*m) * SLOB_UNIT;
524}
525EXPORT_SYMBOL(ksize);
526
527int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
528{
529	if (flags & SLAB_TYPESAFE_BY_RCU) {
530		/* leave room for rcu footer at the end of object */
531		c->size += sizeof(struct slob_rcu);
532	}
533	c->flags = flags;
534	return 0;
535}
536
537static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
538{
539	void *b;
540
541	flags &= gfp_allowed_mask;
542
543	fs_reclaim_acquire(flags);
544	fs_reclaim_release(flags);
545
546	if (c->size < PAGE_SIZE) {
547		b = slob_alloc(c->size, flags, c->align, node);
548		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
549					    SLOB_UNITS(c->size) * SLOB_UNIT,
550					    flags, node);
551	} else {
552		b = slob_new_pages(flags, get_order(c->size), node);
553		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
554					    PAGE_SIZE << get_order(c->size),
555					    flags, node);
556	}
557
558	if (b && c->ctor)
559		c->ctor(b);
560
561	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
562	return b;
563}
564
565void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
566{
567	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
568}
569EXPORT_SYMBOL(kmem_cache_alloc);
570
571#ifdef CONFIG_NUMA
572void *__kmalloc_node(size_t size, gfp_t gfp, int node)
573{
574	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
575}
576EXPORT_SYMBOL(__kmalloc_node);
577
578void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
579{
580	return slob_alloc_node(cachep, gfp, node);
581}
582EXPORT_SYMBOL(kmem_cache_alloc_node);
583#endif
584
585static void __kmem_cache_free(void *b, int size)
586{
587	if (size < PAGE_SIZE)
588		slob_free(b, size);
589	else
590		slob_free_pages(b, get_order(size));
591}
592
593static void kmem_rcu_free(struct rcu_head *head)
594{
595	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
596	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
597
598	__kmem_cache_free(b, slob_rcu->size);
599}
600
601void kmem_cache_free(struct kmem_cache *c, void *b)
602{
603	kmemleak_free_recursive(b, c->flags);
604	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
605		struct slob_rcu *slob_rcu;
606		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
607		slob_rcu->size = c->size;
608		call_rcu(&slob_rcu->head, kmem_rcu_free);
609	} else {
610		__kmem_cache_free(b, c->size);
611	}
612
613	trace_kmem_cache_free(_RET_IP_, b);
614}
615EXPORT_SYMBOL(kmem_cache_free);
616
617void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
618{
619	__kmem_cache_free_bulk(s, size, p);
620}
621EXPORT_SYMBOL(kmem_cache_free_bulk);
622
623int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
624								void **p)
625{
626	return __kmem_cache_alloc_bulk(s, flags, size, p);
627}
628EXPORT_SYMBOL(kmem_cache_alloc_bulk);
629
630int __kmem_cache_shutdown(struct kmem_cache *c)
631{
632	/* No way to check for remaining objects */
633	return 0;
634}
635
636void __kmem_cache_release(struct kmem_cache *c)
637{
638}
639
640int __kmem_cache_shrink(struct kmem_cache *d)
641{
642	return 0;
643}
644
645struct kmem_cache kmem_cache_boot = {
646	.name = "kmem_cache",
647	.size = sizeof(struct kmem_cache),
648	.flags = SLAB_PANIC,
649	.align = ARCH_KMALLOC_MINALIGN,
650};
651
652void __init kmem_cache_init(void)
653{
654	kmem_cache = &kmem_cache_boot;
655	slab_state = UP;
656}
657
658void __init kmem_cache_init_late(void)
659{
660	slab_state = FULL;
661}